convert from svn repository: remove tags directory
[lttv.git] / trunk / verif / nico-md-merge / pan.c
1 /*** Generated by Spin Version 5.1.6 -- 9 May 2008 ***/
2 /*** From source: pan.spin ***/
3
4 #ifdef SC
5 #define _FILE_OFFSET_BITS 64
6 #endif
7 #include <stdio.h>
8 #include <signal.h>
9 #include <stdlib.h>
10 #include <stdarg.h>
11 #include <string.h>
12 #include <ctype.h>
13 #include <errno.h>
14 #if defined(WIN32) || defined(WIN64)
15 #include <time.h>
16 #else
17 #include <unistd.h>
18 #include <sys/times.h>
19 #endif
20 #include <sys/types.h>
21 #include <sys/stat.h>
22 #include <fcntl.h>
23 #define Offsetof(X, Y) ((unsigned long)(&(((X *)0)->Y)))
24 #ifndef max
25 #define max(a,b) (((a)<(b)) ? (b) : (a))
26 #endif
27 #ifndef PRINTF
28 int Printf(const char *fmt, ...); /* prototype only */
29 #endif
30 #include "pan.h"
31 #ifdef LOOPSTATE
32 double cnt_loops;
33 #endif
34 State A_Root; /* seed-state for cycles */
35 State now; /* the full state-vector */
36 #undef C_States
37 #if defined(C_States) && defined(HAS_TRACK)
38 void
39 c_update(uchar *p_t_r)
40 {
41 #ifdef VERBOSE
42 printf("c_update %u\n", p_t_r);
43 #endif
44 }
45 void
46 c_revert(uchar *p_t_r)
47 {
48 #ifdef VERBOSE
49 printf("c_revert %u\n", p_t_r);
50 #endif
51 }
52 #endif
53 void
54 globinit(void)
55 {
56 }
57 void
58 locinit5(int h)
59 {
60 }
61 void
62 locinit4(int h)
63 {
64 }
65 void
66 locinit3(int h)
67 {
68 }
69 void
70 locinit2(int h)
71 {
72 }
73 void
74 locinit1(int h)
75 {
76 }
77 void
78 locinit0(int h)
79 {
80 }
81 #ifdef CNTRSTACK
82 #define onstack_now() (LL[trpt->j6] && LL[trpt->j7])
83 #define onstack_put() LL[trpt->j6]++; LL[trpt->j7]++
84 #define onstack_zap() LL[trpt->j6]--; LL[trpt->j7]--
85 #endif
86 #if !defined(SAFETY) && !defined(NOCOMP)
87 #define V_A (((now._a_t&1)?2:1) << (now._a_t&2))
88 #define A_V (((now._a_t&1)?1:2) << (now._a_t&2))
89 int S_A = 0;
90 #else
91 #define V_A 0
92 #define A_V 0
93 #define S_A 0
94 #endif
95 #ifdef MA
96 #undef onstack_now
97 #undef onstack_put
98 #undef onstack_zap
99 #define onstack_put() ;
100 #define onstack_zap() gstore((char *) &now, vsize, 4)
101 #else
102 #if defined(FULLSTACK) && !defined(BITSTATE)
103 #define onstack_put() trpt->ostate = Lstate
104 #define onstack_zap() { \
105 if (trpt->ostate) \
106 trpt->ostate->tagged = \
107 (S_A)? (trpt->ostate->tagged&~V_A) : 0; \
108 }
109 #endif
110 #endif
111 #ifndef NO_V_PROVISO
112 #define V_PROVISO
113 #endif
114 #if !defined(NO_RESIZE) && !defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(SPACE) && NCORE==1
115 #define AUTO_RESIZE
116 #endif
117
118 struct H_el {
119 struct H_el *nxt;
120 #ifdef FULLSTACK
121 unsigned int tagged;
122 #if defined(BITSTATE) && !defined(NOREDUCE) && !defined(SAFETY)
123 unsigned int proviso;
124 #endif
125 #endif
126 #if defined(CHECK) || (defined(COLLAPSE) && !defined(FULLSTACK))
127 unsigned long st_id;
128 #endif
129 #if !defined(SAFETY) || defined(REACH)
130 unsigned int D;
131 #endif
132 #if NCORE>1
133 /* could cost 1 extra word: 4 bytes if 32-bit and 8 bytes if 64-bit */
134 #ifdef V_PROVISO
135 uchar cpu_id; /* id of cpu that created the state */
136 #endif
137 #endif
138 #ifdef COLLAPSE
139 #if VECTORSZ<65536
140 unsigned short ln;
141 #else
142 unsigned long ln;
143 #endif
144 #endif
145 #if defined(AUTO_RESIZE) && !defined(BITSTATE)
146 unsigned long m_K1;
147 #endif
148 unsigned long state;
149 } **H_tab, **S_Tab;
150
151 typedef struct Trail {
152 int st; /* current state */
153 uchar pr; /* process id */
154 uchar tau; /* 8 bit-flags */
155 uchar o_pm; /* 8 more bit-flags */
156 #if 0
157 Meaning of bit-flags:
158 tau&1 -> timeout enabled
159 tau&2 -> request to enable timeout 1 level up (in claim)
160 tau&4 -> current transition is a claim move
161 tau&8 -> current transition is an atomic move
162 tau&16 -> last move was truncated on stack
163 tau&32 -> current transition is a preselected move
164 tau&64 -> at least one next state is not on the stack
165 tau&128 -> current transition is a stutter move
166 o_pm&1 -> the current pid moved -- implements else
167 o_pm&2 -> this is an acceptance state
168 o_pm&4 -> this is a progress state
169 o_pm&8 -> fairness alg rule 1 undo mark
170 o_pm&16 -> fairness alg rule 3 undo mark
171 o_pm&32 -> fairness alg rule 2 undo mark
172 o_pm&64 -> the current proc applied rule2
173 o_pm&128 -> a fairness, dummy move - all procs blocked
174 #endif
175 #ifdef NSUCC
176 uchar n_succ; /* nr of successor states */
177 #endif
178 #if defined(FULLSTACK) && defined(MA) && !defined(BFS)
179 uchar proviso;
180 #endif
181 #ifndef BFS
182 uchar o_n, o_ot; /* to save locals */
183 #endif
184 uchar o_m;
185 #ifdef EVENT_TRACE
186 #if nstates_event<256
187 uchar o_event;
188 #else
189 unsigned short o_event;
190 #endif
191 #endif
192 int o_tt;
193 #ifndef BFS
194 short o_To;
195 #ifdef RANDOMIZE
196 short oo_i;
197 #endif
198 #endif
199 #if defined(HAS_UNLESS) && !defined(BFS)
200 int e_state; /* if escape trans - state of origin */
201 #endif
202 #if (defined(FULLSTACK) && !defined(MA)) || defined(BFS) || (NCORE>1)
203 struct H_el *ostate; /* pointer to stored state */
204 #endif
205 #if defined(CNTRSTACK) && !defined(BFS)
206 long j6, j7;
207 #endif
208 Trans *o_t;
209 #ifdef SCHED
210 /* based on Qadeer&Rehof, Tacas 2005, LNCS 3440, pp. 93-107 */
211 #if NCORE>1
212 #error "-DSCHED cannot be combined with -DNCORE (yet)"
213 #endif
214 int sched_limit;
215 #endif
216 #ifdef HAS_SORTED
217 short ipt;
218 #endif
219 union {
220 int oval;
221 int *ovals;
222 } bup;
223 } Trail;
224 Trail *trail, *trpt;
225 FILE *efd;
226 uchar *this;
227 long maxdepth=10000;
228 long omaxdepth=10000;
229 #ifdef SCHED
230 int sched_max = 10;
231 #endif
232 #ifdef PERMUTED
233 uchar permuted = 1;
234 #else
235 uchar permuted = 0;
236 #endif
237 double quota; /* time limit */
238 #if NCORE>1
239 long z_handoff = -1;
240 #endif
241 #ifdef SC
242 char *stackfile;
243 #endif
244 uchar *SS, *LL;
245 uchar HASH_NR = 0;
246
247 double memcnt = (double) 0;
248 double memlim = (double) (1<<30); /* 1 GB */
249 #if NCORE>1
250 double mem_reserved = (double) 0;
251 #endif
252
253 /* for emalloc: */
254 static char *have;
255 static long left = 0L;
256 static double fragment = (double) 0;
257 static unsigned long grow;
258
259 unsigned int HASH_CONST[] = {
260 /* asuming 4 bytes per int */
261 0x88888EEF, 0x00400007,
262 0x04c11db7, 0x100d4e63,
263 0x0fc22f87, 0x3ff0c3ff,
264 0x38e84cd7, 0x02b148e9,
265 0x98b2e49d, 0xb616d379,
266 0xa5247fd9, 0xbae92a15,
267 0xb91c8bc5, 0x8e5880f3,
268 0xacd7c069, 0xb4c44bb3,
269 0x2ead1fb7, 0x8e428171,
270 0xdbebd459, 0x828ae611,
271 0x6cb25933, 0x86cdd651,
272 0x9e8f5f21, 0xd5f8d8e7,
273 0x9c4e956f, 0xb5cf2c71,
274 0x2e805a6d, 0x33fc3a55,
275 0xaf203ed1, 0xe31f5909,
276 0x5276db35, 0x0c565ef7,
277 0x273d1aa5, 0x8923b1dd,
278 0
279 };
280 #if NCORE>1
281 extern int core_id;
282 #endif
283 long mreached=0;
284 int done=0, errors=0, Nrun=1;
285 int c_init_done=0;
286 char *c_stack_start = (char *) 0;
287 double nstates=0, nlinks=0, truncs=0, truncs2=0;
288 double nlost=0, nShadow=0, hcmp=0, ngrabs=0;
289 #if defined(ZAPH) && defined(BITSTATE)
290 double zstates = 0;
291 #endif
292 int c_init_run;
293 #ifdef BFS
294 double midrv=0, failedrv=0, revrv=0;
295 #endif
296 unsigned long nr_states=0; /* nodes in DFA */
297 long Fa=0, Fh=0, Zh=0, Zn=0;
298 long PUT=0, PROBE=0, ZAPS=0;
299 long Ccheck=0, Cholds=0;
300 int a_cycles=0, upto=1, strict=0, verbose = 0, signoff = 0;
301 #ifdef HAS_CODE
302 int gui = 0, coltrace = 0, readtrail = 0;
303 int whichtrail = 0, onlyproc = -1, silent = 0;
304 #endif
305 int state_tables=0, fairness=0, no_rck=0, Nr_Trails=0;
306 char simvals[128];
307 #ifndef INLINE
308 int TstOnly=0;
309 #endif
310 unsigned long mask, nmask;
311 #ifdef BITSTATE
312 int ssize=23; /* 1 Mb */
313 #else
314 int ssize=19; /* 512K slots */
315 #endif
316 int hmax=0, svmax=0, smax=0;
317 int Maxbody=0, XX;
318 uchar *noptr; /* used by macro Pptr(x) */
319 #ifdef VAR_RANGES
320 void logval(char *, int);
321 void dumpranges(void);
322 #endif
323 #ifdef MA
324 #define INLINE_REV
325 extern void dfa_init(unsigned short);
326 extern int dfa_member(unsigned long);
327 extern int dfa_store(uchar *);
328 unsigned int maxgs = 0;
329 #endif
330
331 #ifdef ALIGNED
332 State comp_now __attribute__ ((aligned (8)));
333 /* gcc 64-bit aligned for Itanium2 systems */
334 /* MAJOR runtime penalty if not used on those systems */
335 #else
336 State comp_now; /* compressed state vector */
337 #endif
338
339 State comp_msk;
340 uchar *Mask = (uchar *) &comp_msk;
341 #ifdef COLLAPSE
342 State comp_tmp;
343 static char *scratch = (char *) &comp_tmp;
344 #endif
345 Stack *stack; /* for queues, processes */
346 Svtack *svtack; /* for old state vectors */
347 #ifdef BITSTATE
348 static unsigned int hfns = 3; /* new default */
349 #endif
350 static unsigned long j1;
351 static unsigned long K1, K2;
352 static unsigned long j2, j3, j4;
353 #ifdef BITSTATE
354 static long udmem;
355 #endif
356 static long A_depth = 0;
357 long depth = 0;
358 #if NCORE>1
359 long nr_handoffs = 0;
360 #endif
361 static uchar warned = 0, iterative = 0, exclusive = 0, like_java = 0, every_error = 0;
362 static uchar noasserts = 0, noends = 0, bounded = 0;
363 #if SYNC>0 && ASYNC==0
364 void set_recvs(void);
365 int no_recvs(int);
366 #endif
367 #if SYNC
368 #define IfNotBlocked if (boq != -1) continue;
369 #define UnBlock boq = -1
370 #else
371 #define IfNotBlocked /* cannot block */
372 #define UnBlock /* don't bother */
373 #endif
374
375 #ifdef BITSTATE
376 int (*bstore)(char *, int);
377 int bstore_reg(char *, int);
378 int bstore_mod(char *, int);
379 #endif
380 void active_procs(void);
381 void cleanup(void);
382 void do_the_search(void);
383 void find_shorter(int);
384 void iniglobals(void);
385 void stopped(int);
386 void wrapup(void);
387 int *grab_ints(int);
388 void ungrab_ints(int *, int);
389 #ifndef NOBOUNDCHECK
390 #define Index(x, y) Boundcheck(x, y, II, tt, t)
391 #else
392 #define Index(x, y) x
393 #endif
394 short Air[] = { (short) Air0, (short) Air1, (short) Air2, (short) Air3, (short) Air4, (short) Air5, (short) Air6 };
395 int
396 addproc(int n)
397 { int j, h = now._nr_pr;
398 #ifndef NOCOMP
399 int k;
400 #endif
401 uchar *o_this = this;
402
403 #ifndef INLINE
404 if (TstOnly) return (h < MAXPROC);
405 #endif
406 #ifndef NOBOUNDCHECK
407 /* redefine Index only within this procedure */
408 #undef Index
409 #define Index(x, y) Boundcheck(x, y, 0, 0, 0)
410 #endif
411 if (h >= MAXPROC)
412 Uerror("too many processes");
413 switch (n) {
414 case 0: j = sizeof(P0); break;
415 case 1: j = sizeof(P1); break;
416 case 2: j = sizeof(P2); break;
417 case 3: j = sizeof(P3); break;
418 case 4: j = sizeof(P4); break;
419 case 5: j = sizeof(P5); break;
420 case 6: j = sizeof(P6); break;
421 default: Uerror("bad proc - addproc");
422 }
423 if (vsize%WS)
424 proc_skip[h] = WS-(vsize%WS);
425 else
426 proc_skip[h] = 0;
427 #ifndef NOCOMP
428 for (k = vsize + (int) proc_skip[h]; k > vsize; k--)
429 Mask[k-1] = 1; /* align */
430 #endif
431 vsize += (int) proc_skip[h];
432 proc_offset[h] = vsize;
433 #ifdef SVDUMP
434 if (vprefix > 0)
435 { int dummy = 0;
436 write(svfd, (uchar *) &dummy, sizeof(int)); /* mark */
437 write(svfd, (uchar *) &h, sizeof(int));
438 write(svfd, (uchar *) &n, sizeof(int));
439 #if VECTORSZ>32000
440 write(svfd, (uchar *) &proc_offset[h], sizeof(int));
441 #else
442 write(svfd, (uchar *) &proc_offset[h], sizeof(short));
443 #endif
444 write(svfd, (uchar *) &now, vprefix-4*sizeof(int)); /* padd */
445 }
446 #endif
447 now._nr_pr += 1;
448 if (fairness && ((int) now._nr_pr + 1 >= (8*NFAIR)/2))
449 { printf("pan: error: too many processes -- current");
450 printf(" max is %d procs (-DNFAIR=%d)\n",
451 (8*NFAIR)/2 - 2, NFAIR);
452 printf("\trecompile with -DNFAIR=%d\n",
453 NFAIR+1);
454 pan_exit(1);
455 }
456 vsize += j;
457 #ifndef NOVSZ
458 now._vsz = vsize;
459 #endif
460 #ifndef NOCOMP
461 for (k = 1; k <= Air[n]; k++)
462 Mask[vsize - k] = 1; /* pad */
463 Mask[vsize-j] = 1; /* _pid */
464 #endif
465 hmax = max(hmax, vsize);
466 if (vsize >= VECTORSZ)
467 { printf("pan: error, VECTORSZ too small, recompile pan.c");
468 printf(" with -DVECTORSZ=N with N>%d\n", (int) vsize);
469 Uerror("aborting");
470 }
471 memset((char *)pptr(h), 0, j);
472 this = pptr(h);
473 if (BASE > 0 && h > 0)
474 ((P0 *)this)->_pid = h-BASE;
475 else
476 ((P0 *)this)->_pid = h;
477 switch (n) {
478 case 6: /* np_ */
479 ((P6 *)pptr(h))->_t = 6;
480 ((P6 *)pptr(h))->_p = 0;
481 reached6[0] = 1;
482 accpstate[6][1] = 1;
483 break;
484 case 5: /* :never: */
485 ((P5 *)pptr(h))->_t = 5;
486 ((P5 *)pptr(h))->_p = 5; reached5[5]=1;
487 /* params: */
488 /* locals: */
489 #ifdef VAR_RANGES
490 #endif
491 #ifdef HAS_CODE
492 locinit5(h);
493 #endif
494 break;
495 case 4: /* :init: */
496 ((P4 *)pptr(h))->_t = 4;
497 ((P4 *)pptr(h))->_p = 42; reached4[42]=1;
498 /* params: */
499 /* locals: */
500 ((P4 *)pptr(h))->i = 0;
501 ((P4 *)pptr(h))->j = 0;
502 ((P4 *)pptr(h))->sum = 0;
503 ((P4 *)pptr(h))->commit_sum = 0;
504 #ifdef VAR_RANGES
505 logval(":init::i", ((P4 *)pptr(h))->i);
506 logval(":init::j", ((P4 *)pptr(h))->j);
507 logval(":init::sum", ((P4 *)pptr(h))->sum);
508 logval(":init::commit_sum", ((P4 *)pptr(h))->commit_sum);
509 #endif
510 #ifdef HAS_CODE
511 locinit4(h);
512 #endif
513 break;
514 case 3: /* cleaner */
515 ((P3 *)pptr(h))->_t = 3;
516 ((P3 *)pptr(h))->_p = 8; reached3[8]=1;
517 /* params: */
518 /* locals: */
519 #ifdef VAR_RANGES
520 #endif
521 #ifdef HAS_CODE
522 locinit3(h);
523 #endif
524 break;
525 case 2: /* reader */
526 ((P2 *)pptr(h))->_t = 2;
527 ((P2 *)pptr(h))->_p = 26; reached2[26]=1;
528 /* params: */
529 /* locals: */
530 ((P2 *)pptr(h))->i = 0;
531 ((P2 *)pptr(h))->j = 0;
532 #ifdef VAR_RANGES
533 logval("reader:i", ((P2 *)pptr(h))->i);
534 logval("reader:j", ((P2 *)pptr(h))->j);
535 #endif
536 #ifdef HAS_CODE
537 locinit2(h);
538 #endif
539 break;
540 case 1: /* tracer */
541 ((P1 *)pptr(h))->_t = 1;
542 ((P1 *)pptr(h))->_p = 3; reached1[3]=1;
543 /* params: */
544 /* locals: */
545 ((P1 *)pptr(h))->size = 1;
546 ((P1 *)pptr(h))->prev_off = 0;
547 ((P1 *)pptr(h))->new_off = 0;
548 ((P1 *)pptr(h))->tmp_commit = 0;
549 ((P1 *)pptr(h))->i = 0;
550 ((P1 *)pptr(h))->j = 0;
551 #ifdef VAR_RANGES
552 logval("tracer:size", ((P1 *)pptr(h))->size);
553 logval("tracer:prev_off", ((P1 *)pptr(h))->prev_off);
554 logval("tracer:new_off", ((P1 *)pptr(h))->new_off);
555 logval("tracer:tmp_commit", ((P1 *)pptr(h))->tmp_commit);
556 logval("tracer:i", ((P1 *)pptr(h))->i);
557 logval("tracer:j", ((P1 *)pptr(h))->j);
558 #endif
559 #ifdef HAS_CODE
560 locinit1(h);
561 #endif
562 break;
563 case 0: /* switcher */
564 ((P0 *)pptr(h))->_t = 0;
565 ((P0 *)pptr(h))->_p = 11; reached0[11]=1;
566 /* params: */
567 /* locals: */
568 ((P0 *)pptr(h))->prev_off = 0;
569 ((P0 *)pptr(h))->new_off = 0;
570 ((P0 *)pptr(h))->tmp_commit = 0;
571 ((P0 *)pptr(h))->size = 0;
572 #ifdef VAR_RANGES
573 logval("switcher:prev_off", ((P0 *)pptr(h))->prev_off);
574 logval("switcher:new_off", ((P0 *)pptr(h))->new_off);
575 logval("switcher:tmp_commit", ((P0 *)pptr(h))->tmp_commit);
576 logval("switcher:size", ((P0 *)pptr(h))->size);
577 #endif
578 #ifdef HAS_CODE
579 locinit0(h);
580 #endif
581 break;
582 }
583 this = o_this;
584 return h-BASE;
585 #ifndef NOBOUNDCHECK
586 #undef Index
587 #define Index(x, y) Boundcheck(x, y, II, tt, t)
588 #endif
589 }
590
591 #if defined(BITSTATE) && defined(COLLAPSE)
592 /* just to allow compilation, to generate the error */
593 long col_p(int i, char *z) { return 0; }
594 long col_q(int i, char *z) { return 0; }
595 #endif
596 #ifndef BITSTATE
597 #ifdef COLLAPSE
598 long
599 col_p(int i, char *z)
600 { int j, k; unsigned long ordinal(char *, long, short);
601 char *x, *y;
602 P0 *ptr = (P0 *) pptr(i);
603 switch (ptr->_t) {
604 case 0: j = sizeof(P0); break;
605 case 1: j = sizeof(P1); break;
606 case 2: j = sizeof(P2); break;
607 case 3: j = sizeof(P3); break;
608 case 4: j = sizeof(P4); break;
609 case 5: j = sizeof(P5); break;
610 case 6: j = sizeof(P6); break;
611 default: Uerror("bad proctype - collapse");
612 }
613 if (z) x = z; else x = scratch;
614 y = (char *) ptr; k = proc_offset[i];
615 for ( ; j > 0; j--, y++)
616 if (!Mask[k++]) *x++ = *y;
617 for (j = 0; j < WS-1; j++)
618 *x++ = 0;
619 x -= j;
620 if (z) return (long) (x - z);
621 return ordinal(scratch, x-scratch, (short) (2+ptr->_t));
622 }
623 #endif
624 #endif
625 void
626 run(void)
627 { /* int i; */
628 memset((char *)&now, 0, sizeof(State));
629 vsize = (unsigned long) (sizeof(State) - VECTORSZ);
630 #ifndef NOVSZ
631 now._vsz = vsize;
632 #endif
633 /* optional provisioning statements, e.g. to */
634 /* set hidden variables, used as constants */
635 #ifdef PROV
636 #include PROV
637 #endif
638 settable();
639 Maxbody = max(Maxbody, ((int) sizeof(P0)));
640 Maxbody = max(Maxbody, ((int) sizeof(P1)));
641 Maxbody = max(Maxbody, ((int) sizeof(P2)));
642 Maxbody = max(Maxbody, ((int) sizeof(P3)));
643 Maxbody = max(Maxbody, ((int) sizeof(P4)));
644 Maxbody = max(Maxbody, ((int) sizeof(P5)));
645 Maxbody = max(Maxbody, ((int) sizeof(P6)));
646 reached[0] = reached0;
647 reached[1] = reached1;
648 reached[2] = reached2;
649 reached[3] = reached3;
650 reached[4] = reached4;
651 reached[5] = reached5;
652 reached[6] = reached6;
653 accpstate[0] = (uchar *) emalloc(nstates0);
654 accpstate[1] = (uchar *) emalloc(nstates1);
655 accpstate[2] = (uchar *) emalloc(nstates2);
656 accpstate[3] = (uchar *) emalloc(nstates3);
657 accpstate[4] = (uchar *) emalloc(nstates4);
658 accpstate[5] = (uchar *) emalloc(nstates5);
659 accpstate[6] = (uchar *) emalloc(nstates6);
660 progstate[0] = (uchar *) emalloc(nstates0);
661 progstate[1] = (uchar *) emalloc(nstates1);
662 progstate[2] = (uchar *) emalloc(nstates2);
663 progstate[3] = (uchar *) emalloc(nstates3);
664 progstate[4] = (uchar *) emalloc(nstates4);
665 progstate[5] = (uchar *) emalloc(nstates5);
666 progstate[6] = (uchar *) emalloc(nstates6);
667 loopstate0 = loopstate[0] = (uchar *) emalloc(nstates0);
668 loopstate1 = loopstate[1] = (uchar *) emalloc(nstates1);
669 loopstate2 = loopstate[2] = (uchar *) emalloc(nstates2);
670 loopstate3 = loopstate[3] = (uchar *) emalloc(nstates3);
671 loopstate4 = loopstate[4] = (uchar *) emalloc(nstates4);
672 loopstate5 = loopstate[5] = (uchar *) emalloc(nstates5);
673 loopstate6 = loopstate[6] = (uchar *) emalloc(nstates6);
674 stopstate[0] = (uchar *) emalloc(nstates0);
675 stopstate[1] = (uchar *) emalloc(nstates1);
676 stopstate[2] = (uchar *) emalloc(nstates2);
677 stopstate[3] = (uchar *) emalloc(nstates3);
678 stopstate[4] = (uchar *) emalloc(nstates4);
679 stopstate[5] = (uchar *) emalloc(nstates5);
680 stopstate[6] = (uchar *) emalloc(nstates6);
681 visstate[0] = (uchar *) emalloc(nstates0);
682 visstate[1] = (uchar *) emalloc(nstates1);
683 visstate[2] = (uchar *) emalloc(nstates2);
684 visstate[3] = (uchar *) emalloc(nstates3);
685 visstate[4] = (uchar *) emalloc(nstates4);
686 visstate[5] = (uchar *) emalloc(nstates5);
687 visstate[6] = (uchar *) emalloc(nstates6);
688 mapstate[0] = (short *) emalloc(nstates0 * sizeof(short));
689 mapstate[1] = (short *) emalloc(nstates1 * sizeof(short));
690 mapstate[2] = (short *) emalloc(nstates2 * sizeof(short));
691 mapstate[3] = (short *) emalloc(nstates3 * sizeof(short));
692 mapstate[4] = (short *) emalloc(nstates4 * sizeof(short));
693 mapstate[5] = (short *) emalloc(nstates5 * sizeof(short));
694 mapstate[6] = (short *) emalloc(nstates6 * sizeof(short));
695 #ifdef HAS_CODE
696 #ifdef HAS_CODE
697 #ifdef HAS_CODE
698 #ifdef HAS_CODE
699 #ifdef HAS_CODE
700 #ifdef HAS_CODE
701 #ifdef HAS_CODE
702 NrStates[0] = nstates0;
703 NrStates[1] = nstates1;
704 NrStates[2] = nstates2;
705 NrStates[3] = nstates3;
706 NrStates[4] = nstates4;
707 NrStates[5] = nstates5;
708 NrStates[6] = nstates6;
709 #endif
710 #endif
711 #endif
712 #endif
713 #endif
714 #endif
715 #endif
716 stopstate[0][endstate0] = 1;
717 stopstate[1][endstate1] = 1;
718 stopstate[2][endstate2] = 1;
719 stopstate[3][endstate3] = 1;
720 stopstate[4][endstate4] = 1;
721 stopstate[5][endstate5] = 1;
722 stopstate[6][endstate6] = 1;
723 accpstate[5][7] = 1;
724 stopstate[1][49] = 1;
725 retrans(0, nstates0, start0, src_ln0, reached0, loopstate0);
726 retrans(1, nstates1, start1, src_ln1, reached1, loopstate1);
727 retrans(2, nstates2, start2, src_ln2, reached2, loopstate2);
728 retrans(3, nstates3, start3, src_ln3, reached3, loopstate3);
729 retrans(4, nstates4, start4, src_ln4, reached4, loopstate4);
730 retrans(5, nstates5, start5, src_ln5, reached5, loopstate5);
731 if (state_tables)
732 { printf("\nTransition Type: ");
733 printf("A=atomic; D=d_step; L=local; G=global\n");
734 printf("Source-State Labels: ");
735 printf("p=progress; e=end; a=accept;\n");
736 #ifdef MERGED
737 printf("Note: statement merging was used. Only the first\n");
738 printf(" stmnt executed in each merge sequence is shown\n");
739 printf(" (use spin -a -o3 to disable statement merging)\n");
740 #endif
741 pan_exit(0);
742 }
743 iniglobals();
744 #if defined(VERI) && !defined(NOREDUCE) && !defined(NP)
745 if (!state_tables
746 #ifdef HAS_CODE
747 && !readtrail
748 #endif
749 #if NCORE>1
750 && core_id == 0
751 #endif
752 )
753 { printf("warning: for p.o. reduction to be valid ");
754 printf("the never claim must be stutter-invariant\n");
755 printf("(never claims generated from LTL ");
756 printf("formulae are stutter-invariant)\n");
757 }
758 #endif
759 UnBlock; /* disable rendez-vous */
760 #ifdef BITSTATE
761 if (udmem)
762 { udmem *= 1024L*1024L;
763 #if NCORE>1
764 if (!readtrail)
765 { void init_SS(unsigned long);
766 init_SS((unsigned long) udmem);
767 } else
768 #endif
769 SS = (uchar *) emalloc(udmem);
770 bstore = bstore_mod;
771 } else
772 #if NCORE>1
773 { void init_SS(unsigned long);
774 init_SS(ONE_L<<(ssize-3));
775 }
776 #else
777 SS = (uchar *) emalloc(ONE_L<<(ssize-3));
778 #endif
779 #else
780 hinit();
781 #endif
782 #if defined(FULLSTACK) && defined(BITSTATE)
783 onstack_init();
784 #endif
785 #if defined(CNTRSTACK) && !defined(BFS)
786 LL = (uchar *) emalloc(ONE_L<<(ssize-3));
787 #endif
788 stack = ( Stack *) emalloc(sizeof(Stack));
789 svtack = (Svtack *) emalloc(sizeof(Svtack));
790 /* a place to point for Pptr of non-running procs: */
791 noptr = (uchar *) emalloc(Maxbody * sizeof(char));
792 #ifdef SVDUMP
793 if (vprefix > 0)
794 write(svfd, (uchar *) &vprefix, sizeof(int));
795 #endif
796 #ifdef VERI
797 Addproc(VERI); /* never - pid = 0 */
798 #endif
799 active_procs(); /* started after never */
800 #ifdef EVENT_TRACE
801 now._event = start_event;
802 reached[EVENT_TRACE][start_event] = 1;
803 #endif
804 #ifdef HAS_CODE
805 globinit();
806 #endif
807 #ifdef BITSTATE
808 go_again:
809 #endif
810 do_the_search();
811 #ifdef BITSTATE
812 if (--Nrun > 0 && HASH_CONST[++HASH_NR])
813 { printf("Run %d:\n", HASH_NR);
814 wrap_stats();
815 printf("\n");
816 memset(SS, 0, ONE_L<<(ssize-3));
817 #ifdef CNTRSTACK
818 memset(LL, 0, ONE_L<<(ssize-3));
819 #endif
820 #ifdef FULLSTACK
821 memset((uchar *) S_Tab, 0,
822 maxdepth*sizeof(struct H_el *));
823 #endif
824 nstates=nlinks=truncs=truncs2=ngrabs = 0;
825 nlost=nShadow=hcmp = 0;
826 Fa=Fh=Zh=Zn = 0;
827 PUT=PROBE=ZAPS=Ccheck=Cholds = 0;
828 goto go_again;
829 }
830 #endif
831 }
832 #ifdef HAS_PROVIDED
833 int provided(int, uchar, int, Trans *);
834 #endif
835 #if NCORE>1
836 #define GLOBAL_LOCK (0)
837 #ifndef CS_N
838 #define CS_N (256*NCORE)
839 #endif
840 #ifdef NGQ
841 #define NR_QS (NCORE)
842 #define CS_NR (CS_N+1) /* 2^N + 1, nr critical sections */
843 #define GQ_RD GLOBAL_LOCK
844 #define GQ_WR GLOBAL_LOCK
845 #define CS_ID (1 + (int) (j1 & (CS_N-1))) /* mask: 2^N - 1, zero reserved */
846 #define QLOCK(n) (1+n)
847 #else
848 #define NR_QS (NCORE+1)
849 #define CS_NR (CS_N+3)
850 #define GQ_RD (1)
851 #define GQ_WR (2)
852 #define CS_ID (3 + (int) (j1 & (CS_N-1)))
853 #define QLOCK(n) (3+n)
854 #endif
855
856 void e_critical(int);
857 void x_critical(int);
858
859 #ifndef SEP_STATE
860 #define enter_critical(w) e_critical(w)
861 #define leave_critical(w) x_critical(w)
862 #else
863 #ifdef NGQ
864 #define enter_critical(w) { if (w < 1+NCORE) e_critical(w); }
865 #define leave_critical(w) { if (w < 1+NCORE) x_critical(w); }
866 #else
867 #define enter_critical(w) { if (w < 3+NCORE) e_critical(w); }
868 #define leave_critical(w) { if (w < 3+NCORE) x_critical(w); }
869 #endif
870 #endif
871
872 int
873 cpu_printf(const char *fmt, ...)
874 { va_list args;
875 enter_critical(GLOBAL_LOCK); /* printing */
876 printf("cpu%d: ", core_id);
877 fflush(stdout);
878 va_start(args, fmt);
879 vprintf(fmt, args);
880 va_end(args);
881 fflush(stdout);
882 leave_critical(GLOBAL_LOCK);
883 return 1;
884 }
885 #else
886 int
887 cpu_printf(const char *fmt, ...)
888 { va_list args;
889 va_start(args, fmt);
890 vprintf(fmt, args);
891 va_end(args);
892 return 1;
893 }
894 #endif
895 int
896 Printf(const char *fmt, ...)
897 { /* Make sure the args to Printf
898 * are always evaluated (e.g., they
899 * could contain a run stmnt)
900 * but do not generate the output
901 * during verification runs
902 * unless explicitly wanted
903 * If this fails on your system
904 * compile SPIN itself -DPRINTF
905 * and this code is not generated
906 */
907 #ifdef HAS_CODE
908 if (readtrail)
909 { va_list args;
910 va_start(args, fmt);
911 vprintf(fmt, args);
912 va_end(args);
913 return 1;
914 }
915 #endif
916 #ifdef PRINTF
917 va_list args;
918 va_start(args, fmt);
919 vprintf(fmt, args);
920 va_end(args);
921 #endif
922 return 1;
923 }
924 extern void printm(int);
925 #ifndef SC
926 #define getframe(i) &trail[i];
927 #else
928 static long HHH, DDD, hiwater;
929 static long CNT1, CNT2;
930 static int stackwrite;
931 static int stackread;
932 static Trail frameptr;
933 Trail *
934 getframe(int d)
935 {
936 if (CNT1 == CNT2)
937 return &trail[d];
938
939 if (d >= (CNT1-CNT2)*DDD)
940 return &trail[d - (CNT1-CNT2)*DDD];
941
942 if (!stackread
943 && (stackread = open(stackfile, 0)) < 0)
944 { printf("getframe: cannot open %s\n", stackfile);
945 wrapup();
946 }
947 if (lseek(stackread, d* (off_t) sizeof(Trail), SEEK_SET) == -1
948 || read(stackread, &frameptr, sizeof(Trail)) != sizeof(Trail))
949 { printf("getframe: frame read error\n");
950 wrapup();
951 }
952 return &frameptr;
953 }
954 #endif
955 #if !defined(SAFETY) && !defined(BITSTATE)
956 #if !defined(FULLSTACK) || defined(MA)
957 #define depth_of(x) A_depth /* an estimate */
958 #else
959 int
960 depth_of(struct H_el *s)
961 { Trail *t; int d;
962 for (d = 0; d <= A_depth; d++)
963 { t = getframe(d);
964 if (s == t->ostate)
965 return d;
966 }
967 printf("pan: cannot happen, depth_of\n");
968 return depthfound;
969 }
970 #endif
971 #endif
972 #if NCORE>1
973 extern void cleanup_shm(int);
974 volatile unsigned int *search_terminated; /* to signal early termination */
975 #endif
976 void
977 pan_exit(int val)
978 { void stop_timer(void);
979 if (signoff)
980 { printf("--end of output--\n");
981 }
982 #if NCORE>1
983 if (search_terminated != NULL)
984 { *search_terminated |= 1; /* pan_exit */
985 }
986 #ifdef USE_DISK
987 { void dsk_stats(void);
988 dsk_stats();
989 }
990 #endif
991 if (!state_tables && !readtrail)
992 { cleanup_shm(1);
993 }
994 #endif
995 if (val == 2)
996 { val = 0;
997 } else
998 { stop_timer();
999 }
1000 exit(val);
1001 }
1002 #ifdef HAS_CODE
1003 char *
1004 transmognify(char *s)
1005 { char *v, *w;
1006 static char buf[2][2048];
1007 int i, toggle = 0;
1008 if (!s || strlen(s) > 2047) return s;
1009 memset(buf[0], 0, 2048);
1010 memset(buf[1], 0, 2048);
1011 strcpy(buf[toggle], s);
1012 while ((v = strstr(buf[toggle], "{c_code")))
1013 { *v = '\0'; v++;
1014 strcpy(buf[1-toggle], buf[toggle]);
1015 for (w = v; *w != '}' && *w != '\0'; w++) /* skip */;
1016 if (*w != '}') return s;
1017 *w = '\0'; w++;
1018 for (i = 0; code_lookup[i].c; i++)
1019 if (strcmp(v, code_lookup[i].c) == 0
1020 && strlen(v) == strlen(code_lookup[i].c))
1021 { if (strlen(buf[1-toggle])
1022 + strlen(code_lookup[i].t)
1023 + strlen(w) > 2047)
1024 return s;
1025 strcat(buf[1-toggle], code_lookup[i].t);
1026 break;
1027 }
1028 strcat(buf[1-toggle], w);
1029 toggle = 1 - toggle;
1030 }
1031 buf[toggle][2047] = '\0';
1032 return buf[toggle];
1033 }
1034 #else
1035 char * transmognify(char *s) { return s; }
1036 #endif
1037 #ifdef HAS_CODE
1038 void
1039 add_src_txt(int ot, int tt)
1040 { Trans *t;
1041 char *q;
1042
1043 for (t = trans[ot][tt]; t; t = t->nxt)
1044 { printf("\t\t");
1045 q = transmognify(t->tp);
1046 for ( ; q && *q; q++)
1047 if (*q == '\n')
1048 printf("\\n");
1049 else
1050 putchar(*q);
1051 printf("\n");
1052 }
1053 }
1054 void
1055 wrap_trail(void)
1056 { static int wrap_in_progress = 0;
1057 int i; short II;
1058 P0 *z;
1059
1060 if (wrap_in_progress++) return;
1061
1062 printf("spin: trail ends after %ld steps\n", depth);
1063 if (onlyproc >= 0)
1064 { if (onlyproc >= now._nr_pr) { pan_exit(0); }
1065 II = onlyproc;
1066 z = (P0 *)pptr(II);
1067 printf("%3ld: proc %d (%s) ",
1068 depth, II, procname[z->_t]);
1069 for (i = 0; src_all[i].src; i++)
1070 if (src_all[i].tp == (int) z->_t)
1071 { printf(" line %3d",
1072 src_all[i].src[z->_p]);
1073 break;
1074 }
1075 printf(" (state %2d)", z->_p);
1076 if (!stopstate[z->_t][z->_p])
1077 printf(" (invalid end state)");
1078 printf("\n");
1079 add_src_txt(z->_t, z->_p);
1080 pan_exit(0);
1081 }
1082 printf("#processes %d:\n", now._nr_pr);
1083 if (depth < 0) depth = 0;
1084 for (II = 0; II < now._nr_pr; II++)
1085 { z = (P0 *)pptr(II);
1086 printf("%3ld: proc %d (%s) ",
1087 depth, II, procname[z->_t]);
1088 for (i = 0; src_all[i].src; i++)
1089 if (src_all[i].tp == (int) z->_t)
1090 { printf(" line %3d",
1091 src_all[i].src[z->_p]);
1092 break;
1093 }
1094 printf(" (state %2d)", z->_p);
1095 if (!stopstate[z->_t][z->_p])
1096 printf(" (invalid end state)");
1097 printf("\n");
1098 add_src_txt(z->_t, z->_p);
1099 }
1100 c_globals();
1101 for (II = 0; II < now._nr_pr; II++)
1102 { z = (P0 *)pptr(II);
1103 c_locals(II, z->_t);
1104 }
1105 #ifdef ON_EXIT
1106 ON_EXIT;
1107 #endif
1108 pan_exit(0);
1109 }
1110 FILE *
1111 findtrail(void)
1112 { FILE *fd;
1113 char fnm[512], *q;
1114 char MyFile[512];
1115 char MySuffix[16];
1116 int try_core;
1117 int candidate_files;
1118
1119 if (trailfilename != NULL)
1120 { fd = fopen(trailfilename, "r");
1121 if (fd == NULL)
1122 { printf("pan: cannot find %s\n", trailfilename);
1123 pan_exit(1);
1124 } /* else */
1125 goto success;
1126 }
1127 talk:
1128 try_core = 1;
1129 candidate_files = 0;
1130 tprefix = "trail";
1131 strcpy(MyFile, TrailFile);
1132 do { /* see if there's more than one possible trailfile */
1133 if (whichtrail)
1134 { sprintf(fnm, "%s%d.%s",
1135 MyFile, whichtrail, tprefix);
1136 fd = fopen(fnm, "r");
1137 if (fd != NULL)
1138 { candidate_files++;
1139 if (verbose==100)
1140 printf("trail%d: %s\n",
1141 candidate_files, fnm);
1142 fclose(fd);
1143 }
1144 if ((q = strchr(MyFile, '.')) != NULL)
1145 { *q = '\0';
1146 sprintf(fnm, "%s%d.%s",
1147 MyFile, whichtrail, tprefix);
1148 *q = '.';
1149 fd = fopen(fnm, "r");
1150 if (fd != NULL)
1151 { candidate_files++;
1152 if (verbose==100)
1153 printf("trail%d: %s\n",
1154 candidate_files, fnm);
1155 fclose(fd);
1156 } }
1157 } else
1158 { sprintf(fnm, "%s.%s", MyFile, tprefix);
1159 fd = fopen(fnm, "r");
1160 if (fd != NULL)
1161 { candidate_files++;
1162 if (verbose==100)
1163 printf("trail%d: %s\n",
1164 candidate_files, fnm);
1165 fclose(fd);
1166 }
1167 if ((q = strchr(MyFile, '.')) != NULL)
1168 { *q = '\0';
1169 sprintf(fnm, "%s.%s", MyFile, tprefix);
1170 *q = '.';
1171 fd = fopen(fnm, "r");
1172 if (fd != NULL)
1173 { candidate_files++;
1174 if (verbose==100)
1175 printf("trail%d: %s\n",
1176 candidate_files, fnm);
1177 fclose(fd);
1178 } } }
1179 tprefix = MySuffix;
1180 sprintf(tprefix, "cpu%d_trail", try_core++);
1181 } while (try_core <= NCORE);
1182
1183 if (candidate_files != 1)
1184 { if (verbose != 100)
1185 { printf("error: there are %d trail files:\n",
1186 candidate_files);
1187 verbose = 100;
1188 goto talk;
1189 } else
1190 { printf("pan: rm or mv all except one\n");
1191 exit(1);
1192 } }
1193 try_core = 1;
1194 strcpy(MyFile, TrailFile); /* restore */
1195 tprefix = "trail";
1196 try_again:
1197 if (whichtrail)
1198 { sprintf(fnm, "%s%d.%s", MyFile, whichtrail, tprefix);
1199 fd = fopen(fnm, "r");
1200 if (fd == NULL && (q = strchr(MyFile, '.')))
1201 { *q = '\0';
1202 sprintf(fnm, "%s%d.%s",
1203 MyFile, whichtrail, tprefix);
1204 *q = '.';
1205 fd = fopen(fnm, "r");
1206 }
1207 } else
1208 { sprintf(fnm, "%s.%s", MyFile, tprefix);
1209 fd = fopen(fnm, "r");
1210 if (fd == NULL && (q = strchr(MyFile, '.')))
1211 { *q = '\0';
1212 sprintf(fnm, "%s.%s", MyFile, tprefix);
1213 *q = '.';
1214 fd = fopen(fnm, "r");
1215 } }
1216 if (fd == NULL)
1217 { if (try_core < NCORE)
1218 { tprefix = MySuffix;
1219 sprintf(tprefix, "cpu%d_trail", try_core++);
1220 goto try_again;
1221 }
1222 printf("pan: cannot find trailfile %s\n", fnm);
1223 pan_exit(1);
1224 }
1225 success:
1226 #if NCORE>1 && defined(SEP_STATE)
1227 { void set_root(void); /* for partial traces from local root */
1228 set_root();
1229 }
1230 #endif
1231 return fd;
1232 }
1233
1234 uchar do_transit(Trans *, short);
1235
1236 void
1237 getrail(void)
1238 { FILE *fd;
1239 char *q;
1240 int i, t_id, lastnever=-1; short II;
1241 Trans *t;
1242 P0 *z;
1243
1244 fd = findtrail(); /* exits if unsuccessful */
1245 while (fscanf(fd, "%ld:%d:%d\n", &depth, &i, &t_id) == 3)
1246 { if (depth == -1)
1247 printf("<<<<<START OF CYCLE>>>>>\n");
1248 if (depth < 0)
1249 continue;
1250 if (i > now._nr_pr)
1251 { printf("pan: Error, proc %d invalid pid ", i);
1252 printf("transition %d\n", t_id);
1253 break;
1254 }
1255 II = i;
1256 z = (P0 *)pptr(II);
1257 for (t = trans[z->_t][z->_p]; t; t = t->nxt)
1258 if (t->t_id == (T_ID) t_id)
1259 break;
1260 if (!t)
1261 { for (i = 0; i < NrStates[z->_t]; i++)
1262 { t = trans[z->_t][i];
1263 if (t && t->t_id == (T_ID) t_id)
1264 { printf("\tRecovered at state %d\n", i);
1265 z->_p = i;
1266 goto recovered;
1267 } }
1268 printf("pan: Error, proc %d type %d state %d: ",
1269 II, z->_t, z->_p);
1270 printf("transition %d not found\n", t_id);
1271 printf("pan: list of possible transitions in this process:\n");
1272 if (z->_t >= 0 && z->_t <= _NP_)
1273 for (t = trans[z->_t][z->_p]; t; t = t->nxt)
1274 printf(" t_id %d -- case %d, [%s]\n",
1275 t->t_id, t->forw, t->tp);
1276 break; /* pan_exit(1); */
1277 }
1278 recovered:
1279 q = transmognify(t->tp);
1280 if (gui) simvals[0] = '\0';
1281 this = pptr(II);
1282 trpt->tau |= 1;
1283 if (!do_transit(t, II))
1284 { if (onlyproc >= 0 && II != onlyproc)
1285 goto moveon;
1286 printf("pan: error, next transition UNEXECUTABLE on replay\n");
1287 printf(" most likely causes: missing c_track statements\n");
1288 printf(" or illegal side-effects in c_expr statements\n");
1289 }
1290 if (onlyproc >= 0 && II != onlyproc)
1291 goto moveon;
1292 if (verbose)
1293 { printf("%3ld: proc %2d (%s) ", depth, II, procname[z->_t]);
1294 for (i = 0; src_all[i].src; i++)
1295 if (src_all[i].tp == (int) z->_t)
1296 { printf(" line %3d \"%s\" ",
1297 src_all[i].src[z->_p], PanSource);
1298 break;
1299 }
1300 printf("(state %d) trans {%d,%d} [%s]\n",
1301 z->_p, t_id, t->forw, q?q:"");
1302 c_globals();
1303 for (i = 0; i < now._nr_pr; i++)
1304 { c_locals(i, ((P0 *)pptr(i))->_t);
1305 }
1306 } else
1307 if (strcmp(procname[z->_t], ":never:") == 0)
1308 { if (lastnever != (int) z->_p)
1309 { for (i = 0; src_all[i].src; i++)
1310 if (src_all[i].tp == (int) z->_t)
1311 { printf("MSC: ~G %d\n",
1312 src_all[i].src[z->_p]);
1313 break;
1314 }
1315 if (!src_all[i].src)
1316 printf("MSC: ~R %d\n", z->_p);
1317 }
1318 lastnever = z->_p;
1319 goto sameas;
1320 } else
1321 if (strcmp(procname[z->_t], ":np_:") != 0)
1322 {
1323 sameas: if (no_rck) goto moveon;
1324 if (coltrace)
1325 { printf("%ld: ", depth);
1326 for (i = 0; i < II; i++)
1327 printf("\t\t");
1328 printf("%s(%d):", procname[z->_t], II);
1329 printf("[%s]\n", q?q:"");
1330 } else if (!silent)
1331 { if (strlen(simvals) > 0) {
1332 printf("%3ld: proc %2d (%s)",
1333 depth, II, procname[z->_t]);
1334 for (i = 0; src_all[i].src; i++)
1335 if (src_all[i].tp == (int) z->_t)
1336 { printf(" line %3d \"%s\" ",
1337 src_all[i].src[z->_p], PanSource);
1338 break;
1339 }
1340 printf("(state %d) [values: %s]\n", z->_p, simvals);
1341 }
1342 printf("%3ld: proc %2d (%s)",
1343 depth, II, procname[z->_t]);
1344 for (i = 0; src_all[i].src; i++)
1345 if (src_all[i].tp == (int) z->_t)
1346 { printf(" line %3d \"%s\" ",
1347 src_all[i].src[z->_p], PanSource);
1348 break;
1349 }
1350 printf("(state %d) [%s]\n", z->_p, q?q:"");
1351 /* printf("\n"); */
1352 } }
1353 moveon: z->_p = t->st;
1354 }
1355 wrap_trail();
1356 }
1357 #endif
1358 int
1359 f_pid(int pt)
1360 { int i;
1361 P0 *z;
1362 for (i = 0; i < now._nr_pr; i++)
1363 { z = (P0 *)pptr(i);
1364 if (z->_t == (unsigned) pt)
1365 return BASE+z->_pid;
1366 }
1367 return -1;
1368 }
1369 #ifdef VERI
1370 void check_claim(int);
1371 #endif
1372
1373 #if !defined(HASH64) && !defined(HASH32)
1374 #define HASH32
1375 #endif
1376 #if defined(HASH32) && defined(SAFETY) && !defined(SFH) && !defined(SPACE)
1377 #define SFH
1378 #endif
1379 #if defined(SFH) && (defined(BITSTATE) || defined(COLLAPSE) || defined(HC) || defined(HASH64))
1380 #undef SFH
1381 #endif
1382 #if defined(SFH) && !defined(NOCOMP)
1383 #define NOCOMP /* go for speed */
1384 #endif
1385 #if NCORE>1 && !defined(GLOB_HEAP)
1386 #define SEP_HEAP /* version 5.1.2 */
1387 #endif
1388
1389 #ifdef BITSTATE
1390 int
1391 bstore_mod(char *v, int n) /* hasharray size not a power of two */
1392 { unsigned long x, y;
1393 unsigned int i = 1;
1394
1395 d_hash((uchar *) v, n); /* sets j3, j4, K1, K2 */
1396 x = K1; y = j3;
1397 for (;;)
1398 { if (!(SS[x%udmem]&(1<<y))) break;
1399 if (i == hfns) {
1400 #ifdef DEBUG
1401 printf("Old bitstate\n");
1402 #endif
1403 return 1;
1404 }
1405 x = (x + K2 + i);
1406 y = (y + j4) & 7;
1407 i++;
1408 }
1409 #ifdef RANDSTOR
1410 if (rand()%100 > RANDSTOR) return 0;
1411 #endif
1412 for (;;)
1413 { SS[x%udmem] |= (1<<y);
1414 if (i == hfns) break;
1415 x = (x + K2 + i);
1416 y = (y + j4) & 7;
1417 i++;
1418 }
1419 #ifdef DEBUG
1420 printf("New bitstate\n");
1421 #endif
1422 if (now._a_t&1)
1423 { nShadow++;
1424 }
1425 return 0;
1426 }
1427 int
1428 bstore_reg(char *v, int n) /* extended hashing, Peter Dillinger, 2004 */
1429 { unsigned long x, y;
1430 unsigned int i = 1;
1431
1432 d_hash((uchar *) v, n); /* sets j1-j4 */
1433 x = j2; y = j3;
1434 for (;;)
1435 { if (!(SS[x]&(1<<y))) break;
1436 if (i == hfns) {
1437 #ifdef DEBUG
1438 printf("Old bitstate\n");
1439 #endif
1440 return 1;
1441 }
1442 x = (x + j1 + i) & nmask;
1443 y = (y + j4) & 7;
1444 i++;
1445 }
1446 #ifdef RANDSTOR
1447 if (rand()%100 > RANDSTOR) return 0;
1448 #endif
1449 for (;;)
1450 { SS[x] |= (1<<y);
1451 if (i == hfns) break;
1452 x = (x + j1 + i) & nmask;
1453 y = (y + j4) & 7;
1454 i++;
1455 }
1456 #ifdef DEBUG
1457 printf("New bitstate\n");
1458 #endif
1459 if (now._a_t&1)
1460 { nShadow++;
1461 }
1462 return 0;
1463 }
1464 #endif
1465 unsigned long TMODE = 0666; /* file permission bits for trail files */
1466
1467 int trcnt=1;
1468 char snap[64], fnm[512];
1469
1470 int
1471 make_trail(void)
1472 { int fd;
1473 char *q;
1474 char MyFile[512];
1475 int w_flags = O_CREAT|O_WRONLY|O_TRUNC;
1476
1477 if (exclusive == 1 && iterative == 0)
1478 { w_flags |= O_EXCL;
1479 }
1480
1481 q = strrchr(TrailFile, '/');
1482 if (q == NULL) q = TrailFile; else q++;
1483 strcpy(MyFile, q); /* TrailFile is not a writable string */
1484
1485 if (iterative == 0 && Nr_Trails++ > 0)
1486 { sprintf(fnm, "%s%d.%s",
1487 MyFile, Nr_Trails-1, tprefix);
1488 } else
1489 {
1490 #ifdef PUTPID
1491 sprintf(fnm, "%s%d.%s", MyFile, getpid(), tprefix);
1492 #else
1493 sprintf(fnm, "%s.%s", MyFile, tprefix);
1494 #endif
1495 }
1496 if ((fd = open(fnm, w_flags, TMODE)) < 0)
1497 { if ((q = strchr(MyFile, '.')))
1498 { *q = '\0';
1499 if (iterative == 0 && Nr_Trails-1 > 0)
1500 sprintf(fnm, "%s%d.%s",
1501 MyFile, Nr_Trails-1, tprefix);
1502 else
1503 sprintf(fnm, "%s.%s", MyFile, tprefix);
1504 *q = '.';
1505 fd = open(fnm, w_flags, TMODE);
1506 } }
1507 if (fd < 0)
1508 { printf("pan: cannot create %s\n", fnm);
1509 perror("cause");
1510 } else
1511 {
1512 #if NCORE>1 && (defined(SEP_STATE) || !defined(FULL_TRAIL))
1513 void write_root(void);
1514 write_root();
1515 #else
1516 printf("pan: wrote %s\n", fnm);
1517 #endif
1518 }
1519 return fd;
1520 }
1521
1522 #ifndef FREQ
1523 #define FREQ (1000000)
1524 #endif
1525 #ifdef BFS
1526 #define Q_PROVISO
1527 #ifndef INLINE_REV
1528 #define INLINE_REV
1529 #endif
1530
1531 typedef struct SV_Hold {
1532 State *sv;
1533 int sz;
1534 struct SV_Hold *nxt;
1535 } SV_Hold;
1536
1537 typedef struct EV_Hold {
1538 char *sv;
1539 int sz;
1540 int nrpr;
1541 int nrqs;
1542 char *po;
1543 char *qo;
1544 char *ps, *qs;
1545 struct EV_Hold *nxt;
1546 } EV_Hold;
1547
1548 typedef struct BFS_Trail {
1549 Trail *frame;
1550 SV_Hold *onow;
1551 EV_Hold *omask;
1552 #ifdef Q_PROVISO
1553 struct H_el *lstate;
1554 #endif
1555 short boq;
1556 struct BFS_Trail *nxt;
1557 } BFS_Trail;
1558
1559 BFS_Trail *bfs_trail, *bfs_bot, *bfs_free;
1560
1561 SV_Hold *svhold, *svfree;
1562
1563 #ifdef BFS_DISK
1564 #ifndef BFS_LIMIT
1565 #define BFS_LIMIT 100000
1566 #endif
1567 #ifndef BFS_DSK_LIMIT
1568 #define BFS_DSK_LIMIT 1000000
1569 #endif
1570 #if defined(WIN32) || defined(WIN64)
1571 #define RFLAGS (O_RDONLY|O_BINARY)
1572 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)
1573 #else
1574 #define RFLAGS (O_RDONLY)
1575 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC)
1576 #endif
1577 long bfs_size_limit;
1578 int bfs_dsk_write = -1;
1579 int bfs_dsk_read = -1;
1580 long bfs_dsk_writes, bfs_dsk_reads;
1581 int bfs_dsk_seqno_w, bfs_dsk_seqno_r;
1582 #endif
1583
1584 uchar do_reverse(Trans *, short, uchar);
1585 void snapshot(void);
1586
1587 SV_Hold *
1588 getsv(int n)
1589 { SV_Hold *h = (SV_Hold *) 0, *oh;
1590
1591 oh = (SV_Hold *) 0;
1592 for (h = svfree; h; oh = h, h = h->nxt)
1593 { if (n == h->sz)
1594 { if (!oh)
1595 svfree = h->nxt;
1596 else
1597 oh->nxt = h->nxt;
1598 h->nxt = (SV_Hold *) 0;
1599 break;
1600 }
1601 if (n < h->sz)
1602 { h = (SV_Hold *) 0;
1603 break;
1604 }
1605 /* else continue */
1606 }
1607
1608 if (!h)
1609 { h = (SV_Hold *) emalloc(sizeof(SV_Hold));
1610 h->sz = n;
1611 #ifdef BFS_DISK
1612 if (bfs_size_limit >= BFS_LIMIT)
1613 { h->sv = (State *) 0; /* means: read disk */
1614 bfs_dsk_writes++; /* count */
1615 if (bfs_dsk_write < 0 /* file descriptor */
1616 || bfs_dsk_writes%BFS_DSK_LIMIT == 0)
1617 { char dsk_nm[32];
1618 if (bfs_dsk_write >= 0)
1619 { (void) close(bfs_dsk_write);
1620 }
1621 sprintf(dsk_nm, "pan_bfs_%d.tmp", bfs_dsk_seqno_w++);
1622 bfs_dsk_write = open(dsk_nm, WFLAGS, 0644);
1623 if (bfs_dsk_write < 0)
1624 { Uerror("could not create tmp disk file");
1625 }
1626 printf("pan: created disk file %s\n", dsk_nm);
1627 }
1628 if (write(bfs_dsk_write, (char *) &now, n) != n)
1629 { Uerror("aborting -- disk write failed (disk full?)");
1630 }
1631 return h; /* no memcpy */
1632 }
1633 bfs_size_limit++;
1634 #endif
1635 h->sv = (State *) emalloc(sizeof(State) - VECTORSZ + n);
1636 }
1637
1638 memcpy((char *)h->sv, (char *)&now, n);
1639 return h;
1640 }
1641
1642 EV_Hold *
1643 getsv_mask(int n)
1644 { EV_Hold *h;
1645 static EV_Hold *kept = (EV_Hold *) 0;
1646
1647 for (h = kept; h; h = h->nxt)
1648 if (n == h->sz
1649 && (memcmp((char *) Mask, (char *) h->sv, n) == 0)
1650 && (now._nr_pr == h->nrpr)
1651 && (now._nr_qs == h->nrqs)
1652 #if VECTORSZ>32000
1653 && (memcmp((char *) proc_offset, (char *) h->po, now._nr_pr * sizeof(int)) == 0)
1654 && (memcmp((char *) q_offset, (char *) h->qo, now._nr_qs * sizeof(int)) == 0)
1655 #else
1656 && (memcmp((char *) proc_offset, (char *) h->po, now._nr_pr * sizeof(short)) == 0)
1657 && (memcmp((char *) q_offset, (char *) h->qo, now._nr_qs * sizeof(short)) == 0)
1658 #endif
1659 && (memcmp((char *) proc_skip, (char *) h->ps, now._nr_pr * sizeof(uchar)) == 0)
1660 && (memcmp((char *) q_skip, (char *) h->qs, now._nr_qs * sizeof(uchar)) == 0))
1661 break;
1662 if (!h)
1663 { h = (EV_Hold *) emalloc(sizeof(EV_Hold));
1664 h->sz = n;
1665 h->nrpr = now._nr_pr;
1666 h->nrqs = now._nr_qs;
1667
1668 h->sv = (char *) emalloc(n * sizeof(char));
1669 memcpy((char *) h->sv, (char *) Mask, n);
1670
1671 if (now._nr_pr > 0)
1672 { h->ps = (char *) emalloc(now._nr_pr * sizeof(int));
1673 memcpy((char *) h->ps, (char *) proc_skip, now._nr_pr * sizeof(uchar));
1674 #if VECTORSZ>32000
1675 h->po = (char *) emalloc(now._nr_pr * sizeof(int));
1676 memcpy((char *) h->po, (char *) proc_offset, now._nr_pr * sizeof(int));
1677 #else
1678 h->po = (char *) emalloc(now._nr_pr * sizeof(short));
1679 memcpy((char *) h->po, (char *) proc_offset, now._nr_pr * sizeof(short));
1680 #endif
1681 }
1682 if (now._nr_qs > 0)
1683 { h->qs = (char *) emalloc(now._nr_qs * sizeof(int));
1684 memcpy((char *) h->qs, (char *) q_skip, now._nr_qs * sizeof(uchar));
1685 #if VECTORSZ>32000
1686 h->qo = (char *) emalloc(now._nr_qs * sizeof(int));
1687 memcpy((char *) h->qo, (char *) q_offset, now._nr_qs * sizeof(int));
1688 #else
1689 h->qo = (char *) emalloc(now._nr_qs * sizeof(short));
1690 memcpy((char *) h->qo, (char *) q_offset, now._nr_qs * sizeof(short));
1691 #endif
1692 }
1693
1694 h->nxt = kept;
1695 kept = h;
1696 }
1697 return h;
1698 }
1699
1700 void
1701 freesv(SV_Hold *p)
1702 { SV_Hold *h, *oh;
1703
1704 oh = (SV_Hold *) 0;
1705 for (h = svfree; h; oh = h, h = h->nxt)
1706 if (h->sz >= p->sz)
1707 break;
1708
1709 if (!oh)
1710 { p->nxt = svfree;
1711 svfree = p;
1712 } else
1713 { p->nxt = h;
1714 oh->nxt = p;
1715 }
1716 }
1717
1718 BFS_Trail *
1719 get_bfs_frame(void)
1720 { BFS_Trail *t;
1721
1722 if (bfs_free)
1723 { t = bfs_free;
1724 bfs_free = bfs_free->nxt;
1725 t->nxt = (BFS_Trail *) 0;
1726 } else
1727 { t = (BFS_Trail *) emalloc(sizeof(BFS_Trail));
1728 }
1729 t->frame = (Trail *) emalloc(sizeof(Trail));
1730 return t;
1731 }
1732
1733 void
1734 push_bfs(Trail *f, int d)
1735 { BFS_Trail *t;
1736
1737 t = get_bfs_frame();
1738 memcpy((char *)t->frame, (char *)f, sizeof(Trail));
1739 t->frame->o_tt = d; /* depth */
1740
1741 t->boq = boq;
1742 t->onow = getsv(vsize);
1743 t->omask = getsv_mask(vsize);
1744 #if defined(FULLSTACK) && defined(Q_PROVISO)
1745 t->lstate = Lstate;
1746 #endif
1747 if (!bfs_bot)
1748 { bfs_bot = bfs_trail = t;
1749 } else
1750 { bfs_bot->nxt = t;
1751 bfs_bot = t;
1752 }
1753 #ifdef CHECK
1754 printf("PUSH %u (%d)\n", t->frame, d);
1755 #endif
1756 }
1757
1758 Trail *
1759 pop_bfs(void)
1760 { BFS_Trail *t;
1761
1762 if (!bfs_trail)
1763 return (Trail *) 0;
1764
1765 t = bfs_trail;
1766 bfs_trail = t->nxt;
1767 if (!bfs_trail)
1768 bfs_bot = (BFS_Trail *) 0;
1769 #if defined(Q_PROVISO) && !defined(BITSTATE) && !defined(NOREDUCE)
1770 if (t->lstate) t->lstate->tagged = 0;
1771 #endif
1772
1773 t->nxt = bfs_free;
1774 bfs_free = t;
1775
1776 vsize = t->onow->sz;
1777 boq = t->boq;
1778 #ifdef BFS_DISK
1779 if (t->onow->sv == (State *) 0)
1780 { char dsk_nm[32];
1781 bfs_dsk_reads++; /* count */
1782 if (bfs_dsk_read >= 0 /* file descriptor */
1783 && bfs_dsk_reads%BFS_DSK_LIMIT == 0)
1784 { (void) close(bfs_dsk_read);
1785 sprintf(dsk_nm, "pan_bfs_%d.tmp", bfs_dsk_seqno_r-1);
1786 (void) unlink(dsk_nm);
1787 bfs_dsk_read = -1;
1788 }
1789 if (bfs_dsk_read < 0)
1790 { sprintf(dsk_nm, "pan_bfs_%d.tmp", bfs_dsk_seqno_r++);
1791 bfs_dsk_read = open(dsk_nm, RFLAGS);
1792 if (bfs_dsk_read < 0)
1793 { Uerror("could not open temp disk file");
1794 } }
1795 if (read(bfs_dsk_read, (char *) &now, vsize) != vsize)
1796 { Uerror("bad bfs disk file read");
1797 }
1798 #ifndef NOVSZ
1799 if (now._vsz != vsize)
1800 { Uerror("disk read vsz mismatch");
1801 }
1802 #endif
1803 } else
1804 #endif
1805 memcpy((uchar *) &now, (uchar *) t->onow->sv, vsize);
1806 memcpy((uchar *) Mask, (uchar *) t->omask->sv, vsize);
1807 if (now._nr_pr > 0)
1808 #if VECTORSZ>32000
1809 { memcpy((char *)proc_offset, (char *)t->omask->po, now._nr_pr * sizeof(int));
1810 #else
1811 { memcpy((char *)proc_offset, (char *)t->omask->po, now._nr_pr * sizeof(short));
1812 #endif
1813 memcpy((char *)proc_skip, (char *)t->omask->ps, now._nr_pr * sizeof(uchar));
1814 }
1815 if (now._nr_qs > 0)
1816 #if VECTORSZ>32000
1817 { memcpy((uchar *)q_offset, (uchar *)t->omask->qo, now._nr_qs * sizeof(int));
1818 #else
1819 { memcpy((uchar *)q_offset, (uchar *)t->omask->qo, now._nr_qs * sizeof(short));
1820 #endif
1821 memcpy((uchar *)q_skip, (uchar *)t->omask->qs, now._nr_qs * sizeof(uchar));
1822 }
1823 #ifdef BFS_DISK
1824 if (t->onow->sv != (State *) 0)
1825 #endif
1826 freesv(t->onow); /* omask not freed */
1827 #ifdef CHECK
1828 printf("POP %u (%d)\n", t->frame, t->frame->o_tt);
1829 #endif
1830 return t->frame;
1831 }
1832
1833 void
1834 store_state(Trail *ntrpt, int shortcut, short oboq)
1835 {
1836 #ifdef VERI
1837 Trans *t2 = (Trans *) 0;
1838 uchar ot; int tt, E_state;
1839 uchar o_opm = trpt->o_pm, *othis = this;
1840
1841 if (shortcut)
1842 {
1843 #ifdef VERBOSE
1844 printf("claim: shortcut\n");
1845 #endif
1846 goto store_it; /* no claim move */
1847 }
1848
1849 this = (((uchar *)&now)+proc_offset[0]); /* 0 = never claim */
1850 trpt->o_pm = 0;
1851
1852 tt = (int) ((P0 *)this)->_p;
1853 ot = (uchar) ((P0 *)this)->_t;
1854
1855 #ifdef HAS_UNLESS
1856 E_state = 0;
1857 #endif
1858 for (t2 = trans[ot][tt]; t2; t2 = t2?t2->nxt:(Trans *)0)
1859 {
1860 #ifdef HAS_UNLESS
1861 if (E_state > 0
1862 && E_state != t2->e_trans)
1863 break;
1864 #endif
1865 if (do_transit(t2, 0))
1866 {
1867 #ifdef VERBOSE
1868 if (!reached[ot][t2->st])
1869 printf("depth: %d -- claim move from %d -> %d\n",
1870 trpt->o_tt, ((P0 *)this)->_p, t2->st);
1871 #endif
1872 #ifdef HAS_UNLESS
1873 E_state = t2->e_trans;
1874 #endif
1875 if (t2->st > 0)
1876 { ((P0 *)this)->_p = t2->st;
1877 reached[ot][t2->st] = 1;
1878 #ifndef NOCLAIM
1879 check_claim(t2->st);
1880 #endif
1881 }
1882 if (now._nr_pr == 0) /* claim terminated */
1883 uerror("end state in claim reached");
1884
1885 #ifdef PEG
1886 peg[t2->forw]++;
1887 #endif
1888 trpt->o_pm |= 1;
1889 if (t2->atom&2)
1890 Uerror("atomic in claim not supported in BFS mode");
1891 store_it:
1892
1893 #endif
1894
1895 #ifdef BITSTATE
1896 if (!bstore((char *)&now, vsize))
1897 #else
1898 #ifdef MA
1899 if (!gstore((char *)&now, vsize, 0))
1900 #else
1901 if (!hstore((char *)&now, vsize))
1902 #endif
1903 #endif
1904 { static long sdone = (long) 0; long ndone;
1905 nstates++;
1906 #ifndef NOREDUCE
1907 trpt->tau |= 64;
1908 #endif
1909 ndone = (unsigned long) (nstates/((double) FREQ));
1910 if (ndone != sdone && mreached%10 != 0)
1911 { snapshot();
1912 sdone = ndone;
1913 #if defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(MA)
1914 if (nstates > ((double)(1<<(ssize+1))))
1915 { void resize_hashtable(void);
1916 resize_hashtable();
1917 }
1918 #endif
1919 }
1920 #if SYNC
1921 if (boq != -1)
1922 midrv++;
1923 else if (oboq != -1)
1924 { Trail *x;
1925 x = (Trail *) trpt->ostate; /* pre-rv state */
1926 if (x) x->o_pm |= 4; /* mark success */
1927 }
1928 #endif
1929 push_bfs(ntrpt, trpt->o_tt+1);
1930 } else
1931 { truncs++;
1932 #if !defined(NOREDUCE) && defined(FULLSTACK) && defined(Q_PROVISO)
1933 #if !defined(BITSTATE)
1934 if (Lstate && Lstate->tagged) trpt->tau |= 64;
1935 #else
1936 if (trpt->tau&32)
1937 { BFS_Trail *tprov;
1938 for (tprov = bfs_trail; tprov; tprov = tprov->nxt)
1939 if (tprov->onow->sv != (State *) 0
1940 && memcmp((uchar *)&now, (uchar *)tprov->onow->sv, vsize) == 0)
1941 { trpt->tau |= 64;
1942 break; /* state is in queue */
1943 } }
1944 #endif
1945 #endif
1946 }
1947 #ifdef VERI
1948 ((P0 *)this)->_p = tt; /* reset claim */
1949 if (t2)
1950 do_reverse(t2, 0, 0);
1951 else
1952 break;
1953 } }
1954 this = othis;
1955 trpt->o_pm = o_opm;
1956 #endif
1957 }
1958
1959 Trail *ntrpt;
1960
1961 void
1962 bfs(void)
1963 { Trans *t; Trail *otrpt, *x;
1964 uchar _n, _m, ot, nps = 0;
1965 int tt, E_state;
1966 short II, From = (short) (now._nr_pr-1), To = BASE;
1967 short oboq = boq;
1968
1969 ntrpt = (Trail *) emalloc(sizeof(Trail));
1970 trpt->ostate = (struct H_el *) 0;
1971 trpt->tau = 0;
1972
1973 trpt->o_tt = -1;
1974 store_state(ntrpt, 0, oboq); /* initial state */
1975
1976 while ((otrpt = pop_bfs())) /* also restores now */
1977 { memcpy((char *) trpt, (char *) otrpt, sizeof(Trail));
1978 #if defined(C_States) && (HAS_TRACK==1)
1979 c_revert((uchar *) &(now.c_state[0]));
1980 #endif
1981 if (trpt->o_pm & 4)
1982 {
1983 #ifdef VERBOSE
1984 printf("Revisit of atomic not needed (%d)\n",
1985 trpt->o_pm);
1986 #endif
1987 continue;
1988 }
1989 #ifndef NOREDUCE
1990 nps = 0;
1991 #endif
1992 if (trpt->o_pm == 8)
1993 { revrv++;
1994 if (trpt->tau&8)
1995 {
1996 #ifdef VERBOSE
1997 printf("Break atomic (pm:%d,tau:%d)\n",
1998 trpt->o_pm, trpt->tau);
1999 #endif
2000 trpt->tau &= ~8;
2001 }
2002 #ifndef NOREDUCE
2003 else if (trpt->tau&32)
2004 {
2005 #ifdef VERBOSE
2006 printf("Void preselection (pm:%d,tau:%d)\n",
2007 trpt->o_pm, trpt->tau);
2008 #endif
2009 trpt->tau &= ~32;
2010 nps = 1; /* no preselection in repeat */
2011 }
2012 #endif
2013 }
2014 trpt->o_pm &= ~(4|8);
2015 if (trpt->o_tt > mreached)
2016 { mreached = trpt->o_tt;
2017 if (mreached%10 == 0)
2018 { snapshot();
2019 } }
2020 depth = trpt->o_tt;
2021 if (depth >= maxdepth)
2022 {
2023 #if SYNC
2024 Trail *x;
2025 if (boq != -1)
2026 { x = (Trail *) trpt->ostate;
2027 if (x) x->o_pm |= 4; /* not failing */
2028 }
2029 #endif
2030 truncs++;
2031 if (!warned)
2032 { warned = 1;
2033 printf("error: max search depth too small\n");
2034 }
2035 if (bounded)
2036 uerror("depth limit reached");
2037 continue;
2038 }
2039 #ifndef NOREDUCE
2040 if (boq == -1 && !(trpt->tau&8) && nps == 0)
2041 for (II = now._nr_pr-1; II >= BASE; II -= 1)
2042 {
2043 Pickup: this = pptr(II);
2044 tt = (int) ((P0 *)this)->_p;
2045 ot = (uchar) ((P0 *)this)->_t;
2046 if (trans[ot][tt]->atom & 8)
2047 { t = trans[ot][tt];
2048 if (t->qu[0] != 0)
2049 { Ccheck++;
2050 if (!q_cond(II, t))
2051 continue;
2052 Cholds++;
2053 }
2054 From = To = II;
2055 trpt->tau |= 32; /* preselect marker */
2056 #ifdef DEBUG
2057 printf("%3d: proc %d PreSelected (tau=%d)\n",
2058 depth, II, trpt->tau);
2059 #endif
2060 goto MainLoop;
2061 } }
2062 trpt->tau &= ~32;
2063 #endif
2064 Repeat:
2065 if (trpt->tau&8) /* atomic */
2066 { From = To = (short ) trpt->pr;
2067 nlinks++;
2068 } else
2069 { From = now._nr_pr-1;
2070 To = BASE;
2071 }
2072 MainLoop:
2073 _n = _m = 0;
2074 for (II = From; II >= To; II -= 1)
2075 {
2076 this = (((uchar *)&now)+proc_offset[II]);
2077 tt = (int) ((P0 *)this)->_p;
2078 ot = (uchar) ((P0 *)this)->_t;
2079 #if SYNC
2080 /* no rendezvous with same proc */
2081 if (boq != -1 && trpt->pr == II) continue;
2082 #endif
2083 ntrpt->pr = (uchar) II;
2084 ntrpt->st = tt;
2085 trpt->o_pm &= ~1; /* no move yet */
2086 #ifdef EVENT_TRACE
2087 trpt->o_event = now._event;
2088 #endif
2089 #ifdef HAS_PROVIDED
2090 if (!provided(II, ot, tt, t)) continue;
2091 #endif
2092 #ifdef HAS_UNLESS
2093 E_state = 0;
2094 #endif
2095 for (t = trans[ot][tt]; t; t = t->nxt)
2096 {
2097 #ifdef HAS_UNLESS
2098 if (E_state > 0
2099 && E_state != t->e_trans)
2100 break;
2101 #endif
2102 ntrpt->o_t = t;
2103
2104 oboq = boq;
2105
2106 if (!(_m = do_transit(t, II)))
2107 continue;
2108
2109 trpt->o_pm |= 1; /* we moved */
2110 (trpt+1)->o_m = _m; /* for unsend */
2111 #ifdef PEG
2112 peg[t->forw]++;
2113 #endif
2114 #ifdef CHECK
2115 printf("%3d: proc %d exec %d, ",
2116 depth, II, t->forw);
2117 printf("%d to %d, %s %s %s",
2118 tt, t->st, t->tp,
2119 (t->atom&2)?"atomic":"",
2120 (boq != -1)?"rendez-vous":"");
2121 #ifdef HAS_UNLESS
2122 if (t->e_trans)
2123 printf(" (escapes to state %d)", t->st);
2124 #endif
2125 printf(" %saccepting [tau=%d]\n",
2126 (trpt->o_pm&2)?"":"non-", trpt->tau);
2127 #endif
2128 #ifdef HAS_UNLESS
2129 E_state = t->e_trans;
2130 #if SYNC>0
2131 if (t->e_trans > 0 && (boq != -1 /* || oboq != -1 */))
2132 { fprintf(efd, "error: the use of rendezvous stmnt in the escape clause\n");
2133 fprintf(efd, " of an unless stmnt is not compatible with -DBFS\n");
2134 pan_exit(1);
2135 }
2136 #endif
2137 #endif
2138 if (t->st > 0) ((P0 *)this)->_p = t->st;
2139
2140 /* ptr to pred: */ ntrpt->ostate = (struct H_el *) otrpt;
2141 ntrpt->st = tt;
2142 if (boq == -1 && (t->atom&2)) /* atomic */
2143 ntrpt->tau = 8; /* record for next move */
2144 else
2145 ntrpt->tau = 0;
2146
2147 store_state(ntrpt, (boq != -1 || (t->atom&2)), oboq);
2148 #ifdef EVENT_TRACE
2149 now._event = trpt->o_event;
2150 #endif
2151
2152 /* undo move and continue */
2153 trpt++; /* this is where ovals and ipt are set */
2154 do_reverse(t, II, _m); /* restore now. */
2155 trpt--;
2156 #ifdef CHECK
2157 #if NCORE>1
2158 enter_critical(GLOBAL_LOCK); /* in verbose mode only */
2159 printf("cpu%d: ", core_id);
2160 #endif
2161 printf("%3d: proc %d ", depth, II);
2162 printf("reverses %d, %d to %d,",
2163 t->forw, tt, t->st);
2164 printf(" %s [abit=%d,adepth=%d,",
2165 t->tp, now._a_t, A_depth);
2166 printf("tau=%d,%d]\n",
2167 trpt->tau, (trpt-1)->tau);
2168 #if NCORE>1
2169 leave_critical(GLOBAL_LOCK);
2170 #endif
2171 #endif
2172 reached[ot][t->st] = 1;
2173 reached[ot][tt] = 1;
2174
2175 ((P0 *)this)->_p = tt;
2176 _n |= _m;
2177 } }
2178 #ifndef NOREDUCE
2179 /* preselected - no succ definitely outside stack */
2180 if ((trpt->tau&32) && !(trpt->tau&64))
2181 { From = now._nr_pr-1; To = BASE;
2182 #ifdef DEBUG
2183 cpu_printf("%3d: proc %d UnSelected (_n=%d, tau=%d)\n",
2184 depth, II+1, (int) _n, trpt->tau);
2185 #endif
2186 _n = 0; trpt->tau &= ~32;
2187 if (II >= BASE)
2188 goto Pickup;
2189 goto MainLoop;
2190 }
2191 trpt->tau &= ~(32|64);
2192 #endif
2193 if (_n != 0)
2194 continue;
2195 #ifdef DEBUG
2196 printf("%3d: no move [II=%d, tau=%d, boq=%d, _nr_pr=%d]\n",
2197 depth, II, trpt->tau, boq, now._nr_pr);
2198 #endif
2199 if (boq != -1)
2200 { failedrv++;
2201 x = (Trail *) trpt->ostate; /* pre-rv state */
2202 if (!x) continue; /* root state */
2203 if ((x->tau&8) || (x->tau&32)) /* break atomic or preselect at parent */
2204 { x->o_pm |= 8; /* mark failure */
2205 this = (((uchar *)&now)+proc_offset[otrpt->pr]);
2206 #ifdef VERBOSE
2207 printf("\treset state of %d from %d to %d\n",
2208 otrpt->pr, ((P0 *)this)->_p, otrpt->st);
2209 #endif
2210 ((P0 *)this)->_p = otrpt->st;
2211 unsend(boq); /* retract rv offer */
2212 boq = -1;
2213 push_bfs(x, x->o_tt);
2214 #ifdef VERBOSE
2215 printf("failed rv, repush with %d\n", x->o_pm);
2216 #endif
2217 }
2218 #ifdef VERBOSE
2219 else printf("failed rv, tau at parent: %d\n", x->tau);
2220 #endif
2221 } else if (now._nr_pr > 0)
2222 {
2223 if ((trpt->tau&8)) /* atomic */
2224 { trpt->tau &= ~(1|8); /* 1=timeout, 8=atomic */
2225 #ifdef DEBUG
2226 printf("%3d: atomic step proc %d blocks\n",
2227 depth, II+1);
2228 #endif
2229 goto Repeat;
2230 }
2231
2232 if (!(trpt->tau&1)) /* didn't try timeout yet */
2233 { trpt->tau |= 1;
2234 #ifdef DEBUG
2235 printf("%d: timeout\n", depth);
2236 #endif
2237 goto MainLoop;
2238 }
2239 #ifndef VERI
2240 if (!noends && !a_cycles && !endstate())
2241 uerror("invalid end state");
2242 #endif
2243 } }
2244 }
2245
2246 void
2247 putter(Trail *trpt, int fd)
2248 { long j;
2249
2250 if (!trpt) return;
2251
2252 if (trpt != (Trail *) trpt->ostate)
2253 putter((Trail *) trpt->ostate, fd);
2254
2255 if (trpt->o_t)
2256 { sprintf(snap, "%d:%d:%d\n",
2257 trcnt++, trpt->pr, trpt->o_t->t_id);
2258 j = strlen(snap);
2259 if (write(fd, snap, j) != j)
2260 { printf("pan: error writing %s\n", fnm);
2261 pan_exit(1);
2262 } }
2263 }
2264
2265 void
2266 nuerror(char *str)
2267 { int fd = make_trail();
2268 int j;
2269
2270 if (fd < 0) return;
2271 #ifdef VERI
2272 sprintf(snap, "-2:%d:-2\n", VERI);
2273 write(fd, snap, strlen(snap));
2274 #endif
2275 #ifdef MERGED
2276 sprintf(snap, "-4:-4:-4\n");
2277 write(fd, snap, strlen(snap));
2278 #endif
2279 trcnt = 1;
2280 putter(trpt, fd);
2281 if (ntrpt->o_t)
2282 { sprintf(snap, "%d:%d:%d\n",
2283 trcnt++, ntrpt->pr, ntrpt->o_t->t_id);
2284 j = strlen(snap);
2285 if (write(fd, snap, j) != j)
2286 { printf("pan: error writing %s\n", fnm);
2287 pan_exit(1);
2288 } }
2289 close(fd);
2290 if (errors >= upto && upto != 0)
2291 { wrapup();
2292 }
2293 }
2294 #endif
2295 #if NCORE>1
2296 #if defined(WIN32) || defined(WIN64)
2297 #ifndef _CONSOLE
2298 #define _CONSOLE
2299 #endif
2300 #ifdef WIN64
2301 #undef long
2302 #endif
2303 #include <windows.h>
2304
2305 #ifdef WIN64
2306 #define long long long
2307 #endif
2308 #else
2309 #include <sys/ipc.h>
2310 #include <sys/sem.h>
2311 #include <sys/shm.h>
2312 #endif
2313
2314 /* code common to cygwin/linux and win32/win64: */
2315
2316 #ifdef VERBOSE
2317 #define VVERBOSE (1)
2318 #else
2319 #define VVERBOSE (0)
2320 #endif
2321
2322 /* the following values must be larger than 256 and must fit in an int */
2323 #define QUIT 1024 /* terminate now command */
2324 #define QUERY 512 /* termination status query message */
2325 #define QUERY_F 513 /* query failed, cannot quit */
2326
2327 #define GN_FRAMES (int) (GWQ_SIZE / (double) sizeof(SM_frame))
2328 #define LN_FRAMES (int) (LWQ_SIZE / (double) sizeof(SM_frame))
2329
2330 #ifndef VMAX
2331 #define VMAX VECTORSZ
2332 #endif
2333 #ifndef PMAX
2334 #define PMAX 64
2335 #endif
2336 #ifndef QMAX
2337 #define QMAX 64
2338 #endif
2339
2340 #if VECTORSZ>32000
2341 #define OFFT int
2342 #else
2343 #define OFFT short
2344 #endif
2345
2346 #ifdef SET_SEG_SIZE
2347 /* no longer usefule -- being recomputed for local heap size anyway */
2348 double SEG_SIZE = (((double) SET_SEG_SIZE) * 1048576.);
2349 #else
2350 double SEG_SIZE = (1048576.*1024.); /* 1GB default shared memory pool segments */
2351 #endif
2352
2353 double LWQ_SIZE = 0.; /* initialized in main */
2354
2355 #ifdef SET_WQ_SIZE
2356 #ifdef NGQ
2357 #warning SET_WQ_SIZE applies to global queue -- ignored
2358 double GWQ_SIZE = 0.;
2359 #else
2360 double GWQ_SIZE = (((double) SET_WQ_SIZE) * 1048576.);
2361 /* must match the value in pan_proxy.c, if used */
2362 #endif
2363 #else
2364 #ifdef NGQ
2365 double GWQ_SIZE = 0.;
2366 #else
2367 double GWQ_SIZE = (128.*1048576.); /* 128 MB default queue sizes */
2368 #endif
2369 #endif
2370
2371 /* Crash Detection Parameters */
2372 #ifndef ONESECOND
2373 #define ONESECOND (1<<25)
2374 #endif
2375 #ifndef SHORT_T
2376 #define SHORT_T (0.1)
2377 #endif
2378 #ifndef LONG_T
2379 #define LONG_T (600)
2380 #endif
2381
2382 double OneSecond = (double) (ONESECOND); /* waiting for a free slot -- checks crash */
2383 double TenSeconds = 10. * (ONESECOND); /* waiting for a lock -- check for a crash */
2384
2385 /* Termination Detection Params -- waiting for new state input in Get_Full_Frame */
2386 double Delay = ((double) SHORT_T) * (ONESECOND); /* termination detection trigger */
2387 double OneHour = ((double) LONG_T) * (ONESECOND); /* timeout termination detection */
2388
2389 typedef struct SM_frame SM_frame;
2390 typedef struct SM_results SM_results;
2391 typedef struct sh_Allocater sh_Allocater;
2392
2393 struct SM_frame { /* about 6K per slot */
2394 volatile int m_vsize; /* 0 means free slot */
2395 volatile int m_boq; /* >500 is a control message */
2396 #ifdef FULL_TRAIL
2397 volatile struct Stack_Tree *m_stack; /* ptr to previous state */
2398 #endif
2399 volatile uchar m_tau;
2400 volatile uchar m_o_pm;
2401 volatile int nr_handoffs; /* to compute real_depth */
2402 volatile char m_now [VMAX];
2403 volatile char m_Mask [(VMAX + 7)/8];
2404 volatile OFFT m_p_offset[PMAX];
2405 volatile OFFT m_q_offset[QMAX];
2406 volatile uchar m_p_skip [PMAX];
2407 volatile uchar m_q_skip [QMAX];
2408 #if defined(C_States) && (HAS_TRACK==1) && (HAS_STACK==1)
2409 volatile uchar m_c_stack [StackSize];
2410 #endif
2411 };
2412
2413 int proxy_pid; /* id of proxy if nonzero -- receive half */
2414 int store_proxy_pid;
2415 short remote_party;
2416 int proxy_pid_snd; /* id of proxy if nonzero -- send half */
2417 char o_cmdline[512]; /* to pass options to children */
2418
2419 int iamin[CS_NR+NCORE]; /* non-shared */
2420
2421 #if defined(WIN32) || defined(WIN64)
2422 int tas(volatile LONG *);
2423
2424 HANDLE proxy_handle_snd; /* for Windows Create and Terminate */
2425
2426 struct sh_Allocater { /* shared memory for states */
2427 volatile char *dc_arena; /* to allocate states from */
2428 volatile long pattern; /* to detect overruns */
2429 volatile long dc_size; /* nr of bytes left */
2430 volatile void *dc_start; /* where memory segment starts */
2431 volatile void *dc_id; /* to attach, detach, remove shared memory segments */
2432 volatile sh_Allocater *nxt; /* linked list of pools */
2433 };
2434 DWORD worker_pids[NCORE]; /* root mem of pids of all workers created */
2435 HANDLE worker_handles[NCORE]; /* for windows Create and Terminate */
2436 void * shmid [NR_QS]; /* return value from CreateFileMapping */
2437 void * shmid_M; /* shared mem for state allocation in hashtable */
2438
2439 #ifdef SEP_STATE
2440 void *shmid_X;
2441 #else
2442 void *shmid_S; /* shared bitstate arena or hashtable */
2443 #endif
2444 #else
2445 int tas(volatile int *);
2446
2447 struct sh_Allocater { /* shared memory for states */
2448 volatile char *dc_arena; /* to allocate states from */
2449 volatile long pattern; /* to detect overruns */
2450 volatile long dc_size; /* nr of bytes left */
2451 volatile char *dc_start; /* where memory segment starts */
2452 volatile int dc_id; /* to attach, detach, remove shared memory segments */
2453 volatile sh_Allocater *nxt; /* linked list of pools */
2454 };
2455
2456 int worker_pids[NCORE]; /* root mem of pids of all workers created */
2457 int shmid [NR_QS]; /* return value from shmget */
2458 int nibis = 0; /* set after shared mem has been released */
2459 int shmid_M; /* shared mem for state allocation in hashtable */
2460 #ifdef SEP_STATE
2461 long shmid_X;
2462 #else
2463 int shmid_S; /* shared bitstate arena or hashtable */
2464 volatile sh_Allocater *first_pool; /* of shared state memory */
2465 volatile sh_Allocater *last_pool;
2466 #endif
2467 #endif
2468
2469 struct SM_results { /* for shuttling back final stats */
2470 volatile int m_vsize; /* avoid conflicts with frames */
2471 volatile int m_boq; /* these 2 fields are not written in record_info */
2472 /* probably not all fields really need to be volatile */
2473 volatile double m_memcnt;
2474 volatile double m_nstates;
2475 volatile double m_truncs;
2476 volatile double m_truncs2;
2477 volatile double m_nShadow;
2478 volatile double m_nlinks;
2479 volatile double m_ngrabs;
2480 volatile double m_nlost;
2481 volatile double m_hcmp;
2482 volatile double m_frame_wait;
2483 volatile int m_hmax;
2484 volatile int m_svmax;
2485 volatile int m_smax;
2486 volatile int m_mreached;
2487 volatile int m_errors;
2488 volatile int m_VMAX;
2489 volatile short m_PMAX;
2490 volatile short m_QMAX;
2491 volatile uchar m_R; /* reached info for all proctypes */
2492 };
2493
2494 int core_id = 0; /* internal process nr, to know which q to use */
2495 unsigned long nstates_put = 0; /* statistics */
2496 unsigned long nstates_get = 0;
2497 int query_in_progress = 0; /* termination detection */
2498
2499 double free_wait = 0.; /* waiting for a free frame */
2500 double frame_wait = 0.; /* waiting for a full frame */
2501 double lock_wait = 0.; /* waiting for access to cs */
2502 double glock_wait[3]; /* waiting for access to global lock */
2503
2504 char *sprefix = "rst";
2505 uchar was_interrupted, issued_kill, writing_trail;
2506
2507 static SM_frame cur_Root; /* current root, to be safe with error trails */
2508
2509 SM_frame *m_workq [NR_QS]; /* per cpu work queues + global q */
2510 char *shared_mem[NR_QS]; /* return value from shmat */
2511 #ifdef SEP_HEAP
2512 char *my_heap;
2513 long my_size;
2514 #endif
2515 volatile sh_Allocater *dc_shared; /* assigned at initialization */
2516
2517 static int vmax_seen, pmax_seen, qmax_seen;
2518 static double gq_tries, gq_hasroom, gq_hasnoroom;
2519
2520 volatile int *prfree;
2521 volatile int *prfull;
2522 volatile int *prcnt;
2523 volatile int *prmax;
2524
2525 volatile int *sh_lock; /* mutual exclusion locks - in shared memory */
2526 volatile double *is_alive; /* to detect when processes crash */
2527 volatile int *grfree, *grfull, *grcnt, *grmax; /* access to shared global q */
2528 volatile double *gr_readmiss, *gr_writemiss;
2529 static int lrfree; /* used for temporary recording of slot */
2530 static int dfs_phase2;
2531
2532 void mem_put(int); /* handoff state to other cpu */
2533 void mem_put_acc(void); /* liveness mode */
2534 void mem_get(void); /* get state from work queue */
2535 void sudden_stop(char *);
2536 #if 0
2537 void enter_critical(int);
2538 void leave_critical(int);
2539 #endif
2540
2541 void
2542 record_info(SM_results *r)
2543 { int i;
2544 uchar *ptr;
2545
2546 #ifdef SEP_STATE
2547 if (0)
2548 { cpu_printf("nstates %g nshadow %g -- memory %-6.3f Mb\n",
2549 nstates, nShadow, memcnt/(1048576.));
2550 }
2551 r->m_memcnt = 0;
2552 #else
2553 #ifdef BITSTATE
2554 r->m_memcnt = 0; /* it's shared */
2555 #endif
2556 r->m_memcnt = memcnt;
2557 #endif
2558 if (a_cycles && core_id == 1)
2559 { r->m_nstates = nstates;
2560 r->m_nShadow = nstates;
2561 } else
2562 { r->m_nstates = nstates;
2563 r->m_nShadow = nShadow;
2564 }
2565 r->m_truncs = truncs;
2566 r->m_truncs2 = truncs2;
2567 r->m_nlinks = nlinks;
2568 r->m_ngrabs = ngrabs;
2569 r->m_nlost = nlost;
2570 r->m_hcmp = hcmp;
2571 r->m_frame_wait = frame_wait;
2572 r->m_hmax = hmax;
2573 r->m_svmax = svmax;
2574 r->m_smax = smax;
2575 r->m_mreached = mreached;
2576 r->m_errors = errors;
2577 r->m_VMAX = vmax_seen;
2578 r->m_PMAX = (short) pmax_seen;
2579 r->m_QMAX = (short) qmax_seen;
2580 ptr = (uchar *) &(r->m_R);
2581 for (i = 0; i <= _NP_; i++) /* all proctypes */
2582 { memcpy(ptr, reached[i], NrStates[i]*sizeof(uchar));
2583 ptr += NrStates[i]*sizeof(uchar);
2584 }
2585 if (verbose>1)
2586 { cpu_printf("Put Results nstates %g (sz %d)\n", nstates, ptr - &(r->m_R));
2587 }
2588 }
2589
2590 void snapshot(void);
2591
2592 void
2593 retrieve_info(SM_results *r)
2594 { int i, j;
2595 volatile uchar *ptr;
2596
2597 snapshot(); /* for a final report */
2598
2599 enter_critical(GLOBAL_LOCK);
2600 #ifdef SEP_HEAP
2601 if (verbose)
2602 { printf("cpu%d: local heap-left %ld KB (%d MB)\n",
2603 core_id, (int) (my_size/1024), (int) (my_size/1048576));
2604 }
2605 #endif
2606 if (verbose && core_id == 0)
2607 { printf("qmax: ");
2608 for (i = 0; i < NCORE; i++)
2609 { printf("%d ", prmax[i]);
2610 }
2611 #ifndef NGQ
2612 printf("G: %d", *grmax);
2613 #endif
2614 printf("\n");
2615 }
2616 leave_critical(GLOBAL_LOCK);
2617
2618 memcnt += r->m_memcnt;
2619 nstates += r->m_nstates;
2620 nShadow += r->m_nShadow;
2621 truncs += r->m_truncs;
2622 truncs2 += r->m_truncs2;
2623 nlinks += r->m_nlinks;
2624 ngrabs += r->m_ngrabs;
2625 nlost += r->m_nlost;
2626 hcmp += r->m_hcmp;
2627 /* frame_wait += r->m_frame_wait; */
2628 errors += r->m_errors;
2629
2630 if (hmax < r->m_hmax) hmax = r->m_hmax;
2631 if (svmax < r->m_svmax) svmax = r->m_svmax;
2632 if (smax < r->m_smax) smax = r->m_smax;
2633 if (mreached < r->m_mreached) mreached = r->m_mreached;
2634
2635 if (vmax_seen < r->m_VMAX) vmax_seen = r->m_VMAX;
2636 if (pmax_seen < (int) r->m_PMAX) pmax_seen = (int) r->m_PMAX;
2637 if (qmax_seen < (int) r->m_QMAX) qmax_seen = (int) r->m_QMAX;
2638
2639 ptr = &(r->m_R);
2640 for (i = 0; i <= _NP_; i++) /* all proctypes */
2641 { for (j = 0; j < NrStates[i]; j++)
2642 { if (*(ptr + j) != 0)
2643 { reached[i][j] = 1;
2644 } }
2645 ptr += NrStates[i]*sizeof(uchar);
2646 }
2647 if (verbose>1)
2648 { cpu_printf("Got Results (%d)\n", ptr - &(r->m_R));
2649 snapshot();
2650 }
2651 }
2652
2653 #if !defined(WIN32) && !defined(WIN64)
2654 static void
2655 rm_shared_segments(void)
2656 { int m;
2657 volatile sh_Allocater *nxt_pool;
2658 /*
2659 * mark all shared memory segments for removal
2660 * the actual removes wont happen intil last process dies or detaches
2661 * the shmctl calls can return -1 if not all procs have detached yet
2662 */
2663 for (m = 0; m < NR_QS; m++) /* +1 for global q */
2664 { if (shmid[m] != -1)
2665 { (void) shmctl(shmid[m], IPC_RMID, NULL);
2666 } }
2667 #ifdef SEP_STATE
2668 if (shmid_M != -1)
2669 { (void) shmctl(shmid_M, IPC_RMID, NULL);
2670 }
2671 #else
2672 if (shmid_S != -1)
2673 { (void) shmctl(shmid_S, IPC_RMID, NULL);
2674 }
2675 for (last_pool = first_pool; last_pool != NULL; last_pool = nxt_pool)
2676 { shmid_M = (int) (last_pool->dc_id);
2677 nxt_pool = last_pool->nxt; /* as a pre-caution only */
2678 if (shmid_M != -1)
2679 { (void) shmctl(shmid_M, IPC_RMID, NULL);
2680 } }
2681 #endif
2682 }
2683 #endif
2684
2685 void
2686 sudden_stop(char *s)
2687 { char b[64];
2688 int i;
2689
2690 printf("cpu%d: stop - %s\n", core_id, s);
2691 #if !defined(WIN32) && !defined(WIN64)
2692 if (proxy_pid != 0)
2693 { rm_shared_segments();
2694 }
2695 #endif
2696 if (search_terminated != NULL)
2697 { if (*search_terminated != 0)
2698 { if (verbose)
2699 { printf("cpu%d: termination initiated (%d)\n",
2700 core_id, *search_terminated);
2701 }
2702 } else
2703 { if (verbose)
2704 { printf("cpu%d: initiated termination\n", core_id);
2705 }
2706 *search_terminated |= 8; /* sudden_stop */
2707 }
2708 if (core_id == 0)
2709 { if (((*search_terminated) & 4) /* uerror in one of the cpus */
2710 && !((*search_terminated) & (8|32|128|256))) /* abnormal stop */
2711 { if (errors == 0) errors++; /* we know there is at least 1 */
2712 }
2713 wrapup(); /* incomplete stats, but at least something */
2714 }
2715 return;
2716 } /* else: should rarely happen, take more drastic measures */
2717
2718 if (core_id == 0) /* local root process */
2719 { for (i = 1; i < NCORE; i++) /* not for 0 of course */
2720 {
2721 #if defined(WIN32) || defined(WIN64)
2722 DWORD dwExitCode = 0;
2723 GetExitCodeProcess(worker_handles[i], &dwExitCode);
2724 if (dwExitCode == STILL_ACTIVE)
2725 { TerminateProcess(worker_handles[i], 0);
2726 }
2727 printf("cpu0: terminate %d %d\n",
2728 worker_pids[i], (dwExitCode == STILL_ACTIVE));
2729 #else
2730 sprintf(b, "kill -%d %d", SIGKILL, worker_pids[i]);
2731 system(b); /* if this is a proxy: receive half */
2732 printf("cpu0: %s\n", b);
2733 #endif
2734 }
2735 issued_kill++;
2736 } else
2737 { /* on WIN32/WIN64 -- these merely kills the root process... */
2738 if (was_interrupted == 0)
2739 { sprintf(b, "kill -%d %d", SIGINT, worker_pids[0]);
2740 system(b); /* warn the root process */
2741 printf("cpu%d: %s\n", core_id, b);
2742 issued_kill++;
2743 } }
2744 }
2745
2746 #define iam_alive() is_alive[core_id]++
2747
2748 extern int crash_test(double);
2749 extern void crash_reset(void);
2750
2751 int
2752 someone_crashed(int wait_type)
2753 { static double last_value = 0.0;
2754 static int count = 0;
2755
2756 if (search_terminated == NULL
2757 || *search_terminated != 0)
2758 {
2759 if (!(*search_terminated & (8|32|128|256)))
2760 { if (count++ < 100*NCORE)
2761 { return 0;
2762 } }
2763 return 1;
2764 }
2765 /* check left neighbor only */
2766 if (last_value == is_alive[(core_id + NCORE - 1) % NCORE])
2767 { if (count++ >= 100) /* to avoid unnecessary checks */
2768 { return 1;
2769 }
2770 return 0;
2771 }
2772 last_value = is_alive[(core_id + NCORE - 1) % NCORE];
2773 count = 0;
2774 crash_reset();
2775 return 0;
2776 }
2777
2778 void
2779 sleep_report(void)
2780 {
2781 enter_critical(GLOBAL_LOCK);
2782 if (verbose)
2783 {
2784 #ifdef NGQ
2785 printf("cpu%d: locks: global %g\tother %g\t",
2786 core_id, glock_wait[0], lock_wait - glock_wait[0]);
2787 #else
2788 printf("cpu%d: locks: GL %g, RQ %g, WQ %g, HT %g\t",
2789 core_id, glock_wait[0], glock_wait[1], glock_wait[2],
2790 lock_wait - glock_wait[0] - glock_wait[1] - glock_wait[2]);
2791 #endif
2792 printf("waits: states %g slots %g\n", frame_wait, free_wait);
2793 #ifndef NGQ
2794 printf("cpu%d: gq [tries %g, room %g, noroom %g]\n", core_id, gq_tries, gq_hasroom, gq_hasnoroom);
2795 if (core_id == 0 && (*gr_readmiss >= 1.0 || *gr_readmiss >= 1.0 || *grcnt != 0))
2796 printf("cpu0: gq [readmiss: %g, writemiss: %g cnt %d]\n", *gr_readmiss, *gr_writemiss, *grcnt);
2797 #endif
2798 }
2799 if (free_wait > 1000000.)
2800 #ifndef NGQ
2801 if (!a_cycles)
2802 { printf("hint: this search may be faster with a larger work-queue\n");
2803 printf(" (-DSET_WQ_SIZE=N with N>%g), and/or with -DUSE_DISK\n",
2804 GWQ_SIZE/sizeof(SM_frame));
2805 printf(" or with a larger value for -zN (N>%d)\n", z_handoff);
2806 #else
2807 { printf("hint: this search may be faster if compiled without -DNGQ, with -DUSE_DISK, ");
2808 printf("or with a larger -zN (N>%d)\n", z_handoff);
2809 #endif
2810 }
2811 leave_critical(GLOBAL_LOCK);
2812 }
2813
2814 #ifndef MAX_DSK_FILE
2815 #define MAX_DSK_FILE 1000000 /* default is max 1M states per file */
2816 #endif
2817
2818 void
2819 multi_usage(FILE *fd)
2820 { static int warned = 0;
2821 if (warned > 0) { return; } else { warned++; }
2822 fprintf(fd, "\n");
2823 fprintf(fd, "Defining multi-core mode:\n\n");
2824 fprintf(fd, " -DDUAL_CORE --> same as -DNCORE=2\n");
2825 fprintf(fd, " -DQUAD_CORE --> same as -DNCORE=4\n");
2826 fprintf(fd, " -DNCORE=N --> enables multi_core verification if N>1\n");
2827 fprintf(fd, "\n");
2828 fprintf(fd, "Additional directives supported in multi-core mode:\n\n");
2829 fprintf(fd, " -DSEP_STATE --> forces separate statespaces instead of a single shared state space\n");
2830 fprintf(fd, " -DNUSE_DISK --> use disk for storing states when a work queue overflows\n");
2831 fprintf(fd, " -DMAX_DSK_FILE --> max nr of states per diskfile (%d)\n", MAX_DSK_FILE);
2832 fprintf(fd, " -DFULL_TRAIL --> support full error trails (increases memory use)\n");
2833 fprintf(fd, "\n");
2834 fprintf(fd, "More advanced use (should rarely need changing):\n\n");
2835 fprintf(fd, " To change the nr of states that can be stored in the global queue\n");
2836 fprintf(fd, " (lower numbers allow for more states to be stored, prefer multiples of 8):\n");
2837 fprintf(fd, " -DVMAX=N --> upperbound on statevector for handoffs (N=%d)\n", VMAX);
2838 fprintf(fd, " -DPMAX=N --> upperbound on nr of procs (default: N=%d)\n", PMAX);
2839 fprintf(fd, " -DQMAX=N --> upperbound on nr of channels (default: N=%d)\n", QMAX);
2840 fprintf(fd, "\n");
2841 fprintf(fd, " To set the total amount of memory reserved for the global workqueue:\n");
2842 fprintf(fd, " -DSET_WQ_SIZE=N --> default: N=128 (defined in MBytes)\n\n");
2843 fprintf(fd, " To force the use of a single global heap, instead of separate heaps:\n");
2844 fprintf(fd, " -DGLOB_HEAP\n");
2845 fprintf(fd, "\n");
2846 fprintf(fd, " To define a fct to initialize data before spawning processes (use quotes):\n");
2847 fprintf(fd, " \"-DC_INIT=fct()\"\n");
2848 fprintf(fd, "\n");
2849 fprintf(fd, " Timer settings for termination and crash detection:\n");
2850 fprintf(fd, " -DSHORT_T=N --> timeout for termination detection trigger (N=%g)\n", (double) SHORT_T);
2851 fprintf(fd, " -DLONG_T=N --> timeout for giving up on termination detection (N=%g)\n", (double) LONG_T);
2852 fprintf(fd, " -DONESECOND --> (1<<29) --> timeout waiting for a free slot -- to check for crash\n");
2853 fprintf(fd, " -DT_ALERT --> collect stats on crash alert timeouts\n\n");
2854 fprintf(fd, "Help with Linux/Windows/Cygwin configuration for multi-core:\n");
2855 fprintf(fd, " http://spinroot.com/spin/multicore/V5_Readme.html\n");
2856 fprintf(fd, "\n");
2857 }
2858 #if NCORE>1 && defined(FULL_TRAIL)
2859 typedef struct Stack_Tree {
2860 uchar pr; /* process that made transition */
2861 T_ID t_id; /* id of transition */
2862 volatile struct Stack_Tree *prv; /* backward link towards root */
2863 } Stack_Tree;
2864
2865 struct H_el *grab_shared(int);
2866 volatile Stack_Tree **stack_last; /* in shared memory */
2867 char *stack_cache = NULL; /* local */
2868 int nr_cached = 0; /* local */
2869
2870 #ifndef CACHE_NR
2871 #define CACHE_NR 1024
2872 #endif
2873
2874 volatile Stack_Tree *
2875 stack_prefetch(void)
2876 { volatile Stack_Tree *st;
2877
2878 if (nr_cached == 0)
2879 { stack_cache = (char *) grab_shared(CACHE_NR * sizeof(Stack_Tree));
2880 nr_cached = CACHE_NR;
2881 }
2882 st = (volatile Stack_Tree *) stack_cache;
2883 stack_cache += sizeof(Stack_Tree);
2884 nr_cached--;
2885 return st;
2886 }
2887
2888 void
2889 Push_Stack_Tree(short II, T_ID t_id)
2890 { volatile Stack_Tree *st;
2891
2892 st = (volatile Stack_Tree *) stack_prefetch();
2893 st->pr = II;
2894 st->t_id = t_id;
2895 st->prv = (Stack_Tree *) stack_last[core_id];
2896 stack_last[core_id] = st;
2897 }
2898
2899 void
2900 Pop_Stack_Tree(void)
2901 { volatile Stack_Tree *cf = stack_last[core_id];
2902
2903 if (cf)
2904 { stack_last[core_id] = cf->prv;
2905 } else if (nr_handoffs * z_handoff + depth > 0)
2906 { printf("cpu%d: error pop_stack_tree (depth %d)\n",
2907 core_id, depth);
2908 }
2909 }
2910 #endif
2911
2912 void
2913 e_critical(int which)
2914 { double cnt_start;
2915
2916 if (readtrail || iamin[which] > 0)
2917 { if (!readtrail && verbose)
2918 { printf("cpu%d: Double Lock on %d (now %d)\n",
2919 core_id, which, iamin[which]+1);
2920 fflush(stdout);
2921 }
2922 iamin[which]++; /* local variable */
2923 return;
2924 }
2925
2926 cnt_start = lock_wait;
2927
2928 while (sh_lock != NULL) /* as long as we have shared memory */
2929 { int r = tas(&sh_lock[which]);
2930 if (r == 0)
2931 { iamin[which] = 1;
2932 return; /* locked */
2933 }
2934
2935 lock_wait++;
2936 #ifndef NGQ
2937 if (which < 3) { glock_wait[which]++; }
2938 #else
2939 if (which == 0) { glock_wait[which]++; }
2940 #endif
2941 iam_alive();
2942
2943 if (lock_wait - cnt_start > TenSeconds)
2944 { printf("cpu%d: lock timeout on %d\n", core_id, which);
2945 cnt_start = lock_wait;
2946 if (someone_crashed(1))
2947 { sudden_stop("lock timeout");
2948 pan_exit(1);
2949 } } }
2950 }
2951
2952 void
2953 x_critical(int which)
2954 {
2955 if (iamin[which] != 1)
2956 { if (iamin[which] > 1)
2957 { iamin[which]--; /* this is thread-local - no races on this one */
2958 if (!readtrail && verbose)
2959 { printf("cpu%d: Partial Unlock on %d (%d more needed)\n",
2960 core_id, which, iamin[which]);
2961 fflush(stdout);
2962 }
2963 return;
2964 } else /* iamin[which] <= 0 */
2965 { if (!readtrail)
2966 { printf("cpu%d: Invalid Unlock iamin[%d] = %d\n",
2967 core_id, which, iamin[which]);
2968 fflush(stdout);
2969 }
2970 return;
2971 } }
2972
2973 if (sh_lock != NULL)
2974 { iamin[which] = 0;
2975 sh_lock[which] = 0; /* unlock */
2976 }
2977 }
2978
2979 void
2980 #if defined(WIN32) || defined(WIN64)
2981 start_proxy(char *s, DWORD r_pid)
2982 #else
2983 start_proxy(char *s, int r_pid)
2984 #endif
2985 { char Q_arg[16], Z_arg[16], Y_arg[16];
2986 char *args[32], *ptr;
2987 int argcnt = 0;
2988
2989 sprintf(Q_arg, "-Q%d", getpid());
2990 sprintf(Y_arg, "-Y%d", r_pid);
2991 sprintf(Z_arg, "-Z%d", proxy_pid /* core_id */);
2992
2993 args[argcnt++] = "proxy";
2994 args[argcnt++] = s; /* -r or -s */
2995 args[argcnt++] = Q_arg;
2996 args[argcnt++] = Z_arg;
2997 args[argcnt++] = Y_arg;
2998
2999 if (strlen(o_cmdline) > 0)
3000 { ptr = o_cmdline; /* assume args separated by spaces */
3001 do { args[argcnt++] = ptr++;
3002 if ((ptr = strchr(ptr, ' ')) != NULL)
3003 { while (*ptr == ' ')
3004 { *ptr++ = '\0';
3005 }
3006 } else
3007 { break;
3008 }
3009 } while (argcnt < 31);
3010 }
3011 args[argcnt] = NULL;
3012 #if defined(WIN32) || defined(WIN64)
3013 execvp("pan_proxy", args); /* no return */
3014 #else
3015 execvp("./pan_proxy", args); /* no return */
3016 #endif
3017 Uerror("pan_proxy exec failed");
3018 }
3019 /*** end of common code fragment ***/
3020
3021 #if !defined(WIN32) && !defined(WIN64)
3022 void
3023 init_shm(void) /* initialize shared work-queues - linux/cygwin */
3024 { key_t key[NR_QS];
3025 int n, m;
3026 int must_exit = 0;
3027
3028 if (core_id == 0 && verbose)
3029 { printf("cpu0: step 3: allocate shared workqueues %g MB\n",
3030 ((double) NCORE * LWQ_SIZE + GWQ_SIZE) / (1048576.) );
3031 }
3032 for (m = 0; m < NR_QS; m++) /* last q is the global q */
3033 { double qsize = (m == NCORE) ? GWQ_SIZE : LWQ_SIZE;
3034 key[m] = ftok(PanSource, m+1);
3035 if (key[m] == -1)
3036 { perror("ftok shared queues"); must_exit = 1; break;
3037 }
3038
3039 if (core_id == 0) /* root creates */
3040 { /* check for stale copy */
3041 shmid[m] = shmget(key[m], (size_t) qsize, 0600);
3042 if (shmid[m] != -1) /* yes there is one; remove it */
3043 { printf("cpu0: removing stale q%d, status: %d\n",
3044 m, shmctl(shmid[m], IPC_RMID, NULL));
3045 }
3046 shmid[m] = shmget(key[m], (size_t) qsize, 0600|IPC_CREAT|IPC_EXCL);
3047 memcnt += qsize;
3048 } else /* workers attach */
3049 { shmid[m] = shmget(key[m], (size_t) qsize, 0600);
3050 /* never called, since we create shm *before* we fork */
3051 }
3052 if (shmid[m] == -1)
3053 { perror("shmget shared queues"); must_exit = 1; break;
3054 }
3055
3056 shared_mem[m] = (char *) shmat(shmid[m], (void *) 0, 0); /* attach */
3057 if (shared_mem[m] == (char *) -1)
3058 { fprintf(stderr, "error: cannot attach shared wq %d (%d Mb)\n",
3059 m+1, (int) (qsize/(1048576.)));
3060 perror("shmat shared queues"); must_exit = 1; break;
3061 }
3062
3063 m_workq[m] = (SM_frame *) shared_mem[m];
3064 if (core_id == 0)
3065 { int nframes = (m == NCORE) ? GN_FRAMES : LN_FRAMES;
3066 for (n = 0; n < nframes; n++)
3067 { m_workq[m][n].m_vsize = 0;
3068 m_workq[m][n].m_boq = 0;
3069 } } }
3070
3071 if (must_exit)
3072 { rm_shared_segments();
3073 fprintf(stderr, "pan: check './pan --' for usage details\n");
3074 pan_exit(1); /* calls cleanup_shm */
3075 }
3076 }
3077
3078 static uchar *
3079 prep_shmid_S(size_t n) /* either sets SS or H_tab, linux/cygwin */
3080 { char *rval;
3081 #ifndef SEP_STATE
3082 key_t key;
3083
3084 if (verbose && core_id == 0)
3085 {
3086 #ifdef BITSTATE
3087 printf("cpu0: step 1: allocate shared bitstate %g Mb\n",
3088 (double) n / (1048576.));
3089 #else
3090 printf("cpu0: step 1: allocate shared hastable %g Mb\n",
3091 (double) n / (1048576.));
3092 #endif
3093 }
3094 #ifdef MEMLIM
3095 if (memcnt + (double) n > memlim)
3096 { printf("cpu0: S %8g + %d Kb exceeds memory limit of %8g Mb\n",
3097 memcnt/1024., n/1024, memlim/(1048576.));
3098 printf("cpu0: insufficient memory -- aborting\n");
3099 exit(1);
3100 }
3101 #endif
3102
3103 key = ftok(PanSource, NCORE+2); /* different from queues */
3104 if (key == -1)
3105 { perror("ftok shared bitstate or hashtable");
3106 fprintf(stderr, "pan: check './pan --' for usage details\n");
3107 pan_exit(1);
3108 }
3109
3110 if (core_id == 0) /* root */
3111 { shmid_S = shmget(key, n, 0600);
3112 if (shmid_S != -1)
3113 { printf("cpu0: removing stale segment, status: %d\n",
3114 shmctl(shmid_S, IPC_RMID, NULL));
3115 }
3116 shmid_S = shmget(key, n, 0600 | IPC_CREAT | IPC_EXCL);
3117 memcnt += (double) n;
3118 } else /* worker */
3119 { shmid_S = shmget(key, n, 0600);
3120 }
3121 if (shmid_S == -1)
3122 { perror("shmget shared bitstate or hashtable too large?");
3123 fprintf(stderr, "pan: check './pan --' for usage details\n");
3124 pan_exit(1);
3125 }
3126
3127 rval = (char *) shmat(shmid_S, (void *) 0, 0); /* attach */
3128 if ((char *) rval == (char *) -1)
3129 { perror("shmat shared bitstate or hashtable");
3130 fprintf(stderr, "pan: check './pan --' for usage details\n");
3131 pan_exit(1);
3132 }
3133 #else
3134 rval = (char *) emalloc(n);
3135 #endif
3136 return (uchar *) rval;
3137 }
3138
3139 #define TRY_AGAIN 1
3140 #define NOT_AGAIN 0
3141
3142 static char shm_prep_result;
3143
3144 static uchar *
3145 prep_state_mem(size_t n) /* sets memory arena for states linux/cygwin */
3146 { char *rval;
3147 key_t key;
3148 static int cnt = 3; /* start larger than earlier ftok calls */
3149
3150 shm_prep_result = NOT_AGAIN; /* default */
3151 if (verbose && core_id == 0)
3152 { printf("cpu0: step 2+: pre-allocate memory arena %d of %6.2g Mb\n",
3153 cnt-3, (double) n / (1048576.));
3154 }
3155 #ifdef MEMLIM
3156 if (memcnt + (double) n > memlim)
3157 { printf("cpu0: error: M %.0f + %.0f Kb exceeds memory limit of %.0f Mb\n",
3158 memcnt/1024.0, (double) n/1024.0, memlim/(1048576.));
3159 return NULL;
3160 }
3161 #endif
3162
3163 key = ftok(PanSource, NCORE+cnt); cnt++;
3164 if (key == -1)
3165 { perror("ftok T");
3166 printf("pan: check './pan --' for usage details\n");
3167 pan_exit(1);
3168 }
3169
3170 if (core_id == 0)
3171 { shmid_M = shmget(key, n, 0600);
3172 if (shmid_M != -1)
3173 { printf("cpu0: removing stale memory segment %d, status: %d\n",
3174 cnt-3, shmctl(shmid_M, IPC_RMID, NULL));
3175 }
3176 shmid_M = shmget(key, n, 0600 | IPC_CREAT | IPC_EXCL);
3177 /* memcnt += (double) n; -- only amount actually used is counted */
3178 } else
3179 { shmid_M = shmget(key, n, 0600);
3180
3181 }
3182 if (shmid_M == -1)
3183 { if (verbose)
3184 { printf("error: failed to get pool of shared memory %d of %.0f Mb\n",
3185 cnt-3, ((double)n)/(1048576.));
3186 perror("state mem");
3187 printf("pan: check './pan --' for usage details\n");
3188 }
3189 shm_prep_result = TRY_AGAIN;
3190 return NULL;
3191 }
3192 rval = (char *) shmat(shmid_M, (void *) 0, 0); /* attach */
3193
3194 if ((char *) rval == (char *) -1)
3195 { printf("cpu%d error: failed to attach pool of shared memory %d of %.0f Mb\n",
3196 core_id, cnt-3, ((double)n)/(1048576.));
3197 perror("state mem");
3198 return NULL;
3199 }
3200 return (uchar *) rval;
3201 }
3202
3203 void
3204 init_HT(unsigned long n) /* cygwin/linux version */
3205 { volatile char *x;
3206 double get_mem;
3207 #ifndef SEP_STATE
3208 volatile char *dc_mem_start;
3209 double need_mem, got_mem = 0.;
3210 #endif
3211
3212 #ifdef SEP_STATE
3213 #ifndef MEMLIM
3214 if (verbose)
3215 { printf("cpu0: steps 0,1: no -DMEMLIM set\n");
3216 }
3217 #else
3218 if (verbose)
3219 { printf("cpu0: steps 0,1: -DMEMLIM=%d Mb - (hashtable %g Mb + workqueues %g Mb)\n",
3220 MEMLIM, ((double)n/(1048576.)), (((double) NCORE * LWQ_SIZE) + GWQ_SIZE) /(1048576.) );
3221 }
3222 #endif
3223 get_mem = NCORE * sizeof(double) + (1 + CS_NR) * sizeof(void *) + 4*sizeof(void *) + 2*sizeof(double);
3224 /* NCORE * is_alive + search_terminated + CS_NR * sh_lock + 6 gr vars */
3225 get_mem += 4 * NCORE * sizeof(void *); /* prfree, prfull, prcnt, prmax */
3226 #ifdef FULL_TRAIL
3227 get_mem += (NCORE) * sizeof(Stack_Tree *); /* NCORE * stack_last */
3228 #endif
3229 x = (volatile char *) prep_state_mem((size_t) get_mem); /* work queues and basic structs */
3230 shmid_X = (long) x;
3231 if (x == NULL)
3232 { printf("cpu0: could not allocate shared memory, see ./pan --\n");
3233 exit(1);
3234 }
3235 search_terminated = (volatile unsigned int *) x; /* comes first */
3236 x += sizeof(void *); /* maintain alignment */
3237
3238 is_alive = (volatile double *) x;
3239 x += NCORE * sizeof(double);
3240
3241 sh_lock = (volatile int *) x;
3242 x += CS_NR * sizeof(void *);
3243
3244 grfree = (volatile int *) x;
3245 x += sizeof(void *);
3246 grfull = (volatile int *) x;
3247 x += sizeof(void *);
3248 grcnt = (volatile int *) x;
3249 x += sizeof(void *);
3250 grmax = (volatile int *) x;
3251 x += sizeof(void *);
3252 prfree = (volatile int *) x;
3253 x += NCORE * sizeof(void *);
3254 prfull = (volatile int *) x;
3255 x += NCORE * sizeof(void *);
3256 prcnt = (volatile int *) x;
3257 x += NCORE * sizeof(void *);
3258 prmax = (volatile int *) x;
3259 x += NCORE * sizeof(void *);
3260 gr_readmiss = (volatile double *) x;
3261 x += sizeof(double);
3262 gr_writemiss = (volatile double *) x;
3263 x += sizeof(double);
3264
3265 #ifdef FULL_TRAIL
3266 stack_last = (volatile Stack_Tree **) x;
3267 x += NCORE * sizeof(Stack_Tree *);
3268 #endif
3269
3270 #ifndef BITSTATE
3271 H_tab = (struct H_el **) emalloc(n);
3272 #endif
3273 #else
3274 #ifndef MEMLIM
3275 #warning MEMLIM not set
3276 #define MEMLIM (2048)
3277 #endif
3278
3279 if (core_id == 0 && verbose)
3280 { printf("cpu0: step 0: -DMEMLIM=%d Mb minus hashtable+workqs (%g + %g Mb) leaves %g Mb\n",
3281 MEMLIM, ((double)n/(1048576.)), (NCORE * LWQ_SIZE + GWQ_SIZE)/(1048576.),
3282 (memlim - memcnt - (double) n - (NCORE * LWQ_SIZE + GWQ_SIZE))/(1048576.));
3283 }
3284 #ifndef BITSTATE
3285 H_tab = (struct H_el **) prep_shmid_S((size_t) n); /* hash_table */
3286 #endif
3287 need_mem = memlim - memcnt - ((double) NCORE * LWQ_SIZE) - GWQ_SIZE;
3288 if (need_mem <= 0.)
3289 { Uerror("internal error -- shared state memory");
3290 }
3291
3292 if (core_id == 0 && verbose)
3293 { printf("cpu0: step 2: pre-allocate shared state memory %g Mb\n",
3294 need_mem/(1048576.));
3295 }
3296 #ifdef SEP_HEAP
3297 SEG_SIZE = need_mem / NCORE;
3298 if (verbose && core_id == 0)
3299 { printf("cpu0: setting segsize to %6g MB\n",
3300 SEG_SIZE/(1048576.));
3301 }
3302 #if defined(CYGWIN) || defined(__CYGWIN__)
3303 if (SEG_SIZE > 512.*1024.*1024.)
3304 { printf("warning: reducing SEG_SIZE of %g MB to 512MB (exceeds max for Cygwin)\n",
3305 SEG_SIZE/(1024.*1024.));
3306 SEG_SIZE = 512.*1024.*1024.;
3307 }
3308 #endif
3309 #endif
3310 mem_reserved = need_mem;
3311 while (need_mem > 1024.)
3312 { get_mem = need_mem;
3313 shm_more:
3314 if (get_mem > (double) SEG_SIZE)
3315 { get_mem = (double) SEG_SIZE;
3316 }
3317 if (get_mem <= 0.0) break;
3318
3319 /* for allocating states: */
3320 x = dc_mem_start = (volatile char *) prep_state_mem((size_t) get_mem);
3321 if (x == NULL)
3322 { if (shm_prep_result == NOT_AGAIN
3323 || first_pool != NULL
3324 || SEG_SIZE < (16. * 1048576.))
3325 { break;
3326 }
3327 SEG_SIZE /= 2.;
3328 if (verbose)
3329 { printf("pan: lowered segsize to 0.000000\n", SEG_SIZE);
3330 }
3331 if (SEG_SIZE >= 1024.)
3332 { goto shm_more;
3333 }
3334 break;
3335 }
3336
3337 need_mem -= get_mem;
3338 got_mem += get_mem;
3339 if (first_pool == NULL)
3340 { search_terminated = (volatile unsigned int *) x; /* comes first */
3341 x += sizeof(void *); /* maintain alignment */
3342
3343 is_alive = (volatile double *) x;
3344 x += NCORE * sizeof(double);
3345
3346 sh_lock = (volatile int *) x;
3347 x += CS_NR * sizeof(void *);
3348
3349 grfree = (volatile int *) x;
3350 x += sizeof(void *);
3351 grfull = (volatile int *) x;
3352 x += sizeof(void *);
3353 grcnt = (volatile int *) x;
3354 x += sizeof(void *);
3355 grmax = (volatile int *) x;
3356 x += sizeof(void *);
3357 prfree = (volatile int *) x;
3358 x += NCORE * sizeof(void *);
3359 prfull = (volatile int *) x;
3360 x += NCORE * sizeof(void *);
3361 prcnt = (volatile int *) x;
3362 x += NCORE * sizeof(void *);
3363 prmax = (volatile int *) x;
3364 x += NCORE * sizeof(void *);
3365 gr_readmiss = (volatile double *) x;
3366 x += sizeof(double);
3367 gr_writemiss = (volatile double *) x;
3368 x += sizeof(double);
3369 #ifdef FULL_TRAIL
3370 stack_last = (volatile Stack_Tree **) x;
3371 x += NCORE * sizeof(Stack_Tree *);
3372 #endif
3373 if (((long)x)&(sizeof(void *)-1)) /* 64-bit word alignment */
3374 { x += sizeof(void *)-(((long)x)&(sizeof(void *)-1));
3375 }
3376
3377 #ifdef COLLAPSE
3378 ncomps = (unsigned long *) x;
3379 x += (256+2) * sizeof(unsigned long);
3380 #endif
3381 }
3382
3383 dc_shared = (sh_Allocater *) x; /* must be in shared memory */
3384 x += sizeof(sh_Allocater);
3385
3386 if (core_id == 0) /* root only */
3387 { dc_shared->dc_id = shmid_M;
3388 dc_shared->dc_start = dc_mem_start;
3389 dc_shared->dc_arena = x;
3390 dc_shared->pattern = 1234567; /* protection */
3391 dc_shared->dc_size = (long) get_mem - (long) (x - dc_mem_start);
3392 dc_shared->nxt = (long) 0;
3393
3394 if (last_pool == NULL)
3395 { first_pool = last_pool = dc_shared;
3396 } else
3397 { last_pool->nxt = dc_shared;
3398 last_pool = dc_shared;
3399 }
3400 } else if (first_pool == NULL)
3401 { first_pool = dc_shared;
3402 } }
3403
3404 if (need_mem > 1024.)
3405 { printf("cpu0: could allocate only %g Mb of shared memory (wanted %g more)\n",
3406 got_mem/(1048576.), need_mem/(1048576.));
3407 }
3408
3409 if (!first_pool)
3410 { printf("cpu0: insufficient memory -- aborting.\n");
3411 exit(1);
3412 }
3413 /* we are still single-threaded at this point, with core_id 0 */
3414 dc_shared = first_pool;
3415
3416 #endif
3417 }
3418
3419 /* Test and Set assembly code */
3420
3421 #if defined(i386) || defined(__i386__) || defined(__x86_64__)
3422 int
3423 tas(volatile int *s) /* tested */
3424 { int r;
3425 __asm__ __volatile__(
3426 "xchgl %0, %1 \n\t"
3427 : "=r"(r), "=m"(*s)
3428 : "0"(1), "m"(*s)
3429 : "memory");
3430
3431 return r;
3432 }
3433 #elif defined(__arm__)
3434 int
3435 tas(volatile int *s) /* not tested */
3436 { int r = 1;
3437 __asm__ __volatile__(
3438 "swpb %0, %0, [%3] \n"
3439 : "=r"(r), "=m"(*s)
3440 : "0"(r), "r"(s));
3441
3442 return r;
3443 }
3444 #elif defined(sparc) || defined(__sparc__)
3445 int
3446 tas(volatile int *s) /* not tested */
3447 { int r = 1;
3448 __asm__ __volatile__(
3449 " ldstub [%2], %0 \n"
3450 : "=r"(r), "=m"(*s)
3451 : "r"(s));
3452
3453 return r;
3454 }
3455 #elif defined(ia64) || defined(__ia64__)
3456 /* Intel Itanium */
3457 int
3458 tas(volatile int *s) /* tested */
3459 { long int r;
3460 __asm__ __volatile__(
3461 " xchg4 %0=%1,%2 \n"
3462 : "=r"(r), "+m"(*s)
3463 : "r"(1)
3464 : "memory");
3465 return (int) r;
3466 }
3467 #else
3468 #error missing definition of test and set operation for this platform
3469 #endif
3470
3471 void
3472 cleanup_shm(int val)
3473 { volatile sh_Allocater *nxt_pool;
3474 unsigned long cnt = 0;
3475 int m;
3476
3477 if (nibis != 0)
3478 { printf("cpu%d: Redundant call to cleanup_shm(%d)\n", core_id, val);
3479 return;
3480 } else
3481 { nibis = 1;
3482 }
3483 if (search_terminated != NULL)
3484 { *search_terminated |= 16; /* cleanup_shm */
3485 }
3486
3487 for (m = 0; m < NR_QS; m++)
3488 { if (shmdt((void *) shared_mem[m]) > 0)
3489 { perror("shmdt detaching from shared queues");
3490 } }
3491
3492 #ifdef SEP_STATE
3493 if (shmdt((void *) shmid_X) != 0)
3494 { perror("shmdt detaching from shared state memory");
3495 }
3496 #else
3497 #ifdef BITSTATE
3498 if (SS > 0 && shmdt((void *) SS) != 0)
3499 { if (verbose)
3500 { perror("shmdt detaching from shared bitstate arena");
3501 } }
3502 #else
3503 if (core_id == 0)
3504 { /* before detaching: */
3505 for (nxt_pool = dc_shared; nxt_pool != NULL; nxt_pool = nxt_pool->nxt)
3506 { cnt += nxt_pool->dc_size;
3507 }
3508 if (verbose)
3509 { printf("cpu0: done, %ld Mb of shared state memory left\n",
3510 cnt / (long)(1048576));
3511 } }
3512
3513 if (shmdt((void *) H_tab) != 0)
3514 { perror("shmdt detaching from shared hashtable");
3515 }
3516
3517 for (last_pool = first_pool; last_pool != NULL; last_pool = nxt_pool)
3518 { nxt_pool = last_pool->nxt;
3519 if (shmdt((void *) last_pool->dc_start) != 0)
3520 { perror("shmdt detaching from shared state memory");
3521 } }
3522 first_pool = last_pool = NULL; /* precaution */
3523 #endif
3524 #endif
3525 /* detached from shared memory - so cannot use cpu_printf */
3526 if (verbose)
3527 { printf("cpu%d: done -- got %d states from queue\n",
3528 core_id, nstates_get);
3529 }
3530 }
3531
3532 extern void give_up(int);
3533 extern void Read_Queue(int);
3534
3535 void
3536 mem_get(void)
3537 { SM_frame *f;
3538 int is_parent;
3539
3540 #if defined(MA) && !defined(SEP_STATE)
3541 #error MA without SEP_STATE is not supported with multi-core
3542 #endif
3543 #ifdef BFS
3544 #error BFS is not supported with multi-core
3545 #endif
3546 #ifdef SC
3547 #error SC is not supported with multi-core
3548 #endif
3549 init_shm(); /* we are single threaded when this starts */
3550
3551 if (core_id == 0 && verbose)
3552 { printf("cpu0: step 4: calling fork()\n");
3553 }
3554 fflush(stdout);
3555
3556 /* if NCORE > 1 the child or the parent should fork N-1 more times
3557 * the parent is the only process with core_id == 0 and is_parent > 0
3558 * the workers have is_parent = 0 and core_id = 1..NCORE-1
3559 */
3560 if (core_id == 0)
3561 { worker_pids[0] = getpid(); /* for completeness */
3562 while (++core_id < NCORE) /* first worker sees core_id = 1 */
3563 { is_parent = fork();
3564 if (is_parent == -1)
3565 { Uerror("fork failed");
3566 }
3567 if (is_parent == 0) /* this is a worker process */
3568 { if (proxy_pid == core_id) /* always non-zero */
3569 { start_proxy("-r", 0); /* no return */
3570 }
3571 goto adapt; /* root process continues spawning */
3572 }
3573 worker_pids[core_id] = is_parent;
3574 }
3575 /* note that core_id is now NCORE */
3576 if (proxy_pid > 0 && proxy_pid < NCORE)
3577 { proxy_pid_snd = fork();
3578 if (proxy_pid_snd == -1)
3579 { Uerror("proxy fork failed");
3580 }
3581 if (proxy_pid_snd == 0)
3582 { start_proxy("-s", worker_pids[proxy_pid]); /* no return */
3583 } } /* else continue */
3584 if (is_parent > 0)
3585 { core_id = 0; /* reset core_id for root process */
3586 }
3587 } else /* worker */
3588 { static char db0[16]; /* good for up to 10^6 cores */
3589 static char db1[16];
3590 adapt: tprefix = db0; sprefix = db1;
3591 sprintf(tprefix, "cpu%d_trail", core_id);
3592 sprintf(sprefix, "cpu%d_rst", core_id);
3593 memcnt = 0; /* count only additionally allocated memory */
3594 }
3595 signal(SIGINT, give_up);
3596
3597 if (proxy_pid == 0) /* not in a cluster setup, pan_proxy must attach */
3598 { rm_shared_segments(); /* mark all shared segments for removal on exit */
3599 }
3600 if (verbose)
3601 { cpu_printf("starting core_id %d -- pid %d\n", core_id, getpid());
3602 }
3603 #if defined(SEP_HEAP) && !defined(SEP_STATE)
3604 { int i;
3605 volatile sh_Allocater *ptr;
3606 ptr = first_pool;
3607 for (i = 0; i < NCORE && ptr != NULL; i++)
3608 { if (i == core_id)
3609 { my_heap = (char *) ptr->dc_arena;
3610 my_size = (long) ptr->dc_size;
3611 if (verbose)
3612 cpu_printf("local heap %ld MB\n", my_size/(1048576));
3613 break;
3614 }
3615 ptr = ptr->nxt; /* local */
3616 }
3617 if (my_heap == NULL)
3618 { printf("cpu%d: no local heap\n", core_id);
3619 pan_exit(1);
3620 } /* else */
3621 #if defined(CYGWIN) || defined(__CYGWIN__)
3622 ptr = first_pool;
3623 for (i = 0; i < NCORE && ptr != NULL; i++)
3624 { ptr = ptr->nxt; /* local */
3625 }
3626 dc_shared = ptr; /* any remainder */
3627 #else
3628 dc_shared = NULL; /* used all mem for local heaps */
3629 #endif
3630 }
3631 #endif
3632 if (core_id == 0 && !remote_party)
3633 { new_state(); /* cpu0 explores root */
3634 if (verbose)
3635 cpu_printf("done with 1st dfs, nstates %g (put %d states), read q\n",
3636 nstates, nstates_put);
3637 dfs_phase2 = 1;
3638 }
3639 Read_Queue(core_id); /* all cores */
3640
3641 if (verbose)
3642 { cpu_printf("put %6d states into queue -- got %6d\n",
3643 nstates_put, nstates_get);
3644 }
3645 if (proxy_pid != 0)
3646 { rm_shared_segments();
3647 }
3648 done = 1;
3649 wrapup();
3650 exit(0);
3651 }
3652
3653 #else
3654 int unpack_state(SM_frame *, int);
3655 #endif
3656
3657 struct H_el *
3658 grab_shared(int n)
3659 {
3660 #ifndef SEP_STATE
3661 char *rval = (char *) 0;
3662
3663 if (n == 0)
3664 { printf("cpu%d: grab shared zero\n", core_id); fflush(stdout);
3665 return (struct H_el *) rval;
3666 } else if (n&(sizeof(void *)-1))
3667 { n += sizeof(void *)-(n&(sizeof(void *)-1)); /* alignment */
3668 }
3669
3670 #ifdef SEP_HEAP
3671 /* no locking */
3672 if (my_heap != NULL && my_size > n)
3673 { rval = my_heap;
3674 my_heap += n;
3675 my_size -= n;
3676 goto done;
3677 }
3678 #endif
3679
3680 if (!dc_shared)
3681 { sudden_stop("pan: out of memory");
3682 }
3683
3684 /* another lock is always already in effect when this is called */
3685 /* but not always the same lock -- i.e., on different parts of the hashtable */
3686 enter_critical(GLOBAL_LOCK); /* this must be independently mutex */
3687 #if defined(SEP_HEAP) && !defined(WIN32) && !defined(WIN64)
3688 { static int noted = 0;
3689 if (!noted)
3690 { noted = 1;
3691 printf("cpu%d: global heap has %ld bytes left, needed %d\n",
3692 core_id, dc_shared?dc_shared->dc_size:0, n);
3693 } }
3694 #endif
3695 #if 0
3696 if (dc_shared->pattern != 1234567)
3697 { leave_critical(GLOBAL_LOCK);
3698 Uerror("overrun -- memory corruption");
3699 }
3700 #endif
3701 if (dc_shared->dc_size < n)
3702 { if (verbose)
3703 { printf("Next Pool %g Mb + %d\n", memcnt/(1048576.), n);
3704 }
3705 if (dc_shared->nxt == NULL
3706 || dc_shared->nxt->dc_arena == NULL
3707 || dc_shared->nxt->dc_size < n)
3708 { printf("cpu%d: memcnt %g Mb + wanted %d bytes more\n",
3709 core_id, memcnt / (1048576.), n);
3710 leave_critical(GLOBAL_LOCK);
3711 sudden_stop("out of memory -- aborting");
3712 wrapup(); /* exits */
3713 } else
3714 { dc_shared = (sh_Allocater *) dc_shared->nxt;
3715 } }
3716
3717 rval = (char *) dc_shared->dc_arena;
3718 dc_shared->dc_arena += n;
3719 dc_shared->dc_size -= (long) n;
3720 #if 0
3721 if (VVERBOSE)
3722 printf("cpu%d grab shared (%d bytes) -- %ld left\n",
3723 core_id, n, dc_shared->dc_size);
3724 #endif
3725 leave_critical(GLOBAL_LOCK);
3726 done:
3727 memset(rval, 0, n);
3728 memcnt += (double) n;
3729
3730 return (struct H_el *) rval;
3731 #else
3732 return (struct H_el *) emalloc(n);
3733 #endif
3734 }
3735
3736 SM_frame *
3737 Get_Full_Frame(int n)
3738 { SM_frame *f;
3739 double cnt_start = frame_wait;
3740
3741 f = &m_workq[n][prfull[n]];
3742 while (f->m_vsize == 0) /* await full slot LOCK : full frame */
3743 { iam_alive();
3744 #ifndef NGQ
3745 #ifndef SAFETY
3746 if (!a_cycles || core_id != 0)
3747 #endif
3748 if (*grcnt > 0) /* accessed outside lock, but safe even if wrong */
3749 { enter_critical(GQ_RD); /* gq - read access */
3750 if (*grcnt > 0) /* could have changed */
3751 { f = &m_workq[NCORE][*grfull]; /* global q */
3752 if (f->m_vsize == 0)
3753 { /* writer is still filling the slot */
3754 *gr_writemiss++;
3755 f = &m_workq[n][prfull[n]]; /* reset */
3756 } else
3757 { *grfull = (*grfull+1) % (GN_FRAMES);
3758 enter_critical(GQ_WR);
3759 *grcnt = *grcnt - 1;
3760 leave_critical(GQ_WR);
3761 leave_critical(GQ_RD);
3762 return f;
3763 } }
3764 leave_critical(GQ_RD);
3765 }
3766 #endif
3767 if (frame_wait++ - cnt_start > Delay)
3768 { if (0)
3769 { cpu_printf("timeout on q%d -- %u -- query %d\n",
3770 n, f, query_in_progress);
3771 }
3772 return (SM_frame *) 0; /* timeout */
3773 } }
3774 iam_alive();
3775 if (VVERBOSE) cpu_printf("got frame from q%d\n", n);
3776 prfull[n] = (prfull[n] + 1) % (LN_FRAMES);
3777 enter_critical(QLOCK(n));
3778 prcnt[n]--; /* lock out increments */
3779 leave_critical(QLOCK(n));
3780 return f;
3781 }
3782
3783 SM_frame *
3784 Get_Free_Frame(int n)
3785 { SM_frame *f;
3786 double cnt_start = free_wait;
3787
3788 if (VVERBOSE) { cpu_printf("get free frame from q%d\n", n); }
3789
3790 if (n == NCORE) /* global q */
3791 { f = &(m_workq[n][lrfree]);
3792 } else
3793 { f = &(m_workq[n][prfree[n]]);
3794 }
3795 while (f->m_vsize != 0) /* await free slot LOCK : free slot */
3796 { iam_alive();
3797 if (free_wait++ - cnt_start > OneSecond)
3798 { if (verbose)
3799 { cpu_printf("timeout waiting for free slot q%d\n", n);
3800 }
3801 cnt_start = free_wait;
3802 if (someone_crashed(1))
3803 { printf("cpu%d: search terminated\n", core_id);
3804 sudden_stop("get free frame");
3805 pan_exit(1);
3806 } } }
3807 if (n != NCORE)
3808 { prfree[n] = (prfree[n] + 1) % (LN_FRAMES);
3809 enter_critical(QLOCK(n));
3810 prcnt[n]++; /* lock out decrements */
3811 if (prmax[n] < prcnt[n])
3812 { prmax[n] = prcnt[n];
3813 }
3814 leave_critical(QLOCK(n));
3815 }
3816 return f;
3817 }
3818 #ifndef NGQ
3819 int
3820 GlobalQ_HasRoom(void)
3821 { int rval = 0;
3822
3823 gq_tries++;
3824 if (*grcnt < GN_FRAMES) /* there seems to be room */
3825 { enter_critical(GQ_WR); /* gq write access */
3826 if (*grcnt < GN_FRAMES)
3827 { if (m_workq[NCORE][*grfree].m_vsize != 0)
3828 { /* can happen if reader is slow emptying slot */
3829 *gr_readmiss++;
3830 goto out; /* dont wait: release lock and return */
3831 }
3832 lrfree = *grfree; /* Get_Free_Frame use lrfree in this mode */
3833 *grfree = (*grfree + 1) % GN_FRAMES;
3834 *grcnt = *grcnt + 1; /* count nr of slots filled -- no additional lock needed */
3835 if (*grmax < *grcnt) *grmax = *grcnt;
3836 leave_critical(GQ_WR); /* for short lock duration */
3837 gq_hasroom++;
3838 mem_put(NCORE); /* copy state into reserved slot */
3839 rval = 1; /* successfull handoff */
3840 } else
3841 { gq_hasnoroom++;
3842 out: leave_critical(GQ_WR);
3843 } }
3844 return rval;
3845 }
3846 #endif
3847
3848 int
3849 unpack_state(SM_frame *f, int from_q)
3850 { int i, j;
3851 static struct H_el D_State;
3852
3853 if (f->m_vsize > 0)
3854 { boq = f->m_boq;
3855 if (boq > 256)
3856 { cpu_printf("saw control %d, expected state\n", boq);
3857 return 0;
3858 }
3859 vsize = f->m_vsize;
3860 correct:
3861 memcpy((uchar *) &now, (uchar *) f->m_now, vsize);
3862 for (i = j = 0; i < VMAX; i++, j = (j+1)%8)
3863 { Mask[i] = (f->m_Mask[i/8] & (1<<j)) ? 1 : 0;
3864 }
3865 if (now._nr_pr > 0)
3866 { memcpy((uchar *) proc_offset, (uchar *) f->m_p_offset, now._nr_pr * sizeof(OFFT));
3867 memcpy((uchar *) proc_skip, (uchar *) f->m_p_skip, now._nr_pr * sizeof(uchar));
3868 }
3869 if (now._nr_qs > 0)
3870 { memcpy((uchar *) q_offset, (uchar *) f->m_q_offset, now._nr_qs * sizeof(OFFT));
3871 memcpy((uchar *) q_skip, (uchar *) f->m_q_skip, now._nr_qs * sizeof(uchar));
3872 }
3873 #ifndef NOVSZ
3874 if (vsize != now._vsz)
3875 { cpu_printf("vsize %d != now._vsz %d (type %d) %d\n",
3876 vsize, now._vsz, f->m_boq, f->m_vsize);
3877 vsize = now._vsz;
3878 goto correct; /* rare event: a race */
3879 }
3880 #endif
3881 hmax = max(hmax, vsize);
3882
3883 if (f != &cur_Root)
3884 { memcpy((uchar *) &cur_Root, (uchar *) f, sizeof(SM_frame));
3885 }
3886
3887 if (((now._a_t) & 1) == 1) /* i.e., when starting nested DFS */
3888 { A_depth = depthfound = 0;
3889 memcpy((uchar *)&A_Root, (uchar *)&now, vsize);
3890 }
3891 nr_handoffs = f->nr_handoffs;
3892 } else
3893 { cpu_printf("pan: state empty\n");
3894 }
3895
3896 depth = 0;
3897 trpt = &trail[1];
3898 trpt->tau = f->m_tau;
3899 trpt->o_pm = f->m_o_pm;
3900
3901 (trpt-1)->ostate = &D_State; /* stub */
3902 trpt->ostate = &D_State;
3903
3904 #ifdef FULL_TRAIL
3905 if (upto > 0)
3906 { stack_last[core_id] = (Stack_Tree *) f->m_stack;
3907 }
3908 #if defined(VERBOSE)
3909 if (stack_last[core_id])
3910 { cpu_printf("%d: UNPACK -- SET m_stack %u (%d,%d)\n",
3911 depth, stack_last[core_id], stack_last[core_id]->pr,
3912 stack_last[core_id]->t_id);
3913 }
3914 #endif
3915 #endif
3916
3917 if (!trpt->o_t)
3918 { static Trans D_Trans;
3919 trpt->o_t = &D_Trans;
3920 }
3921
3922 #ifdef VERI
3923 if ((trpt->tau & 4) != 4)
3924 { trpt->tau |= 4; /* the claim moves first */
3925 cpu_printf("warning: trpt was not up to date\n");
3926 }
3927 #endif
3928
3929 for (i = 0; i < (int) now._nr_pr; i++)
3930 { P0 *ptr = (P0 *) pptr(i);
3931 #ifndef NP
3932 if (accpstate[ptr->_t][ptr->_p])
3933 { trpt->o_pm |= 2;
3934 }
3935 #else
3936 if (progstate[ptr->_t][ptr->_p])
3937 { trpt->o_pm |= 4;
3938 }
3939 #endif
3940 }
3941
3942 #ifdef EVENT_TRACE
3943 #ifndef NP
3944 if (accpstate[EVENT_TRACE][now._event])
3945 { trpt->o_pm |= 2;
3946 }
3947 #else
3948 if (progstate[EVENT_TRACE][now._event])
3949 { trpt->o_pm |= 4;
3950 }
3951 #endif
3952 #endif
3953
3954 #if defined(C_States) && (HAS_TRACK==1)
3955 /* restore state of tracked C objects */
3956 c_revert((uchar *) &(now.c_state[0]));
3957 #if (HAS_STACK==1)
3958 c_unstack((uchar *) f->m_c_stack); /* unmatched tracked data */
3959 #endif
3960 #endif
3961 return 1;
3962 }
3963
3964 void
3965 write_root(void) /* for trail file */
3966 { int fd;
3967
3968 if (iterative == 0 && Nr_Trails > 1)
3969 sprintf(fnm, "%s%d.%s", TrailFile, Nr_Trails-1, sprefix);
3970 else
3971 sprintf(fnm, "%s.%s", TrailFile, sprefix);
3972
3973 if (cur_Root.m_vsize == 0)
3974 { (void) unlink(fnm); /* remove possible old copy */
3975 return; /* its the default initial state */
3976 }
3977
3978 if ((fd = creat(fnm, TMODE)) < 0)
3979 { char *q;
3980 if ((q = strchr(TrailFile, '.')))
3981 { *q = '\0'; /* strip .pml */
3982 if (iterative == 0 && Nr_Trails-1 > 0)
3983 sprintf(fnm, "%s%d.%s", TrailFile, Nr_Trails-1, sprefix);
3984 else
3985 sprintf(fnm, "%s.%s", TrailFile, sprefix);
3986 *q = '.';
3987 fd = creat(fnm, TMODE);
3988 }
3989 if (fd < 0)
3990 { cpu_printf("pan: cannot create %s\n", fnm);
3991 perror("cause");
3992 return;
3993 } }
3994
3995 if (write(fd, &cur_Root, sizeof(SM_frame)) != sizeof(SM_frame))
3996 { cpu_printf("pan: error writing %s\n", fnm);
3997 } else
3998 { cpu_printf("pan: wrote %s\n", fnm);
3999 }
4000 close(fd);
4001 }
4002
4003 void
4004 set_root(void)
4005 { int fd;
4006 char *q;
4007 char MyFile[512];
4008 char MySuffix[16];
4009 char *ssuffix = "rst";
4010 int try_core = 1;
4011
4012 strcpy(MyFile, TrailFile);
4013 try_again:
4014 if (whichtrail > 0)
4015 { sprintf(fnm, "%s%d.%s", MyFile, whichtrail, ssuffix);
4016 fd = open(fnm, O_RDONLY, 0);
4017 if (fd < 0 && (q = strchr(MyFile, '.')))
4018 { *q = '\0'; /* strip .pml */
4019 sprintf(fnm, "%s%d.%s", MyFile, whichtrail, ssuffix);
4020 *q = '.';
4021 fd = open(fnm, O_RDONLY, 0);
4022 }
4023 } else
4024 { sprintf(fnm, "%s.%s", MyFile, ssuffix);
4025 fd = open(fnm, O_RDONLY, 0);
4026 if (fd < 0 && (q = strchr(MyFile, '.')))
4027 { *q = '\0'; /* strip .pml */
4028 sprintf(fnm, "%s.%s", MyFile, ssuffix);
4029 *q = '.';
4030 fd = open(fnm, O_RDONLY, 0);
4031 } }
4032
4033 if (fd < 0)
4034 { if (try_core < NCORE)
4035 { ssuffix = MySuffix;
4036 sprintf(ssuffix, "cpu%d_rst", try_core++);
4037 goto try_again;
4038 }
4039 cpu_printf("no file '%s.rst' or '%s' (not an error)\n", MyFile, fnm);
4040 } else
4041 { if (read(fd, &cur_Root, sizeof(SM_frame)) != sizeof(SM_frame))
4042 { cpu_printf("read error %s\n", fnm);
4043 close(fd);
4044 pan_exit(1);
4045 }
4046 close(fd);
4047 (void) unpack_state(&cur_Root, -2);
4048 #ifdef SEP_STATE
4049 cpu_printf("partial trail -- last few steps only\n");
4050 #endif
4051 cpu_printf("restored root from '%s'\n", fnm);
4052 printf("=====State:=====\n");
4053 { int i, j; P0 *z;
4054 for (i = 0; i < now._nr_pr; i++)
4055 { z = (P0 *)pptr(i);
4056 printf("proc %2d (%s) ", i, procname[z->_t]);
4057 for (j = 0; src_all[j].src; j++)
4058 if (src_all[j].tp == (int) z->_t)
4059 { printf(" line %3d \"%s\" ",
4060 src_all[j].src[z->_p], PanSource);
4061 break;
4062 }
4063 printf("(state %d)\n", z->_p);
4064 c_locals(i, z->_t);
4065 }
4066 c_globals();
4067 }
4068 printf("================\n");
4069 }
4070 }
4071
4072 #ifdef USE_DISK
4073 unsigned long dsk_written, dsk_drained;
4074 void mem_drain(void);
4075 #endif
4076
4077 void
4078 m_clear_frame(SM_frame *f)
4079 { int i, clr_sz = sizeof(SM_results);
4080
4081 for (i = 0; i <= _NP_; i++) /* all proctypes */
4082 { clr_sz += NrStates[i]*sizeof(uchar);
4083 }
4084 memset(f, 0, clr_sz);
4085 /* caution if sizeof(SM_results) > sizeof(SM_frame) */
4086 }
4087
4088 #define TargetQ_Full(n) (m_workq[n][prfree[n]].m_vsize != 0)
4089 #define TargetQ_NotFull(n) (m_workq[n][prfree[n]].m_vsize == 0)
4090
4091 int
4092 AllQueuesEmpty(void)
4093 { int q;
4094 #ifndef NGQ
4095 if (*grcnt != 0)
4096 { return 0;
4097 }
4098 #endif
4099 for (q = 0; q < NCORE; q++)
4100 { if (prcnt[q] != 0)
4101 { return 0;
4102 } }
4103 return 1;
4104 }
4105
4106 void
4107 Read_Queue(int q)
4108 { SM_frame *f, *of;
4109 int remember, target_q;
4110 SM_results *r;
4111 double patience = 0.0;
4112
4113 target_q = (q + 1) % NCORE;
4114
4115 for (;;)
4116 { f = Get_Full_Frame(q);
4117 if (!f) /* 1 second timeout -- and trigger for Query */
4118 { if (someone_crashed(2))
4119 { printf("cpu%d: search terminated [code %d]\n",
4120 core_id, search_terminated?*search_terminated:-1);
4121 sudden_stop("");
4122 pan_exit(1);
4123 }
4124 #ifdef TESTING
4125 /* to profile with cc -pg and gprof pan.exe -- set handoff depth beyond maxdepth */
4126 exit(0);
4127 #endif
4128 remember = *grfree;
4129 if (core_id == 0 /* root can initiate termination */
4130 && remote_party == 0 /* and only the original root */
4131 && query_in_progress == 0 /* unless its already in progress */
4132 && AllQueuesEmpty())
4133 { f = Get_Free_Frame(target_q);
4134 query_in_progress = 1; /* only root process can do this */
4135 if (!f) { Uerror("Fatal1: no free slot"); }
4136 f->m_boq = QUERY; /* initiate Query */
4137 if (verbose)
4138 { cpu_printf("snd QUERY to q%d (%d) into slot %d\n",
4139 target_q, nstates_get + 1, prfree[target_q]-1);
4140 }
4141 f->m_vsize = remember + 1;
4142 /* number will not change unless we receive more states */
4143 } else if (patience++ > OneHour) /* one hour watchdog timer */
4144 { cpu_printf("timeout -- giving up\n");
4145 sudden_stop("queue timeout");
4146 pan_exit(1);
4147 }
4148 if (0) cpu_printf("timed out -- try again\n");
4149 continue;
4150 }
4151 patience = 0.0; /* reset watchdog */
4152
4153 if (f->m_boq == QUERY)
4154 { if (verbose)
4155 { cpu_printf("got QUERY on q%d (%d <> %d) from slot %d\n",
4156 q, f->m_vsize, nstates_put + 1, prfull[q]-1);
4157 snapshot();
4158 }
4159 remember = f->m_vsize;
4160 f->m_vsize = 0; /* release slot */
4161
4162 if (core_id == 0 && remote_party == 0) /* original root cpu0 */
4163 { if (query_in_progress == 1 /* didn't send more states in the interim */
4164 && *grfree + 1 == remember) /* no action on global queue meanwhile */
4165 { if (verbose) cpu_printf("Termination detected\n");
4166 if (TargetQ_Full(target_q))
4167 { if (verbose)
4168 cpu_printf("warning: target q is full\n");
4169 }
4170 f = Get_Free_Frame(target_q);
4171 if (!f) { Uerror("Fatal2: no free slot"); }
4172 m_clear_frame(f);
4173 f->m_boq = QUIT; /* send final Quit, collect stats */
4174 f->m_vsize = 111; /* anything non-zero will do */
4175 if (verbose)
4176 cpu_printf("put QUIT on q%d\n", target_q);
4177 } else
4178 { if (verbose) cpu_printf("Stale Query\n");
4179 #ifdef USE_DISK
4180 mem_drain();
4181 #endif
4182 }
4183 query_in_progress = 0;
4184 } else
4185 { if (TargetQ_Full(target_q))
4186 { if (verbose)
4187 cpu_printf("warning: forward query - target q full\n");
4188 }
4189 f = Get_Free_Frame(target_q);
4190 if (verbose)
4191 cpu_printf("snd QUERY response to q%d (%d <> %d) in slot %d\n",
4192 target_q, remember, *grfree + 1, prfree[target_q]-1);
4193 if (!f) { Uerror("Fatal4: no free slot"); }
4194
4195 if (*grfree + 1 == remember) /* no action on global queue */
4196 { f->m_boq = QUERY; /* forward query, to root */
4197 f->m_vsize = remember;
4198 } else
4199 { f->m_boq = QUERY_F; /* no match -- busy */
4200 f->m_vsize = 112; /* anything non-zero */
4201 #ifdef USE_DISK
4202 if (dsk_written != dsk_drained)
4203 { mem_drain();
4204 }
4205 #endif
4206 } }
4207 continue;
4208 }
4209
4210 if (f->m_boq == QUERY_F)
4211 { if (verbose)
4212 { cpu_printf("got QUERY_F on q%d from slot %d\n", q, prfull[q]-1);
4213 }
4214 f->m_vsize = 0; /* release slot */
4215
4216 if (core_id == 0 && remote_party == 0) /* original root cpu0 */
4217 { if (verbose) cpu_printf("No Match on Query\n");
4218 query_in_progress = 0;
4219 } else
4220 { if (TargetQ_Full(target_q))
4221 { if (verbose) cpu_printf("warning: forwarding query_f, target queue full\n");
4222 }
4223 f = Get_Free_Frame(target_q);
4224 if (verbose) cpu_printf("forward QUERY_F to q%d into slot %d\n",
4225 target_q, prfree[target_q]-1);
4226 if (!f) { Uerror("Fatal5: no free slot"); }
4227 f->m_boq = QUERY_F; /* cannot terminate yet */
4228 f->m_vsize = 113; /* anything non-zero */
4229 }
4230 #ifdef USE_DISK
4231 if (dsk_written != dsk_drained)
4232 { mem_drain();
4233 }
4234 #endif
4235 continue;
4236 }
4237
4238 if (f->m_boq == QUIT)
4239 { if (0) cpu_printf("done -- local memcnt %g Mb\n", memcnt/(1048576.));
4240 retrieve_info((SM_results *) f); /* collect and combine stats */
4241 if (verbose)
4242 { cpu_printf("received Quit\n");
4243 snapshot();
4244 }
4245 f->m_vsize = 0; /* release incoming slot */
4246 if (core_id != 0)
4247 { f = Get_Free_Frame(target_q); /* new outgoing slot */
4248 if (!f) { Uerror("Fatal6: no free slot"); }
4249 m_clear_frame(f); /* start with zeroed stats */
4250 record_info((SM_results *) f);
4251 f->m_boq = QUIT; /* forward combined results */
4252 f->m_vsize = 114; /* anything non-zero */
4253 if (verbose>1)
4254 cpu_printf("fwd Results to q%d\n", target_q);
4255 }
4256 break; /* successful termination */
4257 }
4258
4259 /* else: 0<= boq <= 255, means STATE transfer */
4260 if (unpack_state(f, q) != 0)
4261 { nstates_get++;
4262 f->m_vsize = 0; /* release slot */
4263 if (VVERBOSE) cpu_printf("Got state\n");
4264
4265 if (search_terminated != NULL
4266 && *search_terminated == 0)
4267 { new_state(); /* explore successors */
4268 memset((uchar *) &cur_Root, 0, sizeof(SM_frame)); /* avoid confusion */
4269 } else
4270 { pan_exit(0);
4271 }
4272 } else
4273 { pan_exit(0);
4274 } }
4275 if (verbose) cpu_printf("done got %d put %d\n", nstates_get, nstates_put);
4276 sleep_report();
4277 }
4278
4279 void
4280 give_up(int unused_x)
4281 {
4282 if (search_terminated != NULL)
4283 { *search_terminated |= 32; /* give_up */
4284 }
4285 if (!writing_trail)
4286 { was_interrupted = 1;
4287 snapshot();
4288 cpu_printf("Give Up\n");
4289 sleep_report();
4290 pan_exit(1);
4291 } else /* we are already terminating */
4292 { cpu_printf("SIGINT\n");
4293 }
4294 }
4295
4296 void
4297 check_overkill(void)
4298 {
4299 vmax_seen = (vmax_seen + 7)/ 8;
4300 vmax_seen *= 8; /* round up to a multiple of 8 */
4301
4302 if (core_id == 0
4303 && !remote_party
4304 && nstates_put > 0
4305 && VMAX - vmax_seen > 8)
4306 {
4307 #ifdef BITSTATE
4308 printf("cpu0: max VMAX value seen in this run: ");
4309 #else
4310 printf("cpu0: recommend recompiling with ");
4311 #endif
4312 printf("-DVMAX=%d\n", vmax_seen);
4313 }
4314 }
4315
4316 void
4317 mem_put(int q) /* handoff state to other cpu, workq q */
4318 { SM_frame *f;
4319 int i, j;
4320
4321 if (vsize > VMAX)
4322 { vsize = (vsize + 7)/8; vsize *= 8; /* round up */
4323 printf("pan: recompile with -DVMAX=N with N >= %d\n", vsize);
4324 Uerror("aborting");
4325 }
4326 if (now._nr_pr > PMAX)
4327 { printf("pan: recompile with -DPMAX=N with N >= %d\n", now._nr_pr);
4328 Uerror("aborting");
4329 }
4330 if (now._nr_qs > QMAX)
4331 { printf("pan: recompile with -DQMAX=N with N >= %d\n", now._nr_qs);
4332 Uerror("aborting");
4333 }
4334 if (vsize > vmax_seen) vmax_seen = vsize;
4335 if (now._nr_pr > pmax_seen) pmax_seen = now._nr_pr;
4336 if (now._nr_qs > qmax_seen) qmax_seen = now._nr_qs;
4337
4338 f = Get_Free_Frame(q); /* not called in likely deadlock states */
4339 if (!f) { Uerror("Fatal3: no free slot"); }
4340
4341 if (VVERBOSE) cpu_printf("putting state into q%d\n", q);
4342
4343 memcpy((uchar *) f->m_now, (uchar *) &now, vsize);
4344 memset((uchar *) f->m_Mask, 0, (VMAX+7)/8 * sizeof(char));
4345 for (i = j = 0; i < VMAX; i++, j = (j+1)%8)
4346 { if (Mask[i])
4347 { f->m_Mask[i/8] |= (1<<j);
4348 } }
4349
4350 if (now._nr_pr > 0)
4351 { memcpy((uchar *) f->m_p_offset, (uchar *) proc_offset, now._nr_pr * sizeof(OFFT));
4352 memcpy((uchar *) f->m_p_skip, (uchar *) proc_skip, now._nr_pr * sizeof(uchar));
4353 }
4354 if (now._nr_qs > 0)
4355 { memcpy((uchar *) f->m_q_offset, (uchar *) q_offset, now._nr_qs * sizeof(OFFT));
4356 memcpy((uchar *) f->m_q_skip, (uchar *) q_skip, now._nr_qs * sizeof(uchar));
4357 }
4358 #if defined(C_States) && (HAS_TRACK==1) && (HAS_STACK==1)
4359 c_stack((uchar *) f->m_c_stack); /* save unmatched tracked data */
4360 #endif
4361 #ifdef FULL_TRAIL
4362 f->m_stack = stack_last[core_id];
4363 #endif
4364 f->nr_handoffs = nr_handoffs+1;
4365 f->m_tau = trpt->tau;
4366 f->m_o_pm = trpt->o_pm;
4367 f->m_boq = boq;
4368 f->m_vsize = vsize; /* must come last - now the other cpu can see it */
4369
4370 if (query_in_progress == 1)
4371 query_in_progress = 2; /* make sure we know, if a query makes the rounds */
4372 nstates_put++;
4373 }
4374
4375 #ifdef USE_DISK
4376 int Dsk_W_Nr, Dsk_R_Nr;
4377 int dsk_file = -1, dsk_read = -1;
4378 unsigned long dsk_written, dsk_drained;
4379 char dsk_name[512];
4380
4381 #ifndef BFS_DISK
4382 #if defined(WIN32) || defined(WIN64)
4383 #define RFLAGS (O_RDONLY|O_BINARY)
4384 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)
4385 #else
4386 #define RFLAGS (O_RDONLY)
4387 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC)
4388 #endif
4389 #endif
4390
4391 void
4392 dsk_stats(void)
4393 { int i;
4394
4395 if (dsk_written > 0)
4396 { cpu_printf("dsk_written %d states in %d files\ncpu%d: dsk_drained %6d states\n",
4397 dsk_written, Dsk_W_Nr, core_id, dsk_drained);
4398 close(dsk_read);
4399 close(dsk_file);
4400 for (i = 0; i < Dsk_W_Nr; i++)
4401 { sprintf(dsk_name, "Q%.3d_%.3d.tmp", i, core_id);
4402 unlink(dsk_name);
4403 } }
4404 }
4405
4406 void
4407 mem_drain(void)
4408 { SM_frame *f, g;
4409 int q = (core_id + 1) % NCORE; /* target q */
4410 int sz;
4411
4412 if (dsk_read < 0
4413 || dsk_written <= dsk_drained)
4414 { return;
4415 }
4416
4417 while (dsk_written > dsk_drained
4418 && TargetQ_NotFull(q))
4419 { f = Get_Free_Frame(q);
4420 if (!f) { Uerror("Fatal: unhandled condition"); }
4421
4422 if ((dsk_drained+1)%MAX_DSK_FILE == 0) /* 100K states max per file */
4423 { (void) close(dsk_read); /* close current read handle */
4424 sprintf(dsk_name, "Q%.3d_%.3d.tmp", Dsk_R_Nr++, core_id);
4425 (void) unlink(dsk_name); /* remove current file */
4426 sprintf(dsk_name, "Q%.3d_%.3d.tmp", Dsk_R_Nr, core_id);
4427 cpu_printf("reading %s\n", dsk_name);
4428 dsk_read = open(dsk_name, RFLAGS); /* open next file */
4429 if (dsk_read < 0)
4430 { Uerror("could not open dsk file");
4431 } }
4432 if (read(dsk_read, &g, sizeof(SM_frame)) != sizeof(SM_frame))
4433 { Uerror("bad dsk file read");
4434 }
4435 sz = g.m_vsize;
4436 g.m_vsize = 0;
4437 memcpy(f, &g, sizeof(SM_frame));
4438 f->m_vsize = sz; /* last */
4439
4440 dsk_drained++;
4441 }
4442 }
4443
4444 void
4445 mem_file(void)
4446 { SM_frame f;
4447 int i, j, q = (core_id + 1) % NCORE; /* target q */
4448
4449 if (vsize > VMAX)
4450 { printf("pan: recompile with -DVMAX=N with N >= %d\n", vsize);
4451 Uerror("aborting");
4452 }
4453 if (now._nr_pr > PMAX)
4454 { printf("pan: recompile with -DPMAX=N with N >= %d\n", now._nr_pr);
4455 Uerror("aborting");
4456 }
4457 if (now._nr_qs > QMAX)
4458 { printf("pan: recompile with -DQMAX=N with N >= %d\n", now._nr_qs);
4459 Uerror("aborting");
4460 }
4461
4462 if (VVERBOSE) cpu_printf("filing state for q%d\n", q);
4463
4464 memcpy((uchar *) f.m_now, (uchar *) &now, vsize);
4465 memset((uchar *) f.m_Mask, 0, (VMAX+7)/8 * sizeof(char));
4466 for (i = j = 0; i < VMAX; i++, j = (j+1)%8)
4467 { if (Mask[i])
4468 { f.m_Mask[i/8] |= (1<<j);
4469 } }
4470
4471 if (now._nr_pr > 0)
4472 { memcpy((uchar *)f.m_p_offset, (uchar *)proc_offset, now._nr_pr*sizeof(OFFT));
4473 memcpy((uchar *)f.m_p_skip, (uchar *)proc_skip, now._nr_pr*sizeof(uchar));
4474 }
4475 if (now._nr_qs > 0)
4476 { memcpy((uchar *) f.m_q_offset, (uchar *) q_offset, now._nr_qs*sizeof(OFFT));
4477 memcpy((uchar *) f.m_q_skip, (uchar *) q_skip, now._nr_qs*sizeof(uchar));
4478 }
4479 #if defined(C_States) && (HAS_TRACK==1) && (HAS_STACK==1)
4480 c_stack((uchar *) f.m_c_stack); /* save unmatched tracked data */
4481 #endif
4482 #ifdef FULL_TRAIL
4483 f.m_stack = stack_last[core_id];
4484 #endif
4485 f.nr_handoffs = nr_handoffs+1;
4486 f.m_tau = trpt->tau;
4487 f.m_o_pm = trpt->o_pm;
4488 f.m_boq = boq;
4489 f.m_vsize = vsize;
4490
4491 if (query_in_progress == 1)
4492 { query_in_progress = 2;
4493 }
4494 if (dsk_file < 0)
4495 { sprintf(dsk_name, "Q%.3d_%.3d.tmp", Dsk_W_Nr, core_id);
4496 dsk_file = open(dsk_name, WFLAGS, 0644);
4497 dsk_read = open(dsk_name, RFLAGS);
4498 if (dsk_file < 0 || dsk_read < 0)
4499 { cpu_printf("File: <%s>\n", dsk_name);
4500 Uerror("cannot open diskfile");
4501 }
4502 Dsk_W_Nr++; /* nr of next file to open */
4503 cpu_printf("created temporary diskfile %s\n", dsk_name);
4504 } else if ((dsk_written+1)%MAX_DSK_FILE == 0)
4505 { close(dsk_file); /* close write handle */
4506 sprintf(dsk_name, "Q%.3d_%.3d.tmp", Dsk_W_Nr++, core_id);
4507 dsk_file = open(dsk_name, WFLAGS, 0644);
4508 if (dsk_file < 0)
4509 { cpu_printf("File: <%s>\n", dsk_name);
4510 Uerror("aborting: cannot open new diskfile");
4511 }
4512 cpu_printf("created temporary diskfile %s\n", dsk_name);
4513 }
4514 if (write(dsk_file, &f, sizeof(SM_frame)) != sizeof(SM_frame))
4515 { Uerror("aborting -- disk write failed (disk full?)");
4516 }
4517 nstates_put++;
4518 dsk_written++;
4519 }
4520 #endif
4521
4522 int
4523 mem_hand_off(void)
4524 {
4525 if (search_terminated == NULL
4526 || *search_terminated != 0) /* not a full crash check */
4527 { pan_exit(0);
4528 }
4529 iam_alive(); /* on every transition of Down */
4530 #ifdef USE_DISK
4531 mem_drain(); /* maybe call this also on every Up */
4532 #endif
4533 if (depth > z_handoff /* above handoff limit */
4534 #ifndef SAFETY
4535 && !a_cycles /* not in liveness mode */
4536 #endif
4537 #if SYNC
4538 && boq == -1 /* not mid-rv */
4539 #endif
4540 #ifdef VERI
4541 && (trpt->tau&4) /* claim moves first */
4542 && !((trpt-1)->tau&128) /* not a stutter move */
4543 #endif
4544 && !(trpt->tau&8)) /* not an atomic move */
4545 { int q = (core_id + 1) % NCORE; /* circular handoff */
4546 #ifdef GENEROUS
4547 if (prcnt[q] < LN_FRAMES)
4548 #else
4549 if (TargetQ_NotFull(q)
4550 && (dfs_phase2 == 0 || prcnt[core_id] > 0))
4551 #endif
4552 { mem_put(q);
4553 return 1;
4554 }
4555 { int rval;
4556 #ifndef NGQ
4557 rval = GlobalQ_HasRoom();
4558 #else
4559 rval = 0;
4560 #endif
4561 #ifdef USE_DISK
4562 if (rval == 0)
4563 { void mem_file(void);
4564 mem_file();
4565 rval = 1;
4566 }
4567 #endif
4568 return rval;
4569 }
4570 }
4571 return 0; /* i.e., no handoff */
4572 }
4573
4574 void
4575 mem_put_acc(void) /* liveness mode */
4576 { int q = (core_id + 1) % NCORE;
4577
4578 if (search_terminated == NULL
4579 || *search_terminated != 0)
4580 { pan_exit(0);
4581 }
4582 #ifdef USE_DISK
4583 mem_drain();
4584 #endif
4585 /* some tortured use of preprocessing: */
4586 #if !defined(NGQ) || defined(USE_DISK)
4587 if (TargetQ_Full(q))
4588 {
4589 #endif
4590 #ifndef NGQ
4591 if (GlobalQ_HasRoom())
4592 { return;
4593 }
4594 #endif
4595 #ifdef USE_DISK
4596 mem_file();
4597 } else
4598 #else
4599 #if !defined(NGQ) || defined(USE_DISK)
4600 }
4601 #endif
4602 #endif
4603 { mem_put(q);
4604 }
4605 }
4606
4607 #if defined(WIN32) || defined(WIN64)
4608 void
4609 init_shm(void) /* initialize shared work-queues */
4610 { char key[512];
4611 int n, m;
4612 int must_exit = 0;
4613
4614 if (core_id == 0 && verbose)
4615 { printf("cpu0: step 3: allocate shared work-queues %g Mb\n",
4616 ((double) NCORE * LWQ_SIZE + GWQ_SIZE) / (1048576.));
4617 }
4618 for (m = 0; m < NR_QS; m++) /* last q is global 1 */
4619 { double qsize = (m == NCORE) ? GWQ_SIZE : LWQ_SIZE;
4620 sprintf(key, "Global\\pan_%s_%.3d", PanSource, m);
4621 if (core_id == 0)
4622 { shmid[m] = CreateFileMapping(
4623 INVALID_HANDLE_VALUE, /* use paging file */
4624 NULL, /* default security */
4625 PAGE_READWRITE, /* access permissions */
4626 0, /* high-order 4 bytes */
4627 qsize, /* low-order bytes, size in bytes */
4628 key); /* name */
4629 } else /* worker nodes just open these segments */
4630 { shmid[m] = OpenFileMapping(
4631 FILE_MAP_ALL_ACCESS, /* read/write access */
4632 FALSE, /* children do not inherit handle */
4633 key);
4634 }
4635 if (shmid[m] == NULL)
4636 { fprintf(stderr, "cpu%d: could not create or open shared queues\n",
4637 core_id);
4638 must_exit = 1;
4639 break;
4640 }
4641 /* attach: */
4642 shared_mem[m] = (char *) MapViewOfFile(shmid[m], FILE_MAP_ALL_ACCESS, 0, 0, 0);
4643 if (shared_mem[m] == NULL)
4644 { fprintf(stderr, "cpu%d: cannot attach shared q%d (%d Mb)\n",
4645 core_id, m+1, (int) (qsize/(1048576.)));
4646 must_exit = 1;
4647 break;
4648 }
4649
4650 memcnt += qsize;
4651
4652 m_workq[m] = (SM_frame *) shared_mem[m];
4653 if (core_id == 0)
4654 { int nframes = (m == NCORE) ? GN_FRAMES : LN_FRAMES;
4655 for (n = 0; n < nframes; n++)
4656 { m_workq[m][n].m_vsize = 0;
4657 m_workq[m][n].m_boq = 0;
4658 } } }
4659
4660 if (must_exit)
4661 { fprintf(stderr, "pan: check './pan --' for usage details\n");
4662 pan_exit(1); /* calls cleanup_shm */
4663 }
4664 }
4665
4666 static uchar *
4667 prep_shmid_S(size_t n) /* either sets SS or H_tab, WIN32/WIN64 */
4668 { char *rval;
4669 #ifndef SEP_STATE
4670 char key[512];
4671
4672 if (verbose && core_id == 0)
4673 {
4674 #ifdef BITSTATE
4675 printf("cpu0: step 1: allocate shared bitstate %g Mb\n",
4676 (double) n / (1048576.));
4677 #else
4678 printf("cpu0: step 1: allocate shared hastable %g Mb\n",
4679 (double) n / (1048576.));
4680 #endif
4681 }
4682 #ifdef MEMLIM
4683 if (memcnt + (double) n > memlim)
4684 { printf("cpu%d: S %8g + %d Kb exceeds memory limit of %8g Mb\n",
4685 core_id, memcnt/1024., n/1024, memlim/(1048576.));
4686 printf("cpu%d: insufficient memory -- aborting\n", core_id);
4687 exit(1);
4688 }
4689 #endif
4690
4691 /* make key different from queues: */
4692 sprintf(key, "Global\\pan_%s_%.3d", PanSource, NCORE+2); /* different from qs */
4693
4694 if (core_id == 0) /* root */
4695 { shmid_S = CreateFileMapping(INVALID_HANDLE_VALUE, NULL,
4696 #ifdef WIN64
4697 PAGE_READWRITE, (n>>32), (n & 0xffffffff), key);
4698 #else
4699 PAGE_READWRITE, 0, n, key);
4700 #endif
4701 memcnt += (double) n;
4702 } else /* worker */
4703 { shmid_S = OpenFileMapping(FILE_MAP_ALL_ACCESS, FALSE, key);
4704 }
4705 if (shmid_S == NULL)
4706 {
4707 #ifdef BITSTATE
4708 fprintf(stderr, "cpu%d: cannot %s shared bitstate",
4709 core_id, core_id?"open":"create");
4710 #else
4711 fprintf(stderr, "cpu%d: cannot %s shared hashtable",
4712 core_id, core_id?"open":"create");
4713 #endif
4714 fprintf(stderr, "pan: check './pan --' for usage details\n");
4715 pan_exit(1);
4716 }
4717
4718 rval = (char *) MapViewOfFile(shmid_S, FILE_MAP_ALL_ACCESS, 0, 0, 0); /* attach */
4719 if ((char *) rval == NULL)
4720 { fprintf(stderr, "cpu%d: cannot attach shared bitstate or hashtable\n", core_id);
4721 fprintf(stderr, "pan: check './pan --' for usage details\n");
4722 pan_exit(1);
4723 }
4724 #else
4725 rval = (char *) emalloc(n);
4726 #endif
4727 return (uchar *) rval;
4728 }
4729
4730 static uchar *
4731 prep_state_mem(size_t n) /* WIN32/WIN64 sets memory arena for states */
4732 { char *rval;
4733 char key[512];
4734 static int cnt = 3; /* start larger than earlier ftok calls */
4735
4736 if (verbose && core_id == 0)
4737 { printf("cpu0: step 2+: pre-allocate memory arena %d of %g Mb\n",
4738 cnt-3, (double) n / (1048576.));
4739 }
4740 #ifdef MEMLIM
4741 if (memcnt + (double) n > memlim)
4742 { printf("cpu%d: error: M %.0f + %.0f exceeds memory limit of %.0f Kb\n",
4743 core_id, memcnt/1024.0, (double) n/1024.0, memlim/1024.0);
4744 return NULL;
4745 }
4746 #endif
4747
4748 sprintf(key, "Global\\pan_%s_%.3d", PanSource, NCORE+cnt); cnt++;
4749
4750 if (core_id == 0)
4751 { shmid_M = CreateFileMapping(INVALID_HANDLE_VALUE, NULL,
4752 #ifdef WIN64
4753 PAGE_READWRITE, (n>>32), (n & 0xffffffff), key);
4754 #else
4755 PAGE_READWRITE, 0, n, key);
4756 #endif
4757 } else
4758 { shmid_M = OpenFileMapping(FILE_MAP_ALL_ACCESS, FALSE, key);
4759 }
4760 if (shmid_M == NULL)
4761 { printf("cpu%d: failed to get pool of shared memory nr %d of size %d\n",
4762 core_id, cnt-3, n);
4763 printf("pan: check './pan --' for usage details\n");
4764 return NULL;
4765 }
4766 rval = (char *) MapViewOfFile(shmid_M, FILE_MAP_ALL_ACCESS, 0, 0, 0); /* attach */
4767
4768 if (rval == NULL)
4769 { printf("cpu%d: failed to attach pool of shared memory nr %d of size %d\n",
4770 core_id, cnt-3, n);
4771 return NULL;
4772 }
4773 return (uchar *) rval;
4774 }
4775
4776 void
4777 init_HT(unsigned long n) /* WIN32/WIN64 version */
4778 { volatile char *x;
4779 double get_mem;
4780 #ifndef SEP_STATE
4781 char *dc_mem_start;
4782 #endif
4783 if (verbose) printf("cpu%d: initialization for Windows\n", core_id);
4784
4785 #ifdef SEP_STATE
4786 #ifndef MEMLIM
4787 if (verbose)
4788 { printf("cpu0: steps 0,1: no -DMEMLIM set\n");
4789 }
4790 #else
4791 if (verbose)
4792 printf("cpu0: steps 0,1: -DMEMLIM=%d Mb - (hashtable %g Mb + workqueues %g Mb)\n",
4793 MEMLIM, ((double)n/(1048576.)), ((double) NCORE * LWQ_SIZE + GWQ_SIZE)/(1048576.));
4794 #endif
4795 get_mem = NCORE * sizeof(double) + (1 + CS_NR) * sizeof(void *)+ 4*sizeof(void *) + 2*sizeof(double);
4796 /* NCORE * is_alive + search_terminated + CS_NR * sh_lock + 6 gr vars */
4797 get_mem += 4 * NCORE * sizeof(void *);
4798 #ifdef FULL_TRAIL
4799 get_mem += (NCORE) * sizeof(Stack_Tree *);
4800 /* NCORE * stack_last */
4801 #endif
4802 x = (volatile char *) prep_state_mem((size_t) get_mem);
4803 shmid_X = (void *) x;
4804 if (x == NULL)
4805 { printf("cpu0: could not allocate shared memory, see ./pan --\n");
4806 exit(1);
4807 }
4808 search_terminated = (volatile unsigned int *) x; /* comes first */
4809 x += sizeof(void *); /* maintain alignment */
4810
4811 is_alive = (volatile double *) x;
4812 x += NCORE * sizeof(double);
4813
4814 sh_lock = (volatile int *) x;
4815 x += CS_NR * sizeof(void *); /* allow 1 word per entry */
4816
4817 grfree = (volatile int *) x;
4818 x += sizeof(void *);
4819 grfull = (volatile int *) x;
4820 x += sizeof(void *);
4821 grcnt = (volatile int *) x;
4822 x += sizeof(void *);
4823 grmax = (volatile int *) x;
4824 x += sizeof(void *);
4825 prfree = (volatile int *) x;
4826 x += NCORE * sizeof(void *);
4827 prfull = (volatile int *) x;
4828 x += NCORE * sizeof(void *);
4829 prcnt = (volatile int *) x;
4830 x += NCORE * sizeof(void *);
4831 prmax = (volatile int *) x;
4832 x += NCORE * sizeof(void *);
4833 gr_readmiss = (volatile double *) x;
4834 x += sizeof(double);
4835 gr_writemiss = (volatile double *) x;
4836 x += sizeof(double);
4837
4838 #ifdef FULL_TRAIL
4839 stack_last = (volatile Stack_Tree **) x;
4840 x += NCORE * sizeof(Stack_Tree *);
4841 #endif
4842
4843 #ifndef BITSTATE
4844 H_tab = (struct H_el **) emalloc(n);
4845 #endif
4846 #else
4847 #ifndef MEMLIM
4848 #warning MEMLIM not set
4849 #define MEMLIM (2048)
4850 #endif
4851
4852 if (core_id == 0 && verbose)
4853 printf("cpu0: step 0: -DMEMLIM=%d Mb - (hashtable %g Mb + workqueues %g Mb) = %g Mb for state storage\n",
4854 MEMLIM, ((double)n/(1048576.)), ((double) NCORE * LWQ_SIZE + GWQ_SIZE)/(1048576.),
4855 (memlim - memcnt - (double) n - ((double) NCORE * LWQ_SIZE + GWQ_SIZE))/(1048576.));
4856 #ifndef BITSTATE
4857 H_tab = (struct H_el **) prep_shmid_S((size_t) n); /* hash_table */
4858 #endif
4859 get_mem = memlim - memcnt - ((double) NCORE) * LWQ_SIZE - GWQ_SIZE;
4860 if (get_mem <= 0)
4861 { Uerror("internal error -- shared state memory");
4862 }
4863
4864 if (core_id == 0 && verbose)
4865 { printf("cpu0: step 2: shared state memory %g Mb\n",
4866 get_mem/(1048576.));
4867 }
4868 x = dc_mem_start = (char *) prep_state_mem((size_t) get_mem); /* for states */
4869 if (x == NULL)
4870 { printf("cpu%d: insufficient memory -- aborting\n", core_id);
4871 exit(1);
4872 }
4873
4874 search_terminated = (volatile unsigned int *) x; /* comes first */
4875 x += sizeof(void *); /* maintain alignment */
4876
4877 is_alive = (volatile double *) x;
4878 x += NCORE * sizeof(double);
4879
4880 sh_lock = (volatile int *) x;
4881 x += CS_NR * sizeof(int);
4882
4883 grfree = (volatile int *) x;
4884 x += sizeof(void *);
4885 grfull = (volatile int *) x;
4886 x += sizeof(void *);
4887 grcnt = (volatile int *) x;
4888 x += sizeof(void *);
4889 grmax = (volatile int *) x;
4890 x += sizeof(void *);
4891 prfree = (volatile int *) x;
4892 x += NCORE * sizeof(void *);
4893 prfull = (volatile int *) x;
4894 x += NCORE * sizeof(void *);
4895 prcnt = (volatile int *) x;
4896 x += NCORE * sizeof(void *);
4897 prmax = (volatile int *) x;
4898 x += NCORE * sizeof(void *);
4899 gr_readmiss = (volatile double *) x;
4900 x += sizeof(double);
4901 gr_writemiss = (volatile double *) x;
4902 x += sizeof(double);
4903
4904 #ifdef FULL_TRAIL
4905 stack_last = (volatile Stack_Tree **) x;
4906 x += NCORE * sizeof(Stack_Tree *);
4907 #endif
4908 if (((long)x)&(sizeof(void *)-1)) /* word alignment */
4909 { x += sizeof(void *)-(((long)x)&(sizeof(void *)-1)); /* 64-bit align */
4910 }
4911
4912 #ifdef COLLAPSE
4913 ncomps = (unsigned long *) x;
4914 x += (256+2) * sizeof(unsigned long);
4915 #endif
4916
4917 dc_shared = (sh_Allocater *) x; /* in shared memory */
4918 x += sizeof(sh_Allocater);
4919
4920 if (core_id == 0) /* root only */
4921 { dc_shared->dc_id = shmid_M;
4922 dc_shared->dc_start = (void *) dc_mem_start;
4923 dc_shared->dc_arena = x;
4924 dc_shared->pattern = 1234567;
4925 dc_shared->dc_size = (long) get_mem - (long) (x - dc_mem_start);
4926 dc_shared->nxt = NULL;
4927 }
4928 #endif
4929 }
4930
4931 #if defined(WIN32) || defined(WIN64) || defined(__i386__) || defined(__x86_64__)
4932 extern BOOLEAN InterlockedBitTestAndSet(LONG volatile* Base, LONG Bit);
4933 int
4934 tas(volatile LONG *s)
4935 { return InterlockedBitTestAndSet(s, 1);
4936 }
4937 #else
4938 #error missing definition of test and set operation for this platform
4939 #endif
4940
4941 void
4942 cleanup_shm(int val)
4943 { int m;
4944 static int nibis = 0;
4945
4946 if (nibis != 0)
4947 { printf("cpu%d: Redundant call to cleanup_shm(%d)\n", core_id, val);
4948 return;
4949 } else
4950 { nibis = 1;
4951 }
4952 if (search_terminated != NULL)
4953 { *search_terminated |= 16; /* cleanup_shm */
4954 }
4955
4956 for (m = 0; m < NR_QS; m++)
4957 { if (shmid[m] != NULL)
4958 { UnmapViewOfFile((char *) shared_mem[m]);
4959 CloseHandle(shmid[m]);
4960 } }
4961 #ifdef SEP_STATE
4962 UnmapViewOfFile((void *) shmid_X);
4963 CloseHandle((void *) shmid_M);
4964 #else
4965 #ifdef BITSTATE
4966 if (shmid_S != NULL)
4967 { UnmapViewOfFile(SS);
4968 CloseHandle(shmid_S);
4969 }
4970 #else
4971 if (core_id == 0 && verbose)
4972 { printf("cpu0: done, %ld Mb of shared state memory left\n",
4973 dc_shared->dc_size / (long)(1048576));
4974 }
4975 if (shmid_S != NULL)
4976 { UnmapViewOfFile(H_tab);
4977 CloseHandle(shmid_S);
4978 }
4979 shmid_M = (void *) (dc_shared->dc_id);
4980 UnmapViewOfFile((char *) dc_shared->dc_start);
4981 CloseHandle(shmid_M);
4982 #endif
4983 #endif
4984 /* detached from shared memory - so cannot use cpu_printf */
4985 if (verbose)
4986 { printf("cpu%d: done -- got %d states from queue\n",
4987 core_id, nstates_get);
4988 }
4989 }
4990
4991 void
4992 mem_get(void)
4993 { SM_frame *f;
4994 int is_parent;
4995
4996 #if defined(MA) && !defined(SEP_STATE)
4997 #error MA requires SEP_STATE in multi-core mode
4998 #endif
4999 #ifdef BFS
5000 #error BFS is not supported in multi-core mode
5001 #endif
5002 #ifdef SC
5003 #error SC is not supported in multi-core mode
5004 #endif
5005 init_shm(); /* we are single threaded when this starts */
5006 signal(SIGINT, give_up); /* windows control-c interrupt */
5007
5008 if (core_id == 0 && verbose)
5009 { printf("cpu0: step 4: creating additional workers (proxy %d)\n",
5010 proxy_pid);
5011 }
5012 #if 0
5013 if NCORE > 1 the child or the parent should fork N-1 more times
5014 the parent is the only process with core_id == 0 and is_parent > 0
5015 the others (workers) have is_parent = 0 and core_id = 1..NCORE-1
5016 #endif
5017 if (core_id == 0) /* root starts up the workers */
5018 { worker_pids[0] = (DWORD) getpid(); /* for completeness */
5019 while (++core_id < NCORE) /* first worker sees core_id = 1 */
5020 { char cmdline[64];
5021 STARTUPINFO si = { sizeof(si) };
5022 PROCESS_INFORMATION pi;
5023
5024 if (proxy_pid == core_id) /* always non-zero */
5025 { sprintf(cmdline, "pan_proxy.exe -r %s-Q%d -Z%d",
5026 o_cmdline, getpid(), core_id);
5027 } else
5028 { sprintf(cmdline, "pan.exe %s-Q%d -Z%d",
5029 o_cmdline, getpid(), core_id);
5030 }
5031 if (verbose) printf("cpu%d: spawn %s\n", core_id, cmdline);
5032
5033 is_parent = CreateProcess(0, cmdline, 0, 0, FALSE, 0, 0, 0, &si, &pi);
5034 if (is_parent == 0)
5035 { Uerror("fork failed");
5036 }
5037 worker_pids[core_id] = pi.dwProcessId;
5038 worker_handles[core_id] = pi.hProcess;
5039 if (verbose)
5040 { cpu_printf("created core %d, pid %d\n",
5041 core_id, pi.dwProcessId);
5042 }
5043 if (proxy_pid == core_id) /* we just created the receive half */
5044 { /* add proxy send, store pid in proxy_pid_snd */
5045 sprintf(cmdline, "pan_proxy.exe -s %s-Q%d -Z%d -Y%d",
5046 o_cmdline, getpid(), core_id, worker_pids[proxy_pid]);
5047 if (verbose) printf("cpu%d: spawn %s\n", core_id, cmdline);
5048 is_parent = CreateProcess(0, cmdline, 0,0, FALSE, 0,0,0, &si, &pi);
5049 if (is_parent == 0)
5050 { Uerror("fork failed");
5051 }
5052 proxy_pid_snd = pi.dwProcessId;
5053 proxy_handle_snd = pi.hProcess;
5054 if (verbose)
5055 { cpu_printf("created core %d, pid %d (send proxy)\n",
5056 core_id, pi.dwProcessId);
5057 } } }
5058 core_id = 0; /* reset core_id for root process */
5059 } else /* worker */
5060 { static char db0[16]; /* good for up to 10^6 cores */
5061 static char db1[16];
5062 tprefix = db0; sprefix = db1;
5063 sprintf(tprefix, "cpu%d_trail", core_id); /* avoid conflicts on file access */
5064 sprintf(sprefix, "cpu%d_rst", core_id);
5065 memcnt = 0; /* count only additionally allocated memory */
5066 }
5067 if (verbose)
5068 { cpu_printf("starting core_id %d -- pid %d\n", core_id, getpid());
5069 }
5070 if (core_id == 0 && !remote_party)
5071 { new_state(); /* root starts the search */
5072 if (verbose)
5073 cpu_printf("done with 1st dfs, nstates %g (put %d states), start reading q\n",
5074 nstates, nstates_put);
5075 dfs_phase2 = 1;
5076 }
5077 Read_Queue(core_id); /* all cores */
5078
5079 if (verbose)
5080 { cpu_printf("put %6d states into queue -- got %6d\n",
5081 nstates_put, nstates_get);
5082 }
5083 done = 1;
5084 wrapup();
5085 exit(0);
5086 }
5087 #endif
5088
5089 #ifdef BITSTATE
5090 void
5091 init_SS(unsigned long n)
5092 {
5093 SS = (uchar *) prep_shmid_S((size_t) n);
5094 init_HT(0L);
5095 }
5096 #endif
5097
5098 #endif
5099 clock_t start_time;
5100 #if NCORE>1
5101 clock_t crash_stamp;
5102 #endif
5103 #if !defined(WIN32) && !defined(WIN64)
5104 struct tms start_tm;
5105 #endif
5106
5107 void
5108 start_timer(void)
5109 {
5110 #if defined(WIN32) || defined(WIN64)
5111 start_time = clock();
5112 #else
5113 start_time = times(&start_tm);
5114 #endif
5115 }
5116
5117 void
5118 stop_timer(void)
5119 { clock_t stop_time;
5120 double delta_time;
5121 #if !defined(WIN32) && !defined(WIN64)
5122 struct tms stop_tm;
5123 stop_time = times(&stop_tm);
5124 delta_time = ((double) (stop_time - start_time)) / ((double) sysconf(_SC_CLK_TCK));
5125 #else
5126 stop_time = clock();
5127 delta_time = ((double) (stop_time - start_time)) / ((double) CLOCKS_PER_SEC);
5128 #endif
5129 if (readtrail || delta_time < 0.00) return;
5130 #if NCORE>1
5131 if (core_id == 0 && nstates > (double) 0)
5132 { printf("\ncpu%d: elapsed time %.3g seconds (%g states visited)\n", core_id, delta_time, nstates);
5133 if (delta_time > 0.01)
5134 { printf("cpu%d: rate %g states/second\n", core_id, nstates/delta_time);
5135 }
5136 { void check_overkill(void);
5137 check_overkill();
5138 } }
5139 #else
5140 printf("\npan: elapsed time %.3g seconds\n", delta_time);
5141 if (delta_time > 0.01)
5142 { printf("pan: rate %9.8g states/second\n", nstates/delta_time);
5143 if (verbose)
5144 { printf("pan: avg transition delay %.5g usec\n",
5145 delta_time/(nstates+truncs));
5146 } }
5147 #endif
5148 }
5149
5150 #if NCORE>1
5151 #ifdef T_ALERT
5152 double t_alerts[17];
5153
5154 void
5155 crash_report(void)
5156 { int i;
5157 printf("crash alert intervals:\n");
5158 for (i = 0; i < 17; i++)
5159 { printf("%d\t%g\n", i, t_alerts[i]);
5160 } }
5161 #endif
5162
5163 void
5164 crash_reset(void)
5165 { /* false alarm */
5166 if (crash_stamp != (clock_t) 0)
5167 {
5168 #ifdef T_ALERT
5169 double delta_time;
5170 int i;
5171 #if defined(WIN32) || defined(WIN64)
5172 delta_time = ((double) (clock() - crash_stamp)) / ((double) CLOCKS_PER_SEC);
5173 #else
5174 delta_time = ((double) (times(&start_tm) - crash_stamp)) / ((double) sysconf(_SC_CLK_TCK));
5175 #endif
5176 for (i = 0; i < 16; i++)
5177 { if (delta_time <= (i*30))
5178 { t_alerts[i] = delta_time;
5179 break;
5180 } }
5181 if (i == 16) t_alerts[i] = delta_time;
5182 #endif
5183 if (verbose)
5184 printf("cpu%d: crash alert off\n", core_id);
5185 }
5186 crash_stamp = (clock_t) 0;
5187 }
5188
5189 int
5190 crash_test(double maxtime)
5191 { double delta_time;
5192 if (crash_stamp == (clock_t) 0)
5193 { /* start timing */
5194 #if defined(WIN32) || defined(WIN64)
5195 crash_stamp = clock();
5196 #else
5197 crash_stamp = times(&start_tm);
5198 #endif
5199 if (verbose)
5200 { printf("cpu%d: crash detection\n", core_id);
5201 }
5202 return 0;
5203 }
5204 #if defined(WIN32) || defined(WIN64)
5205 delta_time = ((double) (clock() - crash_stamp)) / ((double) CLOCKS_PER_SEC);
5206 #else
5207 delta_time = ((double) (times(&start_tm) - crash_stamp)) / ((double) sysconf(_SC_CLK_TCK));
5208 #endif
5209 return (delta_time >= maxtime);
5210 }
5211 #endif
5212
5213 void
5214 do_the_search(void)
5215 { int i;
5216 depth = mreached = 0;
5217 trpt = &trail[0];
5218 #ifdef VERI
5219 trpt->tau |= 4; /* the claim moves first */
5220 #endif
5221 for (i = 0; i < (int) now._nr_pr; i++)
5222 { P0 *ptr = (P0 *) pptr(i);
5223 #ifndef NP
5224 if (!(trpt->o_pm&2)
5225 && accpstate[ptr->_t][ptr->_p])
5226 { trpt->o_pm |= 2;
5227 }
5228 #else
5229 if (!(trpt->o_pm&4)
5230 && progstate[ptr->_t][ptr->_p])
5231 { trpt->o_pm |= 4;
5232 }
5233 #endif
5234 }
5235 #ifdef EVENT_TRACE
5236 #ifndef NP
5237 if (accpstate[EVENT_TRACE][now._event])
5238 { trpt->o_pm |= 2;
5239 }
5240 #else
5241 if (progstate[EVENT_TRACE][now._event])
5242 { trpt->o_pm |= 4;
5243 }
5244 #endif
5245 #endif
5246 #ifndef NOCOMP
5247 Mask[0] = Mask[1] = 1; /* _nr_pr, _nr_qs */
5248 if (!a_cycles)
5249 { i = &(now._a_t) - (uchar *) &now;
5250 Mask[i] = 1; /* _a_t */
5251 }
5252 #ifndef NOFAIR
5253 if (!fairness)
5254 { int j = 0;
5255 i = &(now._cnt[0]) - (uchar *) &now;
5256 while (j++ < NFAIR)
5257 Mask[i++] = 1; /* _cnt[] */
5258 }
5259 #endif
5260 #endif
5261 #ifndef NOFAIR
5262 if (fairness
5263 && (a_cycles && (trpt->o_pm&2)))
5264 { now._a_t = 2; /* set the A-bit */
5265 now._cnt[0] = now._nr_pr + 1;
5266 #ifdef VERBOSE
5267 printf("%3d: fairness Rule 1, cnt=%d, _a_t=%d\n",
5268 depth, now._cnt[now._a_t&1], now._a_t);
5269 #endif
5270 }
5271 #endif
5272 c_stack_start = (char *) &i; /* meant to be read-only */
5273 #if defined(HAS_CODE) && defined (C_INIT)
5274 C_INIT; /* initialization of data that must precede fork() */
5275 c_init_done++;
5276 #endif
5277 #if defined(C_States) && (HAS_TRACK==1)
5278 /* capture initial state of tracked C objects */
5279 c_update((uchar *) &(now.c_state[0]));
5280 #endif
5281 #ifdef HAS_CODE
5282 if (readtrail) getrail(); /* no return */
5283 #endif
5284 start_timer();
5285 #ifdef BFS
5286 bfs();
5287 #else
5288 #if defined(C_States) && defined(HAS_STACK) && (HAS_TRACK==1)
5289 /* initial state of tracked & unmatched objects */
5290 c_stack((uchar *) &(svtack->c_stack[0]));
5291 #endif
5292 #ifdef RANDOMIZE
5293 #if RANDOMIZE>0
5294 srand(RANDOMIZE);
5295 #else
5296 srand(123);
5297 #endif
5298 #endif
5299 #if NCORE>1
5300 mem_get();
5301 #else
5302 new_state(); /* start 1st DFS */
5303 #endif
5304 #endif
5305 }
5306 #ifdef INLINE_REV
5307 uchar
5308 do_reverse(Trans *t, short II, uchar M)
5309 { uchar _m = M;
5310 int tt = (int) ((P0 *)this)->_p;
5311 #include REVERSE_MOVES
5312 R999: return _m;
5313 }
5314 #endif
5315 #ifndef INLINE
5316 #ifdef EVENT_TRACE
5317 static char _tp = 'n'; static int _qid = 0;
5318 #endif
5319 uchar
5320 do_transit(Trans *t, short II)
5321 { uchar _m = 0;
5322 int tt = (int) ((P0 *)this)->_p;
5323 #ifdef M_LOSS
5324 uchar delta_m = 0;
5325 #endif
5326 #ifdef EVENT_TRACE
5327 short oboq = boq;
5328 uchar ot = (uchar) ((P0 *)this)->_t;
5329 if (ot == EVENT_TRACE) boq = -1;
5330 #define continue { boq = oboq; return 0; }
5331 #else
5332 #define continue return 0
5333 #ifdef SEPARATE
5334 uchar ot = (uchar) ((P0 *)this)->_t;
5335 #endif
5336 #endif
5337 #include FORWARD_MOVES
5338 P999:
5339 #ifdef EVENT_TRACE
5340 if (ot == EVENT_TRACE) boq = oboq;
5341 #endif
5342 return _m;
5343 #undef continue
5344 }
5345 #ifdef EVENT_TRACE
5346 void
5347 require(char tp, int qid)
5348 { Trans *t;
5349 _tp = tp; _qid = qid;
5350
5351 if (now._event != endevent)
5352 for (t = trans[EVENT_TRACE][now._event]; t; t = t->nxt)
5353 { if (do_transit(t, EVENT_TRACE))
5354 { now._event = t->st;
5355 reached[EVENT_TRACE][t->st] = 1;
5356 #ifdef VERBOSE
5357 printf(" event_trace move to -> %d\n", t->st);
5358 #endif
5359 #ifndef BFS
5360 #ifndef NP
5361 if (accpstate[EVENT_TRACE][now._event])
5362 (trpt+1)->o_pm |= 2;
5363 #else
5364 if (progstate[EVENT_TRACE][now._event])
5365 (trpt+1)->o_pm |= 4;
5366 #endif
5367 #endif
5368 #ifdef NEGATED_TRACE
5369 if (now._event == endevent)
5370 {
5371 #ifndef BFS
5372 depth++; trpt++;
5373 #endif
5374 uerror("event_trace error (all events matched)");
5375 #ifndef BFS
5376 trpt--; depth--;
5377 #endif
5378 break;
5379 }
5380 #endif
5381 for (t = t->nxt; t; t = t->nxt)
5382 { if (do_transit(t, EVENT_TRACE))
5383 Uerror("non-determinism in event-trace");
5384 }
5385 return;
5386 }
5387 #ifdef VERBOSE
5388 else
5389 printf(" event_trace miss '%c' -- %d, %d, %d\n",
5390 tp, qid, now._event, t->forw);
5391 #endif
5392 }
5393 #ifdef NEGATED_TRACE
5394 now._event = endevent; /* only 1st try will count -- fixed 4.2.6 */
5395 #else
5396 #ifndef BFS
5397 depth++; trpt++;
5398 #endif
5399 uerror("event_trace error (no matching event)");
5400 #ifndef BFS
5401 trpt--; depth--;
5402 #endif
5403 #endif
5404 }
5405 #endif
5406 int
5407 enabled(int iam, int pid)
5408 { Trans *t; uchar *othis = this;
5409 int res = 0; int tt; uchar ot;
5410 #ifdef VERI
5411 /* if (pid > 0) */ pid++;
5412 #endif
5413 if (pid == iam)
5414 Uerror("used: enabled(pid=thisproc)");
5415 if (pid < 0 || pid >= (int) now._nr_pr)
5416 return 0;
5417 this = pptr(pid);
5418 TstOnly = 1;
5419 tt = (int) ((P0 *)this)->_p;
5420 ot = (uchar) ((P0 *)this)->_t;
5421 for (t = trans[ot][tt]; t; t = t->nxt)
5422 if (do_transit(t, (short) pid))
5423 { res = 1;
5424 break;
5425 }
5426 TstOnly = 0;
5427 this = othis;
5428 return res;
5429 }
5430 #endif
5431 void
5432 snap_time(void)
5433 { clock_t stop_time;
5434 double delta_time;
5435 #if !defined(WIN32) && !defined(WIN64)
5436 struct tms stop_tm;
5437 stop_time = times(&stop_tm);
5438 delta_time = ((double) (stop_time - start_time)) / ((double) sysconf(_SC_CLK_TCK));
5439 #else
5440 stop_time = clock();
5441 delta_time = ((double) (stop_time - start_time)) / ((double) CLOCKS_PER_SEC);
5442 #endif
5443 if (delta_time > 0.01)
5444 { printf("t= %6.3g ", delta_time);
5445 printf("R= %7.0g", nstates/delta_time);
5446 }
5447 printf("\n");
5448 if (quota > 0.1 && delta_time > quota)
5449 { printf("Time limit of %6.3g minutes exceeded\n", quota/60.0);
5450 #if NCORE>1
5451 fflush(stdout);
5452 leave_critical(GLOBAL_LOCK);
5453 sudden_stop("time-limit");
5454 exit(1);
5455 #endif
5456 wrapup();
5457 }
5458 }
5459 void
5460 snapshot(void)
5461 {
5462 #if NCORE>1
5463 enter_critical(GLOBAL_LOCK); /* snapshot */
5464 printf("cpu%d: ", core_id);
5465 #endif
5466 printf("Depth= %7ld States= %8.3g ",
5467 #if NCORE>1
5468 (long) (nr_handoffs * z_handoff) +
5469 #endif
5470 mreached, nstates);
5471 printf("Transitions= %8.3g ", nstates+truncs);
5472 #ifdef MA
5473 printf("Nodes= %7d ", nr_states);
5474 #endif
5475 printf("Memory= %9.3f\t", memcnt/1048576.);
5476 snap_time();
5477 fflush(stdout);
5478 #if NCORE>1
5479 leave_critical(GLOBAL_LOCK);
5480 #endif
5481 }
5482 #ifdef SC
5483 void
5484 stack2disk(void)
5485 {
5486 if (!stackwrite
5487 && (stackwrite = creat(stackfile, TMODE)) < 0)
5488 Uerror("cannot create stackfile");
5489
5490 if (write(stackwrite, trail, DDD*sizeof(Trail))
5491 != DDD*sizeof(Trail))
5492 Uerror("stackfile write error -- disk is full?");
5493
5494 memmove(trail, &trail[DDD], (HHH-DDD+2)*sizeof(Trail));
5495 memset(&trail[HHH-DDD+2], 0, (omaxdepth - HHH + DDD - 2)*sizeof(Trail));
5496 CNT1++;
5497 }
5498 void
5499 disk2stack(void)
5500 { long have;
5501
5502 CNT2++;
5503 memmove(&trail[DDD], trail, (HHH-DDD+2)*sizeof(Trail));
5504
5505 if (!stackwrite
5506 || lseek(stackwrite, -DDD* (off_t) sizeof(Trail), SEEK_CUR) == -1)
5507 Uerror("disk2stack lseek error");
5508
5509 if (!stackread
5510 && (stackread = open(stackfile, 0)) < 0)
5511 Uerror("cannot open stackfile");
5512
5513 if (lseek(stackread, (CNT1-CNT2)*DDD* (off_t) sizeof(Trail), SEEK_SET) == -1)
5514 Uerror("disk2stack lseek error");
5515
5516 have = read(stackread, trail, DDD*sizeof(Trail));
5517 if (have != DDD*sizeof(Trail))
5518 Uerror("stackfile read error");
5519 }
5520 #endif
5521 uchar *
5522 Pptr(int x)
5523 { if (x < 0 || x >= MAXPROC || !proc_offset[x])
5524 return noptr;
5525 else
5526 return (uchar *) pptr(x);
5527 }
5528 int qs_empty(void);
5529 /*
5530 * new_state() is the main DFS search routine in the verifier
5531 * it has a lot of code ifdef-ed together to support
5532 * different search modes, which makes it quite unreadable.
5533 * if you are studying the code, first use the C preprocessor
5534 * to generate a specific version from the pan.c source,
5535 * e.g. by saying:
5536 * gcc -E -DNOREDUCE -DBITSTATE pan.c > ppan.c
5537 * and then study the resulting file, rather than this one
5538 */
5539 #if !defined(BFS) && (!defined(BITSTATE) || !defined(MA))
5540
5541 #ifdef NSUCC
5542 int N_succ[512];
5543 void
5544 tally_succ(int cnt)
5545 { if (cnt < 512) N_succ[cnt]++;
5546 else printf("tally_succ: cnt %d exceeds range\n", cnt);
5547 }
5548
5549 void
5550 dump_succ(void)
5551 { int i; double sum = 0.0;
5552 double w_avg = 0.0;
5553 printf("Successor counts:\n");
5554 for (i = 0; i < 512; i++)
5555 { sum += (double) N_succ[i];
5556 }
5557 for (i = 0; i < 512; i++)
5558 { if (N_succ[i] > 0)
5559 { printf("%3d %10d (%.4g %% of total)\n",
5560 i, N_succ[i], (100.0 * (double) N_succ[i])/sum);
5561 w_avg += (double) i * (double) N_succ[i];
5562 } }
5563 if (sum > N_succ[0])
5564 printf("mean %.4g (without 0: %.4g)\n", w_avg / sum, w_avg / (sum - (double) N_succ[0]));
5565 }
5566 #endif
5567
5568 void
5569 new_state(void)
5570 { Trans *t;
5571 uchar _n, _m, ot;
5572 #ifdef RANDOMIZE
5573 short ooi, eoi;
5574 #endif
5575 #ifdef M_LOSS
5576 uchar delta_m = 0;
5577 #endif
5578 short II, JJ = 0, kk;
5579 int tt;
5580 #ifdef REVERSE
5581 short From = BASE, To = now._nr_pr-1;
5582 #else
5583 short From = now._nr_pr-1, To = BASE;
5584 #endif
5585 Down:
5586 #ifdef CHECK
5587 cpu_printf("%d: Down - %s %saccepting [pids %d-%d]\n",
5588 depth, (trpt->tau&4)?"claim":"program",
5589 (trpt->o_pm&2)?"":"non-", From, To);
5590 #endif
5591 #ifdef SCHED
5592 if (depth > 0)
5593 { trpt->sched_limit = (trpt-1)->sched_limit;
5594 } else
5595 { trpt->sched_limit = 0;
5596 }
5597 #endif
5598 #ifdef SC
5599 if (depth > hiwater)
5600 { stack2disk();
5601 maxdepth += DDD;
5602 hiwater += DDD;
5603 trpt -= DDD;
5604 if(verbose)
5605 printf("zap %d: %d (maxdepth now %d)\n",
5606 CNT1, hiwater, maxdepth);
5607 }
5608 #endif
5609 trpt->tau &= ~(16|32|64); /* make sure these are off */
5610 #if defined(FULLSTACK) && defined(MA)
5611 trpt->proviso = 0;
5612 #endif
5613 #ifdef NSUCC
5614 trpt->n_succ = 0;
5615 #endif
5616 #if NCORE>1
5617 if (mem_hand_off())
5618 {
5619 #if SYNC
5620 (trpt+1)->o_n = 1; /* not a deadlock: as below */
5621 #endif
5622 #ifndef LOOPSTATE
5623 (trpt-1)->tau |= 16; /* worstcase guess: as below */
5624 #endif
5625 #if NCORE>1 && defined(FULL_TRAIL)
5626 if (upto > 0)
5627 { Pop_Stack_Tree();
5628 }
5629 #endif
5630 goto Up;
5631 }
5632 #endif
5633 if (depth >= maxdepth)
5634 { if (!warned)
5635 { warned = 1;
5636 printf("error: max search depth too small\n");
5637 }
5638 if (bounded)
5639 { uerror("depth limit reached");
5640 }
5641 truncs++;
5642 #if SYNC
5643 (trpt+1)->o_n = 1; /* not a deadlock */
5644 #endif
5645 #ifndef LOOPSTATE
5646 (trpt-1)->tau |= 16; /* worstcase guess */
5647 #endif
5648 #if NCORE>1 && defined(FULL_TRAIL)
5649 if (upto > 0)
5650 { Pop_Stack_Tree();
5651 }
5652 #endif
5653 goto Up;
5654 }
5655 AllOver:
5656 #if (defined(FULLSTACK) && !defined(MA)) || NCORE>1
5657 /* if atomic or rv move, carry forward previous state */
5658 trpt->ostate = (trpt-1)->ostate;
5659 #endif
5660 #ifdef VERI
5661 if ((trpt->tau&4) || ((trpt-1)->tau&128))
5662 #endif
5663 if (boq == -1) { /* if not mid-rv */
5664 #ifndef SAFETY
5665 /* this check should now be redundant
5666 * because the seed state also appears
5667 * on the 1st dfs stack and would be
5668 * matched in hstore below
5669 */
5670 if ((now._a_t&1) && depth > A_depth)
5671 { if (!memcmp((char *)&A_Root,
5672 (char *)&now, vsize))
5673 {
5674 depthfound = A_depth;
5675 #ifdef CHECK
5676 printf("matches seed\n");
5677 #endif
5678 #ifdef NP
5679 uerror("non-progress cycle");
5680 #else
5681 uerror("acceptance cycle");
5682 #endif
5683 #if NCORE>1 && defined(FULL_TRAIL)
5684 if (upto > 0)
5685 { Pop_Stack_Tree();
5686 }
5687 #endif
5688 goto Up;
5689 }
5690 #ifdef CHECK
5691 printf("not seed\n");
5692 #endif
5693 }
5694 #endif
5695 if (!(trpt->tau&8)) /* if no atomic move */
5696 {
5697 #ifdef BITSTATE
5698 #ifdef CNTRSTACK
5699 II = bstore((char *)&now, vsize);
5700 trpt->j6 = j1; trpt->j7 = j2;
5701 JJ = LL[j1] && LL[j2];
5702 #else
5703 #ifdef FULLSTACK
5704 JJ = onstack_now();
5705 #else
5706 #ifndef NOREDUCE
5707 JJ = II; /* worstcase guess for p.o. */
5708 #endif
5709 #endif
5710 II = bstore((char *)&now, vsize);
5711 #endif
5712 #else
5713 #ifdef MA
5714 II = gstore((char *)&now, vsize, 0);
5715 #ifndef FULLSTACK
5716 JJ = II;
5717 #else
5718 JJ = (II == 2)?1:0;
5719 #endif
5720 #else
5721 II = hstore((char *)&now, vsize);
5722 #ifdef FULLSTACK
5723 JJ = (II == 2)?1:0;
5724 #endif
5725 #endif
5726 #endif
5727 kk = (II == 1 || II == 2);
5728 #ifndef SAFETY
5729 #if NCORE==1 || defined (SEP_STATE)
5730 if (II == 2 && ((trpt->o_pm&2) || ((trpt-1)->o_pm&2)))
5731 #ifndef NOFAIR
5732 #if 0
5733 if (!fairness || ((now._a_t&1) && now._cnt[1] == 1)) /* 5.1.4 */
5734 #else
5735 if (a_cycles && !fairness) /* 5.1.6 -- example by Hirofumi Watanabe */
5736 #endif
5737 #endif
5738 {
5739 II = 3; /* Schwoon & Esparza 2005, Gastin&Moro 2004 */
5740 #ifdef VERBOSE
5741 printf("state match on dfs stack\n");
5742 #endif
5743 goto same_case;
5744 }
5745 #endif
5746 #if defined(FULLSTACK) && defined(BITSTATE)
5747 if (!JJ && (now._a_t&1) && depth > A_depth)
5748 { int oj1 = j1;
5749 uchar o_a_t = now._a_t;
5750 now._a_t &= ~(1|16|32);
5751 if (onstack_now())
5752 { II = 3;
5753 #ifdef VERBOSE
5754 printf("state match on 1st dfs stack\n");
5755 #endif
5756 }
5757 now._a_t = o_a_t;
5758 j1 = oj1;
5759 }
5760 #endif
5761 if (II == 3 && a_cycles && (now._a_t&1))
5762 {
5763 #ifndef NOFAIR
5764 if (fairness && now._cnt[1] > 1) /* was != 0 */
5765 {
5766 #ifdef VERBOSE
5767 printf(" fairness count non-zero\n");
5768 #endif
5769 II = 0;
5770 } else
5771 #endif
5772 {
5773 #ifndef BITSTATE
5774 nShadow--;
5775 #endif
5776 same_case: if (Lstate) depthfound = Lstate->D;
5777 #ifdef NP
5778 uerror("non-progress cycle");
5779 #else
5780 uerror("acceptance cycle");
5781 #endif
5782 #if NCORE>1 && defined(FULL_TRAIL)
5783 if (upto > 0)
5784 { Pop_Stack_Tree();
5785 }
5786 #endif
5787 goto Up;
5788 }
5789 }
5790 #endif
5791 #ifndef NOREDUCE
5792 #ifndef SAFETY
5793 #if NCORE>1 && !defined(SEP_STATE) && defined(V_PROVISO)
5794 if (II != 0 && (!Lstate || Lstate->cpu_id < core_id))
5795 { (trpt-1)->tau |= 16;
5796 }
5797 #endif
5798 if ((II && JJ) || (II == 3))
5799 { /* marker for liveness proviso */
5800 #ifndef LOOPSTATE
5801 (trpt-1)->tau |= 16;
5802 #endif
5803 truncs2++;
5804 }
5805 #else
5806 #if NCORE>1 && !defined(SEP_STATE) && defined(V_PROVISO)
5807 if (!(II != 0 && (!Lstate || Lstate->cpu_id < core_id)))
5808 { /* treat as stack state */
5809 (trpt-1)->tau |= 16;
5810 } else
5811 { /* treat as non-stack state */
5812 (trpt-1)->tau |= 64;
5813 }
5814 #endif
5815 if (!II || !JJ)
5816 { /* successor outside stack */
5817 (trpt-1)->tau |= 64;
5818 }
5819 #endif
5820 #endif
5821 if (II)
5822 { truncs++;
5823 #if NCORE>1 && defined(FULL_TRAIL)
5824 if (upto > 0)
5825 { Pop_Stack_Tree();
5826 if (depth == 0)
5827 { return;
5828 } }
5829 #endif
5830 goto Up;
5831 }
5832 if (!kk)
5833 { static long sdone = (long) 0; long ndone;
5834 nstates++;
5835 #if defined(ZAPH) && defined(BITSTATE)
5836 zstates += (double) hfns;
5837 #endif
5838 ndone = (unsigned long) (nstates/((double) FREQ));
5839 if (ndone != sdone)
5840 { snapshot();
5841 sdone = ndone;
5842 #if defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(MA)
5843 if (nstates > ((double)(ONE_L<<(ssize+1))))
5844 { void resize_hashtable(void);
5845 resize_hashtable();
5846 }
5847 #endif
5848 #if defined(ZAPH) && defined(BITSTATE)
5849 if (zstates > ((double)(ONE_L<<(ssize-2))))
5850 { /* more than half the bits set */
5851 void zap_hashtable(void);
5852 zap_hashtable();
5853 zstates = 0;
5854 }
5855 #endif
5856 }
5857 #ifdef SVDUMP
5858 if (vprefix > 0)
5859 if (write(svfd, (uchar *) &now, vprefix) != vprefix)
5860 { fprintf(efd, "writing %s.svd failed\n", PanSource);
5861 wrapup();
5862 }
5863 #endif
5864 #if defined(MA) && defined(W_XPT)
5865 if ((unsigned long) nstates%W_XPT == 0)
5866 { void w_xpoint(void);
5867 w_xpoint();
5868 }
5869 #endif
5870 }
5871 #if defined(FULLSTACK) || defined(CNTRSTACK)
5872 onstack_put();
5873 #ifdef DEBUG2
5874 #if defined(FULLSTACK) && !defined(MA)
5875 printf("%d: putting %u (%d)\n", depth,
5876 trpt->ostate,
5877 (trpt->ostate)?trpt->ostate->tagged:0);
5878 #else
5879 printf("%d: putting\n", depth);
5880 #endif
5881 #endif
5882 #else
5883 #if NCORE>1
5884 trpt->ostate = Lstate;
5885 #endif
5886 #endif
5887 } }
5888 if (depth > mreached)
5889 mreached = depth;
5890 #ifdef VERI
5891 if (trpt->tau&4)
5892 #endif
5893 trpt->tau &= ~(1|2); /* timeout and -request off */
5894 _n = 0;
5895 #if SYNC
5896 (trpt+1)->o_n = 0;
5897 #endif
5898 #ifdef VERI
5899 if (now._nr_pr == 0) /* claim terminated */
5900 uerror("end state in claim reached");
5901 check_claim(((P0 *)pptr(0))->_p);
5902 Stutter:
5903 if (trpt->tau&4) /* must make a claimmove */
5904 {
5905 #ifndef NOFAIR
5906 if ((now._a_t&2) /* A-bit set */
5907 && now._cnt[now._a_t&1] == 1)
5908 { now._a_t &= ~2;
5909 now._cnt[now._a_t&1] = 0;
5910 trpt->o_pm |= 16;
5911 #ifdef DEBUG
5912 printf("%3d: fairness Rule 3.: _a_t = %d\n",
5913 depth, now._a_t);
5914 #endif
5915 }
5916 #endif
5917 II = 0; /* never */
5918 goto Veri0;
5919 }
5920 #endif
5921 #ifndef NOREDUCE
5922 /* Look for a process with only safe transitions */
5923 /* (special rules apply in the 2nd dfs) */
5924 if (boq == -1 && From != To
5925
5926 #ifdef SAFETY
5927 #if NCORE>1
5928 && (depth < z_handoff)
5929 #endif
5930 )
5931 #else
5932 #if NCORE>1
5933 && ((a_cycles) || (!a_cycles && depth < z_handoff))
5934 #endif
5935 && (!(now._a_t&1)
5936 || (a_cycles &&
5937 #ifndef BITSTATE
5938 #ifdef MA
5939 #ifdef VERI
5940 !((trpt-1)->proviso))
5941 #else
5942 !(trpt->proviso))
5943 #endif
5944 #else
5945 #ifdef VERI
5946 (trpt-1)->ostate &&
5947 !(((char *)&((trpt-1)->ostate->state))[0] & 128))
5948 #else
5949 !(((char *)&(trpt->ostate->state))[0] & 128))
5950 #endif
5951 #endif
5952 #else
5953 #ifdef VERI
5954 (trpt-1)->ostate &&
5955 (trpt-1)->ostate->proviso == 0)
5956 #else
5957 trpt->ostate->proviso == 0)
5958 #endif
5959 #endif
5960 ))
5961 #endif
5962
5963 #ifdef REVERSE
5964 for (II = From; II <= To; II++)
5965 #else
5966 for (II = From; II >= To; II--)
5967 #endif
5968 {
5969 Resume: /* pick up here if preselect fails */
5970 this = pptr(II);
5971 tt = (int) ((P0 *)this)->_p;
5972 ot = (uchar) ((P0 *)this)->_t;
5973 if (trans[ot][tt]->atom & 8)
5974 { t = trans[ot][tt];
5975 if (t->qu[0] != 0)
5976 { Ccheck++;
5977 if (!q_cond(II, t))
5978 continue;
5979 Cholds++;
5980 }
5981 From = To = II; /* the process preselected */
5982 #ifdef NIBIS
5983 t->om = 0;
5984 #endif
5985 trpt->tau |= 32; /* preselect marker */
5986 #ifdef DEBUG
5987 #ifdef NIBIS
5988 printf("%3d: proc %d Pre", depth, II);
5989 printf("Selected (om=%d, tau=%d)\n",
5990 t->om, trpt->tau);
5991 #else
5992 printf("%3d: proc %d PreSelected (tau=%d)\n",
5993 depth, II, trpt->tau);
5994 #endif
5995 #endif
5996 goto Again;
5997 }
5998 }
5999 trpt->tau &= ~32;
6000 #endif
6001 #if !defined(NOREDUCE) || (defined(ETIM) && !defined(VERI))
6002 Again:
6003 #endif
6004 /* The Main Expansion Loop over Processes */
6005 trpt->o_pm &= ~(8|16|32|64); /* fairness-marks */
6006 #ifndef NOFAIR
6007 if (fairness && boq == -1
6008 #ifdef VERI
6009 && (!(trpt->tau&4) && !((trpt-1)->tau&128))
6010 #endif
6011 && !(trpt->tau&8))
6012 { /* A_bit = 1; Cnt = N in acc states with A_bit 0 */
6013 if (!(now._a_t&2))
6014 {
6015 if (a_cycles && (trpt->o_pm&2))
6016 { /* Accepting state */
6017 now._a_t |= 2;
6018 now._cnt[now._a_t&1] = now._nr_pr + 1;
6019 trpt->o_pm |= 8;
6020 #ifdef DEBUG
6021 printf("%3d: fairness Rule 1: cnt=%d, _a_t=%d\n",
6022 depth, now._cnt[now._a_t&1], now._a_t);
6023 #endif
6024 }
6025 } else
6026 { /* A_bit = 0 when Cnt 0 */
6027 if (now._cnt[now._a_t&1] == 1)
6028 { now._a_t &= ~2;
6029 now._cnt[now._a_t&1] = 0;
6030 trpt->o_pm |= 16;
6031 #ifdef DEBUG
6032 printf("%3d: fairness Rule 3: _a_t = %d\n",
6033 depth, now._a_t);
6034 #endif
6035 } } }
6036 #endif
6037
6038 #ifdef REVERSE
6039 for (II = From; II <= To; II++)
6040 #else
6041 for (II = From; II >= To; II--)
6042 #endif
6043 {
6044 #if SYNC
6045 /* no rendezvous with same proc */
6046 if (boq != -1 && trpt->pr == II) continue;
6047 #endif
6048 #ifdef SCHED
6049 /* limit max nr of interleavings */
6050 if (From != To
6051 && depth > 0
6052 #ifdef VERI
6053 && II != 0
6054 #endif
6055 && (trpt-1)->pr != II
6056 && trpt->sched_limit >= sched_max)
6057 { continue;
6058 }
6059 #endif
6060 #ifdef VERI
6061 Veri0:
6062 #endif
6063 this = pptr(II);
6064 tt = (int) ((P0 *)this)->_p;
6065 ot = (uchar) ((P0 *)this)->_t;
6066 #ifdef NIBIS
6067 /* don't repeat a previous preselected expansion */
6068 /* could hit this if reduction proviso was false */
6069 t = trans[ot][tt];
6070 if (!(trpt->tau&4)
6071 && !(trpt->tau&1)
6072 && !(trpt->tau&32)
6073 && (t->atom & 8)
6074 && boq == -1
6075 && From != To)
6076 { if (t->qu[0] == 0
6077 || q_cond(II, t))
6078 { _m = t->om;
6079 if (_m>_n||(_n>3&&_m!=0)) _n=_m;
6080 continue; /* did it before */
6081 } }
6082 #endif
6083 trpt->o_pm &= ~1; /* no move in this pid yet */
6084 #ifdef EVENT_TRACE
6085 (trpt+1)->o_event = now._event;
6086 #endif
6087 /* Fairness: Cnt++ when Cnt == II */
6088 #ifndef NOFAIR
6089 trpt->o_pm &= ~64; /* didn't apply rule 2 */
6090 if (fairness
6091 && boq == -1
6092 && !(trpt->o_pm&32)
6093 && (now._a_t&2)
6094 && now._cnt[now._a_t&1] == II+2)
6095 { now._cnt[now._a_t&1] -= 1;
6096 #ifdef VERI
6097 /* claim need not participate */
6098 if (II == 1)
6099 now._cnt[now._a_t&1] = 1;
6100 #endif
6101 #ifdef DEBUG
6102 printf("%3d: proc %d fairness ", depth, II);
6103 printf("Rule 2: --cnt to %d (%d)\n",
6104 now._cnt[now._a_t&1], now._a_t);
6105 #endif
6106 trpt->o_pm |= (32|64);
6107 }
6108 #endif
6109 #ifdef HAS_PROVIDED
6110 if (!provided(II, ot, tt, t)) continue;
6111 #endif
6112 /* check all trans of proc II - escapes first */
6113 #ifdef HAS_UNLESS
6114 trpt->e_state = 0;
6115 #endif
6116 (trpt+1)->pr = (uchar) II;
6117 (trpt+1)->st = tt;
6118 #ifdef RANDOMIZE
6119 for (ooi = eoi = 0, t = trans[ot][tt]; t; t = t->nxt, ooi++)
6120 { if (strcmp(t->tp, "else") == 0)
6121 { eoi++;
6122 break;
6123 } }
6124 if (eoi > 0)
6125 { t = trans[ot][tt];
6126 #ifdef VERBOSE
6127 printf("randomizer: suppressed, saw else\n");
6128 #endif
6129 } else
6130 { eoi = rand()%ooi;
6131 #ifdef VERBOSE
6132 printf("randomizer: skip %d in %d\n", eoi, ooi);
6133 #endif
6134 for (t = trans[ot][tt]; t; t = t->nxt)
6135 if (eoi-- <= 0) break;
6136 }
6137 domore:
6138 for ( ; t && ooi > 0; t = t->nxt, ooi--)
6139 #else
6140 for (t = trans[ot][tt]; t; t = t->nxt)
6141 #endif
6142 {
6143 #ifdef HAS_UNLESS
6144 /* exploring all transitions from
6145 * a single escape state suffices
6146 */
6147 if (trpt->e_state > 0
6148 && trpt->e_state != t->e_trans)
6149 {
6150 #ifdef DEBUG
6151 printf("skip 2nd escape %d (did %d before)\n",
6152 t->e_trans, trpt->e_state);
6153 #endif
6154 break;
6155 }
6156 #endif
6157 (trpt+1)->o_t = t;
6158 #ifdef INLINE
6159 #include FORWARD_MOVES
6160 P999: /* jumps here when move succeeds */
6161 #else
6162 if (!(_m = do_transit(t, II))) continue;
6163 #endif
6164 #ifdef SCHED
6165 if (depth > 0
6166 #ifdef VERI
6167 && II != 0
6168 #endif
6169 && (trpt-1)->pr != II)
6170 { trpt->sched_limit = 1 + (trpt-1)->sched_limit;
6171 }
6172 #endif
6173 if (boq == -1)
6174 #ifdef CTL
6175 /* for branching-time, can accept reduction only if */
6176 /* the persistent set contains just 1 transition */
6177 { if ((trpt->tau&32) && (trpt->o_pm&1))
6178 trpt->tau |= 16;
6179 trpt->o_pm |= 1; /* we moved */
6180 }
6181 #else
6182 trpt->o_pm |= 1; /* we moved */
6183 #endif
6184 #ifdef LOOPSTATE
6185 if (loopstate[ot][tt])
6186 {
6187 #ifdef VERBOSE
6188 printf("exiting from loopstate:\n");
6189 #endif
6190 trpt->tau |= 16;
6191 cnt_loops++;
6192 }
6193 #endif
6194 #ifdef PEG
6195 peg[t->forw]++;
6196 #endif
6197 #if defined(VERBOSE) || defined(CHECK)
6198 #if defined(SVDUMP)
6199 cpu_printf("%3d: proc %d exec %d \n", depth, II, t->t_id);
6200 #else
6201 cpu_printf("%3d: proc %d exec %d, %d to %d, %s %s %s %saccepting [tau=%d]\n",
6202 depth, II, t->forw, tt, t->st, t->tp,
6203 (t->atom&2)?"atomic":"",
6204 (boq != -1)?"rendez-vous":"",
6205 (trpt->o_pm&2)?"":"non-", trpt->tau);
6206 #ifdef HAS_UNLESS
6207 if (t->e_trans)
6208 cpu_printf("\t(escape to state %d)\n", t->st);
6209 #endif
6210 #endif
6211 #ifdef RANDOMIZE
6212 cpu_printf("\t(randomizer %d)\n", ooi);
6213 #endif
6214 #endif
6215 #ifdef HAS_LAST
6216 #ifdef VERI
6217 if (II != 0)
6218 #endif
6219 now._last = II - BASE;
6220 #endif
6221 #ifdef HAS_UNLESS
6222 trpt->e_state = t->e_trans;
6223 #endif
6224 depth++; trpt++;
6225 trpt->pr = (uchar) II;
6226 trpt->st = tt;
6227 trpt->o_pm &= ~(2|4);
6228 if (t->st > 0)
6229 { ((P0 *)this)->_p = t->st;
6230 /* moved down reached[ot][t->st] = 1; */
6231 }
6232 #ifndef SAFETY
6233 if (a_cycles)
6234 {
6235 #if (ACCEPT_LAB>0 && !defined(NP)) || (PROG_LAB>0 && defined(HAS_NP))
6236 int ii;
6237 #endif
6238 #define P__Q ((P0 *)pptr(ii))
6239 #if ACCEPT_LAB>0
6240 #ifdef NP
6241 /* state 1 of np_ claim is accepting */
6242 if (((P0 *)pptr(0))->_p == 1)
6243 trpt->o_pm |= 2;
6244 #else
6245 for (ii = 0; ii < (int) now._nr_pr; ii++)
6246 { if (accpstate[P__Q->_t][P__Q->_p])
6247 { trpt->o_pm |= 2;
6248 break;
6249 } }
6250 #endif
6251 #endif
6252 #if defined(HAS_NP) && PROG_LAB>0
6253 for (ii = 0; ii < (int) now._nr_pr; ii++)
6254 { if (progstate[P__Q->_t][P__Q->_p])
6255 { trpt->o_pm |= 4;
6256 break;
6257 } }
6258 #endif
6259 #undef P__Q
6260 }
6261 #endif
6262 trpt->o_t = t; trpt->o_n = _n;
6263 trpt->o_ot = ot; trpt->o_tt = tt;
6264 trpt->o_To = To; trpt->o_m = _m;
6265 trpt->tau = 0;
6266 #ifdef RANDOMIZE
6267 trpt->oo_i = ooi;
6268 #endif
6269 if (boq != -1 || (t->atom&2))
6270 { trpt->tau |= 8;
6271 #ifdef VERI
6272 /* atomic sequence in claim */
6273 if((trpt-1)->tau&4)
6274 trpt->tau |= 4;
6275 else
6276 trpt->tau &= ~4;
6277 } else
6278 { if ((trpt-1)->tau&4)
6279 trpt->tau &= ~4;
6280 else
6281 trpt->tau |= 4;
6282 }
6283 /* if claim allowed timeout, so */
6284 /* does the next program-step: */
6285 if (((trpt-1)->tau&1) && !(trpt->tau&4))
6286 trpt->tau |= 1;
6287 #else
6288 } else
6289 trpt->tau &= ~8;
6290 #endif
6291 if (boq == -1 && (t->atom&2))
6292 { From = To = II; nlinks++;
6293 } else
6294 #ifdef REVERSE
6295 { From = BASE; To = now._nr_pr-1;
6296 #else
6297 { From = now._nr_pr-1; To = BASE;
6298 #endif
6299 }
6300 #if NCORE>1 && defined(FULL_TRAIL)
6301 if (upto > 0)
6302 { Push_Stack_Tree(II, t->t_id);
6303 }
6304 #endif
6305 goto Down; /* pseudo-recursion */
6306 Up:
6307 #ifdef CHECK
6308 cpu_printf("%d: Up - %s\n", depth,
6309 (trpt->tau&4)?"claim":"program");
6310 #endif
6311 #if NCORE>1
6312 iam_alive();
6313 #ifdef USE_DISK
6314 mem_drain();
6315 #endif
6316 #endif
6317 #if defined(MA) || NCORE>1
6318 if (depth <= 0) return;
6319 /* e.g., if first state is old, after a restart */
6320 #endif
6321 #ifdef SC
6322 if (CNT1 > CNT2
6323 && depth < hiwater - (HHH-DDD) + 2)
6324 {
6325 trpt += DDD;
6326 disk2stack();
6327 maxdepth -= DDD;
6328 hiwater -= DDD;
6329 if(verbose)
6330 printf("unzap %d: %d\n", CNT2, hiwater);
6331 }
6332 #endif
6333 #ifndef NOFAIR
6334 if (trpt->o_pm&128) /* fairness alg */
6335 { now._cnt[now._a_t&1] = trpt->bup.oval;
6336 _n = 1; trpt->o_pm &= ~128;
6337 depth--; trpt--;
6338 #if defined(VERBOSE) || defined(CHECK)
6339 printf("%3d: reversed fairness default move\n", depth);
6340 #endif
6341 goto Q999;
6342 }
6343 #endif
6344 #ifdef HAS_LAST
6345 #ifdef VERI
6346 { int d; Trail *trl;
6347 now._last = 0;
6348 for (d = 1; d < depth; d++)
6349 { trl = getframe(depth-d); /* was (trpt-d) */
6350 if (trl->pr != 0)
6351 { now._last = trl->pr - BASE;
6352 break;
6353 } } }
6354 #else
6355 now._last = (depth<1)?0:(trpt-1)->pr;
6356 #endif
6357 #endif
6358 #ifdef EVENT_TRACE
6359 now._event = trpt->o_event;
6360 #endif
6361 #ifndef SAFETY
6362 if ((now._a_t&1) && depth <= A_depth)
6363 return; /* to checkcycles() */
6364 #endif
6365 t = trpt->o_t; _n = trpt->o_n;
6366 ot = trpt->o_ot; II = trpt->pr;
6367 tt = trpt->o_tt; this = pptr(II);
6368 To = trpt->o_To; _m = trpt->o_m;
6369 #ifdef RANDOMIZE
6370 ooi = trpt->oo_i;
6371 #endif
6372 #ifdef INLINE_REV
6373 _m = do_reverse(t, II, _m);
6374 #else
6375 #include REVERSE_MOVES
6376 R999: /* jumps here when done */
6377 #endif
6378 #ifdef VERBOSE
6379 cpu_printf("%3d: proc %d reverses %d, %d to %d\n",
6380 depth, II, t->forw, tt, t->st);
6381 cpu_printf("\t%s [abit=%d,adepth=%d,tau=%d,%d]\n",
6382 t->tp, now._a_t, A_depth, trpt->tau, (trpt-1)->tau);
6383 #endif
6384 #ifndef NOREDUCE
6385 /* pass the proviso tags */
6386 if ((trpt->tau&8) /* rv or atomic */
6387 && (trpt->tau&16))
6388 (trpt-1)->tau |= 16;
6389 #ifdef SAFETY
6390 if ((trpt->tau&8) /* rv or atomic */
6391 && (trpt->tau&64))
6392 (trpt-1)->tau |= 64;
6393 #endif
6394 #endif
6395 depth--; trpt--;
6396
6397 #ifdef NSUCC
6398 trpt->n_succ++;
6399 #endif
6400 #ifdef NIBIS
6401 (trans[ot][tt])->om = _m; /* head of list */
6402 #endif
6403 /* i.e., not set if rv fails */
6404 if (_m)
6405 {
6406 #if defined(VERI) && !defined(NP)
6407 if (II == 0 && verbose && !reached[ot][t->st])
6408 {
6409 printf("depth %d: Claim reached state %d (line %d)\n",
6410 depth, t->st, src_claim [t->st]);
6411 fflush(stdout);
6412 }
6413 #endif
6414 reached[ot][t->st] = 1;
6415 reached[ot][tt] = 1;
6416 }
6417 #ifdef HAS_UNLESS
6418 else trpt->e_state = 0; /* undo */
6419 #endif
6420 if (_m>_n||(_n>3&&_m!=0)) _n=_m;
6421 ((P0 *)this)->_p = tt;
6422 } /* all options */
6423 #ifdef RANDOMIZE
6424 if (!t && ooi > 0)
6425 { t = trans[ot][tt];
6426 #ifdef VERBOSE
6427 printf("randomizer: continue for %d more\n", ooi);
6428 #endif
6429 goto domore;
6430 }
6431 #ifdef VERBOSE
6432 else
6433 printf("randomizer: done\n");
6434 #endif
6435 #endif
6436 #ifndef NOFAIR
6437 /* Fairness: undo Rule 2 */
6438 if ((trpt->o_pm&32)
6439 && (trpt->o_pm&64))
6440 { if (trpt->o_pm&1)
6441 {
6442 #ifdef VERI
6443 if (now._cnt[now._a_t&1] == 1)
6444 now._cnt[now._a_t&1] = 2;
6445 #endif
6446 now._cnt[now._a_t&1] += 1;
6447 #ifdef VERBOSE
6448 printf("%3d: proc %d fairness ", depth, II);
6449 printf("undo Rule 2, cnt=%d, _a_t=%d\n",
6450 now._cnt[now._a_t&1], now._a_t);
6451 #endif
6452 trpt->o_pm &= ~(32|64);
6453 } else
6454 { if (_n > 0)
6455 {
6456 trpt->o_pm &= ~64;
6457 #ifdef REVERSE
6458 II = From-1;
6459 #else
6460 II = From+1;
6461 #endif
6462 } } }
6463 #endif
6464 #ifdef VERI
6465 if (II == 0) break; /* never claim */
6466 #endif
6467 } /* all processes */
6468 #ifdef NSUCC
6469 tally_succ(trpt->n_succ);
6470 #endif
6471 #ifdef SCHED
6472 if (_n == 0 /* no process could move */
6473 #ifdef VERI
6474 && II != 0
6475 #endif
6476 && depth > 0
6477 && trpt->sched_limit >= sched_max)
6478 { _n = 1; /* not a deadlock */
6479 }
6480 #endif
6481 #ifndef NOFAIR
6482 /* Fairness: undo Rule 2 */
6483 if (trpt->o_pm&32) /* remains if proc blocked */
6484 {
6485 #ifdef VERI
6486 if (now._cnt[now._a_t&1] == 1)
6487 now._cnt[now._a_t&1] = 2;
6488 #endif
6489 now._cnt[now._a_t&1] += 1;
6490 #ifdef VERBOSE
6491 printf("%3d: proc -- fairness ", depth);
6492 printf("undo Rule 2, cnt=%d, _a_t=%d\n",
6493 now._cnt[now._a_t&1], now._a_t);
6494 #endif
6495 trpt->o_pm &= ~32;
6496 }
6497 #ifndef NP
6498 if (fairness
6499 && _n == 0 /* nobody moved */
6500 #ifdef VERI
6501 && !(trpt->tau&4) /* in program move */
6502 #endif
6503 && !(trpt->tau&8) /* not an atomic one */
6504 #ifdef OTIM
6505 && ((trpt->tau&1) || endstate())
6506 #else
6507 #ifdef ETIM
6508 && (trpt->tau&1) /* already tried timeout */
6509 #endif
6510 #endif
6511 #ifndef NOREDUCE
6512 /* see below */
6513 && !((trpt->tau&32) && (_n == 0 || (trpt->tau&16)))
6514 #endif
6515 && now._cnt[now._a_t&1] > 0) /* needed more procs */
6516 { depth++; trpt++;
6517 trpt->o_pm |= 128 | ((trpt-1)->o_pm&(2|4));
6518 trpt->bup.oval = now._cnt[now._a_t&1];
6519 now._cnt[now._a_t&1] = 1;
6520 #ifdef VERI
6521 trpt->tau = 4;
6522 #else
6523 trpt->tau = 0;
6524 #endif
6525 #ifdef REVERSE
6526 From = BASE; To = now._nr_pr-1;
6527 #else
6528 From = now._nr_pr-1; To = BASE;
6529 #endif
6530 #if defined(VERBOSE) || defined(CHECK)
6531 printf("%3d: fairness default move ", depth);
6532 printf("(all procs block)\n");
6533 #endif
6534 goto Down;
6535 }
6536 #endif
6537 Q999: /* returns here with _n>0 when done */;
6538 if (trpt->o_pm&8)
6539 { now._a_t &= ~2;
6540 now._cnt[now._a_t&1] = 0;
6541 trpt->o_pm &= ~8;
6542 #ifdef VERBOSE
6543 printf("%3d: fairness undo Rule 1, _a_t=%d\n",
6544 depth, now._a_t);
6545 #endif
6546 }
6547 if (trpt->o_pm&16)
6548 { now._a_t |= 2;
6549 now._cnt[now._a_t&1] = 1;
6550 trpt->o_pm &= ~16;
6551 #ifdef VERBOSE
6552 printf("%3d: fairness undo Rule 3, _a_t=%d\n",
6553 depth, now._a_t);
6554 #endif
6555 }
6556 #endif
6557 #ifndef NOREDUCE
6558 #ifdef SAFETY
6559 #ifdef LOOPSTATE
6560 /* at least one move that was preselected at this */
6561 /* level, blocked or was a loop control flow point */
6562 if ((trpt->tau&32) && (_n == 0 || (trpt->tau&16)))
6563 #else
6564 /* preselected move - no successors outside stack */
6565 if ((trpt->tau&32) && !(trpt->tau&64))
6566 #endif
6567 #ifdef REVERSE
6568 { From = BASE; To = now._nr_pr-1;
6569 #else
6570 { From = now._nr_pr-1; To = BASE;
6571 #endif
6572 #ifdef DEBUG
6573 printf("%3d: proc %d UnSelected (_n=%d, tau=%d)\n",
6574 depth, II+1, _n, trpt->tau);
6575 #endif
6576 _n = 0; trpt->tau &= ~(16|32|64);
6577 #ifdef REVERSE
6578 if (II <= To) /* II already decremented */
6579 #else
6580 if (II >= BASE) /* II already decremented */
6581 #endif
6582 goto Resume;
6583 else
6584 goto Again;
6585 }
6586 #else
6587 /* at least one move that was preselected at this */
6588 /* level, blocked or truncated at the next level */
6589 /* implied: #ifdef FULLSTACK */
6590 if ((trpt->tau&32) && (_n == 0 || (trpt->tau&16)))
6591 {
6592 #ifdef DEBUG
6593 printf("%3d: proc %d UnSelected (_n=%d, tau=%d)\n",
6594 depth, II+1, (int) _n, trpt->tau);
6595 #endif
6596 if (a_cycles && (trpt->tau&16))
6597 { if (!(now._a_t&1))
6598 {
6599 #ifdef DEBUG
6600 printf("%3d: setting proviso bit\n", depth);
6601 #endif
6602 #ifndef BITSTATE
6603 #ifdef MA
6604 #ifdef VERI
6605 (trpt-1)->proviso = 1;
6606 #else
6607 trpt->proviso = 1;
6608 #endif
6609 #else
6610 #ifdef VERI
6611 if ((trpt-1)->ostate)
6612 ((char *)&((trpt-1)->ostate->state))[0] |= 128;
6613 #else
6614 ((char *)&(trpt->ostate->state))[0] |= 128;
6615 #endif
6616 #endif
6617 #else
6618 #ifdef VERI
6619 if ((trpt-1)->ostate)
6620 (trpt-1)->ostate->proviso = 1;
6621 #else
6622 trpt->ostate->proviso = 1;
6623 #endif
6624 #endif
6625 #ifdef REVERSE
6626 From = BASE; To = now._nr_pr-1;
6627 #else
6628 From = now._nr_pr-1; To = BASE;
6629 #endif
6630 _n = 0; trpt->tau &= ~(16|32|64);
6631 goto Again; /* do full search */
6632 } /* else accept reduction */
6633 } else
6634 #ifdef REVERSE
6635 { From = BASE; To = now._nr_pr-1;
6636 #else
6637 { From = now._nr_pr-1; To = BASE;
6638 #endif
6639 _n = 0; trpt->tau &= ~(16|32|64);
6640 #ifdef REVERSE
6641 if (II <= To) /* already decremented */
6642 #else
6643 if (II >= BASE) /* already decremented */
6644 #endif
6645 goto Resume;
6646 else
6647 goto Again;
6648 } }
6649 /* #endif */
6650 #endif
6651 #endif
6652 if (_n == 0 || ((trpt->tau&4) && (trpt->tau&2)))
6653 {
6654 #ifdef DEBUG
6655 cpu_printf("%3d: no move [II=%d, tau=%d, boq=%d]\n",
6656 depth, II, trpt->tau, boq);
6657 #endif
6658 #if SYNC
6659 /* ok if a rendez-vous fails: */
6660 if (boq != -1) goto Done;
6661 #endif
6662 /* ok if no procs or we're at maxdepth */
6663 if ((now._nr_pr == 0 && (!strict || qs_empty()))
6664 #ifdef OTIM
6665 || endstate()
6666 #endif
6667 || depth >= maxdepth-1) goto Done;
6668 if ((trpt->tau&8) && !(trpt->tau&4))
6669 { trpt->tau &= ~(1|8);
6670 /* 1=timeout, 8=atomic */
6671 #ifdef REVERSE
6672 From = BASE; To = now._nr_pr-1;
6673 #else
6674 From = now._nr_pr-1; To = BASE;
6675 #endif
6676 #ifdef DEBUG
6677 cpu_printf("%3d: atomic step proc %d unexecutable\n", depth, II+1);
6678 #endif
6679 #ifdef VERI
6680 trpt->tau |= 4; /* switch to claim */
6681 #endif
6682 goto AllOver;
6683 }
6684 #ifdef ETIM
6685 if (!(trpt->tau&1)) /* didn't try timeout yet */
6686 {
6687 #ifdef VERI
6688 if (trpt->tau&4)
6689 {
6690 #ifndef NTIM
6691 if (trpt->tau&2) /* requested */
6692 #endif
6693 { trpt->tau |= 1;
6694 trpt->tau &= ~2;
6695 #ifdef DEBUG
6696 cpu_printf("%d: timeout\n", depth);
6697 #endif
6698 goto Stutter;
6699 } }
6700 else
6701 { /* only claim can enable timeout */
6702 if ((trpt->tau&8)
6703 && !((trpt-1)->tau&4))
6704 /* blocks inside an atomic */ goto BreakOut;
6705 #ifdef DEBUG
6706 cpu_printf("%d: req timeout\n",
6707 depth);
6708 #endif
6709 (trpt-1)->tau |= 2; /* request */
6710 #if NCORE>1 && defined(FULL_TRAIL)
6711 if (upto > 0)
6712 { Pop_Stack_Tree();
6713 }
6714 #endif
6715 goto Up;
6716 }
6717 #else
6718 #ifdef DEBUG
6719 cpu_printf("%d: timeout\n", depth);
6720 #endif
6721 trpt->tau |= 1;
6722 goto Again;
6723 #endif
6724 }
6725 #endif
6726 #ifdef VERI
6727 BreakOut:
6728 #ifndef NOSTUTTER
6729 if (!(trpt->tau&4))
6730 { trpt->tau |= 4; /* claim stuttering */
6731 trpt->tau |= 128; /* stutter mark */
6732 #ifdef DEBUG
6733 cpu_printf("%d: claim stutter\n", depth);
6734 #endif
6735 goto Stutter;
6736 }
6737 #else
6738 ;
6739 #endif
6740 #else
6741 if (!noends && !a_cycles && !endstate())
6742 { depth--; trpt--; /* new 4.2.3 */
6743 uerror("invalid end state");
6744 depth++; trpt++;
6745 }
6746 #ifndef NOSTUTTER
6747 else if (a_cycles && (trpt->o_pm&2)) /* new 4.2.4 */
6748 { depth--; trpt--;
6749 uerror("accept stutter");
6750 depth++; trpt++;
6751 }
6752 #endif
6753 #endif
6754 }
6755 Done:
6756 if (!(trpt->tau&8)) /* not in atomic seqs */
6757 {
6758 #ifndef SAFETY
6759 if (_n != 0
6760 #ifdef VERI
6761 /* --after-- a program-step, i.e., */
6762 /* after backtracking a claim-step */
6763 && (trpt->tau&4)
6764 /* with at least one running process */
6765 /* unless in a stuttered accept state */
6766 && ((now._nr_pr > 1) || (trpt->o_pm&2))
6767 #endif
6768 && !(now._a_t&1))
6769 {
6770 #ifndef NOFAIR
6771 if (fairness)
6772 {
6773 #ifdef VERBOSE
6774 cpu_printf("Consider check %d %d...\n",
6775 now._a_t, now._cnt[0]);
6776 #endif
6777 if ((now._a_t&2) /* A-bit */
6778 && (now._cnt[0] == 1))
6779 checkcycles();
6780 } else
6781 #endif
6782 if (a_cycles && (trpt->o_pm&2))
6783 checkcycles();
6784 }
6785 #endif
6786 #ifndef MA
6787 #if defined(FULLSTACK) || defined(CNTRSTACK)
6788 #ifdef VERI
6789 if (boq == -1
6790 && (((trpt->tau&4) && !(trpt->tau&128))
6791 || ( (trpt-1)->tau&128)))
6792 #else
6793 if (boq == -1)
6794 #endif
6795 {
6796 #ifdef DEBUG2
6797 #if defined(FULLSTACK)
6798 printf("%d: zapping %u (%d)\n",
6799 depth, trpt->ostate,
6800 (trpt->ostate)?trpt->ostate->tagged:0);
6801 #endif
6802 #endif
6803 onstack_zap();
6804 }
6805 #endif
6806 #else
6807 #ifdef VERI
6808 if (boq == -1
6809 && (((trpt->tau&4) && !(trpt->tau&128))
6810 || ( (trpt-1)->tau&128)))
6811 #else
6812 if (boq == -1)
6813 #endif
6814 {
6815 #ifdef DEBUG
6816 printf("%d: zapping\n", depth);
6817 #endif
6818 onstack_zap();
6819 #ifndef NOREDUCE
6820 if (trpt->proviso)
6821 gstore((char *) &now, vsize, 1);
6822 #endif
6823 }
6824 #endif
6825 }
6826 if (depth > 0)
6827 {
6828 #if NCORE>1 && defined(FULL_TRAIL)
6829 if (upto > 0)
6830 { Pop_Stack_Tree();
6831 }
6832 #endif
6833 goto Up;
6834 }
6835 }
6836
6837 #else
6838 void new_state(void) { /* place holder */ }
6839 #endif
6840
6841 void
6842 assert(int a, char *s, int ii, int tt, Trans *t)
6843 {
6844 if (!a && !noasserts)
6845 { char bad[1024];
6846 strcpy(bad, "assertion violated ");
6847 if (strlen(s) > 1000)
6848 { strncpy(&bad[19], (const char *) s, 1000);
6849 bad[1019] = '\0';
6850 } else
6851 strcpy(&bad[19], s);
6852 uerror(bad);
6853 }
6854 }
6855 #ifndef NOBOUNDCHECK
6856 int
6857 Boundcheck(int x, int y, int a1, int a2, Trans *a3)
6858 {
6859 assert((x >= 0 && x < y), "- invalid array index",
6860 a1, a2, a3);
6861 return x;
6862 }
6863 #endif
6864 void
6865 wrap_stats(void)
6866 {
6867 if (nShadow>0)
6868 printf("%9.8g states, stored (%g visited)\n",
6869 nstates - nShadow, nstates);
6870 else
6871 printf("%9.8g states, stored\n", nstates);
6872 #ifdef BFS
6873 #if SYNC
6874 printf(" %8g nominal states (- rv and atomic)\n", nstates-midrv-nlinks+revrv);
6875 printf(" %8g rvs succeeded\n", midrv-failedrv);
6876 #else
6877 printf(" %8g nominal states (stored-atomic)\n", nstates-nlinks);
6878 #endif
6879 #ifdef DEBUG
6880 printf(" %8g midrv\n", midrv);
6881 printf(" %8g failedrv\n", failedrv);
6882 printf(" %8g revrv\n", revrv);
6883 #endif
6884 #endif
6885 printf("%9.8g states, matched\n", truncs);
6886 #ifdef CHECK
6887 printf("%9.8g matches within stack\n",truncs2);
6888 #endif
6889 if (nShadow>0)
6890 printf("%9.8g transitions (= visited+matched)\n",
6891 nstates+truncs);
6892 else
6893 printf("%9.8g transitions (= stored+matched)\n",
6894 nstates+truncs);
6895 printf("%9.8g atomic steps\n", nlinks);
6896 if (nlost) printf("%g lost messages\n", (double) nlost);
6897
6898 #ifndef BITSTATE
6899 printf("hash conflicts: %9.8g (resolved)\n", hcmp);
6900 #ifndef AUTO_RESIZE
6901 if (hcmp > (double) (1<<ssize))
6902 { printf("hint: increase hashtable-size (-w) to reduce runtime\n");
6903 } /* in multi-core: also reduces lock delays on access to hashtable */
6904 #endif
6905 #else
6906 #ifdef CHECK
6907 printf("%8g states allocated for dfs stack\n", ngrabs);
6908 #endif
6909 if (udmem)
6910 printf("\nhash factor: %4g (best if > 100.)\n\n",
6911 (double)(((double) udmem) * 8.0) / (double) nstates);
6912 else
6913 printf("\nhash factor: %4g (best if > 100.)\n\n",
6914 (double)(1<<(ssize-8)) / (double) nstates * 256.0);
6915 printf("bits set per state: %u (-k%u)\n", hfns, hfns);
6916 #if 0
6917 if (udmem)
6918 { printf("total bits available: %8g (-M%ld)\n",
6919 ((double) udmem) * 8.0, udmem/(1024L*1024L));
6920 } else
6921 printf("total bits available: %8g (-w%d)\n",
6922 ((double) (ONE_L << (ssize-4)) * 16.0), ssize);
6923 #endif
6924 #endif
6925 #ifdef BFS_DISK
6926 printf("bfs disk reads: %ld writes %ld -- diff %ld\n",
6927 bfs_dsk_reads, bfs_dsk_writes, bfs_dsk_writes-bfs_dsk_reads);
6928 if (bfs_dsk_read >= 0) (void) close(bfs_dsk_read);
6929 if (bfs_dsk_write >= 0) (void) close(bfs_dsk_write);
6930 (void) unlink("pan_bfs_dsk.tmp");
6931 #endif
6932 }
6933
6934 void
6935 wrapup(void)
6936 {
6937 #if defined(BITSTATE) || !defined(NOCOMP)
6938 double nr1, nr2, nr3 = 0.0, nr4, nr5 = 0.0;
6939 #if !defined(MA) && (defined(MEMCNT) || defined(MEMLIM))
6940 int mverbose = 1;
6941 #else
6942 int mverbose = verbose;
6943 #endif
6944 #endif
6945 #if NCORE>1
6946 if (verbose) cpu_printf("wrapup -- %d error(s)\n", errors);
6947 if (core_id != 0)
6948 {
6949 #ifdef USE_DISK
6950 void dsk_stats(void);
6951 dsk_stats();
6952 #endif
6953 if (search_terminated != NULL)
6954 { *search_terminated |= 2; /* wrapup */
6955 }
6956 exit(0); /* normal termination, not an error */
6957 }
6958 #endif
6959 #if !defined(WIN32) && !defined(WIN64)
6960 signal(SIGINT, SIG_DFL);
6961 #endif
6962 printf("\n(%s)\n", SpinVersion);
6963 if (!done) printf("Warning: Search not completed\n");
6964 #ifdef SC
6965 (void) unlink((const char *)stackfile);
6966 #endif
6967 #if NCORE>1
6968 if (a_cycles)
6969 { printf(" + Multi-Core (NCORE=%d)\n", NCORE);
6970 } else
6971 { printf(" + Multi-Core (NCORE=%d -z%d)\n", NCORE, z_handoff);
6972 }
6973 #endif
6974 #ifdef BFS
6975 printf(" + Using Breadth-First Search\n");
6976 #endif
6977 #ifndef NOREDUCE
6978 printf(" + Partial Order Reduction\n");
6979 #endif
6980 #ifdef REVERSE
6981 printf(" + Reverse Depth-First Search Order\n");
6982 #endif
6983 #ifdef T_REVERSE
6984 printf(" + Reverse Transition Ordering\n");
6985 #endif
6986 #ifdef RANDOMIZE
6987 printf(" + Randomized Transition Ordering\n");
6988 #endif
6989 #ifdef SCHED
6990 printf(" + Scheduling Restriction (-DSCHED=%d)\n", sched_max);
6991 #endif
6992 #ifdef COLLAPSE
6993 printf(" + Compression\n");
6994 #endif
6995 #ifdef MA
6996 printf(" + Graph Encoding (-DMA=%d)\n", MA);
6997 #ifdef R_XPT
6998 printf(" Restarted from checkpoint %s.xpt\n", PanSource);
6999 #endif
7000 #endif
7001 #ifdef CHECK
7002 #ifdef FULLSTACK
7003 printf(" + FullStack Matching\n");
7004 #endif
7005 #ifdef CNTRSTACK
7006 printf(" + CntrStack Matching\n");
7007 #endif
7008 #endif
7009 #ifdef BITSTATE
7010 printf("\nBit statespace search for:\n");
7011 #else
7012 #ifdef HC
7013 printf("\nHash-Compact %d search for:\n", HC);
7014 #else
7015 printf("\nFull statespace search for:\n");
7016 #endif
7017 #endif
7018 #ifdef EVENT_TRACE
7019 #ifdef NEGATED_TRACE
7020 printf(" notrace assertion +\n");
7021 #else
7022 printf(" trace assertion +\n");
7023 #endif
7024 #endif
7025 #ifdef VERI
7026 printf(" never claim +\n");
7027 printf(" assertion violations ");
7028 if (noasserts)
7029 printf("- (disabled by -A flag)\n");
7030 else
7031 printf("+ (if within scope of claim)\n");
7032 #else
7033 #ifdef NOCLAIM
7034 printf(" never claim - (not selected)\n");
7035 #else
7036 printf(" never claim - (none specified)\n");
7037 #endif
7038 printf(" assertion violations ");
7039 if (noasserts)
7040 printf("- (disabled by -A flag)\n");
7041 else
7042 printf("+\n");
7043 #endif
7044 #ifndef SAFETY
7045 #ifdef NP
7046 printf(" non-progress cycles ");
7047 #else
7048 printf(" acceptance cycles ");
7049 #endif
7050 if (a_cycles)
7051 printf("+ (fairness %sabled)\n",
7052 fairness?"en":"dis");
7053 else printf("- (not selected)\n");
7054 #else
7055 printf(" cycle checks - (disabled by -DSAFETY)\n");
7056 #endif
7057 #ifdef VERI
7058 printf(" invalid end states - ");
7059 printf("(disabled by ");
7060 if (noends)
7061 printf("-E flag)\n\n");
7062 else
7063 printf("never claim)\n\n");
7064 #else
7065 printf(" invalid end states ");
7066 if (noends)
7067 printf("- (disabled by -E flag)\n\n");
7068 else
7069 printf("+\n\n");
7070 #endif
7071 printf("State-vector %d byte, depth reached %ld", hmax,
7072 #if NCORE>1
7073 (nr_handoffs * z_handoff) +
7074 #endif
7075 mreached);
7076 printf(", errors: %d\n", errors);
7077 fflush(stdout);
7078 #ifdef MA
7079 if (done)
7080 { extern void dfa_stats(void);
7081 if (maxgs+a_cycles+2 < MA)
7082 printf("MA stats: -DMA=%d is sufficient\n",
7083 maxgs+a_cycles+2);
7084 dfa_stats();
7085 }
7086 #endif
7087 wrap_stats();
7088 #ifdef CHECK
7089 printf("stackframes: %d/%d\n\n", smax, svmax);
7090 printf("stats: fa %d, fh %d, zh %d, zn %d - ",
7091 Fa, Fh, Zh, Zn);
7092 printf("check %d holds %d\n", Ccheck, Cholds);
7093 printf("stack stats: puts %d, probes %d, zaps %d\n",
7094 PUT, PROBE, ZAPS);
7095 #else
7096 printf("\n");
7097 #endif
7098
7099 #if defined(BITSTATE) || !defined(NOCOMP)
7100 nr1 = (nstates-nShadow)*
7101 (double)(hmax+sizeof(struct H_el)-sizeof(unsigned));
7102 #ifdef BFS
7103 nr2 = 0.0;
7104 #else
7105 nr2 = (double) ((maxdepth+3)*sizeof(Trail));
7106 #endif
7107 #ifndef BITSTATE
7108 #if !defined(MA) || defined(COLLAPSE)
7109 nr3 = (double) (ONE_L<<ssize)*sizeof(struct H_el *);
7110 #endif
7111 #else
7112 if (udmem)
7113 nr3 = (double) (udmem);
7114 else
7115 nr3 = (double) (ONE_L<<(ssize-3));
7116 #ifdef CNTRSTACK
7117 nr5 = (double) (ONE_L<<(ssize-3));
7118 #endif
7119 #ifdef FULLSTACK
7120 nr5 = (double) (maxdepth*sizeof(struct H_el *));
7121 #endif
7122 #endif
7123 nr4 = (double) (svmax * (sizeof(Svtack) + hmax))
7124 + (double) (smax * (sizeof(Stack) + Maxbody));
7125 #ifndef MA
7126 if (mverbose || memcnt < nr1+nr2+nr3+nr4+nr5)
7127 #endif
7128 { double remainder = memcnt;
7129 double tmp_nr = memcnt-nr3-nr4-(nr2-fragment)-nr5;
7130 #if NCORE>1 && !defined(SEP_STATE)
7131 tmp_nr -= ((double) NCORE * LWQ_SIZE) + GWQ_SIZE;
7132 #endif
7133 if (tmp_nr < 0.0) tmp_nr = 0.;
7134 printf("Stats on memory usage (in Megabytes):\n");
7135 printf("%9.3f equivalent memory usage for states",
7136 nr1/1048576.); /* 1024*1024=1048576 */
7137 printf(" (stored*(State-vector + overhead))\n");
7138 #if NCORE>1 && !defined(WIN32) && !defined(WIN64)
7139 printf("%9.3f shared memory reserved for state storage\n",
7140 mem_reserved/1048576.);
7141 #ifdef SEP_HEAP
7142 printf(" in %d local heaps of %7.3f MB each\n",
7143 NCORE, mem_reserved/(NCORE*1048576.));
7144 #endif
7145 printf("\n");
7146 #endif
7147 #ifdef BITSTATE
7148 if (udmem)
7149 printf("%9.3f memory used for hash array (-M%ld)\n",
7150 nr3/1048576., udmem/(1024L*1024L));
7151 else
7152 printf("%9.3f memory used for hash array (-w%d)\n",
7153 nr3/1048576., ssize);
7154 if (nr5 > 0.0)
7155 printf("%9.3f memory used for bit stack\n",
7156 nr5/1048576.);
7157 remainder = remainder - nr3 - nr5;
7158 #else
7159 printf("%9.3f actual memory usage for states",
7160 tmp_nr/1048576.);
7161 remainder -= tmp_nr;
7162 printf(" (");
7163 if (tmp_nr > 0.)
7164 { if (tmp_nr > nr1) printf("unsuccessful ");
7165 printf("compression: %.2f%%)\n",
7166 (100.0*tmp_nr)/nr1);
7167 } else
7168 printf("less than 1k)\n");
7169 #ifndef MA
7170 if (tmp_nr > 0.)
7171 { printf(" state-vector as stored = %.0f byte",
7172 (tmp_nr)/(nstates-nShadow) -
7173 (double) (sizeof(struct H_el) - sizeof(unsigned)));
7174 printf(" + %ld byte overhead\n",
7175 (long int) sizeof(struct H_el)-sizeof(unsigned));
7176 }
7177 #endif
7178 #if !defined(MA) || defined(COLLAPSE)
7179 printf("%9.3f memory used for hash table (-w%d)\n",
7180 nr3/1048576., ssize);
7181 remainder -= nr3;
7182 #endif
7183 #endif
7184 #ifndef BFS
7185 printf("%9.3f memory used for DFS stack (-m%ld)\n",
7186 nr2/1048576., maxdepth);
7187 remainder -= nr2;
7188 #endif
7189 #if NCORE>1
7190 remainder -= ((double) NCORE * LWQ_SIZE) + GWQ_SIZE;
7191 printf("%9.3f shared memory used for work-queues\n",
7192 (GWQ_SIZE + (double) NCORE * LWQ_SIZE) /1048576.);
7193 printf(" in %d queues of %7.3f MB each",
7194 NCORE, (double) LWQ_SIZE /1048576.);
7195 #ifndef NGQ
7196 printf(" + a global q of %7.3f MB\n",
7197 (double) GWQ_SIZE / 1048576.);
7198 #else
7199 printf("\n");
7200 #endif
7201 #endif
7202 if (remainder - fragment > 1048576.)
7203 printf("%9.3f other (proc and chan stacks)\n",
7204 (remainder-fragment)/1048576.);
7205 if (fragment > 1048576.)
7206 printf("%9.3f memory lost to fragmentation\n",
7207 fragment/1048576.);
7208 printf("%9.3f total actual memory usage\n\n",
7209 memcnt/1048576.);
7210 }
7211 #ifndef MA
7212 else
7213 #endif
7214 #endif
7215 #ifndef MA
7216 printf("%9.3f memory usage (Mbyte)\n\n",
7217 memcnt/1048576.);
7218 #endif
7219 #ifdef COLLAPSE
7220 printf("nr of templates: [ globals chans procs ]\n");
7221 printf("collapse counts: [ ");
7222 { int i; for (i = 0; i < 256+2; i++)
7223 if (ncomps[i] != 0)
7224 printf("%d ", ncomps[i]);
7225 printf("]\n");
7226 }
7227 #endif
7228 if ((done || verbose) && !no_rck) do_reach();
7229 #ifdef PEG
7230 { int i;
7231 printf("\nPeg Counts (transitions executed):\n");
7232 for (i = 1; i < NTRANS; i++)
7233 { if (peg[i]) putpeg(i, peg[i]);
7234 } }
7235 #endif
7236 #ifdef VAR_RANGES
7237 dumpranges();
7238 #endif
7239 #ifdef SVDUMP
7240 if (vprefix > 0) close(svfd);
7241 #endif
7242 #ifdef LOOPSTATE
7243 printf("%g loopstates hit\n", cnt_loops);
7244 #endif
7245 #ifdef NSUCC
7246 dump_succ();
7247 #endif
7248 #if NCORE>1 && defined(T_ALERT)
7249 crash_report();
7250 #endif
7251 pan_exit(0);
7252 }
7253
7254 void
7255 stopped(int arg)
7256 { printf("Interrupted\n");
7257 #if NCORE>1
7258 was_interrupted = 1;
7259 #endif
7260 wrapup();
7261 pan_exit(0);
7262 }
7263
7264 #ifdef SFH
7265 /*
7266 * super fast hash, based on Paul Hsieh's function
7267 * http://www.azillionmonkeys.com/qed/hash.html
7268 */
7269 #include <stdint.h>
7270 #undef get16bits
7271 #if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
7272 || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
7273 #define get16bits(d) (*((const uint16_t *) (d)))
7274 #endif
7275
7276 #ifndef get16bits
7277 #define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8)\
7278 +(uint32_t)(((const uint8_t *)(d))[0]) )
7279 #endif
7280
7281 void
7282 d_sfh(const char *s, int len)
7283 { uint32_t h = len, tmp;
7284 int rem;
7285
7286 rem = len & 3;
7287 len >>= 2;
7288
7289 for ( ; len > 0; len--)
7290 { h += get16bits(s);
7291 tmp = (get16bits(s+2) << 11) ^ h;
7292 h = (h << 16) ^ tmp;
7293 s += 2*sizeof(uint16_t);
7294 h += h >> 11;
7295 }
7296 switch (rem) {
7297 case 3: h += get16bits(s);
7298 h ^= h << 16;
7299 h ^= s[sizeof(uint16_t)] << 18;
7300 h += h >> 11;
7301 break;
7302 case 2: h += get16bits(s);
7303 h ^= h << 11;
7304 h += h >> 17;
7305 break;
7306 case 1: h += *s;
7307 h ^= h << 10;
7308 h += h >> 1;
7309 break;
7310 }
7311 h ^= h << 3;
7312 h += h >> 5;
7313 h ^= h << 4;
7314 h += h >> 17;
7315 h ^= h << 25;
7316 h += h >> 6;
7317
7318 K1 = h;
7319 }
7320 #endif
7321
7322 #include <stdint.h>
7323 #if defined(HASH64) || defined(WIN64)
7324 /* 64-bit Jenkins hash, 1997
7325 * http://burtleburtle.net/bob/c/lookup8.c
7326 */
7327 #define mix(a,b,c) \
7328 { a -= b; a -= c; a ^= (c>>43); \
7329 b -= c; b -= a; b ^= (a<<9); \
7330 c -= a; c -= b; c ^= (b>>8); \
7331 a -= b; a -= c; a ^= (c>>38); \
7332 b -= c; b -= a; b ^= (a<<23); \
7333 c -= a; c -= b; c ^= (b>>5); \
7334 a -= b; a -= c; a ^= (c>>35); \
7335 b -= c; b -= a; b ^= (a<<49); \
7336 c -= a; c -= b; c ^= (b>>11); \
7337 a -= b; a -= c; a ^= (c>>12); \
7338 b -= c; b -= a; b ^= (a<<18); \
7339 c -= a; c -= b; c ^= (b>>22); \
7340 }
7341 #else
7342 /* 32-bit Jenkins hash, 2006
7343 * http://burtleburtle.net/bob/c/lookup3.c
7344 */
7345 #define rot(x,k) (((x)<<(k))|((x)>>(32-(k))))
7346
7347 #define mix(a,b,c) \
7348 { a -= c; a ^= rot(c, 4); c += b; \
7349 b -= a; b ^= rot(a, 6); a += c; \
7350 c -= b; c ^= rot(b, 8); b += a; \
7351 a -= c; a ^= rot(c,16); c += b; \
7352 b -= a; b ^= rot(a,19); a += c; \
7353 c -= b; c ^= rot(b, 4); b += a; \
7354 }
7355
7356 #define final(a,b,c) \
7357 { c ^= b; c -= rot(b,14); \
7358 a ^= c; a -= rot(c,11); \
7359 b ^= a; b -= rot(a,25); \
7360 c ^= b; c -= rot(b,16); \
7361 a ^= c; a -= rot(c,4); \
7362 b ^= a; b -= rot(a,14); \
7363 c ^= b; c -= rot(b,24); \
7364 }
7365 #endif
7366
7367 void
7368 d_hash(uchar *kb, int nbytes)
7369 { uint8_t *bp;
7370 #if defined(HASH64) || defined(WIN64)
7371 uint64_t a = 0, b, c, n;
7372 uint64_t *k = (uint64_t *) kb;
7373 #else
7374 uint32_t a, b, c, n;
7375 uint32_t *k = (uint32_t *) kb;
7376 #endif
7377 /* extend to multiple of words, if needed */
7378 n = nbytes/WS; /* nr of words */
7379 a = nbytes - (n*WS);
7380 if (a > 0)
7381 { n++;
7382 bp = kb + nbytes;
7383 switch (a) {
7384 case 3: *bp++ = 0; /* fall thru */
7385 case 2: *bp++ = 0; /* fall thru */
7386 case 1: *bp = 0;
7387 case 0: break;
7388 } }
7389 #if defined(HASH64) || defined(WIN64)
7390 b = HASH_CONST[HASH_NR];
7391 c = 0x9e3779b97f4a7c13LL; /* arbitrary value */
7392 while (n >= 3)
7393 { a += k[0];
7394 b += k[1];
7395 c += k[2];
7396 mix(a,b,c);
7397 n -= 3;
7398 k += 3;
7399 }
7400 c += (((uint64_t) nbytes)<<3);
7401 switch (n) {
7402 case 2: b += k[1];
7403 case 1: a += k[0];
7404 case 0: break;
7405 }
7406 mix(a,b,c);
7407 #else
7408 a = c = 0xdeadbeef + (n<<2);
7409 b = HASH_CONST[HASH_NR];
7410 while (n > 3)
7411 { a += k[0];
7412 b += k[1];
7413 c += k[2];
7414 mix(a,b,c);
7415 n -= 3;
7416 k += 3;
7417 }
7418 switch (n) {
7419 case 3: c += k[2];
7420 case 2: b += k[1];
7421 case 1: a += k[0];
7422 case 0: break;
7423 }
7424 final(a,b,c);
7425 #endif
7426 j1 = c&nmask; j3 = a&7; /* 1st bit */
7427 j2 = b&nmask; j4 = (a>>3)&7; /* 2nd bit */
7428 K1 = c; K2 = b;
7429 }
7430
7431 void
7432 s_hash(uchar *cp, int om)
7433 {
7434 #if defined(SFH)
7435 d_sfh((const char *) cp, om); /* sets K1 */
7436 #else
7437 d_hash(cp, om); /* sets K1 etc */
7438 #endif
7439 #ifdef BITSTATE
7440 if (S_Tab == H_tab)
7441 j1 = K1 % omaxdepth;
7442 else
7443 #endif
7444 if (ssize < 8*WS)
7445 j1 = K1&mask;
7446 else
7447 j1 = K1;
7448 }
7449 #ifndef RANDSTOR
7450 int *prerand;
7451 void
7452 inirand(void)
7453 { int i;
7454 srand(123); /* fixed startpoint */
7455 prerand = (int *) emalloc((omaxdepth+3)*sizeof(int));
7456 for (i = 0; i < omaxdepth+3; i++)
7457 prerand[i] = rand();
7458 }
7459 int
7460 pan_rand(void)
7461 { if (!prerand) inirand();
7462 return prerand[depth];
7463 }
7464 #endif
7465
7466 void
7467 set_masks(void) /* 4.2.5 */
7468 {
7469 if (WS == 4 && ssize >= 32)
7470 { mask = 0xffffffff;
7471 #ifdef BITSTATE
7472 switch (ssize) {
7473 case 34: nmask = (mask>>1); break;
7474 case 33: nmask = (mask>>2); break;
7475 default: nmask = (mask>>3); break;
7476 }
7477 #else
7478 nmask = mask;
7479 #endif
7480 } else if (WS == 8)
7481 { mask = ((ONE_L<<ssize)-1); /* hash init */
7482 #ifdef BITSTATE
7483 nmask = mask>>3;
7484 #else
7485 nmask = mask;
7486 #endif
7487 } else if (WS != 4)
7488 { fprintf(stderr, "pan: wordsize %ld not supported\n", (long int) WS);
7489 exit(1);
7490 } else /* WS == 4 and ssize < 32 */
7491 { mask = ((ONE_L<<ssize)-1); /* hash init */
7492 nmask = (mask>>3);
7493 }
7494 }
7495
7496 static long reclaim_size;
7497 static char *reclaim_mem;
7498 #if defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(MA)
7499 #if NCORE>1
7500 #error cannot combine AUTO_RESIZE with NCORE>1 yet
7501 #endif
7502 static struct H_el **N_tab;
7503 void
7504 reverse_capture(struct H_el *p)
7505 { if (!p) return;
7506 reverse_capture(p->nxt);
7507 /* last element of list moves first */
7508 /* to preserve list-order */
7509 j2 = p->m_K1;
7510 if (ssize < 8*WS) /* probably always true */
7511 { j2 &= mask;
7512 }
7513 p->nxt = N_tab[j2];
7514 N_tab[j2] = p;
7515 }
7516 void
7517 resize_hashtable(void)
7518 {
7519 if (WS == 4 && ssize >= 27 - 1)
7520 { return; /* canot increase further */
7521 }
7522
7523 ssize += 2; /* 4x size */
7524
7525 printf("pan: resizing hashtable to -w%d.. ", ssize);
7526
7527 N_tab = (struct H_el **)
7528 emalloc((ONE_L<<ssize)*sizeof(struct H_el *));
7529
7530 set_masks(); /* they changed */
7531
7532 for (j1 = 0; j1 < (ONE_L << (ssize - 2)); j1++)
7533 { reverse_capture(H_tab[j1]);
7534 }
7535 reclaim_mem = (char *) H_tab;
7536 reclaim_size = (ONE_L << (ssize - 2));
7537 H_tab = N_tab;
7538
7539 printf(" done\n");
7540 }
7541 #endif
7542 #if defined(ZAPH) && defined(BITSTATE)
7543 void
7544 zap_hashtable(void)
7545 { cpu_printf("pan: resetting hashtable\n");
7546 if (udmem)
7547 { memset(SS, 0, udmem);
7548 } else
7549 { memset(SS, 0, ONE_L<<(ssize-3));
7550 }
7551 }
7552 #endif
7553
7554 int
7555 main(int argc, char *argv[])
7556 { void to_compile(void);
7557
7558 efd = stderr; /* default */
7559 #ifdef BITSTATE
7560 bstore = bstore_reg; /* default */
7561 #endif
7562 #if NCORE>1
7563 { int i, j;
7564 strcpy(o_cmdline, "");
7565 for (j = 1; j < argc; j++)
7566 { strcat(o_cmdline, argv[j]);
7567 strcat(o_cmdline, " ");
7568 }
7569 /* printf("Command Line: %s\n", o_cmdline); */
7570 if (strlen(o_cmdline) >= sizeof(o_cmdline))
7571 { Uerror("option list too long");
7572 } }
7573 #endif
7574 while (argc > 1 && argv[1][0] == '-')
7575 { switch (argv[1][1]) {
7576 #ifndef SAFETY
7577 #ifdef NP
7578 case 'a': fprintf(efd, "error: -a disabled");
7579 usage(efd); break;
7580 #else
7581 case 'a': a_cycles = 1; break;
7582 #endif
7583 #endif
7584 case 'A': noasserts = 1; break;
7585 case 'b': bounded = 1; break;
7586 #ifdef HAS_CODE
7587 case 'C': coltrace = 1; goto samething;
7588 #endif
7589 case 'c': upto = atoi(&argv[1][2]); break;
7590 case 'd': state_tables++; break;
7591 case 'e': every_error = 1; Nr_Trails = 1; break;
7592 case 'E': noends = 1; break;
7593 #ifdef SC
7594 case 'F': if (strlen(argv[1]) > 2)
7595 stackfile = &argv[1][2];
7596 break;
7597 #endif
7598 #if !defined(SAFETY) && !defined(NOFAIR)
7599 case 'f': fairness = 1; break;
7600 #endif
7601 #ifdef HAS_CODE
7602 case 'g': gui = 1; goto samething;
7603 #endif
7604 case 'h': if (!argv[1][2]) usage(efd); else
7605 HASH_NR = atoi(&argv[1][2])%33; break;
7606 case 'I': iterative = 2; every_error = 1; break;
7607 case 'i': iterative = 1; every_error = 1; break;
7608 case 'J': like_java = 1; break; /* Klaus Havelund */
7609 #ifdef BITSTATE
7610 case 'k': hfns = atoi(&argv[1][2]); break;
7611 #endif
7612 #ifdef SCHED
7613 case 'L': sched_max = atoi(&argv[1][2]); break;
7614 #endif
7615 #ifndef SAFETY
7616 #ifdef NP
7617 case 'l': a_cycles = 1; break;
7618 #else
7619 case 'l': fprintf(efd, "error: -l disabled");
7620 usage(efd); break;
7621 #endif
7622 #endif
7623 #ifdef BITSTATE
7624 case 'M': udmem = atoi(&argv[1][2]); break;
7625 case 'G': udmem = atoi(&argv[1][2]); udmem *= 1024; break;
7626 #else
7627 case 'M': case 'G':
7628 fprintf(stderr, "-M and -G affect only -DBITSTATE\n");
7629 break;
7630 #endif
7631 case 'm': maxdepth = atoi(&argv[1][2]); break;
7632 case 'n': no_rck = 1; break;
7633 case 'P': readtrail = 1; onlyproc = atoi(&argv[1][2]);
7634 if (argv[2][0] != '-') /* check next arg */
7635 { trailfilename = argv[2];
7636 argc--; argv++; /* skip next arg */
7637 }
7638 break;
7639 #ifdef SVDUMP
7640 case 'p': vprefix = atoi(&argv[1][2]); break;
7641 #endif
7642 #if NCORE==1
7643 case 'Q': quota = (double) 60.0 * (double) atoi(&argv[1][2]); break;
7644 #endif
7645 case 'q': strict = 1; break;
7646 case 'R': Nrun = atoi(&argv[1][2]); break;
7647 #ifdef HAS_CODE
7648 case 'r':
7649 samething: readtrail = 1;
7650 if (isdigit(argv[1][2]))
7651 whichtrail = atoi(&argv[1][2]);
7652 else if (argc > 2 && argv[2][0] != '-') /* check next arg */
7653 { trailfilename = argv[2];
7654 argc--; argv++; /* skip next arg */
7655 }
7656 break;
7657 case 'S': silent = 1; goto samething;
7658 #endif
7659 #ifdef BITSTATE
7660 case 's': hfns = 1; break;
7661 #endif
7662 case 'T': TMODE = 0444; break;
7663 case 't': if (argv[1][2]) tprefix = &argv[1][2]; break;
7664 case 'V': start_timer(); printf("Generated by %s\n", SpinVersion);
7665 to_compile(); pan_exit(2); break;
7666 case 'v': verbose++; break;
7667 case 'w': ssize = atoi(&argv[1][2]); break;
7668 case 'Y': signoff = 1; break;
7669 case 'X': efd = stdout; break;
7670 case 'x': exclusive = 1; break;
7671 #if NCORE>1
7672 /* -B ip is passthru to proxy of remote ip address: */
7673 case 'B': argc--; argv++; break;
7674 case 'Q': worker_pids[0] = atoi(&argv[1][2]); break;
7675 /* -Un means that the nth worker should be instantiated as a proxy */
7676 case 'U': proxy_pid = atoi(&argv[1][2]); break;
7677 /* -W means that this copy is started by a cluster-server as a remote */
7678 /* this flag is passed to ./pan_proxy, which interprets it */
7679 case 'W': remote_party++; break;
7680 case 'Z': core_id = atoi(&argv[1][2]);
7681 if (verbose)
7682 { printf("cpu%d: pid %d parent %d\n",
7683 core_id, getpid(), worker_pids[0]);
7684 }
7685 break;
7686 case 'z': z_handoff = atoi(&argv[1][2]); break;
7687 #else
7688 case 'z': break; /* ignored for single-core */
7689 #endif
7690 default : fprintf(efd, "saw option -%c\n", argv[1][1]); usage(efd); break;
7691 }
7692 argc--; argv++;
7693 }
7694 if (iterative && TMODE != 0666)
7695 { TMODE = 0666;
7696 fprintf(efd, "warning: -T ignored when -i or -I is used\n");
7697 }
7698 #if defined(HASH32) && !defined(SFH)
7699 if (WS > 4)
7700 { fprintf(efd, "strong warning: compiling -DHASH32 on a 64-bit machine\n");
7701 fprintf(efd, " without -DSFH can slow down performance a lot\n");
7702 }
7703 #endif
7704 #if defined(WIN32) || defined(WIN64)
7705 if (TMODE == 0666)
7706 TMODE = _S_IWRITE | _S_IREAD;
7707 else
7708 TMODE = _S_IREAD;
7709 #endif
7710 #if NCORE>1
7711 store_proxy_pid = proxy_pid; /* for checks in mem_file() and someone_crashed() */
7712 if (core_id != 0) { proxy_pid = 0; }
7713 #ifndef SEP_STATE
7714 if (core_id == 0 && a_cycles)
7715 { fprintf(efd, "hint: this search may be more efficient ");
7716 fprintf(efd, "if pan.c is compiled -DSEP_STATE\n");
7717 }
7718 #endif
7719 if (z_handoff < 0)
7720 { z_handoff = 20; /* conservative default - for non-liveness checks */
7721 }
7722 #if defined(NGQ) || defined(LWQ_FIXED)
7723 LWQ_SIZE = (double) (128.*1048576.);
7724 #else
7725 LWQ_SIZE = (double) ( z_handoff + 2.) * (double) sizeof(SM_frame);
7726 #endif
7727 #if NCORE>2
7728 if (a_cycles)
7729 { fprintf(efd, "warning: the intended nr of cores to be used in liveness mode is 2\n");
7730 #ifndef SEP_STATE
7731 fprintf(efd, "warning: without -DSEP_STATE there is no guarantee that all liveness violations are found\n");
7732 #endif
7733 }
7734 #endif
7735 #ifdef HAS_HIDDEN
7736 #error cannot use hidden variables when compiling multi-core
7737 #endif
7738 #endif
7739 #ifdef BITSTATE
7740 if (hfns <= 0)
7741 { hfns = 1;
7742 fprintf(efd, "warning: using -k%d as minimal usable value\n", hfns);
7743 }
7744 #endif
7745 omaxdepth = maxdepth;
7746 #ifdef BITSTATE
7747 if (WS == 4 && ssize > 34)
7748 { ssize = 34;
7749 fprintf(efd, "warning: using -w%d as max usable value\n", ssize);
7750 /*
7751 * -w35 would not work: 35-3 = 32 but 1^31 is the largest
7752 * power of 2 that can be represented in an unsigned long
7753 */
7754 }
7755 #else
7756 if (WS == 4 && ssize > 27)
7757 { ssize = 27;
7758 fprintf(efd, "warning: using -w%d as max usable value\n", ssize);
7759 /*
7760 * for emalloc, the lookup table size multiplies by 4 for the pointers
7761 * the largest power of 2 that can be represented in a ulong is 1^31
7762 * hence the largest number of lookup table slots is 31-4 = 27
7763 */
7764 }
7765 #endif
7766 #ifdef SC
7767 hiwater = HHH = maxdepth-10;
7768 DDD = HHH/2;
7769 if (!stackfile)
7770 { stackfile = (char *) emalloc(strlen(PanSource)+4+1);
7771 sprintf(stackfile, "%s._s_", PanSource);
7772 }
7773 if (iterative)
7774 { fprintf(efd, "error: cannot use -i or -I with -DSC\n");
7775 pan_exit(1);
7776 }
7777 #endif
7778 #if (defined(R_XPT) || defined(W_XPT)) && !defined(MA)
7779 #warning -DR_XPT and -DW_XPT assume -DMA (ignored)
7780 #endif
7781 if (iterative && a_cycles)
7782 fprintf(efd, "warning: -i or -I work for safety properties only\n");
7783 #ifdef BFS
7784 #ifdef SC
7785 #error -DBFS not compatible with -DSC
7786 #endif
7787 #ifdef HAS_LAST
7788 #error -DBFS not compatible with _last
7789 #endif
7790 #ifdef HAS_STACK
7791 #error cannot use c_track UnMatched with BFS
7792 #endif
7793 #ifdef REACH
7794 #warning -DREACH is redundant when -DBFS is used
7795 #endif
7796 #endif
7797 #if defined(MERGED) && defined(PEG)
7798 #error to use -DPEG use: spin -o3 -a
7799 #endif
7800 #ifdef HC
7801 #ifdef SFH
7802 #error cannot combine -DHC and -DSFH
7803 /* use of NOCOMP is the real reason */
7804 #else
7805 #ifdef NOCOMP
7806 #error cannot combine -DHC and -DNOCOMP
7807 #endif
7808 #endif
7809 #ifdef BITSTATE
7810 #error cannot combine -DHC and -DBITSTATE
7811 #endif
7812 #endif
7813 #if defined(SAFETY) && defined(NP)
7814 #error cannot combine -DNP and -DBFS or -DSAFETY
7815 #endif
7816 #ifdef MA
7817 #ifdef BITSTATE
7818 #error cannot combine -DMA and -DBITSTATE
7819 #endif
7820 #if MA <= 0
7821 #error usage: -DMA=N with N > 0 and N < VECTORSZ
7822 #endif
7823 #endif
7824 #ifdef COLLAPSE
7825 #ifdef BITSTATE
7826 #error cannot combine -DBITSTATE and -DCOLLAPSE
7827 #endif
7828 #ifdef SFH
7829 #error cannot combine -DCOLLAPSE and -DSFH
7830 /* use of NOCOMP is the real reason */
7831 #else
7832 #ifdef NOCOMP
7833 #error cannot combine -DCOLLAPSE and -DNOCOMP
7834 #endif
7835 #endif
7836 #endif
7837 if (maxdepth <= 0 || ssize <= 1) usage(efd);
7838 #if SYNC>0 && !defined(NOREDUCE)
7839 if (a_cycles && fairness)
7840 { fprintf(efd, "error: p.o. reduction not compatible with ");
7841 fprintf(efd, "fairness (-f) in models\n");
7842 fprintf(efd, " with rendezvous operations: ");
7843 fprintf(efd, "recompile with -DNOREDUCE\n");
7844 pan_exit(1);
7845 }
7846 #endif
7847 #if defined(REM_VARS) && !defined(NOREDUCE)
7848 #warning p.o. reduction not compatible with remote varrefs (use -DNOREDUCE)
7849 #endif
7850 #if defined(NOCOMP) && !defined(BITSTATE)
7851 if (a_cycles)
7852 { fprintf(efd, "error: use of -DNOCOMP voids -l and -a\n");
7853 pan_exit(1);
7854 }
7855 #endif
7856 #ifdef MEMLIM
7857 memlim = ((double) MEMLIM) * (double) (1<<20); /* size in Mbyte */
7858 #endif
7859 #ifndef BITSTATE
7860 if (Nrun > 1) HASH_NR = Nrun - 1;
7861 #endif
7862 if (Nrun < 1 || Nrun > 32)
7863 { fprintf(efd, "error: invalid arg for -R\n");
7864 usage(efd);
7865 }
7866 #ifndef SAFETY
7867 if (fairness && !a_cycles)
7868 { fprintf(efd, "error: -f requires -a or -l\n");
7869 usage(efd);
7870 }
7871 #if ACCEPT_LAB==0
7872 if (a_cycles)
7873 { fprintf(efd, "error: no accept labels defined ");
7874 fprintf(efd, "in model (for option -a)\n");
7875 usage(efd);
7876 }
7877 #endif
7878 #endif
7879 #ifndef NOREDUCE
7880 #ifdef HAS_ENABLED
7881 #error use of enabled() requires -DNOREDUCE
7882 #endif
7883 #ifdef HAS_PCVALUE
7884 #error use of pcvalue() requires -DNOREDUCE
7885 #endif
7886 #ifdef HAS_BADELSE
7887 #error use of 'else' combined with i/o stmnts requires -DNOREDUCE
7888 #endif
7889 #ifdef HAS_LAST
7890 #error use of _last requires -DNOREDUCE
7891 #endif
7892 #endif
7893 #if SYNC>0 && !defined(NOREDUCE)
7894 #ifdef HAS_UNLESS
7895 fprintf(efd, "warning: use of a rendezvous stmnts in the escape\n");
7896 fprintf(efd, " of an unless clause, if present, could make p.o. reduction\n");
7897 fprintf(efd, " invalid (use -DNOREDUCE to avoid this)\n");
7898 #ifdef BFS
7899 fprintf(efd, " (this type of rv is also not compatible with -DBFS)\n");
7900 #endif
7901 #endif
7902 #endif
7903 #if SYNC>0 && defined(BFS)
7904 #warning use of rendezvous with BFS does not preserve all invalid endstates
7905 #endif
7906 #if !defined(REACH) && !defined(BITSTATE)
7907 if (iterative != 0 && a_cycles == 0)
7908 { fprintf(efd, "warning: -i and -I need -DREACH to work accurately\n");
7909 }
7910 #endif
7911 #if defined(BITSTATE) && defined(REACH)
7912 #warning -DREACH is voided by -DBITSTATE
7913 #endif
7914 #if defined(MA) && defined(REACH)
7915 #warning -DREACH is voided by -DMA
7916 #endif
7917 #if defined(FULLSTACK) && defined(CNTRSTACK)
7918 #error cannot combine -DFULLSTACK and -DCNTRSTACK
7919 #endif
7920 #if defined(VERI)
7921 #if ACCEPT_LAB>0
7922 #ifndef BFS
7923 if (!a_cycles
7924 #ifdef HAS_CODE
7925 && !readtrail
7926 #endif
7927 #if NCORE>1
7928 && core_id == 0
7929 #endif
7930 && !state_tables)
7931 { fprintf(efd, "warning: never claim + accept labels ");
7932 fprintf(efd, "requires -a flag to fully verify\n");
7933 }
7934 #else
7935 if (!state_tables
7936 #ifdef HAS_CODE
7937 && !readtrail
7938 #endif
7939 )
7940 { fprintf(efd, "warning: verification in BFS mode ");
7941 fprintf(efd, "is restricted to safety properties\n");
7942 }
7943 #endif
7944 #endif
7945 #endif
7946 #ifndef SAFETY
7947 if (!a_cycles
7948 #ifdef HAS_CODE
7949 && !readtrail
7950 #endif
7951 #if NCORE>1
7952 && core_id == 0
7953 #endif
7954 && !state_tables)
7955 { fprintf(efd, "hint: this search is more efficient ");
7956 fprintf(efd, "if pan.c is compiled -DSAFETY\n");
7957 }
7958 #ifndef NOCOMP
7959 if (!a_cycles)
7960 { S_A = 0;
7961 } else
7962 { if (!fairness)
7963 { S_A = 1; /* _a_t */
7964 #ifndef NOFAIR
7965 } else /* _a_t and _cnt[NFAIR] */
7966 { S_A = (&(now._cnt[0]) - (uchar *) &now) + NFAIR - 2;
7967 /* -2 because first two uchars in now are masked */
7968 #endif
7969 } }
7970 #endif
7971 #endif
7972 signal(SIGINT, stopped);
7973 set_masks();
7974 #ifdef BFS
7975 trail = (Trail *) emalloc(6*sizeof(Trail));
7976 trail += 3;
7977 #else
7978 trail = (Trail *) emalloc((maxdepth+3)*sizeof(Trail));
7979 trail++; /* protect trpt-1 refs at depth 0 */
7980 #endif
7981 #ifdef SVDUMP
7982 if (vprefix > 0)
7983 { char nm[64];
7984 sprintf(nm, "%s.svd", PanSource);
7985 if ((svfd = creat(nm, TMODE)) < 0)
7986 { fprintf(efd, "couldn't create %s\n", nm);
7987 vprefix = 0;
7988 } }
7989 #endif
7990 #ifdef RANDSTOR
7991 srand(123);
7992 #endif
7993 #if SYNC>0 && ASYNC==0
7994 set_recvs();
7995 #endif
7996 run();
7997 done = 1;
7998 wrapup();
7999 return 0;
8000 }
8001
8002 void
8003 usage(FILE *fd)
8004 {
8005 fprintf(fd, "%s\n", SpinVersion);
8006 fprintf(fd, "Valid Options are:\n");
8007 #ifndef SAFETY
8008 #ifdef NP
8009 fprintf(fd, " -a -> is disabled by -DNP ");
8010 fprintf(fd, "(-DNP compiles for -l only)\n");
8011 #else
8012 fprintf(fd, " -a find acceptance cycles\n");
8013 #endif
8014 #else
8015 fprintf(fd, " -a,-l,-f -> are disabled by -DSAFETY\n");
8016 #endif
8017 fprintf(fd, " -A ignore assert() violations\n");
8018 fprintf(fd, " -b consider it an error to exceed the depth-limit\n");
8019 fprintf(fd, " -cN stop at Nth error ");
8020 fprintf(fd, "(defaults to -c1)\n");
8021 fprintf(fd, " -d print state tables and stop\n");
8022 fprintf(fd, " -e create trails for all errors\n");
8023 fprintf(fd, " -E ignore invalid end states\n");
8024 #ifdef SC
8025 fprintf(fd, " -Ffile use 'file' to store disk-stack\n");
8026 #endif
8027 #ifndef NOFAIR
8028 fprintf(fd, " -f add weak fairness (to -a or -l)\n");
8029 #endif
8030 fprintf(fd, " -hN use different hash-seed N:1..32\n");
8031 fprintf(fd, " -i search for shortest path to error\n");
8032 fprintf(fd, " -I like -i, but approximate and faster\n");
8033 fprintf(fd, " -J reverse eval order of nested unlesses\n");
8034 #ifdef BITSTATE
8035 fprintf(fd, " -kN set N bits per state (defaults to 3)\n");
8036 #endif
8037 #ifdef SCHED
8038 fprintf(fd, " -LN set scheduling restriction to N (default 10)\n");
8039 #endif
8040 #ifndef SAFETY
8041 #ifdef NP
8042 fprintf(fd, " -l find non-progress cycles\n");
8043 #else
8044 fprintf(fd, " -l find non-progress cycles -> ");
8045 fprintf(fd, "disabled, requires ");
8046 fprintf(fd, "compilation with -DNP\n");
8047 #endif
8048 #endif
8049 #ifdef BITSTATE
8050 fprintf(fd, " -MN use N Megabytes for bitstate hash array\n");
8051 fprintf(fd, " -GN use N Gigabytes for bitstate hash array\n");
8052 #endif
8053 fprintf(fd, " -mN max depth N steps (default=10k)\n");
8054 fprintf(fd, " -n no listing of unreached states\n");
8055 #ifdef SVDUMP
8056 fprintf(fd, " -pN create svfile (save N bytes per state)\n");
8057 #endif
8058 fprintf(fd, " -QN set time-limit on execution of N minutes\n");
8059 fprintf(fd, " -q require empty chans in valid end states\n");
8060 #ifdef HAS_CODE
8061 fprintf(fd, " -r read and execute trail - can add -v,-n,-PN,-g,-C\n");
8062 fprintf(fd, " -rN read and execute N-th error trail\n");
8063 fprintf(fd, " -C read and execute trail - columnated output (can add -v,-n)\n");
8064 fprintf(fd, " -PN read and execute trail - restrict trail output to proc N\n");
8065 fprintf(fd, " -g read and execute trail + msc gui support\n");
8066 fprintf(fd, " -S silent replay: only user defined printfs show\n");
8067 #endif
8068 #ifdef BITSTATE
8069 fprintf(fd, " -RN repeat run Nx with N ");
8070 fprintf(fd, "[1..32] independent hash functions\n");
8071 fprintf(fd, " -s same as -k1 (single bit per state)\n");
8072 #endif
8073 fprintf(fd, " -T create trail files in read-only mode\n");
8074 fprintf(fd, " -tsuf replace .trail with .suf on trailfiles\n");
8075 fprintf(fd, " -V print SPIN version number\n");
8076 fprintf(fd, " -v verbose -- filenames in unreached state listing\n");
8077 fprintf(fd, " -wN hashtable of 2^N entries ");
8078 fprintf(fd, "(defaults to -w%d)\n", ssize);
8079 fprintf(fd, " -x do not overwrite an existing trail file\n");
8080 #if NCORE>1
8081 fprintf(fd, " -zN handoff states below depth N to 2nd cpu (multi_core)\n");
8082 #endif
8083 #ifdef HAS_CODE
8084 fprintf(fd, "\n options -r, -C, -PN, -g, and -S can optionally be followed by\n");
8085 fprintf(fd, " a filename argument, as in '-r filename', naming the trailfile\n");
8086 #endif
8087 #if NCORE>1
8088 multi_usage(fd);
8089 #endif
8090 exit(1);
8091 }
8092
8093 char *
8094 Malloc(unsigned long n)
8095 { char *tmp;
8096 #ifdef MEMLIM
8097 if (memcnt+ (double) n > memlim) goto err;
8098 #endif
8099 #if 1
8100 tmp = (char *) malloc(n);
8101 if (!tmp)
8102 #else
8103 tmp = (char *) sbrk(n);
8104 if (tmp == (char *) -ONE_L)
8105 #endif
8106 {
8107 #ifdef MEMLIM
8108 err:
8109 #endif
8110 printf("pan: out of memory\n");
8111 #ifdef MEMLIM
8112 printf(" %g bytes used\n", memcnt);
8113 printf(" %g bytes more needed\n", (double) n);
8114 printf(" %g bytes limit\n",
8115 memlim);
8116 #endif
8117 #ifdef COLLAPSE
8118 printf("hint: to reduce memory, recompile with\n");
8119 #ifndef MA
8120 printf(" -DMA=%d # better/slower compression, or\n", hmax);
8121 #endif
8122 printf(" -DBITSTATE # supertrace, approximation\n");
8123 #else
8124 #ifndef BITSTATE
8125 printf("hint: to reduce memory, recompile with\n");
8126 #ifndef HC
8127 printf(" -DCOLLAPSE # good, fast compression, or\n");
8128 #ifndef MA
8129 printf(" -DMA=%d # better/slower compression, or\n", hmax);
8130 #endif
8131 printf(" -DHC # hash-compaction, approximation\n");
8132 #endif
8133 printf(" -DBITSTATE # supertrace, approximation\n");
8134 #endif
8135 #endif
8136 #if NCORE>1
8137 #ifdef FULL_TRAIL
8138 printf(" omit -DFULL_TRAIL or use pan -c0 to reduce memory\n");
8139 #endif
8140 #ifdef SEP_STATE
8141 printf("hint: to reduce memory, recompile without\n");
8142 printf(" -DSEP_STATE # may be faster, but uses more memory\n");
8143 #endif
8144 #endif
8145 wrapup();
8146 }
8147 memcnt += (double) n;
8148 return tmp;
8149 }
8150
8151 #define CHUNK (100*VECTORSZ)
8152
8153 char *
8154 emalloc(unsigned long n) /* never released or reallocated */
8155 { char *tmp;
8156 if (n == 0)
8157 return (char *) NULL;
8158 if (n&(sizeof(void *)-1)) /* for proper alignment */
8159 n += sizeof(void *)-(n&(sizeof(void *)-1));
8160 if ((unsigned long) left < n)
8161 { grow = (n < CHUNK) ? CHUNK : n;
8162 have = Malloc(grow);
8163 fragment += (double) left;
8164 left = grow;
8165 }
8166 tmp = have;
8167 have += (long) n;
8168 left -= (long) n;
8169 memset(tmp, 0, n);
8170 return tmp;
8171 }
8172 void
8173 Uerror(char *str)
8174 { /* always fatal */
8175 uerror(str);
8176 #if NCORE>1
8177 sudden_stop("Uerror");
8178 #endif
8179 wrapup();
8180 }
8181
8182 #if defined(MA) && !defined(SAFETY)
8183 int
8184 Unwind(void)
8185 { Trans *t; uchar ot, _m; int tt; short II;
8186 #ifdef VERBOSE
8187 int i;
8188 #endif
8189 uchar oat = now._a_t;
8190 now._a_t &= ~(1|16|32);
8191 memcpy((char *) &comp_now, (char *) &now, vsize);
8192 now._a_t = oat;
8193 Up:
8194 #ifdef SC
8195 trpt = getframe(depth);
8196 #endif
8197 #ifdef VERBOSE
8198 printf("%d State: ", depth);
8199 for (i = 0; i < vsize; i++) printf("%d%s,",
8200 ((char *)&now)[i], Mask[i]?"*":"");
8201 printf("\n");
8202 #endif
8203 #ifndef NOFAIR
8204 if (trpt->o_pm&128) /* fairness alg */
8205 { now._cnt[now._a_t&1] = trpt->bup.oval;
8206 depth--;
8207 #ifdef SC
8208 trpt = getframe(depth);
8209 #else
8210 trpt--;
8211 #endif
8212 goto Q999;
8213 }
8214 #endif
8215 #ifdef HAS_LAST
8216 #ifdef VERI
8217 { int d; Trail *trl;
8218 now._last = 0;
8219 for (d = 1; d < depth; d++)
8220 { trl = getframe(depth-d); /* was trl = (trpt-d); */
8221 if (trl->pr != 0)
8222 { now._last = trl->pr - BASE;
8223 break;
8224 } } }
8225 #else
8226 now._last = (depth<1)?0:(trpt-1)->pr;
8227 #endif
8228 #endif
8229 #ifdef EVENT_TRACE
8230 now._event = trpt->o_event;
8231 #endif
8232 if ((now._a_t&1) && depth <= A_depth)
8233 { now._a_t &= ~(1|16|32);
8234 if (fairness) now._a_t |= 2; /* ? */
8235 A_depth = 0;
8236 goto CameFromHere; /* checkcycles() */
8237 }
8238 t = trpt->o_t;
8239 ot = trpt->o_ot; II = trpt->pr;
8240 tt = trpt->o_tt; this = pptr(II);
8241 _m = do_reverse(t, II, trpt->o_m);
8242 #ifdef VERBOSE
8243 printf("%3d: proc %d ", depth, II);
8244 printf("reverses %d, %d to %d,",
8245 t->forw, tt, t->st);
8246 printf(" %s [abit=%d,adepth=%d,",
8247 t->tp, now._a_t, A_depth);
8248 printf("tau=%d,%d] <unwind>\n",
8249 trpt->tau, (trpt-1)->tau);
8250 #endif
8251 depth--;
8252 #ifdef SC
8253 trpt = getframe(depth);
8254 #else
8255 trpt--;
8256 #endif
8257 /* reached[ot][t->st] = 1; 3.4.13 */
8258 ((P0 *)this)->_p = tt;
8259 #ifndef NOFAIR
8260 if ((trpt->o_pm&32))
8261 {
8262 #ifdef VERI
8263 if (now._cnt[now._a_t&1] == 0)
8264 now._cnt[now._a_t&1] = 1;
8265 #endif
8266 now._cnt[now._a_t&1] += 1;
8267 }
8268 Q999:
8269 if (trpt->o_pm&8)
8270 { now._a_t &= ~2;
8271 now._cnt[now._a_t&1] = 0;
8272 }
8273 if (trpt->o_pm&16)
8274 now._a_t |= 2;
8275 #endif
8276 CameFromHere:
8277 if (memcmp((char *) &now, (char *) &comp_now, vsize) == 0)
8278 return depth;
8279 if (depth > 0) goto Up;
8280 return 0;
8281 }
8282 #endif
8283 static char unwinding;
8284 void
8285 uerror(char *str)
8286 { static char laststr[256];
8287 int is_cycle;
8288
8289 if (unwinding) return; /* 1.4.2 */
8290 if (strncmp(str, laststr, 254))
8291 #if NCORE>1
8292 cpu_printf("pan: %s (at depth %ld)\n", str,
8293 #else
8294 printf("pan: %s (at depth %ld)\n", str,
8295 #endif
8296 #if NCORE>1
8297 (nr_handoffs * z_handoff) +
8298 #endif
8299 ((depthfound==-1)?depth:depthfound));
8300 strncpy(laststr, str, 254);
8301 errors++;
8302 #ifdef HAS_CODE
8303 if (readtrail) { wrap_trail(); return; }
8304 #endif
8305 is_cycle = (strstr(str, " cycle") != (char *) 0);
8306 if (!is_cycle)
8307 { depth++; trpt++;
8308 }
8309 if ((every_error != 0)
8310 || errors == upto)
8311 {
8312 #if defined(MA) && !defined(SAFETY)
8313 if (is_cycle)
8314 { int od = depth;
8315 unwinding = 1;
8316 depthfound = Unwind();
8317 unwinding = 0;
8318 depth = od;
8319 }
8320 #endif
8321 #if NCORE>1
8322 writing_trail = 1;
8323 #endif
8324 #ifdef BFS
8325 if (depth > 1) trpt--;
8326 nuerror(str);
8327 if (depth > 1) trpt++;
8328 #else
8329 putrail();
8330 #endif
8331 #if defined(MA) && !defined(SAFETY)
8332 if (strstr(str, " cycle"))
8333 { if (every_error)
8334 printf("sorry: MA writes 1 trail max\n");
8335 wrapup(); /* no recovery from unwind */
8336 }
8337 #endif
8338 #if NCORE>1
8339 if (search_terminated != NULL)
8340 { *search_terminated |= 4; /* uerror */
8341 }
8342 writing_trail = 0;
8343 #endif
8344 }
8345 if (!is_cycle)
8346 { depth--; trpt--; /* undo */
8347 }
8348 #ifndef BFS
8349 if (iterative != 0 && maxdepth > 0)
8350 { maxdepth = (iterative == 1)?(depth-1):(depth/2);
8351 warned = 1;
8352 printf("pan: reducing search depth to %ld\n",
8353 maxdepth);
8354 } else
8355 #endif
8356 if (errors >= upto && upto != 0)
8357 {
8358 #if NCORE>1
8359 sudden_stop("uerror");
8360 #endif
8361 wrapup();
8362 }
8363 depthfound = -1;
8364 }
8365
8366 int
8367 xrefsrc(int lno, S_F_MAP *mp, int M, int i)
8368 { Trans *T; int j, retval=1;
8369 for (T = trans[M][i]; T; T = T->nxt)
8370 if (T && T->tp)
8371 { if (strcmp(T->tp, ".(goto)") == 0
8372 || strncmp(T->tp, "goto :", 6) == 0)
8373 return 1; /* not reported */
8374
8375 printf("\tline %d", lno);
8376 if (verbose)
8377 for (j = 0; j < sizeof(mp); j++)
8378 if (i >= mp[j].from && i <= mp[j].upto)
8379 { printf(", \"%s\"", mp[j].fnm);
8380 break;
8381 }
8382 printf(", state %d", i);
8383 if (strcmp(T->tp, "") != 0)
8384 { char *q;
8385 q = transmognify(T->tp);
8386 printf(", \"%s\"", q?q:"");
8387 } else if (stopstate[M][i])
8388 printf(", -end state-");
8389 printf("\n");
8390 retval = 0; /* reported */
8391 }
8392 return retval;
8393 }
8394
8395 void
8396 r_ck(uchar *which, int N, int M, short *src, S_F_MAP *mp)
8397 { int i, m=0;
8398
8399 #ifdef VERI
8400 if (M == VERI && !verbose) return;
8401 #endif
8402 printf("unreached in proctype %s\n", procname[M]);
8403 for (i = 1; i < N; i++)
8404 if (which[i] == 0
8405 && (mapstate[M][i] == 0
8406 || which[mapstate[M][i]] == 0))
8407 m += xrefsrc((int) src[i], mp, M, i);
8408 else
8409 m++;
8410 printf(" (%d of %d states)\n", N-1-m, N-1);
8411 }
8412 #if NCORE>1 && !defined(SEP_STATE)
8413 static long rev_trail_cnt;
8414
8415 #ifdef FULL_TRAIL
8416 void
8417 rev_trail(int fd, volatile Stack_Tree *st_tr)
8418 { long j; char snap[64];
8419
8420 if (!st_tr)
8421 { return;
8422 }
8423 rev_trail(fd, st_tr->prv);
8424 #ifdef VERBOSE
8425 printf("%d (%d) LRT [%d,%d] -- %9u (root %9u)\n",
8426 depth, rev_trail_cnt, st_tr->pr, st_tr->t_id, st_tr, stack_last[core_id]);
8427 #endif
8428 if (st_tr->pr != 255)
8429 { sprintf(snap, "%ld:%d:%d\n",
8430 rev_trail_cnt++, st_tr->pr, st_tr->t_id);
8431 j = strlen(snap);
8432 if (write(fd, snap, j) != j)
8433 { printf("pan: error writing trailfile\n");
8434 close(fd);
8435 wrapup();
8436 return;
8437 }
8438 } else /* handoff point */
8439 { if (a_cycles)
8440 { write(fd, "-1:-1:-1\n", 9);
8441 } }
8442 }
8443 #endif
8444 #endif
8445
8446 void
8447 putrail(void)
8448 { int fd;
8449 #if defined VERI || defined(MERGED)
8450 char snap[64];
8451 #endif
8452 #if NCORE==1 || defined(SEP_STATE) || !defined(FULL_TRAIL)
8453 long i, j;
8454 Trail *trl;
8455 #endif
8456 fd = make_trail();
8457 if (fd < 0) return;
8458 #ifdef VERI
8459 sprintf(snap, "-2:%d:-2\n", VERI);
8460 write(fd, snap, strlen(snap));
8461 #endif
8462 #ifdef MERGED
8463 sprintf(snap, "-4:-4:-4\n");
8464 write(fd, snap, strlen(snap));
8465 #endif
8466 #if NCORE>1 && !defined(SEP_STATE) && defined(FULL_TRAIL)
8467 rev_trail_cnt = 1;
8468 enter_critical(GLOBAL_LOCK);
8469 rev_trail(fd, stack_last[core_id]);
8470 leave_critical(GLOBAL_LOCK);
8471 #else
8472 i = 1; /* trail starts at position 1 */
8473 #if NCORE>1 && defined(SEP_STATE)
8474 if (cur_Root.m_vsize > 0) { i++; depth++; }
8475 #endif
8476 for ( ; i <= depth; i++)
8477 { if (i == depthfound+1)
8478 write(fd, "-1:-1:-1\n", 9);
8479 trl = getframe(i);
8480 if (!trl->o_t) continue;
8481 if (trl->o_pm&128) continue;
8482 sprintf(snap, "%ld:%d:%d\n",
8483 i, trl->pr, trl->o_t->t_id);
8484 j = strlen(snap);
8485 if (write(fd, snap, j) != j)
8486 { printf("pan: error writing trailfile\n");
8487 close(fd);
8488 wrapup();
8489 } }
8490 #endif
8491 close(fd);
8492 #if NCORE>1
8493 cpu_printf("pan: wrote trailfile\n");
8494 #endif
8495 }
8496
8497 void
8498 sv_save(void) /* push state vector onto save stack */
8499 { if (!svtack->nxt)
8500 { svtack->nxt = (Svtack *) emalloc(sizeof(Svtack));
8501 svtack->nxt->body = emalloc(vsize*sizeof(char));
8502 svtack->nxt->lst = svtack;
8503 svtack->nxt->m_delta = vsize;
8504 svmax++;
8505 } else if (vsize > svtack->nxt->m_delta)
8506 { svtack->nxt->body = emalloc(vsize*sizeof(char));
8507 svtack->nxt->lst = svtack;
8508 svtack->nxt->m_delta = vsize;
8509 svmax++;
8510 }
8511 svtack = svtack->nxt;
8512 #if SYNC
8513 svtack->o_boq = boq;
8514 #endif
8515 svtack->o_delta = vsize; /* don't compress */
8516 memcpy((char *)(svtack->body), (char *) &now, vsize);
8517 #if defined(C_States) && defined(HAS_STACK) && (HAS_TRACK==1)
8518 c_stack((uchar *) &(svtack->c_stack[0]));
8519 #endif
8520 #ifdef DEBUG
8521 cpu_printf("%d: sv_save\n", depth);
8522 #endif
8523 }
8524
8525 void
8526 sv_restor(void) /* pop state vector from save stack */
8527 {
8528 memcpy((char *)&now, svtack->body, svtack->o_delta);
8529 #if SYNC
8530 boq = svtack->o_boq;
8531 #endif
8532 #if defined(C_States) && (HAS_TRACK==1)
8533 #ifdef HAS_STACK
8534 c_unstack((uchar *) &(svtack->c_stack[0]));
8535 #endif
8536 c_revert((uchar *) &(now.c_state[0]));
8537 #endif
8538 if (vsize != svtack->o_delta)
8539 Uerror("sv_restor");
8540 if (!svtack->lst)
8541 Uerror("error: v_restor");
8542 svtack = svtack->lst;
8543 #ifdef DEBUG
8544 cpu_printf(" sv_restor\n");
8545 #endif
8546 }
8547
8548 void
8549 p_restor(int h)
8550 { int i; char *z = (char *) &now;
8551
8552 proc_offset[h] = stack->o_offset;
8553 proc_skip[h] = (uchar) stack->o_skip;
8554 #ifndef XUSAFE
8555 p_name[h] = stack->o_name;
8556 #endif
8557 #ifndef NOCOMP
8558 for (i = vsize + stack->o_skip; i > vsize; i--)
8559 Mask[i-1] = 1; /* align */
8560 #endif
8561 vsize += stack->o_skip;
8562 memcpy(z+vsize, stack->body, stack->o_delta);
8563 vsize += stack->o_delta;
8564 #ifndef NOVSZ
8565 now._vsz = vsize;
8566 #endif
8567 #ifndef NOCOMP
8568 for (i = 1; i <= Air[((P0 *)pptr(h))->_t]; i++)
8569 Mask[vsize - i] = 1; /* pad */
8570 Mask[proc_offset[h]] = 1; /* _pid */
8571 #endif
8572 if (BASE > 0 && h > 0)
8573 ((P0 *)pptr(h))->_pid = h-BASE;
8574 else
8575 ((P0 *)pptr(h))->_pid = h;
8576 i = stack->o_delqs;
8577 now._nr_pr += 1;
8578 if (!stack->lst) /* debugging */
8579 Uerror("error: p_restor");
8580 stack = stack->lst;
8581 this = pptr(h);
8582 while (i-- > 0)
8583 q_restor();
8584 }
8585
8586 void
8587 q_restor(void)
8588 { char *z = (char *) &now;
8589 #ifndef NOCOMP
8590 int k, k_end;
8591 #endif
8592 q_offset[now._nr_qs] = stack->o_offset;
8593 q_skip[now._nr_qs] = (uchar) stack->o_skip;
8594 #ifndef XUSAFE
8595 q_name[now._nr_qs] = stack->o_name;
8596 #endif
8597 vsize += stack->o_skip;
8598 memcpy(z+vsize, stack->body, stack->o_delta);
8599 vsize += stack->o_delta;
8600 #ifndef NOVSZ
8601 now._vsz = vsize;
8602 #endif
8603 now._nr_qs += 1;
8604 #ifndef NOCOMP
8605 k_end = stack->o_offset;
8606 k = k_end - stack->o_skip;
8607 #if SYNC
8608 #ifndef BFS
8609 if (q_zero(now._nr_qs)) k_end += stack->o_delta;
8610 #endif
8611 #endif
8612 for ( ; k < k_end; k++)
8613 Mask[k] = 1;
8614 #endif
8615 if (!stack->lst) /* debugging */
8616 Uerror("error: q_restor");
8617 stack = stack->lst;
8618 }
8619 typedef struct IntChunks {
8620 int *ptr;
8621 struct IntChunks *nxt;
8622 } IntChunks;
8623 IntChunks *filled_chunks[512];
8624 IntChunks *empty_chunks[512];
8625 int *
8626 grab_ints(int nr)
8627 { IntChunks *z;
8628 if (nr >= 512) Uerror("cannot happen grab_int");
8629 if (filled_chunks[nr])
8630 { z = filled_chunks[nr];
8631 filled_chunks[nr] = filled_chunks[nr]->nxt;
8632 } else
8633 { z = (IntChunks *) emalloc(sizeof(IntChunks));
8634 z->ptr = (int *) emalloc(nr * sizeof(int));
8635 }
8636 z->nxt = empty_chunks[nr];
8637 empty_chunks[nr] = z;
8638 return z->ptr;
8639 }
8640 void
8641 ungrab_ints(int *p, int nr)
8642 { IntChunks *z;
8643 if (!empty_chunks[nr]) Uerror("cannot happen ungrab_int");
8644 z = empty_chunks[nr];
8645 empty_chunks[nr] = empty_chunks[nr]->nxt;
8646 z->ptr = p;
8647 z->nxt = filled_chunks[nr];
8648 filled_chunks[nr] = z;
8649 }
8650 int
8651 delproc(int sav, int h)
8652 { int d, i=0;
8653 #ifndef NOCOMP
8654 int o_vsize = vsize;
8655 #endif
8656 if (h+1 != (int) now._nr_pr) return 0;
8657
8658 while (now._nr_qs
8659 && q_offset[now._nr_qs-1] > proc_offset[h])
8660 { delq(sav);
8661 i++;
8662 }
8663 d = vsize - proc_offset[h];
8664 if (sav)
8665 { if (!stack->nxt)
8666 { stack->nxt = (Stack *)
8667 emalloc(sizeof(Stack));
8668 stack->nxt->body =
8669 emalloc(Maxbody*sizeof(char));
8670 stack->nxt->lst = stack;
8671 smax++;
8672 }
8673 stack = stack->nxt;
8674 stack->o_offset = proc_offset[h];
8675 #if VECTORSZ>32000
8676 stack->o_skip = (int) proc_skip[h];
8677 #else
8678 stack->o_skip = (short) proc_skip[h];
8679 #endif
8680 #ifndef XUSAFE
8681 stack->o_name = p_name[h];
8682 #endif
8683 stack->o_delta = d;
8684 stack->o_delqs = i;
8685 memcpy(stack->body, (char *)pptr(h), d);
8686 }
8687 vsize = proc_offset[h];
8688 now._nr_pr = now._nr_pr - 1;
8689 memset((char *)pptr(h), 0, d);
8690 vsize -= (int) proc_skip[h];
8691 #ifndef NOVSZ
8692 now._vsz = vsize;
8693 #endif
8694 #ifndef NOCOMP
8695 for (i = vsize; i < o_vsize; i++)
8696 Mask[i] = 0; /* reset */
8697 #endif
8698 return 1;
8699 }
8700
8701 void
8702 delq(int sav)
8703 { int h = now._nr_qs - 1;
8704 int d = vsize - q_offset[now._nr_qs - 1];
8705 #ifndef NOCOMP
8706 int k, o_vsize = vsize;
8707 #endif
8708 if (sav)
8709 { if (!stack->nxt)
8710 { stack->nxt = (Stack *)
8711 emalloc(sizeof(Stack));
8712 stack->nxt->body =
8713 emalloc(Maxbody*sizeof(char));
8714 stack->nxt->lst = stack;
8715 smax++;
8716 }
8717 stack = stack->nxt;
8718 stack->o_offset = q_offset[h];
8719 #if VECTORSZ>32000
8720 stack->o_skip = (int) q_skip[h];
8721 #else
8722 stack->o_skip = (short) q_skip[h];
8723 #endif
8724 #ifndef XUSAFE
8725 stack->o_name = q_name[h];
8726 #endif
8727 stack->o_delta = d;
8728 memcpy(stack->body, (char *)qptr(h), d);
8729 }
8730 vsize = q_offset[h];
8731 now._nr_qs = now._nr_qs - 1;
8732 memset((char *)qptr(h), 0, d);
8733 vsize -= (int) q_skip[h];
8734 #ifndef NOVSZ
8735 now._vsz = vsize;
8736 #endif
8737 #ifndef NOCOMP
8738 for (k = vsize; k < o_vsize; k++)
8739 Mask[k] = 0; /* reset */
8740 #endif
8741 }
8742
8743 int
8744 qs_empty(void)
8745 { int i;
8746 for (i = 0; i < (int) now._nr_qs; i++)
8747 { if (q_sz(i) > 0)
8748 return 0;
8749 }
8750 return 1;
8751 }
8752
8753 int
8754 endstate(void)
8755 { int i; P0 *ptr;
8756 for (i = BASE; i < (int) now._nr_pr; i++)
8757 { ptr = (P0 *) pptr(i);
8758 if (!stopstate[ptr->_t][ptr->_p])
8759 return 0;
8760 }
8761 if (strict) return qs_empty();
8762 #if defined(EVENT_TRACE) && !defined(OTIM)
8763 if (!stopstate[EVENT_TRACE][now._event] && !a_cycles)
8764 { printf("pan: event_trace not completed\n");
8765 return 0;
8766 }
8767 #endif
8768 return 1;
8769 }
8770
8771 #ifndef SAFETY
8772 void
8773 checkcycles(void)
8774 { uchar o_a_t = now._a_t;
8775 #ifdef SCHED
8776 int o_limit;
8777 #endif
8778 #ifndef NOFAIR
8779 uchar o_cnt = now._cnt[1];
8780 #endif
8781 #ifdef FULLSTACK
8782 #ifndef MA
8783 struct H_el *sv = trpt->ostate; /* save */
8784 #else
8785 uchar prov = trpt->proviso; /* save */
8786 #endif
8787 #endif
8788 #ifdef DEBUG
8789 { int i; uchar *v = (uchar *) &now;
8790 printf(" set Seed state ");
8791 #ifndef NOFAIR
8792 if (fairness) printf("(cnt = %d:%d, nrpr=%d) ",
8793 now._cnt[0], now._cnt[1], now._nr_pr);
8794 #endif
8795 /* for (i = 0; i < n; i++) printf("%d,", v[i]); */
8796 printf("\n");
8797 }
8798 printf("%d: cycle check starts\n", depth);
8799 #endif
8800 now._a_t |= (1|16|32);
8801 /* 1 = 2nd DFS; (16|32) to help hasher */
8802 #ifndef NOFAIR
8803 now._cnt[1] = now._cnt[0];
8804 #endif
8805 memcpy((char *)&A_Root, (char *)&now, vsize);
8806 A_depth = depthfound = depth;
8807 #if NCORE>1
8808 mem_put_acc();
8809 #else
8810 #ifdef SCHED
8811 o_limit = trpt->sched_limit;
8812 trpt->sched_limit = 0;
8813 #endif
8814 new_state(); /* start 2nd DFS */
8815 #ifdef SCHED
8816 trpt->sched_limit = o_limit;
8817 #endif
8818 #endif
8819 now._a_t = o_a_t;
8820 #ifndef NOFAIR
8821 now._cnt[1] = o_cnt;
8822 #endif
8823 A_depth = 0; depthfound = -1;
8824 #ifdef DEBUG
8825 printf("%d: cycle check returns\n", depth);
8826 #endif
8827 #ifdef FULLSTACK
8828 #ifndef MA
8829 trpt->ostate = sv; /* restore */
8830 #else
8831 trpt->proviso = prov;
8832 #endif
8833 #endif
8834 }
8835 #endif
8836
8837 #if defined(FULLSTACK) && defined(BITSTATE)
8838 struct H_el *Free_list = (struct H_el *) 0;
8839 void
8840 onstack_init(void) /* to store stack states in a bitstate search */
8841 { S_Tab = (struct H_el **) emalloc(maxdepth*sizeof(struct H_el *));
8842 }
8843 struct H_el *
8844 grab_state(int n)
8845 { struct H_el *v, *last = 0;
8846 if (H_tab == S_Tab)
8847 { for (v = Free_list; v && ((int) v->tagged >= n); v=v->nxt)
8848 { if ((int) v->tagged == n)
8849 { if (last)
8850 last->nxt = v->nxt;
8851 else
8852 gotcha: Free_list = v->nxt;
8853 v->tagged = 0;
8854 v->nxt = 0;
8855 #ifdef COLLAPSE
8856 v->ln = 0;
8857 #endif
8858 return v;
8859 }
8860 Fh++; last=v;
8861 }
8862 /* new: second try */
8863 v = Free_list;
8864 if (v && ((int) v->tagged >= n))
8865 goto gotcha;
8866 ngrabs++;
8867 }
8868 return (struct H_el *)
8869 emalloc(sizeof(struct H_el)+n-sizeof(unsigned));
8870 }
8871
8872 #else
8873 #if NCORE>1
8874 struct H_el *
8875 grab_state(int n)
8876 { struct H_el *grab_shared(int);
8877 return grab_shared(sizeof(struct H_el)+n-sizeof(unsigned));
8878 }
8879 #else
8880 #ifndef AUTO_RESIZE
8881 #define grab_state(n) (struct H_el *) \
8882 emalloc(sizeof(struct H_el)+n-sizeof(unsigned long));
8883 #else
8884 struct H_el *
8885 grab_state(int n)
8886 { struct H_el *p;
8887 int cnt = sizeof(struct H_el)+n-sizeof(unsigned long);
8888
8889 if (reclaim_size >= cnt+WS)
8890 { if ((cnt & (WS-1)) != 0) /* alignment */
8891 { cnt += WS - (cnt & (WS-1));
8892 }
8893 p = (struct H_el *) reclaim_mem;
8894 reclaim_mem += cnt;
8895 reclaim_size -= cnt;
8896 memset(p, 0, cnt);
8897 } else
8898 { p = (struct H_el *) emalloc(cnt);
8899 }
8900 return p;
8901 }
8902 #endif
8903 #endif
8904 #endif
8905 #ifdef COLLAPSE
8906 unsigned long
8907 ordinal(char *v, long n, short tp)
8908 { struct H_el *tmp, *ntmp; long m;
8909 struct H_el *olst = (struct H_el *) 0;
8910 s_hash((uchar *)v, n);
8911 #if NCORE>1 && !defined(SEP_STATE)
8912 enter_critical(CS_ID); /* uses spinlock - 1..128 */
8913 #endif
8914 tmp = H_tab[j1];
8915 if (!tmp)
8916 { tmp = grab_state(n);
8917 H_tab[j1] = tmp;
8918 } else
8919 for ( ;; olst = tmp, tmp = tmp->nxt)
8920 { m = memcmp(((char *)&(tmp->state)), v, n);
8921 if (n == tmp->ln)
8922 {
8923 if (m == 0)
8924 goto done;
8925 if (m < 0)
8926 {
8927 Insert: ntmp = grab_state(n);
8928 ntmp->nxt = tmp;
8929 if (!olst)
8930 H_tab[j1] = ntmp;
8931 else
8932 olst->nxt = ntmp;
8933 tmp = ntmp;
8934 break;
8935 } else if (!tmp->nxt)
8936 {
8937 Append: tmp->nxt = grab_state(n);
8938 tmp = tmp->nxt;
8939 break;
8940 }
8941 continue;
8942 }
8943 if (n < tmp->ln)
8944 goto Insert;
8945 else if (!tmp->nxt)
8946 goto Append;
8947 }
8948 m = ++ncomps[tp];
8949 #ifdef FULLSTACK
8950 tmp->tagged = m;
8951 #else
8952 tmp->st_id = m;
8953 #endif
8954 #if defined(AUTO_RESIZE) && !defined(BITSTATE)
8955 tmp->m_K1 = K1;
8956 #endif
8957 memcpy(((char *)&(tmp->state)), v, n);
8958 tmp->ln = n;
8959 done:
8960 #if NCORE>1 && !defined(SEP_STATE)
8961 leave_critical(CS_ID); /* uses spinlock */
8962 #endif
8963 #ifdef FULLSTACK
8964 return tmp->tagged;
8965 #else
8966 return tmp->st_id;
8967 #endif
8968 }
8969
8970 int
8971 compress(char *vin, int nin) /* collapse compression */
8972 { char *w, *v = (char *) &comp_now;
8973 int i, j;
8974 unsigned long n;
8975 static char *x;
8976 static uchar nbytes[513]; /* 1 + 256 + 256 */
8977 static unsigned short nbytelen;
8978 long col_q(int, char *);
8979 long col_p(int, char *);
8980 #ifndef SAFETY
8981 if (a_cycles)
8982 *v++ = now._a_t;
8983 #ifndef NOFAIR
8984 if (fairness)
8985 for (i = 0; i < NFAIR; i++)
8986 *v++ = now._cnt[i];
8987 #endif
8988 #endif
8989 nbytelen = 0;
8990 #ifndef JOINPROCS
8991 for (i = 0; i < (int) now._nr_pr; i++)
8992 { n = col_p(i, (char *) 0);
8993 #ifdef NOFIX
8994 nbytes[nbytelen] = 0;
8995 #else
8996 nbytes[nbytelen] = 1;
8997 *v++ = ((P0 *) pptr(i))->_t;
8998 #endif
8999 *v++ = n&255;
9000 if (n >= (1<<8))
9001 { nbytes[nbytelen]++;
9002 *v++ = (n>>8)&255;
9003 }
9004 if (n >= (1<<16))
9005 { nbytes[nbytelen]++;
9006 *v++ = (n>>16)&255;
9007 }
9008 if (n >= (1<<24))
9009 { nbytes[nbytelen]++;
9010 *v++ = (n>>24)&255;
9011 }
9012 nbytelen++;
9013 }
9014 #else
9015 x = scratch;
9016 for (i = 0; i < (int) now._nr_pr; i++)
9017 x += col_p(i, x);
9018 n = ordinal(scratch, x-scratch, 2); /* procs */
9019 *v++ = n&255;
9020 nbytes[nbytelen] = 0;
9021 if (n >= (1<<8))
9022 { nbytes[nbytelen]++;
9023 *v++ = (n>>8)&255;
9024 }
9025 if (n >= (1<<16))
9026 { nbytes[nbytelen]++;
9027 *v++ = (n>>16)&255;
9028 }
9029 if (n >= (1<<24))
9030 { nbytes[nbytelen]++;
9031 *v++ = (n>>24)&255;
9032 }
9033 nbytelen++;
9034 #endif
9035 #ifdef SEPQS
9036 for (i = 0; i < (int) now._nr_qs; i++)
9037 { n = col_q(i, (char *) 0);
9038 nbytes[nbytelen] = 0;
9039 *v++ = n&255;
9040 if (n >= (1<<8))
9041 { nbytes[nbytelen]++;
9042 *v++ = (n>>8)&255;
9043 }
9044 if (n >= (1<<16))
9045 { nbytes[nbytelen]++;
9046 *v++ = (n>>16)&255;
9047 }
9048 if (n >= (1<<24))
9049 { nbytes[nbytelen]++;
9050 *v++ = (n>>24)&255;
9051 }
9052 nbytelen++;
9053 }
9054 #endif
9055 #ifdef NOVSZ
9056 /* 3 = _a_t, _nr_pr, _nr_qs */
9057 w = (char *) &now + 3 * sizeof(uchar);
9058 #ifndef NOFAIR
9059 w += NFAIR;
9060 #endif
9061 #else
9062 #if VECTORSZ<65536
9063 w = (char *) &(now._vsz) + sizeof(unsigned short);
9064 #else
9065 w = (char *) &(now._vsz) + sizeof(unsigned long);
9066 #endif
9067 #endif
9068 x = scratch;
9069 *x++ = now._nr_pr;
9070 *x++ = now._nr_qs;
9071 if (now._nr_qs > 0 && qptr(0) < pptr(0))
9072 n = qptr(0) - (uchar *) w;
9073 else
9074 n = pptr(0) - (uchar *) w;
9075 j = w - (char *) &now;
9076 for (i = 0; i < (int) n; i++, w++)
9077 if (!Mask[j++]) *x++ = *w;
9078 #ifndef SEPQS
9079 for (i = 0; i < (int) now._nr_qs; i++)
9080 x += col_q(i, x);
9081 #endif
9082 x--;
9083 for (i = 0, j = 6; i < nbytelen; i++)
9084 { if (j == 6)
9085 { j = 0;
9086 *(++x) = 0;
9087 } else
9088 j += 2;
9089 *x |= (nbytes[i] << j);
9090 }
9091 x++;
9092 for (j = 0; j < WS-1; j++)
9093 *x++ = 0;
9094 x -= j; j = 0;
9095 n = ordinal(scratch, x-scratch, 0); /* globals */
9096 *v++ = n&255;
9097 if (n >= (1<< 8)) { *v++ = (n>> 8)&255; j++; }
9098 if (n >= (1<<16)) { *v++ = (n>>16)&255; j++; }
9099 if (n >= (1<<24)) { *v++ = (n>>24)&255; j++; }
9100 *v++ = j; /* add last count as a byte */
9101 for (i = 0; i < WS-1; i++)
9102 *v++ = 0;
9103 v -= i;
9104 #if 0
9105 printf("collapse %d -> %d\n",
9106 vsize, v - (char *)&comp_now);
9107 #endif
9108 return v - (char *)&comp_now;
9109 }
9110 #else
9111 #if !defined(NOCOMP)
9112 int
9113 compress(char *vin, int n) /* default compression */
9114 {
9115 #ifdef HC
9116 int delta = 0;
9117 s_hash((uchar *)vin, n); /* sets K1 and K2 */
9118 #ifndef SAFETY
9119 if (S_A)
9120 { delta++; /* _a_t */
9121 #ifndef NOFAIR
9122 if (S_A > NFAIR)
9123 delta += NFAIR; /* _cnt[] */
9124 #endif
9125 }
9126 #endif
9127 memcpy((char *) &comp_now + delta, (char *) &K1, WS);
9128 delta += WS;
9129 #if HC>0
9130 memcpy((char *) &comp_now + delta, (char *) &K2, HC);
9131 delta += HC;
9132 #endif
9133 return delta;
9134 #else
9135 char *vv = vin;
9136 char *v = (char *) &comp_now;
9137 int i;
9138 #ifndef NO_FAST_C
9139 int r = 0, unroll = n/8;
9140 if (unroll > 0)
9141 { i = 0;
9142 while (r++ < unroll)
9143 { /* unroll 8 times, avoid ifs */
9144 /* 1 */ *v = *vv++;
9145 v += 1 - Mask[i++];
9146 /* 2 */ *v = *vv++;
9147 v += 1 - Mask[i++];
9148 /* 3 */ *v = *vv++;
9149 v += 1 - Mask[i++];
9150 /* 4 */ *v = *vv++;
9151 v += 1 - Mask[i++];
9152 /* 5 */ *v = *vv++;
9153 v += 1 - Mask[i++];
9154 /* 6 */ *v = *vv++;
9155 v += 1 - Mask[i++];
9156 /* 7 */ *v = *vv++;
9157 v += 1 - Mask[i++];
9158 /* 8 */ *v = *vv++;
9159 v += 1 - Mask[i++];
9160 }
9161 r = n - i; /* the rest, at most 7 */
9162 switch (r) {
9163 case 7: *v = *vv++; v += 1 - Mask[i++];
9164 case 6: *v = *vv++; v += 1 - Mask[i++];
9165 case 5: *v = *vv++; v += 1 - Mask[i++];
9166 case 4: *v = *vv++; v += 1 - Mask[i++];
9167 case 3: *v = *vv++; v += 1 - Mask[i++];
9168 case 2: *v = *vv++; v += 1 - Mask[i++];
9169 case 1: *v = *vv++; v += 1 - Mask[i++];
9170 case 0: break;
9171 }
9172 r = (n+WS-1)/WS; /* words rounded up */
9173 r *= WS; /* bytes */
9174 i = r - i; /* remainder */
9175 switch (i) {
9176 case 7: *v++ = 0; /* fall thru */
9177 case 6: *v++ = 0;
9178 case 5: *v++ = 0;
9179 case 4: *v++ = 0;
9180 case 3: *v++ = 0;
9181 case 2: *v++ = 0;
9182 case 1: *v++ = 0;
9183 case 0: break;
9184 default: Uerror("unexpected wordsize");
9185 }
9186 v -= i;
9187 } else
9188 #endif
9189 { for (i = 0; i < n; i++, vv++)
9190 if (!Mask[i]) *v++ = *vv;
9191 for (i = 0; i < WS-1; i++)
9192 *v++ = 0;
9193 v -= i;
9194 }
9195 #if 0
9196 printf("compress %d -> %d\n",
9197 n, v - (char *)&comp_now);
9198 #endif
9199 return v - (char *)&comp_now;
9200 #endif
9201 }
9202 #endif
9203 #endif
9204 #if defined(FULLSTACK) && defined(BITSTATE)
9205 #if defined(MA)
9206 #if !defined(onstack_now)
9207 int onstack_now(void) {}
9208 #endif
9209 #if !defined(onstack_put)
9210 void onstack_put(void) {}
9211 #endif
9212 #if !defined(onstack_zap)
9213 void onstack_zap(void) {}
9214 #endif
9215 #else
9216 void
9217 onstack_zap(void)
9218 { struct H_el *v, *w, *last = 0;
9219 struct H_el **tmp = H_tab;
9220 char *nv; int n, m;
9221
9222 static char warned = 0;
9223
9224 H_tab = S_Tab;
9225 #ifndef NOCOMP
9226 nv = (char *) &comp_now;
9227 n = compress((char *)&now, vsize);
9228 #else
9229 #if defined(BITSTATE) && defined(LC)
9230 nv = (char *) &comp_now;
9231 n = compact_stack((char *)&now, vsize);
9232 #else
9233 nv = (char *) &now;
9234 n = vsize;
9235 #endif
9236 #endif
9237 #if !defined(HC) && !(defined(BITSTATE) && defined(LC))
9238 s_hash((uchar *)nv, n);
9239 #endif
9240 H_tab = tmp;
9241 for (v = S_Tab[j1]; v; Zh++, last=v, v=v->nxt)
9242 { m = memcmp(&(v->state), nv, n);
9243 if (m == 0)
9244 goto Found;
9245 if (m < 0)
9246 break;
9247 }
9248 /* NotFound: */
9249 #ifndef ZAPH
9250 #if defined(BITSTATE) && NCORE>1
9251 /* seen this happen, likely harmless, but not yet understood */
9252 if (warned == 0)
9253 #endif
9254 { /* Uerror("stack out of wack - zap"); */
9255 cpu_printf("pan: warning, stack incomplete\n");
9256 warned = 1;
9257 }
9258 #endif
9259 return;
9260 Found:
9261 ZAPS++;
9262 if (last)
9263 last->nxt = v->nxt;
9264 else
9265 S_Tab[j1] = v->nxt;
9266 v->tagged = (unsigned) n;
9267 #if !defined(NOREDUCE) && !defined(SAFETY)
9268 v->proviso = 0;
9269 #endif
9270 v->nxt = last = (struct H_el *) 0;
9271 for (w = Free_list; w; Fa++, last=w, w = w->nxt)
9272 { if ((int) w->tagged <= n)
9273 { if (last)
9274 { v->nxt = w;
9275 last->nxt = v;
9276 } else
9277 { v->nxt = Free_list;
9278 Free_list = v;
9279 }
9280 return;
9281 }
9282 if (!w->nxt)
9283 { w->nxt = v;
9284 return;
9285 } }
9286 Free_list = v;
9287 }
9288 void
9289 onstack_put(void)
9290 { struct H_el **tmp = H_tab;
9291 H_tab = S_Tab;
9292 if (hstore((char *)&now, vsize) != 0)
9293 #if defined(BITSTATE) && defined(LC)
9294 printf("pan: warning, double stack entry\n");
9295 #else
9296 #ifndef ZAPH
9297 Uerror("cannot happen - unstack_put");
9298 #endif
9299 #endif
9300 H_tab = tmp;
9301 trpt->ostate = Lstate;
9302 PUT++;
9303 }
9304 int
9305 onstack_now(void)
9306 { struct H_el *tmp;
9307 struct H_el **tmp2 = H_tab;
9308 char *v; int n, m = 1;
9309
9310 H_tab = S_Tab;
9311 #ifdef NOCOMP
9312 #if defined(BITSTATE) && defined(LC)
9313 v = (char *) &comp_now;
9314 n = compact_stack((char *)&now, vsize);
9315 #else
9316 v = (char *) &now;
9317 n = vsize;
9318 #endif
9319 #else
9320 v = (char *) &comp_now;
9321 n = compress((char *)&now, vsize);
9322 #endif
9323 #if !defined(HC) && !(defined(BITSTATE) && defined(LC))
9324 s_hash((uchar *)v, n);
9325 #endif
9326 H_tab = tmp2;
9327 for (tmp = S_Tab[j1]; tmp; Zn++, tmp = tmp->nxt)
9328 { m = memcmp(((char *)&(tmp->state)),v,n);
9329 if (m <= 0)
9330 { Lstate = (struct H_el *) tmp;
9331 break;
9332 } }
9333 PROBE++;
9334 return (m == 0);
9335 }
9336 #endif
9337 #endif
9338 #ifndef BITSTATE
9339 void
9340 hinit(void)
9341 {
9342 #ifdef MA
9343 #ifdef R_XPT
9344 { void r_xpoint(void);
9345 r_xpoint();
9346 }
9347 #else
9348 dfa_init((unsigned short) (MA+a_cycles));
9349 #if NCORE>1 && !defined(COLLAPSE)
9350 if (!readtrail)
9351 { void init_HT(unsigned long);
9352 init_HT(0L);
9353 }
9354 #endif
9355 #endif
9356 #endif
9357 #if !defined(MA) || defined(COLLAPSE)
9358 #if NCORE>1
9359 if (!readtrail)
9360 { void init_HT(unsigned long);
9361 init_HT((unsigned long) (ONE_L<<ssize)*sizeof(struct H_el *));
9362 } else
9363 #endif
9364 H_tab = (struct H_el **)
9365 emalloc((ONE_L<<ssize)*sizeof(struct H_el *));
9366 #endif
9367 }
9368 #endif
9369
9370 #if !defined(BITSTATE) || defined(FULLSTACK)
9371 #ifdef DEBUG
9372 void
9373 dumpstate(int wasnew, char *v, int n, int tag)
9374 { int i;
9375 #ifndef SAFETY
9376 if (S_A)
9377 { printf(" state tags %d (%d::%d): ",
9378 V_A, wasnew, v[0]);
9379 #ifdef FULLSTACK
9380 printf(" %d ", tag);
9381 #endif
9382 printf("\n");
9383 }
9384 #endif
9385 #ifdef SDUMP
9386 #ifndef NOCOMP
9387 printf(" State: ");
9388 for (i = 0; i < vsize; i++) printf("%d%s,",
9389 ((char *)&now)[i], Mask[i]?"*":"");
9390 #endif
9391 printf("\n Vector: ");
9392 for (i = 0; i < n; i++) printf("%d,", v[i]);
9393 printf("\n");
9394 #endif
9395 }
9396 #endif
9397 #ifdef MA
9398 int
9399 gstore(char *vin, int nin, uchar pbit)
9400 { int n, i;
9401 int ret_val = 1;
9402 uchar *v;
9403 static uchar Info[MA+1];
9404 #ifndef NOCOMP
9405 n = compress(vin, nin);
9406 v = (uchar *) &comp_now;
9407 #else
9408 n = nin;
9409 v = vin;
9410 #endif
9411 if (n >= MA)
9412 { printf("pan: error, MA too small, recompile pan.c");
9413 printf(" with -DMA=N with N>%d\n", n);
9414 Uerror("aborting");
9415 }
9416 if (n > (int) maxgs)
9417 { maxgs = (unsigned int) n;
9418 }
9419 for (i = 0; i < n; i++)
9420 { Info[i] = v[i];
9421 }
9422 for ( ; i < MA-1; i++)
9423 { Info[i] = 0;
9424 }
9425 Info[MA-1] = pbit;
9426 if (a_cycles) /* place _a_t at the end */
9427 { Info[MA] = Info[0];
9428 Info[0] = 0;
9429 }
9430
9431 #if NCORE>1 && !defined(SEP_STATE)
9432 enter_critical(GLOBAL_LOCK); /* crude, but necessary */
9433 /* to make this mode work, also replace emalloc with grab_shared inside store MA routines */
9434 #endif
9435
9436 if (!dfa_store(Info))
9437 { if (pbit == 0
9438 && (now._a_t&1)
9439 && depth > A_depth)
9440 { Info[MA] &= ~(1|16|32); /* _a_t */
9441 if (dfa_member(MA))
9442 { Info[MA-1] = 4; /* off-stack bit */
9443 nShadow++;
9444 if (!dfa_member(MA-1))
9445 { ret_val = 3;
9446 #ifdef VERBOSE
9447 printf("intersected 1st dfs stack\n");
9448 #endif
9449 goto done;
9450 } } }
9451 ret_val = 0;
9452 #ifdef VERBOSE
9453 printf("new state\n");
9454 #endif
9455 goto done;
9456 }
9457 #ifdef FULLSTACK
9458 if (pbit == 0)
9459 { Info[MA-1] = 1; /* proviso bit */
9460 #ifndef BFS
9461 trpt->proviso = dfa_member(MA-1);
9462 #endif
9463 Info[MA-1] = 4; /* off-stack bit */
9464 if (dfa_member(MA-1))
9465 { ret_val = 1; /* off-stack */
9466 #ifdef VERBOSE
9467 printf("old state\n");
9468 #endif
9469 } else
9470 { ret_val = 2; /* on-stack */
9471 #ifdef VERBOSE
9472 printf("on-stack\n");
9473 #endif
9474 }
9475 goto done;
9476 }
9477 #endif
9478 ret_val = 1;
9479 #ifdef VERBOSE
9480 printf("old state\n");
9481 #endif
9482 done:
9483 #if NCORE>1 && !defined(SEP_STATE)
9484 leave_critical(GLOBAL_LOCK);
9485 #endif
9486 return ret_val; /* old state */
9487 }
9488 #endif
9489 #if defined(BITSTATE) && defined(LC)
9490 int
9491 compact_stack(char *vin, int n)
9492 { int delta = 0;
9493 s_hash((uchar *)vin, n); /* sets K1 and K2 */
9494 #ifndef SAFETY
9495 delta++; /* room for state[0] |= 128 */
9496 #endif
9497 memcpy((char *) &comp_now + delta, (char *) &K1, WS);
9498 delta += WS;
9499 memcpy((char *) &comp_now + delta, (char *) &K2, WS);
9500 delta += WS; /* use all available bits */
9501 return delta;
9502 }
9503 #endif
9504 int
9505 hstore(char *vin, int nin) /* hash table storage */
9506 { struct H_el *ntmp;
9507 struct H_el *tmp, *olst = (struct H_el *) 0;
9508 char *v; int n, m=0;
9509 #ifdef HC
9510 uchar rem_a;
9511 #endif
9512 #ifdef NOCOMP
9513 #if defined(BITSTATE) && defined(LC)
9514 if (S_Tab == H_tab)
9515 { v = (char *) &comp_now;
9516 n = compact_stack(vin, nin);
9517 } else
9518 { v = vin; n = nin;
9519 }
9520 #else
9521 v = vin; n = nin;
9522 #endif
9523 #else
9524 v = (char *) &comp_now;
9525 #ifdef HC
9526 rem_a = now._a_t;
9527 now._a_t = 0;
9528 #endif
9529 n = compress(vin, nin);
9530 #ifdef HC
9531 now._a_t = rem_a;
9532 #endif
9533 #ifndef SAFETY
9534 if (S_A)
9535 { v[0] = 0; /* _a_t */
9536 #ifndef NOFAIR
9537 if (S_A > NFAIR)
9538 for (m = 0; m < NFAIR; m++)
9539 v[m+1] = 0; /* _cnt[] */
9540 #endif
9541 m = 0;
9542 }
9543 #endif
9544 #endif
9545 #if !defined(HC) && !(defined(BITSTATE) && defined(LC))
9546 s_hash((uchar *)v, n);
9547 #endif
9548 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9549 enter_critical(CS_ID); /* uses spinlock */
9550 #endif
9551 tmp = H_tab[j1];
9552 if (!tmp)
9553 { tmp = grab_state(n);
9554 #if NCORE>1
9555 if (!tmp)
9556 { /* if we get here -- we've already issued a warning */
9557 /* but we want to allow the normal distributed termination */
9558 /* to collect the stats on all cpus in the wrapup */
9559 #if !defined(SEP_STATE) && !defined(BITSTATE)
9560 leave_critical(CS_ID);
9561 #endif
9562 return 1; /* allow normal termination */
9563 }
9564 #endif
9565 H_tab[j1] = tmp;
9566 } else
9567 { for (;; hcmp++, olst = tmp, tmp = tmp->nxt)
9568 { /* skip the _a_t and the _cnt bytes */
9569 #ifdef COLLAPSE
9570 if (tmp->ln != 0)
9571 { if (!tmp->nxt) goto Append;
9572 continue;
9573 }
9574 #endif
9575 m = memcmp(((char *)&(tmp->state)) + S_A,
9576 v + S_A, n - S_A);
9577 if (m == 0) {
9578 #ifdef SAFETY
9579 #define wasnew 0
9580 #else
9581 int wasnew = 0;
9582 #endif
9583 #ifndef SAFETY
9584 #ifndef NOCOMP
9585 if (S_A)
9586 { if ((((char *)&(tmp->state))[0] & V_A) != V_A)
9587 { wasnew = 1; nShadow++;
9588 ((char *)&(tmp->state))[0] |= V_A;
9589 }
9590 #ifndef NOFAIR
9591 if (S_A > NFAIR)
9592 { /* 0 <= now._cnt[now._a_t&1] < MAXPROC */
9593 unsigned ci, bp; /* index, bit pos */
9594 ci = (now._cnt[now._a_t&1] / 8);
9595 bp = (now._cnt[now._a_t&1] - 8*ci);
9596 if (now._a_t&1) /* use tail-bits in _cnt */
9597 { ci = (NFAIR - 1) - ci;
9598 bp = 7 - bp; /* bp = 0..7 */
9599 }
9600 ci++; /* skip over _a_t */
9601 bp = 1 << bp; /* the bit mask */
9602 if ((((char *)&(tmp->state))[ci] & bp)==0)
9603 { if (!wasnew)
9604 { wasnew = 1;
9605 nShadow++;
9606 }
9607 ((char *)&(tmp->state))[ci] |= bp;
9608 }
9609 }
9610 /* else: wasnew == 0, i.e., old state */
9611 #endif
9612 }
9613 #endif
9614 #endif
9615 #if NCORE>1
9616 Lstate = (struct H_el *) tmp;
9617 #endif
9618 #ifdef FULLSTACK
9619 #ifndef SAFETY
9620 if (wasnew)
9621 { Lstate = (struct H_el *) tmp;
9622 tmp->tagged |= V_A;
9623 if ((now._a_t&1)
9624 && (tmp->tagged&A_V)
9625 && depth > A_depth)
9626 {
9627 intersect:
9628 #ifdef CHECK
9629 #if NCORE>1
9630 printf("cpu%d: ", core_id);
9631 #endif
9632 printf("1st dfs-stack intersected on state %d+\n",
9633 (int) tmp->st_id);
9634 #endif
9635 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9636 leave_critical(CS_ID);
9637 #endif
9638 return 3;
9639 }
9640 #ifdef CHECK
9641 #if NCORE>1
9642 printf("cpu%d: ", core_id);
9643 #endif
9644 printf(" New state %d+\n", (int) tmp->st_id);
9645 #endif
9646 #ifdef DEBUG
9647 dumpstate(1, (char *)&(tmp->state),n,tmp->tagged);
9648 #endif
9649 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9650 leave_critical(CS_ID);
9651 #endif
9652 return 0;
9653 } else
9654 #endif
9655 if ((S_A)?(tmp->tagged&V_A):tmp->tagged)
9656 { Lstate = (struct H_el *) tmp;
9657 #ifndef SAFETY
9658 /* already on current dfs stack */
9659 /* but may also be on 1st dfs stack */
9660 if ((now._a_t&1)
9661 && (tmp->tagged&A_V)
9662 && depth > A_depth
9663 #ifndef NOFAIR
9664 && (!fairness || now._cnt[1] <= 1)
9665 #endif
9666 )
9667 goto intersect;
9668 #endif
9669 #ifdef CHECK
9670 #if NCORE>1
9671 printf("cpu%d: ", core_id);
9672 #endif
9673 printf(" Stack state %d\n", (int) tmp->st_id);
9674 #endif
9675 #ifdef DEBUG
9676 dumpstate(0, (char *)&(tmp->state),n,tmp->tagged);
9677 #endif
9678 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9679 leave_critical(CS_ID);
9680 #endif
9681 return 2; /* match on stack */
9682 }
9683 #else
9684 if (wasnew)
9685 {
9686 #ifdef CHECK
9687 #if NCORE>1
9688 printf("cpu%d: ", core_id);
9689 #endif
9690 printf(" New state %d+\n", (int) tmp->st_id);
9691 #endif
9692 #ifdef DEBUG
9693 dumpstate(1, (char *)&(tmp->state), n, 0);
9694 #endif
9695 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9696 leave_critical(CS_ID);
9697 #endif
9698 return 0;
9699 }
9700 #endif
9701 #ifdef CHECK
9702 #if NCORE>1
9703 printf("cpu%d: ", core_id);
9704 #endif
9705 printf(" Old state %d\n", (int) tmp->st_id);
9706 #endif
9707 #ifdef DEBUG
9708 dumpstate(0, (char *)&(tmp->state), n, 0);
9709 #endif
9710 #ifdef REACH
9711 if (tmp->D > depth)
9712 { tmp->D = depth;
9713 #ifdef CHECK
9714 #if NCORE>1
9715 printf("cpu%d: ", core_id);
9716 #endif
9717 printf(" ReVisiting (from smaller depth)\n");
9718 #endif
9719 nstates--;
9720 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9721 leave_critical(CS_ID);
9722 #endif
9723 return 0;
9724 }
9725 #endif
9726 #if (defined(BFS) && defined(Q_PROVISO)) || NCORE>1
9727 Lstate = (struct H_el *) tmp;
9728 #endif
9729 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9730 leave_critical(CS_ID);
9731 #endif
9732 return 1; /* match outside stack */
9733 } else if (m < 0)
9734 { /* insert state before tmp */
9735 ntmp = grab_state(n);
9736 #if NCORE>1
9737 if (!ntmp)
9738 {
9739 #if !defined(SEP_STATE) && !defined(BITSTATE)
9740 leave_critical(CS_ID);
9741 #endif
9742 return 1; /* allow normal termination */
9743 }
9744 #endif
9745 ntmp->nxt = tmp;
9746 if (!olst)
9747 H_tab[j1] = ntmp;
9748 else
9749 olst->nxt = ntmp;
9750 tmp = ntmp;
9751 break;
9752 } else if (!tmp->nxt)
9753 { /* append after tmp */
9754 #ifdef COLLAPSE
9755 Append:
9756 #endif
9757 tmp->nxt = grab_state(n);
9758 #if NCORE>1
9759 if (!tmp->nxt)
9760 {
9761 #if !defined(SEP_STATE) && !defined(BITSTATE)
9762 leave_critical(CS_ID);
9763 #endif
9764 return 1; /* allow normal termination */
9765 }
9766 #endif
9767 tmp = tmp->nxt;
9768 break;
9769 } }
9770 }
9771 #ifdef CHECK
9772 tmp->st_id = (unsigned) nstates;
9773 #if NCORE>1
9774 printf("cpu%d: ", core_id);
9775 #endif
9776 #ifdef BITSTATE
9777 printf(" Push state %d\n", ((int) nstates) - 1);
9778 #else
9779 printf(" New state %d\n", (int) nstates);
9780 #endif
9781 #endif
9782 #if !defined(SAFETY) || defined(REACH)
9783 tmp->D = depth;
9784 #endif
9785 #ifndef SAFETY
9786 #ifndef NOCOMP
9787 if (S_A)
9788 { v[0] = V_A;
9789 #ifndef NOFAIR
9790 if (S_A > NFAIR)
9791 { unsigned ci, bp; /* as above */
9792 ci = (now._cnt[now._a_t&1] / 8);
9793 bp = (now._cnt[now._a_t&1] - 8*ci);
9794 if (now._a_t&1)
9795 { ci = (NFAIR - 1) - ci;
9796 bp = 7 - bp; /* bp = 0..7 */
9797 }
9798 v[1+ci] = 1 << bp;
9799 }
9800 #endif
9801 }
9802 #endif
9803 #endif
9804 #if defined(AUTO_RESIZE) && !defined(BITSTATE)
9805 tmp->m_K1 = K1;
9806 #endif
9807 memcpy(((char *)&(tmp->state)), v, n);
9808 #ifdef FULLSTACK
9809 tmp->tagged = (S_A)?V_A:(depth+1);
9810 #ifdef DEBUG
9811 dumpstate(-1, v, n, tmp->tagged);
9812 #endif
9813 Lstate = (struct H_el *) tmp;
9814 #else
9815 #ifdef DEBUG
9816 dumpstate(-1, v, n, 0);
9817 #endif
9818 #if NCORE>1
9819 Lstate = (struct H_el *) tmp;
9820 #endif
9821 #endif
9822 /* #if NCORE>1 && !defined(SEP_STATE) */
9823 #if NCORE>1
9824 #ifdef V_PROVISO
9825 tmp->cpu_id = core_id;
9826 #endif
9827 #if !defined(SEP_STATE) && !defined(BITSTATE)
9828 leave_critical(CS_ID);
9829 #endif
9830 #endif
9831 return 0;
9832 }
9833 #endif
9834 #include TRANSITIONS
9835 void
9836 do_reach(void)
9837 {
9838 r_ck(reached0, nstates0, 0, src_ln0, src_file0);
9839 r_ck(reached1, nstates1, 1, src_ln1, src_file1);
9840 r_ck(reached2, nstates2, 2, src_ln2, src_file2);
9841 r_ck(reached3, nstates3, 3, src_ln3, src_file3);
9842 r_ck(reached4, nstates4, 4, src_ln4, src_file4);
9843 r_ck(reached5, nstates5, 5, src_ln5, src_file5);
9844 }
9845
9846 void
9847 iniglobals(void)
9848 {
9849 deliver = 0;
9850 { int l_in;
9851 for (l_in = 0; l_in < 8; l_in++)
9852 {
9853 now.buffer_use[l_in] = 0;
9854 }
9855 }
9856 now.write_off = 0;
9857 { int l_in;
9858 for (l_in = 0; l_in < 2; l_in++)
9859 {
9860 now.commit_count[l_in] = 0;
9861 }
9862 }
9863 now._commit_sum = 0;
9864 now.read_off = 0;
9865 now.events_lost = 0;
9866 now.refcount = 0;
9867 #ifdef VAR_RANGES
9868 { int l_in;
9869 for (l_in = 0; l_in < 8; l_in++)
9870 {
9871 logval("buffer_use[l_in]", now.buffer_use[l_in]);
9872 }
9873 }
9874 logval("write_off", now.write_off);
9875 { int l_in;
9876 for (l_in = 0; l_in < 2; l_in++)
9877 {
9878 logval("commit_count[l_in]", now.commit_count[l_in]);
9879 }
9880 }
9881 logval("_commit_sum", now._commit_sum);
9882 logval("read_off", now.read_off);
9883 logval("events_lost", now.events_lost);
9884 logval("refcount", now.refcount);
9885 #endif
9886 Maxbody = max(Maxbody, sizeof(State)-VECTORSZ);
9887 }
9888
9889 int
9890 addqueue(int n, int is_rv)
9891 { int j=0, i = now._nr_qs;
9892 #ifndef NOCOMP
9893 int k;
9894 #endif
9895 if (i >= MAXQ)
9896 Uerror("too many queues");
9897 switch (n) {
9898 default: Uerror("bad queue - addqueue");
9899 }
9900 if (vsize%WS)
9901 q_skip[i] = WS-(vsize%WS);
9902 else
9903 q_skip[i] = 0;
9904 #ifndef NOCOMP
9905 k = vsize;
9906 #ifndef BFS
9907 if (is_rv) k += j;
9908 #endif
9909 for (k += (int) q_skip[i]; k > vsize; k--)
9910 Mask[k-1] = 1;
9911 #endif
9912 vsize += (int) q_skip[i];
9913 q_offset[i] = vsize;
9914 now._nr_qs += 1;
9915 vsize += j;
9916 #ifndef NOVSZ
9917 now._vsz = vsize;
9918 #endif
9919 hmax = max(hmax, vsize);
9920 if (vsize >= VECTORSZ)
9921 Uerror("VECTORSZ is too small, edit pan.h");
9922 memset((char *)qptr(i), 0, j);
9923 ((Q0 *)qptr(i))->_t = n;
9924 return i+1;
9925 }
9926
9927 #if NQS>0
9928 void
9929 qsend(int into, int sorted, int args_given)
9930 { int j; uchar *z;
9931
9932 #ifdef HAS_SORTED
9933 int k;
9934 #endif
9935 if (!into--)
9936 uerror("ref to uninitialized chan name (sending)");
9937 if (into >= (int) now._nr_qs || into < 0)
9938 Uerror("qsend bad queue#");
9939 z = qptr(into);
9940 j = ((Q0 *)qptr(into))->Qlen;
9941 switch (((Q0 *)qptr(into))->_t) {
9942 case 0: printf("queue %d was deleted\n", into+1);
9943 default: Uerror("bad queue - qsend");
9944 }
9945 #ifdef EVENT_TRACE
9946 if (in_s_scope(into+1))
9947 require('s', into);
9948 #endif
9949 }
9950 #endif
9951
9952 #if SYNC
9953 int
9954 q_zero(int from)
9955 { if (!from--)
9956 { uerror("ref to uninitialized chan name (q_zero)");
9957 return 0;
9958 }
9959 switch(((Q0 *)qptr(from))->_t) {
9960 case 0: printf("queue %d was deleted\n", from+1);
9961 }
9962 Uerror("bad queue q-zero");
9963 return -1;
9964 }
9965 int
9966 not_RV(int from)
9967 { if (q_zero(from))
9968 { printf("==>> a test of the contents of a rv ");
9969 printf("channel always returns FALSE\n");
9970 uerror("error to poll rendezvous channel");
9971 }
9972 return 1;
9973 }
9974 #endif
9975 #ifndef XUSAFE
9976 void
9977 setq_claim(int x, int m, char *s, int y, char *p)
9978 { if (x == 0)
9979 uerror("x[rs] claim on uninitialized channel");
9980 if (x < 0 || x > MAXQ)
9981 Uerror("cannot happen setq_claim");
9982 q_claim[x] |= m;
9983 p_name[y] = p;
9984 q_name[x] = s;
9985 if (m&2) q_S_check(x, y);
9986 if (m&1) q_R_check(x, y);
9987 }
9988 short q_sender[MAXQ+1];
9989 int
9990 q_S_check(int x, int who)
9991 { if (!q_sender[x])
9992 { q_sender[x] = who+1;
9993 #if SYNC
9994 if (q_zero(x))
9995 { printf("chan %s (%d), ",
9996 q_name[x], x-1);
9997 printf("sndr proc %s (%d)\n",
9998 p_name[who], who);
9999 uerror("xs chans cannot be used for rv");
10000 }
10001 #endif
10002 } else
10003 if (q_sender[x] != who+1)
10004 { printf("pan: xs assertion violated: ");
10005 printf("access to chan <%s> (%d)\npan: by ",
10006 q_name[x], x-1);
10007 if (q_sender[x] > 0 && p_name[q_sender[x]-1])
10008 printf("%s (proc %d) and by ",
10009 p_name[q_sender[x]-1], q_sender[x]-1);
10010 printf("%s (proc %d)\n",
10011 p_name[who], who);
10012 uerror("error, partial order reduction invalid");
10013 }
10014 return 1;
10015 }
10016 short q_recver[MAXQ+1];
10017 int
10018 q_R_check(int x, int who)
10019 { if (!q_recver[x])
10020 { q_recver[x] = who+1;
10021 #if SYNC
10022 if (q_zero(x))
10023 { printf("chan %s (%d), ",
10024 q_name[x], x-1);
10025 printf("recv proc %s (%d)\n",
10026 p_name[who], who);
10027 uerror("xr chans cannot be used for rv");
10028 }
10029 #endif
10030 } else
10031 if (q_recver[x] != who+1)
10032 { printf("pan: xr assertion violated: ");
10033 printf("access to chan %s (%d)\npan: ",
10034 q_name[x], x-1);
10035 if (q_recver[x] > 0 && p_name[q_recver[x]-1])
10036 printf("by %s (proc %d) and ",
10037 p_name[q_recver[x]-1], q_recver[x]-1);
10038 printf("by %s (proc %d)\n",
10039 p_name[who], who);
10040 uerror("error, partial order reduction invalid");
10041 }
10042 return 1;
10043 }
10044 #endif
10045 int
10046 q_len(int x)
10047 { if (!x--)
10048 uerror("ref to uninitialized chan name (len)");
10049 return ((Q0 *)qptr(x))->Qlen;
10050 }
10051
10052 int
10053 q_full(int from)
10054 { if (!from--)
10055 uerror("ref to uninitialized chan name (qfull)");
10056 switch(((Q0 *)qptr(from))->_t) {
10057 case 0: printf("queue %d was deleted\n", from+1);
10058 }
10059 Uerror("bad queue - q_full");
10060 return 0;
10061 }
10062
10063 #ifdef HAS_UNLESS
10064 int
10065 q_e_f(int from)
10066 { /* empty or full */
10067 return !q_len(from) || q_full(from);
10068 }
10069 #endif
10070 #if NQS>0
10071 int
10072 qrecv(int from, int slot, int fld, int done)
10073 { uchar *z;
10074 int j, k, r=0;
10075
10076 if (!from--)
10077 uerror("ref to uninitialized chan name (receiving)");
10078 if (from >= (int) now._nr_qs || from < 0)
10079 Uerror("qrecv bad queue#");
10080 z = qptr(from);
10081 #ifdef EVENT_TRACE
10082 if (done && (in_r_scope(from+1)))
10083 require('r', from);
10084 #endif
10085 switch (((Q0 *)qptr(from))->_t) {
10086 case 0: printf("queue %d was deleted\n", from+1);
10087 default: Uerror("bad queue - qrecv");
10088 }
10089 return r;
10090 }
10091 #endif
10092
10093 #ifndef BITSTATE
10094 #ifdef COLLAPSE
10095 long
10096 col_q(int i, char *z)
10097 { int j=0, k;
10098 char *x, *y;
10099 Q0 *ptr = (Q0 *) qptr(i);
10100 switch (ptr->_t) {
10101 default: Uerror("bad qtype - collapse");
10102 }
10103 if (z) x = z; else x = scratch;
10104 y = (char *) ptr; k = q_offset[i];
10105 /* no need to store the empty slots at the end */
10106 j -= (q_max[ptr->_t] - ptr->Qlen) * ((j - 2)/q_max[ptr->_t]);
10107 for ( ; j > 0; j--, y++)
10108 if (!Mask[k++]) *x++ = *y;
10109 for (j = 0; j < WS-1; j++)
10110 *x++ = 0;
10111 x -= j;
10112 if (z) return (long) (x - z);
10113 return ordinal(scratch, x-scratch, 1); /* chan */
10114 }
10115 #endif
10116 #endif
10117 int
10118 unsend(int into)
10119 { int _m=0, j; uchar *z;
10120
10121 #ifdef HAS_SORTED
10122 int k;
10123 #endif
10124 if (!into--)
10125 uerror("ref to uninitialized chan (unsend)");
10126 z = qptr(into);
10127 j = ((Q0 *)z)->Qlen;
10128 ((Q0 *)z)->Qlen = --j;
10129 switch (((Q0 *)qptr(into))->_t) {
10130 default: Uerror("bad queue - unsend");
10131 }
10132 return _m;
10133 }
10134
10135 void
10136 unrecv(int from, int slot, int fld, int fldvar, int strt)
10137 { int j; uchar *z;
10138
10139 if (!from--)
10140 uerror("ref to uninitialized chan (unrecv)");
10141 z = qptr(from);
10142 j = ((Q0 *)z)->Qlen;
10143 if (strt) ((Q0 *)z)->Qlen = j+1;
10144 switch (((Q0 *)qptr(from))->_t) {
10145 default: Uerror("bad queue - qrecv");
10146 }
10147 }
10148 int
10149 q_cond(short II, Trans *t)
10150 { int i = 0;
10151 for (i = 0; i < 6; i++)
10152 { if (t->ty[i] == TIMEOUT_F) return 1;
10153 if (t->ty[i] == ALPHA_F)
10154 #ifdef GLOB_ALPHA
10155 return 0;
10156 #else
10157 return (II+1 == (short) now._nr_pr && II+1 < MAXPROC);
10158 #endif
10159 switch (t->qu[i]) {
10160 case 0: break;
10161 default: Uerror("unknown qid - q_cond");
10162 return 0;
10163 }
10164 }
10165 return 1;
10166 }
10167 void
10168 to_compile(void)
10169 { char ctd[1024], carg[64];
10170 #ifdef BITSTATE
10171 strcpy(ctd, "-DBITSTATE ");
10172 #else
10173 strcpy(ctd, "");
10174 #endif
10175 #ifdef NOVSZ
10176 strcat(ctd, "-DNOVSZ ");
10177 #endif
10178 #ifdef REVERSE
10179 strcat(ctd, "-DREVERSE ");
10180 #endif
10181 #ifdef T_REVERSE
10182 strcat(ctd, "-DT_REVERSE ");
10183 #endif
10184 #ifdef RANDOMIZE
10185 #if RANDOMIZE>0
10186 sprintf(carg, "-DRANDOMIZE=%d ", RANDOMIZE);
10187 strcat(ctd, carg);
10188 #else
10189 strcat(ctd, "-DRANDOMIZE ");
10190 #endif
10191 #endif
10192 #ifdef SCHED
10193 sprintf(carg, "-DSCHED=%d ", SCHED);
10194 strcat(ctd, carg);
10195 #endif
10196 #ifdef BFS
10197 strcat(ctd, "-DBFS ");
10198 #endif
10199 #ifdef MEMLIM
10200 sprintf(carg, "-DMEMLIM=%d ", MEMLIM);
10201 strcat(ctd, carg);
10202 #else
10203 #ifdef MEMCNT
10204 sprintf(carg, "-DMEMCNT=%d ", MEMCNT);
10205 strcat(ctd, carg);
10206 #endif
10207 #endif
10208 #ifdef NOCLAIM
10209 strcat(ctd, "-DNOCLAIM ");
10210 #endif
10211 #ifdef SAFETY
10212 strcat(ctd, "-DSAFETY ");
10213 #else
10214 #ifdef NOFAIR
10215 strcat(ctd, "-DNOFAIR ");
10216 #else
10217 #ifdef NFAIR
10218 if (NFAIR != 2)
10219 { sprintf(carg, "-DNFAIR=%d ", NFAIR);
10220 strcat(ctd, carg);
10221 }
10222 #endif
10223 #endif
10224 #endif
10225 #ifdef NOREDUCE
10226 strcat(ctd, "-DNOREDUCE ");
10227 #else
10228 #ifdef XUSAFE
10229 strcat(ctd, "-DXUSAFE ");
10230 #endif
10231 #endif
10232 #ifdef NP
10233 strcat(ctd, "-DNP ");
10234 #endif
10235 #ifdef PEG
10236 strcat(ctd, "-DPEG ");
10237 #endif
10238 #ifdef VAR_RANGES
10239 strcat(ctd, "-DVAR_RANGES ");
10240 #endif
10241 #ifdef HC0
10242 strcat(ctd, "-DHC0 ");
10243 #endif
10244 #ifdef HC1
10245 strcat(ctd, "-DHC1 ");
10246 #endif
10247 #ifdef HC2
10248 strcat(ctd, "-DHC2 ");
10249 #endif
10250 #ifdef HC3
10251 strcat(ctd, "-DHC3 ");
10252 #endif
10253 #ifdef HC4
10254 strcat(ctd, "-DHC4 ");
10255 #endif
10256 #ifdef CHECK
10257 strcat(ctd, "-DCHECK ");
10258 #endif
10259 #ifdef CTL
10260 strcat(ctd, "-DCTL ");
10261 #endif
10262 #ifdef NIBIS
10263 strcat(ctd, "-DNIBIS ");
10264 #endif
10265 #ifdef NOBOUNDCHECK
10266 strcat(ctd, "-DNOBOUNDCHECK ");
10267 #endif
10268 #ifdef NOSTUTTER
10269 strcat(ctd, "-DNOSTUTTER ");
10270 #endif
10271 #ifdef REACH
10272 strcat(ctd, "-DREACH ");
10273 #endif
10274 #ifdef PRINTF
10275 strcat(ctd, "-DPRINTF ");
10276 #endif
10277 #ifdef OTIM
10278 strcat(ctd, "-DOTIM ");
10279 #endif
10280 #ifdef COLLAPSE
10281 strcat(ctd, "-DCOLLAPSE ");
10282 #endif
10283 #ifdef MA
10284 sprintf(carg, "-DMA=%d ", MA);
10285 strcat(ctd, carg);
10286 #endif
10287 #ifdef SVDUMP
10288 strcat(ctd, "-DSVDUMP ");
10289 #endif
10290 #ifdef VECTORSZ
10291 if (VECTORSZ != 1024)
10292 { sprintf(carg, "-DVECTORSZ=%d ", VECTORSZ);
10293 strcat(ctd, carg);
10294 }
10295 #endif
10296 #ifdef VERBOSE
10297 strcat(ctd, "-DVERBOSE ");
10298 #endif
10299 #ifdef CHECK
10300 strcat(ctd, "-DCHECK ");
10301 #endif
10302 #ifdef SDUMP
10303 strcat(ctd, "-DSDUMP ");
10304 #endif
10305 #if NCORE>1
10306 sprintf(carg, "-DNCORE=%d ", NCORE);
10307 strcat(ctd, carg);
10308 #endif
10309 #ifdef SFH
10310 sprintf(carg, "-DSFH ");
10311 strcat(ctd, carg);
10312 #endif
10313 #ifdef VMAX
10314 if (VMAX != 256)
10315 { sprintf(carg, "-DVMAX=%d ", VMAX);
10316 strcat(ctd, carg);
10317 }
10318 #endif
10319 #ifdef PMAX
10320 if (PMAX != 16)
10321 { sprintf(carg, "-DPMAX=%d ", PMAX);
10322 strcat(ctd, carg);
10323 }
10324 #endif
10325 #ifdef QMAX
10326 if (QMAX != 16)
10327 { sprintf(carg, "-DQMAX=%d ", QMAX);
10328 strcat(ctd, carg);
10329 }
10330 #endif
10331 #ifdef SET_WQ_SIZE
10332 sprintf(carg, "-DSET_WQ_SIZE=%d ", SET_WQ_SIZE);
10333 strcat(ctd, carg);
10334 #endif
10335 printf("Compiled as: cc -o pan %span.c\n", ctd);
10336 }
10337 void
10338 active_procs(void)
10339 {
10340 if (!permuted) {
10341 Addproc(4);
10342 } else {
10343 Addproc(4);
10344 }
10345 }
10346 #ifdef MA
10347 /*
10348 #include <stdio.h>
10349 #define uchar unsigned char
10350 */
10351 #define ulong unsigned long
10352 #define ushort unsigned short
10353
10354 #define TWIDTH 256
10355 #define HASH(y,n) (n)*(((long)y))
10356 #define INRANGE(e,h) ((h>=e->From && h<=e->To)||(e->s==1 && e->S==h))
10357
10358 extern char *emalloc(unsigned long); /* imported routine */
10359 extern void dfa_init(ushort); /* 4 exported routines */
10360 extern int dfa_member(ulong);
10361 extern int dfa_store(uchar *);
10362 extern void dfa_stats(void);
10363
10364 typedef struct Edge {
10365 uchar From, To; /* max range 0..255 */
10366 uchar s, S; /* if s=1, S is singleton */
10367 struct Vertex *Dst;
10368 struct Edge *Nxt;
10369 } Edge;
10370
10371 typedef struct Vertex {
10372 ulong key, num; /* key for splay tree, nr incoming edges */
10373 uchar from[2], to[2]; /* in-node predefined edge info */
10374 struct Vertex *dst[2];/* most nodes have 2 or more edges */
10375 struct Edge *Succ; /* in case there are more edges */
10376 struct Vertex *lnk, *left, *right; /* splay tree plumbing */
10377 } Vertex;
10378
10379 static Edge *free_edges;
10380 static Vertex *free_vertices;
10381 static Vertex **layers; /* one splay tree of nodes per layer */
10382 static Vertex **path; /* run of word in the DFA */
10383 static Vertex *R, *F, *NF; /* Root, Final, Not-Final */
10384 static uchar *word, *lastword;/* string, and last string inserted */
10385 static int dfa_depth, iv=0, nv=0, pfrst=0, Tally;
10386
10387 static void insert_it(Vertex *, int); /* splay-tree code */
10388 static void delete_it(Vertex *, int);
10389 static Vertex *find_it(Vertex *, Vertex *, uchar, int);
10390
10391 static void
10392 recyc_edges(Edge *e)
10393 {
10394 if (!e) return;
10395 recyc_edges(e->Nxt);
10396 e->Nxt = free_edges;
10397 free_edges = e;
10398 }
10399
10400 static Edge *
10401 new_edge(Vertex *dst)
10402 { Edge *e;
10403
10404 if (free_edges)
10405 { e = free_edges;
10406 free_edges = e->Nxt;
10407 e->From = e->To = e->s = e->S = 0;
10408 e->Nxt = (Edge *) 0;
10409 } else
10410 e = (Edge *) emalloc(sizeof(Edge));
10411 e->Dst = dst;
10412
10413 return e;
10414 }
10415
10416 static void
10417 recyc_vertex(Vertex *v)
10418 {
10419 recyc_edges(v->Succ);
10420 v->Succ = (Edge *) free_vertices;
10421 free_vertices = v;
10422 nr_states--;
10423 }
10424
10425 static Vertex *
10426 new_vertex(void)
10427 { Vertex *v;
10428
10429 if (free_vertices)
10430 { v = free_vertices;
10431 free_vertices = (Vertex *) v->Succ;
10432 v->Succ = (Edge *) 0;
10433 v->num = 0;
10434 } else
10435 v = (Vertex *) emalloc(sizeof(Vertex));
10436
10437 nr_states++;
10438 return v;
10439 }
10440
10441 static Vertex *
10442 allDelta(Vertex *v, int n)
10443 { Vertex *dst = new_vertex();
10444
10445 v->from[0] = 0;
10446 v->to[0] = 255;
10447 v->dst[0] = dst;
10448 dst->num = 256;
10449 insert_it(v, n);
10450 return dst;
10451 }
10452
10453 static void
10454 insert_edge(Vertex *v, Edge *e)
10455 { /* put new edge first */
10456 if (!v->dst[0])
10457 { v->dst[0] = e->Dst;
10458 v->from[0] = e->From;
10459 v->to[0] = e->To;
10460 recyc_edges(e);
10461 return;
10462 }
10463 if (!v->dst[1])
10464 { v->from[1] = v->from[0]; v->from[0] = e->From;
10465 v->to[1] = v->to[0]; v->to[0] = e->To;
10466 v->dst[1] = v->dst[0]; v->dst[0] = e->Dst;
10467 recyc_edges(e);
10468 return;
10469 } /* shift */
10470 { int f = v->from[1];
10471 int t = v->to[1];
10472 Vertex *d = v->dst[1];
10473 v->from[1] = v->from[0]; v->from[0] = e->From;
10474 v->to[1] = v->to[0]; v->to[0] = e->To;
10475 v->dst[1] = v->dst[0]; v->dst[0] = e->Dst;
10476 e->From = f;
10477 e->To = t;
10478 e->Dst = d;
10479 }
10480 e->Nxt = v->Succ;
10481 v->Succ = e;
10482 }
10483
10484 static void
10485 copyRecursive(Vertex *v, Edge *e)
10486 { Edge *f;
10487 if (e->Nxt) copyRecursive(v, e->Nxt);
10488 f = new_edge(e->Dst);
10489 f->From = e->From;
10490 f->To = e->To;
10491 f->s = e->s;
10492 f->S = e->S;
10493 f->Nxt = v->Succ;
10494 v->Succ = f;
10495 }
10496
10497 static void
10498 copyEdges(Vertex *to, Vertex *from)
10499 { int i;
10500 for (i = 0; i < 2; i++)
10501 { to->from[i] = from->from[i];
10502 to->to[i] = from->to[i];
10503 to->dst[i] = from->dst[i];
10504 }
10505 if (from->Succ) copyRecursive(to, from->Succ);
10506 }
10507
10508 static Edge *
10509 cacheDelta(Vertex *v, int h, int first)
10510 { static Edge *ov, tmp; int i;
10511
10512 if (!first && INRANGE(ov,h))
10513 return ov; /* intercepts about 10% */
10514 for (i = 0; i < 2; i++)
10515 if (v->dst[i] && h >= v->from[i] && h <= v->to[i])
10516 { tmp.From = v->from[i];
10517 tmp.To = v->to[i];
10518 tmp.Dst = v->dst[i];
10519 tmp.s = tmp.S = 0;
10520 ov = &tmp;
10521 return ov;
10522 }
10523 for (ov = v->Succ; ov; ov = ov->Nxt)
10524 if (INRANGE(ov,h)) return ov;
10525
10526 Uerror("cannot get here, cacheDelta");
10527 return (Edge *) 0;
10528 }
10529
10530 static Vertex *
10531 Delta(Vertex *v, int h) /* v->delta[h] */
10532 { Edge *e;
10533
10534 if (v->dst[0] && h >= v->from[0] && h <= v->to[0])
10535 return v->dst[0]; /* oldest edge */
10536 if (v->dst[1] && h >= v->from[1] && h <= v->to[1])
10537 return v->dst[1];
10538 for (e = v->Succ; e; e = e->Nxt)
10539 if (INRANGE(e,h))
10540 return e->Dst;
10541 Uerror("cannot happen Delta");
10542 return (Vertex *) 0;
10543 }
10544
10545 static void
10546 numDelta(Vertex *v, int d)
10547 { Edge *e;
10548 ulong cnt;
10549 int i;
10550
10551 for (i = 0; i < 2; i++)
10552 if (v->dst[i])
10553 { cnt = v->dst[i]->num + d*(1 + v->to[i] - v->from[i]);
10554 if (d == 1 && cnt < v->dst[i]->num) goto bad;
10555 v->dst[i]->num = cnt;
10556 }
10557 for (e = v->Succ; e; e = e->Nxt)
10558 { cnt = e->Dst->num + d*(1 + e->To - e->From + e->s);
10559 if (d == 1 && cnt < e->Dst->num)
10560 bad: Uerror("too many incoming edges");
10561 e->Dst->num = cnt;
10562 }
10563 }
10564
10565 static void
10566 setDelta(Vertex *v, int h, Vertex *newdst) /* v->delta[h] = newdst; */
10567 { Edge *e, *f = (Edge *) 0, *g;
10568 int i;
10569
10570 /* remove the old entry, if there */
10571 for (i = 0; i < 2; i++)
10572 if (v->dst[i] && h >= v->from[i] && h <= v->to[i])
10573 { if (h == v->from[i])
10574 { if (h == v->to[i])
10575 { v->dst[i] = (Vertex *) 0;
10576 v->from[i] = v->to[i] = 0;
10577 } else
10578 v->from[i]++;
10579 } else if (h == v->to[i])
10580 { v->to[i]--;
10581 } else
10582 { g = new_edge(v->dst[i]);/* same dst */
10583 g->From = v->from[i];
10584 g->To = h-1; /* left half */
10585 v->from[i] = h+1; /* right half */
10586 insert_edge(v, g);
10587 }
10588 goto part2;
10589 }
10590 for (e = v->Succ; e; f = e, e = e->Nxt)
10591 { if (e->s == 1 && e->S == h)
10592 { e->s = e->S = 0;
10593 goto rem_tst;
10594 }
10595 if (h >= e->From && h <= e->To)
10596 { if (h == e->From)
10597 { if (h == e->To)
10598 { if (e->s)
10599 { e->From = e->To = e->S;
10600 e->s = 0;
10601 break;
10602 } else
10603 goto rem_do;
10604 } else
10605 e->From++;
10606 } else if (h == e->To)
10607 { e->To--;
10608 } else /* split */
10609 { g = new_edge(e->Dst); /* same dst */
10610 g->From = e->From;
10611 g->To = h-1; /* g=left half */
10612 e->From = h+1; /* e=right half */
10613 g->Nxt = e->Nxt; /* insert g */
10614 e->Nxt = g; /* behind e */
10615 break; /* done */
10616 }
10617
10618 rem_tst: if (e->From > e->To)
10619 { if (e->s == 0) {
10620 rem_do: if (f)
10621 f->Nxt = e->Nxt;
10622 else
10623 v->Succ = e->Nxt;
10624 e->Nxt = (Edge *) 0;
10625 recyc_edges(e);
10626 } else
10627 { e->From = e->To = e->S;
10628 e->s = 0;
10629 } }
10630 break;
10631 } }
10632 part2:
10633 /* check if newdst is already there */
10634 for (i = 0; i < 2; i++)
10635 if (v->dst[i] == newdst)
10636 { if (h+1 == (int) v->from[i])
10637 { v->from[i] = h;
10638 return;
10639 }
10640 if (h == (int) v->to[i]+1)
10641 { v->to[i] = h;
10642 return;
10643 } }
10644 for (e = v->Succ; e; e = e->Nxt)
10645 { if (e->Dst == newdst)
10646 { if (h+1 == (int) e->From)
10647 { e->From = h;
10648 if (e->s == 1 && e->S+1 == e->From)
10649 { e->From = e->S;
10650 e->s = e->S = 0;
10651 }
10652 return;
10653 }
10654 if (h == (int) e->To+1)
10655 { e->To = h;
10656 if (e->s == 1 && e->S == e->To+1)
10657 { e->To = e->S;
10658 e->s = e->S = 0;
10659 }
10660 return;
10661 }
10662 if (e->s == 0)
10663 { e->s = 1;
10664 e->S = h;
10665 return;
10666 } } }
10667 /* add as a new edge */
10668 e = new_edge(newdst);
10669 e->From = e->To = h;
10670 insert_edge(v, e);
10671 }
10672
10673 static ulong
10674 cheap_key(Vertex *v)
10675 { ulong vk2 = 0;
10676
10677 if (v->dst[0])
10678 { vk2 = (ulong) v->dst[0];
10679 if ((ulong) v->dst[1] > vk2)
10680 vk2 = (ulong) v->dst[1];
10681 } else if (v->dst[1])
10682 vk2 = (ulong) v->dst[1];
10683 if (v->Succ)
10684 { Edge *e;
10685 for (e = v->Succ; e; e = e->Nxt)
10686 if ((ulong) e->Dst > vk2)
10687 vk2 = (ulong) e->Dst;
10688 }
10689 Tally = (vk2>>2)&(TWIDTH-1);
10690 return v->key;
10691 }
10692
10693 static ulong
10694 mk_key(Vertex *v) /* not sensitive to order */
10695 { ulong m = 0, vk2 = 0;
10696 Edge *e;
10697
10698 if (v->dst[0])
10699 { m += HASH(v->dst[0], v->to[0] - v->from[0] + 1);
10700 vk2 = (ulong) v->dst[0];
10701 }
10702 if (v->dst[1])
10703 { m += HASH(v->dst[1], v->to[1] - v->from[1] + 1);
10704 if ((ulong) v->dst[1] > vk2) vk2 = (ulong) v->dst[1];
10705 }
10706 for (e = v->Succ; e; e = e->Nxt)
10707 { m += HASH(e->Dst, e->To - e->From + 1 + e->s);
10708 if ((ulong) e->Dst > vk2) vk2 = (ulong) e->Dst;
10709 }
10710 Tally = (vk2>>2)&(TWIDTH-1);
10711 return m;
10712 }
10713
10714 static ulong
10715 mk_special(int sigma, Vertex *n, Vertex *v)
10716 { ulong m = 0, vk2 = 0;
10717 Edge *f;
10718 int i;
10719
10720 for (i = 0; i < 2; i++)
10721 if (v->dst[i])
10722 { if (sigma >= v->from[i] && sigma <= v->to[i])
10723 { m += HASH(v->dst[i], v->to[i]-v->from[i]);
10724 if ((ulong) v->dst[i] > vk2
10725 && v->to[i] > v->from[i])
10726 vk2 = (ulong) v->dst[i];
10727 } else
10728 { m += HASH(v->dst[i], v->to[i]-v->from[i]+1);
10729 if ((ulong) v->dst[i] > vk2)
10730 vk2 = (ulong) v->dst[i];
10731 } }
10732 for (f = v->Succ; f; f = f->Nxt)
10733 { if (sigma >= f->From && sigma <= f->To)
10734 { m += HASH(f->Dst, f->To - f->From + f->s);
10735 if ((ulong) f->Dst > vk2
10736 && f->To - f->From + f->s > 0)
10737 vk2 = (ulong) f->Dst;
10738 } else if (f->s == 1 && sigma == f->S)
10739 { m += HASH(f->Dst, f->To - f->From + 1);
10740 if ((ulong) f->Dst > vk2) vk2 = (ulong) f->Dst;
10741 } else
10742 { m += HASH(f->Dst, f->To - f->From + 1 + f->s);
10743 if ((ulong) f->Dst > vk2) vk2 = (ulong) f->Dst;
10744 } }
10745
10746 if ((ulong) n > vk2) vk2 = (ulong) n;
10747 Tally = (vk2>>2)&(TWIDTH-1);
10748 m += HASH(n, 1);
10749 return m;
10750 }
10751
10752 void
10753 dfa_init(ushort nr_layers)
10754 { int i; Vertex *r, *t;
10755
10756 dfa_depth = nr_layers; /* one byte per layer */
10757 path = (Vertex **) emalloc((dfa_depth+1)*sizeof(Vertex *));
10758 layers = (Vertex **) emalloc(TWIDTH*(dfa_depth+1)*sizeof(Vertex *));
10759 lastword = (uchar *) emalloc((dfa_depth+1)*sizeof(uchar));
10760 lastword[dfa_depth] = lastword[0] = 255;
10761 path[0] = R = new_vertex(); F = new_vertex();
10762
10763 for (i = 1, r = R; i < dfa_depth; i++, r = t)
10764 t = allDelta(r, i-1);
10765 NF = allDelta(r, i-1);
10766 }
10767
10768 #if 0
10769 static void complement_dfa(void) { Vertex *tmp = F; F = NF; NF = tmp; }
10770 #endif
10771
10772 double
10773 tree_stats(Vertex *t)
10774 { Edge *e; double cnt=0.0;
10775 if (!t) return 0;
10776 if (!t->key) return 0;
10777 t->key = 0; /* precaution */
10778 if (t->dst[0]) cnt++;
10779 if (t->dst[1]) cnt++;
10780 for (e = t->Succ; e; e = e->Nxt)
10781 cnt++;
10782 cnt += tree_stats(t->lnk);
10783 cnt += tree_stats(t->left);
10784 cnt += tree_stats(t->right);
10785 return cnt;
10786 }
10787
10788 void
10789 dfa_stats(void)
10790 { int i, j; double cnt = 0.0;
10791 for (j = 0; j < TWIDTH; j++)
10792 for (i = 0; i < dfa_depth+1; i++)
10793 cnt += tree_stats(layers[i*TWIDTH+j]);
10794 printf("Minimized Automaton: %6d nodes and %6g edges\n",
10795 nr_states, cnt);
10796 }
10797
10798 int
10799 dfa_member(ulong n)
10800 { Vertex **p, **q;
10801 uchar *w = &word[n];
10802 int i;
10803
10804 p = &path[n]; q = (p+1);
10805 for (i = n; i < dfa_depth; i++)
10806 *q++ = Delta(*p++, *w++);
10807 return (*p == F);
10808 }
10809
10810 int
10811 dfa_store(uchar *sv)
10812 { Vertex **p, **q, *s, *y, *old, *new = F;
10813 uchar *w, *u = lastword;
10814 int i, j, k;
10815
10816 w = word = sv;
10817 while (*w++ == *u++) /* find first byte that differs */
10818 ;
10819 pfrst = (int) (u - lastword) - 1;
10820 memcpy(&lastword[pfrst], &sv[pfrst], dfa_depth-pfrst);
10821 if (pfrst > iv) pfrst = iv;
10822 if (pfrst > nv) pfrst = nv;
10823 /* phase1: */
10824 p = &path[pfrst]; q = (p+1); w = &word[pfrst];
10825 for (i = pfrst; i < dfa_depth; i++)
10826 *q++ = Delta(*p++, *w++); /* (*p)->delta[*w++]; */
10827
10828 if (*p == F) return 1; /* it's already there */
10829 /* phase2: */
10830 iv = dfa_depth;
10831 do { iv--;
10832 old = new;
10833 new = find_it(path[iv], old, word[iv], iv);
10834 } while (new && iv > 0);
10835
10836 /* phase3: */
10837 nv = k = 0; s = path[0];
10838 for (j = 1; j <= iv; ++j)
10839 if (path[j]->num > 1)
10840 { y = new_vertex();
10841 copyEdges(y, path[j]);
10842 insert_it(y, j);
10843 numDelta(y, 1);
10844 delete_it(s, j-1);
10845 setDelta(s, word[j-1], y);
10846 insert_it(s, j-1);
10847 y->num = 1; /* initial value 1 */
10848 s = y;
10849 path[j]->num--; /* only 1 moved from j to y */
10850 k = 1;
10851 } else
10852 { s = path[j];
10853 if (!k) nv = j;
10854 }
10855 y = Delta(s, word[iv]);
10856 y->num--;
10857 delete_it(s, iv);
10858 setDelta(s, word[iv], old);
10859 insert_it(s, iv);
10860 old->num++;
10861
10862 for (j = iv+1; j < dfa_depth; j++)
10863 if (path[j]->num == 0)
10864 { numDelta(path[j], -1);
10865 delete_it(path[j], j);
10866 recyc_vertex(path[j]);
10867 } else
10868 break;
10869 return 0;
10870 }
10871
10872 static Vertex *
10873 splay(ulong i, Vertex *t)
10874 { Vertex N, *l, *r, *y;
10875
10876 if (!t) return t;
10877 N.left = N.right = (Vertex *) 0;
10878 l = r = &N;
10879 for (;;)
10880 { if (i < t->key)
10881 { if (!t->left) break;
10882 if (i < t->left->key)
10883 { y = t->left;
10884 t->left = y->right;
10885 y->right = t;
10886 t = y;
10887 if (!t->left) break;
10888 }
10889 r->left = t;
10890 r = t;
10891 t = t->left;
10892 } else if (i > t->key)
10893 { if (!t->right) break;
10894 if (i > t->right->key)
10895 { y = t->right;
10896 t->right = y->left;
10897 y->left = t;
10898 t = y;
10899 if (!t->right) break;
10900 }
10901 l->right = t;
10902 l = t;
10903 t = t->right;
10904 } else
10905 break;
10906 }
10907 l->right = t->left;
10908 r->left = t->right;
10909 t->left = N.right;
10910 t->right = N.left;
10911 return t;
10912 }
10913
10914 static void
10915 insert_it(Vertex *v, int L)
10916 { Vertex *new, *t;
10917 ulong i; int nr;
10918
10919 i = mk_key(v);
10920 nr = ((L*TWIDTH)+Tally);
10921 t = layers[nr];
10922
10923 v->key = i;
10924 if (!t)
10925 { layers[nr] = v;
10926 return;
10927 }
10928 t = splay(i, t);
10929 if (i < t->key)
10930 { new = v;
10931 new->left = t->left;
10932 new->right = t;
10933 t->left = (Vertex *) 0;
10934 } else if (i > t->key)
10935 { new = v;
10936 new->right = t->right;
10937 new->left = t;
10938 t->right = (Vertex *) 0;
10939 } else /* it's already there */
10940 { v->lnk = t->lnk; /* put in linked list off v */
10941 t->lnk = v;
10942 new = t;
10943 }
10944 layers[nr] = new;
10945 }
10946
10947 static int
10948 checkit(Vertex *h, Vertex *v, Vertex *n, uchar sigma)
10949 { Edge *g, *f;
10950 int i, k, j = 1;
10951
10952 for (k = 0; k < 2; k++)
10953 if (h->dst[k])
10954 { if (sigma >= h->from[k] && sigma <= h->to[k])
10955 { if (h->dst[k] != n) goto no_match;
10956 }
10957 for (i = h->from[k]; i <= h->to[k]; i++)
10958 { if (i == sigma) continue;
10959 g = cacheDelta(v, i, j); j = 0;
10960 if (h->dst[k] != g->Dst)
10961 goto no_match;
10962 if (g->s == 0 || g->S != i)
10963 i = g->To;
10964 } }
10965 for (f = h->Succ; f; f = f->Nxt)
10966 { if (INRANGE(f,sigma))
10967 { if (f->Dst != n) goto no_match;
10968 }
10969 for (i = f->From; i <= f->To; i++)
10970 { if (i == sigma) continue;
10971 g = cacheDelta(v, i, j); j = 0;
10972 if (f->Dst != g->Dst)
10973 goto no_match;
10974 if (g->s == 1 && i == g->S)
10975 continue;
10976 i = g->To;
10977 }
10978 if (f->s && f->S != sigma)
10979 { g = cacheDelta(v, f->S, 1);
10980 if (f->Dst != g->Dst)
10981 goto no_match;
10982 }
10983 }
10984 if (h->Succ || h->dst[0] || h->dst[1]) return 1;
10985 no_match:
10986 return 0;
10987 }
10988
10989 static Vertex *
10990 find_it(Vertex *v, Vertex *n, uchar sigma, int L)
10991 { Vertex *z, *t;
10992 ulong i; int nr;
10993
10994 i = mk_special(sigma,n,v);
10995 nr = ((L*TWIDTH)+Tally);
10996 t = layers[nr];
10997
10998 if (!t) return (Vertex *) 0;
10999 layers[nr] = t = splay(i, t);
11000 if (i == t->key)
11001 for (z = t; z; z = z->lnk)
11002 if (checkit(z, v, n, sigma))
11003 return z;
11004
11005 return (Vertex *) 0;
11006 }
11007
11008 static void
11009 delete_it(Vertex *v, int L)
11010 { Vertex *x, *t;
11011 ulong i; int nr;
11012
11013 i = cheap_key(v);
11014 nr = ((L*TWIDTH)+Tally);
11015 t = layers[nr];
11016 if (!t) return;
11017
11018 t = splay(i, t);
11019 if (i == t->key)
11020 { Vertex *z, *y = (Vertex *) 0;
11021 for (z = t; z && z != v; y = z, z = z->lnk)
11022 ;
11023 if (z != v) goto bad;
11024 if (y)
11025 { y->lnk = z->lnk;
11026 z->lnk = (Vertex *) 0;
11027 layers[nr] = t;
11028 return;
11029 } else if (z->lnk) /* z == t == v */
11030 { y = z->lnk;
11031 y->left = t->left;
11032 y->right = t->right;
11033 t->left = t->right = t->lnk = (Vertex *) 0;
11034 layers[nr] = y;
11035 return;
11036 }
11037 /* delete the node itself */
11038 if (!t->left)
11039 { x = t->right;
11040 } else
11041 { x = splay(i, t->left);
11042 x->right = t->right;
11043 }
11044 t->left = t->right = t->lnk = (Vertex *) 0;
11045 layers[nr] = x;
11046 return;
11047 }
11048 bad: Uerror("cannot happen delete");
11049 }
11050 #endif
11051 #if defined(MA) && (defined(W_XPT) || defined(R_XPT))
11052 static Vertex **temptree;
11053 static char wbuf[4096];
11054 static int WCNT = 4096, wcnt=0;
11055 static uchar stacker[MA+1];
11056 static ulong stackcnt = 0;
11057 extern double nstates, nlinks, truncs, truncs2;
11058
11059 static void
11060 xwrite(int fd, char *b, int n)
11061 {
11062 if (wcnt+n >= 4096)
11063 { write(fd, wbuf, wcnt);
11064 wcnt = 0;
11065 }
11066 memcpy(&wbuf[wcnt], b, n);
11067 wcnt += n;
11068 }
11069
11070 static void
11071 wclose(fd)
11072 {
11073 if (wcnt > 0)
11074 write(fd, wbuf, wcnt);
11075 wcnt = 0;
11076 close(fd);
11077 }
11078
11079 static void
11080 w_vertex(int fd, Vertex *v)
11081 { char t[3]; int i; Edge *e;
11082
11083 xwrite(fd, (char *) &v, sizeof(Vertex *));
11084 t[0] = 0;
11085 for (i = 0; i < 2; i++)
11086 if (v->dst[i])
11087 { t[1] = v->from[i], t[2] = v->to[i];
11088 xwrite(fd, t, 3);
11089 xwrite(fd, (char *) &(v->dst[i]), sizeof(Vertex *));
11090 }
11091 for (e = v->Succ; e; e = e->Nxt)
11092 { t[1] = e->From, t[2] = e->To;
11093 xwrite(fd, t, 3);
11094 xwrite(fd, (char *) &(e->Dst), sizeof(Vertex *));
11095
11096 if (e->s)
11097 { t[1] = t[2] = e->S;
11098 xwrite(fd, t, 3);
11099 xwrite(fd, (char *) &(e->Dst), sizeof(Vertex *));
11100 } }
11101 }
11102
11103 static void
11104 w_layer(int fd, Vertex *v)
11105 { uchar c=1;
11106
11107 if (!v) return;
11108 xwrite(fd, (char *) &c, 1);
11109 w_vertex(fd, v);
11110 w_layer(fd, v->lnk);
11111 w_layer(fd, v->left);
11112 w_layer(fd, v->right);
11113 }
11114
11115 void
11116 w_xpoint(void)
11117 { int fd; char nm[64];
11118 int i, j; uchar c;
11119 static uchar xwarned = 0;
11120
11121 sprintf(nm, "%s.xpt", PanSource);
11122 if ((fd = creat(nm, 0666)) <= 0)
11123 if (!xwarned)
11124 { xwarned = 1;
11125 printf("cannot creat checkpoint file\n");
11126 return;
11127 }
11128 xwrite(fd, (char *) &nstates, sizeof(double));
11129 xwrite(fd, (char *) &truncs, sizeof(double));
11130 xwrite(fd, (char *) &truncs2, sizeof(double));
11131 xwrite(fd, (char *) &nlinks, sizeof(double));
11132 xwrite(fd, (char *) &dfa_depth, sizeof(int));
11133 xwrite(fd, (char *) &R, sizeof(Vertex *));
11134 xwrite(fd, (char *) &F, sizeof(Vertex *));
11135 xwrite(fd, (char *) &NF, sizeof(Vertex *));
11136
11137 for (j = 0; j < TWIDTH; j++)
11138 for (i = 0; i < dfa_depth+1; i++)
11139 { w_layer(fd, layers[i*TWIDTH+j]);
11140 c = 2; xwrite(fd, (char *) &c, 1);
11141 }
11142 wclose(fd);
11143 }
11144
11145 static void
11146 xread(int fd, char *b, int n)
11147 { int m = wcnt; int delta = 0;
11148 if (m < n)
11149 { if (m > 0) memcpy(b, &wbuf[WCNT-m], m);
11150 delta = m;
11151 WCNT = wcnt = read(fd, wbuf, 4096);
11152 if (wcnt < n-m)
11153 Uerror("xread failed -- insufficient data");
11154 n -= m;
11155 }
11156 memcpy(&b[delta], &wbuf[WCNT-wcnt], n);
11157 wcnt -= n;
11158 }
11159
11160 static void
11161 x_cleanup(Vertex *c)
11162 { Edge *e; /* remove the tree and edges from c */
11163 if (!c) return;
11164 for (e = c->Succ; e; e = e->Nxt)
11165 x_cleanup(e->Dst);
11166 recyc_vertex(c);
11167 }
11168
11169 static void
11170 x_remove(void)
11171 { Vertex *tmp; int i, s;
11172 int r, j;
11173 /* double-check: */
11174 stacker[dfa_depth-1] = 0; r = dfa_store(stacker);
11175 stacker[dfa_depth-1] = 4; j = dfa_member(dfa_depth-1);
11176 if (r != 1 || j != 0)
11177 { printf("%d: ", stackcnt);
11178 for (i = 0; i < dfa_depth; i++)
11179 printf("%d,", stacker[i]);
11180 printf(" -- not a stackstate <o:%d,4:%d>\n", r, j);
11181 return;
11182 }
11183 stacker[dfa_depth-1] = 1;
11184 s = dfa_member(dfa_depth-1);
11185
11186 { tmp = F; F = NF; NF = tmp; } /* complement */
11187 if (s) dfa_store(stacker);
11188 stacker[dfa_depth-1] = 0;
11189 dfa_store(stacker);
11190 stackcnt++;
11191 { tmp = F; F = NF; NF = tmp; }
11192 }
11193
11194 static void
11195 x_rm_stack(Vertex *t, int k)
11196 { int j; Edge *e;
11197
11198 if (k == 0)
11199 { x_remove();
11200 return;
11201 }
11202 if (t)
11203 for (e = t->Succ; e; e = e->Nxt)
11204 { for (j = e->From; j <= (int) e->To; j++)
11205 { stacker[k] = (uchar) j;
11206 x_rm_stack(e->Dst, k-1);
11207 }
11208 if (e->s)
11209 { stacker[k] = e->S;
11210 x_rm_stack(e->Dst, k-1);
11211 } }
11212 }
11213
11214 static Vertex *
11215 insert_withkey(Vertex *v, int L)
11216 { Vertex *new, *t = temptree[L];
11217
11218 if (!t) { temptree[L] = v; return v; }
11219 t = splay(v->key, t);
11220 if (v->key < t->key)
11221 { new = v;
11222 new->left = t->left;
11223 new->right = t;
11224 t->left = (Vertex *) 0;
11225 } else if (v->key > t->key)
11226 { new = v;
11227 new->right = t->right;
11228 new->left = t;
11229 t->right = (Vertex *) 0;
11230 } else
11231 { if (t != R && t != F && t != NF)
11232 Uerror("double insert, bad checkpoint data");
11233 else
11234 { recyc_vertex(v);
11235 new = t;
11236 } }
11237 temptree[L] = new;
11238
11239 return new;
11240 }
11241
11242 static Vertex *
11243 find_withkey(Vertex *v, int L)
11244 { Vertex *t = temptree[L];
11245 if (t)
11246 { temptree[L] = t = splay((ulong) v, t);
11247 if (t->key == (ulong) v)
11248 return t;
11249 }
11250 Uerror("not found error, bad checkpoint data");
11251 return (Vertex *) 0;
11252 }
11253
11254 void
11255 r_layer(int fd, int n)
11256 { Vertex *v;
11257 Edge *e;
11258 char c, t[2];
11259
11260 for (;;)
11261 { xread(fd, &c, 1);
11262 if (c == 2) break;
11263 if (c == 1)
11264 { v = new_vertex();
11265 xread(fd, (char *) &(v->key), sizeof(Vertex *));
11266 v = insert_withkey(v, n);
11267 } else /* c == 0 */
11268 { e = new_edge((Vertex *) 0);
11269 xread(fd, t, 2);
11270 e->From = t[0];
11271 e->To = t[1];
11272 xread(fd, (char *) &(e->Dst), sizeof(Vertex *));
11273 insert_edge(v, e);
11274 } }
11275 }
11276
11277 static void
11278 v_fix(Vertex *t, int nr)
11279 { int i; Edge *e;
11280
11281 if (!t) return;
11282
11283 for (i = 0; i < 2; i++)
11284 if (t->dst[i])
11285 t->dst[i] = find_withkey(t->dst[i], nr);
11286
11287 for (e = t->Succ; e; e = e->Nxt)
11288 e->Dst = find_withkey(e->Dst, nr);
11289
11290 v_fix(t->left, nr);
11291 v_fix(t->right, nr);
11292 }
11293
11294 static void
11295 v_insert(Vertex *t, int nr)
11296 { Edge *e; int i;
11297
11298 if (!t) return;
11299 v_insert(t->left, nr);
11300 v_insert(t->right, nr);
11301
11302 /* remove only leafs from temptree */
11303 t->left = t->right = t->lnk = (Vertex *) 0;
11304 insert_it(t, nr); /* into layers */
11305 for (i = 0; i < 2; i++)
11306 if (t->dst[i])
11307 t->dst[i]->num += (t->to[i] - t->from[i] + 1);
11308 for (e = t->Succ; e; e = e->Nxt)
11309 e->Dst->num += (e->To - e->From + 1 + e->s);
11310 }
11311
11312 static void
11313 x_fixup(void)
11314 { int i;
11315
11316 for (i = 0; i < dfa_depth; i++)
11317 v_fix(temptree[i], (i+1));
11318
11319 for (i = dfa_depth; i >= 0; i--)
11320 v_insert(temptree[i], i);
11321 }
11322
11323 static Vertex *
11324 x_tail(Vertex *t, ulong want)
11325 { int i, yes, no; Edge *e; Vertex *v = (Vertex *) 0;
11326
11327 if (!t) return v;
11328
11329 yes = no = 0;
11330 for (i = 0; i < 2; i++)
11331 if ((ulong) t->dst[i] == want)
11332 { /* was t->from[i] <= 0 && t->to[i] >= 0 */
11333 /* but from and to are uchar */
11334 if (t->from[i] == 0)
11335 yes = 1;
11336 else
11337 if (t->from[i] <= 4 && t->to[i] >= 4)
11338 no = 1;
11339 }
11340
11341 for (e = t->Succ; e; e = e->Nxt)
11342 if ((ulong) e->Dst == want)
11343 { /* was INRANGE(e,0) but From and To are uchar */
11344 if ((e->From == 0) || (e->s==1 && e->S==0))
11345 yes = 1;
11346 else if (INRANGE(e, 4))
11347 no = 1;
11348 }
11349 if (yes && !no) return t;
11350 v = x_tail(t->left, want); if (v) return v;
11351 v = x_tail(t->right, want); if (v) return v;
11352 return (Vertex *) 0;
11353 }
11354
11355 static void
11356 x_anytail(Vertex *t, Vertex *c, int nr)
11357 { int i; Edge *e, *f; Vertex *v;
11358
11359 if (!t) return;
11360
11361 for (i = 0; i < 2; i++)
11362 if ((ulong) t->dst[i] == c->key)
11363 { v = new_vertex(); v->key = t->key;
11364 f = new_edge(v);
11365 f->From = t->from[i];
11366 f->To = t->to[i];
11367 f->Nxt = c->Succ;
11368 c->Succ = f;
11369 if (nr > 0)
11370 x_anytail(temptree[nr-1], v, nr-1);
11371 }
11372
11373 for (e = t->Succ; e; e = e->Nxt)
11374 if ((ulong) e->Dst == c->key)
11375 { v = new_vertex(); v->key = t->key;
11376 f = new_edge(v);
11377 f->From = e->From;
11378 f->To = e->To;
11379 f->s = e->s;
11380 f->S = e->S;
11381 f->Nxt = c->Succ;
11382 c->Succ = f;
11383 x_anytail(temptree[nr-1], v, nr-1);
11384 }
11385
11386 x_anytail(t->left, c, nr);
11387 x_anytail(t->right, c, nr);
11388 }
11389
11390 static Vertex *
11391 x_cpy_rev(void)
11392 { Vertex *c, *v; /* find 0 and !4 predecessor of F */
11393
11394 v = x_tail(temptree[dfa_depth-1], F->key);
11395 if (!v) return (Vertex *) 0;
11396
11397 c = new_vertex(); c->key = v->key;
11398
11399 /* every node on dfa_depth-2 that has v->key as succ */
11400 /* make copy and let c point to these (reversing ptrs) */
11401
11402 x_anytail(temptree[dfa_depth-2], c, dfa_depth-2);
11403
11404 return c;
11405 }
11406
11407 void
11408 r_xpoint(void)
11409 { int fd; char nm[64]; Vertex *d;
11410 int i, j;
11411
11412 wcnt = 0;
11413 sprintf(nm, "%s.xpt", PanSource);
11414 if ((fd = open(nm, 0)) < 0) /* O_RDONLY */
11415 Uerror("cannot open checkpoint file");
11416
11417 xread(fd, (char *) &nstates, sizeof(double));
11418 xread(fd, (char *) &truncs, sizeof(double));
11419 xread(fd, (char *) &truncs2, sizeof(double));
11420 xread(fd, (char *) &nlinks, sizeof(double));
11421 xread(fd, (char *) &dfa_depth, sizeof(int));
11422
11423 if (dfa_depth != MA+a_cycles)
11424 Uerror("bad dfa_depth in checkpoint file");
11425
11426 path = (Vertex **) emalloc((dfa_depth+1)*sizeof(Vertex *));
11427 layers = (Vertex **) emalloc(TWIDTH*(dfa_depth+1)*sizeof(Vertex *));
11428 temptree = (Vertex **) emalloc((dfa_depth+2)*sizeof(Vertex *));
11429 lastword = (uchar *) emalloc((dfa_depth+1)*sizeof(uchar));
11430 lastword[dfa_depth] = lastword[0] = 255;
11431
11432 path[0] = R = new_vertex();
11433 xread(fd, (char *) &R->key, sizeof(Vertex *));
11434 R = insert_withkey(R, 0);
11435
11436 F = new_vertex();
11437 xread(fd, (char *) &F->key, sizeof(Vertex *));
11438 F = insert_withkey(F, dfa_depth);
11439
11440 NF = new_vertex();
11441 xread(fd, (char *) &NF->key, sizeof(Vertex *));
11442 NF = insert_withkey(NF, dfa_depth);
11443
11444 for (j = 0; j < TWIDTH; j++)
11445 for (i = 0; i < dfa_depth+1; i++)
11446 r_layer(fd, i);
11447
11448 if (wcnt != 0) Uerror("bad count in checkpoint file");
11449
11450 d = x_cpy_rev();
11451 x_fixup();
11452 stacker[dfa_depth-1] = 0;
11453 x_rm_stack(d, dfa_depth-2);
11454 x_cleanup(d);
11455 close(fd);
11456
11457 printf("pan: removed %d stackstates\n", stackcnt);
11458 nstates -= (double) stackcnt;
11459 }
11460 #endif
11461 #ifdef VERI
11462 void
11463 check_claim(int st)
11464 {
11465 if (st == endclaim)
11466 uerror("claim violated!");
11467 if (stopstate[VERI][st])
11468 uerror("end state in claim reached");
11469 }
11470 #endif
11471 void
11472 c_globals(void)
11473 { /* int i; */
11474 printf("global vars:\n");
11475 printf(" byte write_off: %d\n", now.write_off);
11476 { int l_in;
11477 for (l_in = 0; l_in < 2; l_in++)
11478 {
11479 printf(" byte commit_count[%d]: %d\n", l_in, now.commit_count[l_in]);
11480 }
11481 }
11482 printf(" byte _commit_sum: %d\n", now._commit_sum);
11483 printf(" byte read_off: %d\n", now.read_off);
11484 printf(" byte events_lost: %d\n", now.events_lost);
11485 printf(" byte refcount: %d\n", now.refcount);
11486 { int l_in;
11487 for (l_in = 0; l_in < 8; l_in++)
11488 {
11489 printf(" bit buffer_use[%d]: %d\n", l_in, now.buffer_use[l_in]);
11490 }
11491 }
11492 }
11493 void
11494 c_locals(int pid, int tp)
11495 { /* int i; */
11496 switch(tp) {
11497 case 5:
11498 /* none */
11499 break;
11500 case 4:
11501 printf("local vars proc %d (:init:):\n", pid);
11502 printf(" byte i: %d\n", ((P4 *)pptr(pid))->i);
11503 printf(" byte j: %d\n", ((P4 *)pptr(pid))->j);
11504 printf(" byte sum: %d\n", ((P4 *)pptr(pid))->sum);
11505 printf(" byte commit_sum: %d\n", ((P4 *)pptr(pid))->commit_sum);
11506 break;
11507 case 3:
11508 /* none */
11509 break;
11510 case 2:
11511 printf("local vars proc %d (reader):\n", pid);
11512 printf(" byte i: %d\n", ((P2 *)pptr(pid))->i);
11513 printf(" byte j: %d\n", ((P2 *)pptr(pid))->j);
11514 break;
11515 case 1:
11516 printf("local vars proc %d (tracer):\n", pid);
11517 printf(" byte size: %d\n", ((P1 *)pptr(pid))->size);
11518 printf(" byte prev_off: %d\n", ((P1 *)pptr(pid))->prev_off);
11519 printf(" byte new_off: %d\n", ((P1 *)pptr(pid))->new_off);
11520 printf(" byte tmp_commit: %d\n", ((P1 *)pptr(pid))->tmp_commit);
11521 printf(" byte i: %d\n", ((P1 *)pptr(pid))->i);
11522 printf(" byte j: %d\n", ((P1 *)pptr(pid))->j);
11523 break;
11524 case 0:
11525 printf("local vars proc %d (switcher):\n", pid);
11526 printf(" byte prev_off: %d\n", ((P0 *)pptr(pid))->prev_off);
11527 printf(" byte new_off: %d\n", ((P0 *)pptr(pid))->new_off);
11528 printf(" byte tmp_commit: %d\n", ((P0 *)pptr(pid))->tmp_commit);
11529 printf(" byte size: %d\n", ((P0 *)pptr(pid))->size);
11530 break;
11531 }
11532 }
11533 void
11534 printm(int x)
11535 {
11536 switch (x) {
11537 default: Printf("%d", x);
11538 }
11539 }
11540 void
11541 c_chandump(int unused) { unused++; /* avoid complaints */ }
This page took 0.293757 seconds and 4 git commands to generate.