[lttv.git] / trunk / verif / examples / pan.c
1 /*** Generated by Spin Version 5.1.6 -- 9 May 2008 ***/
2 /*** From source: buffer.spin ***/
3
4 #ifdef SC
5 #define _FILE_OFFSET_BITS 64
6 #endif
7 #include <stdio.h>
8 #include <signal.h>
9 #include <stdlib.h>
10 #include <stdarg.h>
11 #include <string.h>
12 #include <ctype.h>
13 #include <errno.h>
14 #if defined(WIN32) || defined(WIN64)
15 #include <time.h>
16 #else
17 #include <unistd.h>
18 #include <sys/times.h>
19 #endif
20 #include <sys/types.h>
21 #include <sys/stat.h>
22 #include <fcntl.h>
23 #define Offsetof(X, Y) ((unsigned long)(&(((X *)0)->Y)))
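/* Classic null-pointer offsetof idiom; for example, Offsetof(State, _nr_pr)
 * evaluates to the byte offset of the _nr_pr field inside the state-vector
 * struct State. (illustrative note)
 */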
24 #ifndef max
25 #define max(a,b) (((a)<(b)) ? (b) : (a))
26 #endif
27 #ifndef PRINTF
28 int Printf(const char *fmt, ...); /* prototype only */
29 #endif
30 #include "pan.h"
31 #ifdef LOOPSTATE
32 double cnt_loops;
33 #endif
34 State A_Root; /* seed-state for cycles */
35 State now; /* the full state-vector */
36 #undef C_States
37 #if defined(C_States) && defined(HAS_TRACK)
38 void
39 c_update(uchar *p_t_r)
40 {
41 #ifdef VERBOSE
42 printf("c_update %u\n", p_t_r);
43 #endif
44 }
45 void
46 c_revert(uchar *p_t_r)
47 {
48 #ifdef VERBOSE
49 printf("c_revert %u\n", p_t_r);
50 #endif
51 }
52 #endif
53 void
54 globinit(void)
55 {
56 }
57 void
58 locinit4(int h)
59 {
60 }
61 void
62 locinit3(int h)
63 {
64 }
65 void
66 locinit2(int h)
67 {
68 }
69 void
70 locinit1(int h)
71 {
72 }
73 void
74 locinit0(int h)
75 {
76 }
77 #ifdef CNTRSTACK
78 #define onstack_now() (LL[trpt->j6] && LL[trpt->j7])
79 #define onstack_put() LL[trpt->j6]++; LL[trpt->j7]++
80 #define onstack_zap() LL[trpt->j6]--; LL[trpt->j7]--
81 #endif
82 #if !defined(SAFETY) && !defined(NOCOMP)
83 #define V_A (((now._a_t&1)?2:1) << (now._a_t&2))
84 #define A_V (((now._a_t&1)?1:2) << (now._a_t&2))
85 int S_A = 0;
86 #else
87 #define V_A 0
88 #define A_V 0
89 #define S_A 0
90 #endif
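/* In outline: V_A selects the bit of a stored state's 'tagged' field that
 * belongs to the current phase of the nested (acceptance-cycle) search, as
 * encoded in the low bits of now._a_t, while A_V selects the bit of the
 * alternate phase; see the FULLSTACK version of onstack_zap() below, which
 * clears only V_A.
 */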
91 #ifdef MA
92 #undef onstack_now
93 #undef onstack_put
94 #undef onstack_zap
95 #define onstack_put() ;
96 #define onstack_zap() gstore((char *) &now, vsize, 4)
97 #else
98 #if defined(FULLSTACK) && !defined(BITSTATE)
99 #define onstack_put() trpt->ostate = Lstate
100 #define onstack_zap() { \
101 if (trpt->ostate) \
102 trpt->ostate->tagged = \
103 (S_A)? (trpt->ostate->tagged&~V_A) : 0; \
104 }
105 #endif
106 #endif
107 #ifndef NO_V_PROVISO
108 #define V_PROVISO
109 #endif
110 #if !defined(NO_RESIZE) && !defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(SPACE) && NCORE==1
111 #define AUTO_RESIZE
112 #endif
113
114 struct H_el {
115 struct H_el *nxt;
116 #ifdef FULLSTACK
117 unsigned int tagged;
118 #if defined(BITSTATE) && !defined(NOREDUCE) && !defined(SAFETY)
119 unsigned int proviso;
120 #endif
121 #endif
122 #if defined(CHECK) || (defined(COLLAPSE) && !defined(FULLSTACK))
123 unsigned long st_id;
124 #endif
125 #if !defined(SAFETY) || defined(REACH)
126 unsigned int D;
127 #endif
128 #if NCORE>1
129 /* could cost 1 extra word: 4 bytes if 32-bit and 8 bytes if 64-bit */
130 #ifdef V_PROVISO
131 uchar cpu_id; /* id of cpu that created the state */
132 #endif
133 #endif
134 #ifdef COLLAPSE
135 #if VECTORSZ<65536
136 unsigned short ln;
137 #else
138 unsigned long ln;
139 #endif
140 #endif
141 #if defined(AUTO_RESIZE) && !defined(BITSTATE)
142 unsigned long m_K1;
143 #endif
144 unsigned long state;
145 } **H_tab, **S_Tab;
146
147 typedef struct Trail {
148 int st; /* current state */
149 uchar pr; /* process id */
150 uchar tau; /* 8 bit-flags */
151 uchar o_pm; /* 8 more bit-flags */
152 #if 0
153 Meaning of bit-flags:
154 tau&1 -> timeout enabled
155 tau&2 -> request to enable timeout 1 level up (in claim)
156 tau&4 -> current transition is a claim move
157 tau&8 -> current transition is an atomic move
158 tau&16 -> last move was truncated on stack
159 tau&32 -> current transition is a preselected move
160 tau&64 -> at least one next state is not on the stack
161 tau&128 -> current transition is a stutter move
162 o_pm&1 -> the current pid moved -- implements else
163 o_pm&2 -> this is an acceptance state
164 o_pm&4 -> this is a progress state
165 o_pm&8 -> fairness alg rule 1 undo mark
166 o_pm&16 -> fairness alg rule 3 undo mark
167 o_pm&32 -> fairness alg rule 2 undo mark
168 o_pm&64 -> the current proc applied rule2
169 o_pm&128 -> a fairness, dummy move - all procs blocked
170 #endif
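/* Representative uses of these flags later in this file:
 *	trpt->tau |= 32;	preselect marker (partial-order reduction)
 *	ntrpt->tau = 8;		next move continues an atomic sequence
 *	if (trpt->tau&8) ...	restrict the next step to the same process
 *	trpt->o_pm |= 1;	the current process moved (enables 'else')
 */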
171 #ifdef NSUCC
172 uchar n_succ; /* nr of successor states */
173 #endif
174 #if defined(FULLSTACK) && defined(MA) && !defined(BFS)
175 uchar proviso;
176 #endif
177 #ifndef BFS
178 uchar o_n, o_ot; /* to save locals */
179 #endif
180 uchar o_m;
181 #ifdef EVENT_TRACE
182 #if nstates_event<256
183 uchar o_event;
184 #else
185 unsigned short o_event;
186 #endif
187 #endif
188 int o_tt;
189 #ifndef BFS
190 short o_To;
191 #ifdef RANDOMIZE
192 short oo_i;
193 #endif
194 #endif
195 #if defined(HAS_UNLESS) && !defined(BFS)
196 int e_state; /* if escape trans - state of origin */
197 #endif
198 #if (defined(FULLSTACK) && !defined(MA)) || defined(BFS) || (NCORE>1)
199 struct H_el *ostate; /* pointer to stored state */
200 #endif
201 #if defined(CNTRSTACK) && !defined(BFS)
202 long j6, j7;
203 #endif
204 Trans *o_t;
205 #ifdef SCHED
206 	/* based on Qadeer & Rehof, TACAS 2005, LNCS 3440, pp. 93-107 */
207 #if NCORE>1
208 #error "-DSCHED cannot be combined with -DNCORE (yet)"
209 #endif
210 int sched_limit;
211 #endif
212 #ifdef HAS_SORTED
213 short ipt;
214 #endif
215 union {
216 int oval;
217 int *ovals;
218 } bup;
219 } Trail;
220 Trail *trail, *trpt;
221 FILE *efd;
222 uchar *this;
223 long maxdepth=10000;
224 long omaxdepth=10000;
225 #ifdef SCHED
226 int sched_max = 10;
227 #endif
228 #ifdef PERMUTED
229 uchar permuted = 1;
230 #else
231 uchar permuted = 0;
232 #endif
233 double quota; /* time limit */
234 #if NCORE>1
235 long z_handoff = -1;
236 #endif
237 #ifdef SC
238 char *stackfile;
239 #endif
240 uchar *SS, *LL;
241 uchar HASH_NR = 0;
242
243 double memcnt = (double) 0;
244 double memlim = (double) (1<<30); /* 1 GB */
245 #if NCORE>1
246 double mem_reserved = (double) 0;
247 #endif
248
249 /* for emalloc: */
250 static char *have;
251 static long left = 0L;
252 static double fragment = (double) 0;
253 static unsigned long grow;
254
255 unsigned int HASH_CONST[] = {
256 	/* assuming 4 bytes per int */
257 0x88888EEF, 0x00400007,
258 0x04c11db7, 0x100d4e63,
259 0x0fc22f87, 0x3ff0c3ff,
260 0x38e84cd7, 0x02b148e9,
261 0x98b2e49d, 0xb616d379,
262 0xa5247fd9, 0xbae92a15,
263 0xb91c8bc5, 0x8e5880f3,
264 0xacd7c069, 0xb4c44bb3,
265 0x2ead1fb7, 0x8e428171,
266 0xdbebd459, 0x828ae611,
267 0x6cb25933, 0x86cdd651,
268 0x9e8f5f21, 0xd5f8d8e7,
269 0x9c4e956f, 0xb5cf2c71,
270 0x2e805a6d, 0x33fc3a55,
271 0xaf203ed1, 0xe31f5909,
272 0x5276db35, 0x0c565ef7,
273 0x273d1aa5, 0x8923b1dd,
274 0
275 };
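/* Alternate hash seeds: with -DBITSTATE, each additional run re-seeds the
 * hash functions with the next nonzero entry of this table (indexed by
 * HASH_NR in the go_again loop of run() below); the trailing 0 terminates
 * the table.
 */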
276 #if NCORE>1
277 extern int core_id;
278 #endif
279 long mreached=0;
280 int done=0, errors=0, Nrun=1;
281 int c_init_done=0;
282 char *c_stack_start = (char *) 0;
283 double nstates=0, nlinks=0, truncs=0, truncs2=0;
284 double nlost=0, nShadow=0, hcmp=0, ngrabs=0;
285 #if defined(ZAPH) && defined(BITSTATE)
286 double zstates = 0;
287 #endif
288 int c_init_run;
289 #ifdef BFS
290 double midrv=0, failedrv=0, revrv=0;
291 #endif
292 unsigned long nr_states=0; /* nodes in DFA */
293 long Fa=0, Fh=0, Zh=0, Zn=0;
294 long PUT=0, PROBE=0, ZAPS=0;
295 long Ccheck=0, Cholds=0;
296 int a_cycles=0, upto=1, strict=0, verbose = 0, signoff = 0;
297 #ifdef HAS_CODE
298 int gui = 0, coltrace = 0, readtrail = 0;
299 int whichtrail = 0, onlyproc = -1, silent = 0;
300 #endif
301 int state_tables=0, fairness=0, no_rck=0, Nr_Trails=0;
302 char simvals[128];
303 #ifndef INLINE
304 int TstOnly=0;
305 #endif
306 unsigned long mask, nmask;
307 #ifdef BITSTATE
308 int ssize=23; /* 1 Mb */
309 #else
310 int ssize=19; /* 512K slots */
311 #endif
312 int hmax=0, svmax=0, smax=0;
313 int Maxbody=0, XX;
314 uchar *noptr; /* used by macro Pptr(x) */
315 #ifdef VAR_RANGES
316 void logval(char *, int);
317 void dumpranges(void);
318 #endif
319 #ifdef MA
320 #define INLINE_REV
321 extern void dfa_init(unsigned short);
322 extern int dfa_member(unsigned long);
323 extern int dfa_store(uchar *);
324 unsigned int maxgs = 0;
325 #endif
326
327 #ifdef ALIGNED
328 State comp_now __attribute__ ((aligned (8)));
329 /* gcc 64-bit aligned for Itanium2 systems */
330 /* MAJOR runtime penalty if not used on those systems */
331 #else
332 State comp_now; /* compressed state vector */
333 #endif
334
335 State comp_msk;
336 uchar *Mask = (uchar *) &comp_msk;
337 #ifdef COLLAPSE
338 State comp_tmp;
339 static char *scratch = (char *) &comp_tmp;
340 #endif
341 Stack *stack; /* for queues, processes */
342 Svtack *svtack; /* for old state vectors */
343 #ifdef BITSTATE
344 static unsigned int hfns = 3; /* new default */
345 #endif
346 static unsigned long j1;
347 static unsigned long K1, K2;
348 static unsigned long j2, j3, j4;
349 #ifdef BITSTATE
350 static long udmem;
351 #endif
352 static long A_depth = 0;
353 long depth = 0;
354 #if NCORE>1
355 long nr_handoffs = 0;
356 #endif
357 static uchar warned = 0, iterative = 0, exclusive = 0, like_java = 0, every_error = 0;
358 static uchar noasserts = 0, noends = 0, bounded = 0;
359 #if SYNC>0 && ASYNC==0
360 void set_recvs(void);
361 int no_recvs(int);
362 #endif
363 #if SYNC
364 #define IfNotBlocked if (boq != -1) continue;
365 #define UnBlock boq = -1
366 #else
367 #define IfNotBlocked /* cannot block */
368 #define UnBlock /* don't bother */
369 #endif
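/* boq records a pending rendezvous offer (-1 when none).  With SYNC>0,
 * IfNotBlocked makes other transitions skip their turn while an offer is
 * outstanding, and UnBlock resets boq to -1 (no rendezvous pending).
 */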
370
371 #ifdef BITSTATE
372 int (*bstore)(char *, int);
373 int bstore_reg(char *, int);
374 int bstore_mod(char *, int);
375 #endif
376 void active_procs(void);
377 void cleanup(void);
378 void do_the_search(void);
379 void find_shorter(int);
380 void iniglobals(void);
381 void stopped(int);
382 void wrapup(void);
383 int *grab_ints(int);
384 void ungrab_ints(int *, int);
385 #ifndef NOBOUNDCHECK
386 #define Index(x, y) Boundcheck(x, y, II, tt, t)
387 #else
388 #define Index(x, y) x
389 #endif
390 short Air[] = { (short) Air0, (short) Air1, (short) Air2, (short) Air3, (short) Air4, (short) Air5 };
391 int
392 addproc(int n)
393 { int j, h = now._nr_pr;
394 #ifndef NOCOMP
395 int k;
396 #endif
397 uchar *o_this = this;
398
399 #ifndef INLINE
400 if (TstOnly) return (h < MAXPROC);
401 #endif
402 #ifndef NOBOUNDCHECK
403 /* redefine Index only within this procedure */
404 #undef Index
405 #define Index(x, y) Boundcheck(x, y, 0, 0, 0)
406 #endif
407 if (h >= MAXPROC)
408 Uerror("too many processes");
409 switch (n) {
410 case 0: j = sizeof(P0); break;
411 case 1: j = sizeof(P1); break;
412 case 2: j = sizeof(P2); break;
413 case 3: j = sizeof(P3); break;
414 case 4: j = sizeof(P4); break;
415 case 5: j = sizeof(P5); break;
416 default: Uerror("bad proc - addproc");
417 }
418 if (vsize%WS)
419 proc_skip[h] = WS-(vsize%WS);
420 else
421 proc_skip[h] = 0;
422 #ifndef NOCOMP
423 for (k = vsize + (int) proc_skip[h]; k > vsize; k--)
424 Mask[k-1] = 1; /* align */
425 #endif
426 vsize += (int) proc_skip[h];
427 proc_offset[h] = vsize;
428 #ifdef SVDUMP
429 if (vprefix > 0)
430 { int dummy = 0;
431 write(svfd, (uchar *) &dummy, sizeof(int)); /* mark */
432 write(svfd, (uchar *) &h, sizeof(int));
433 write(svfd, (uchar *) &n, sizeof(int));
434 #if VECTORSZ>32000
435 write(svfd, (uchar *) &proc_offset[h], sizeof(int));
436 #else
437 write(svfd, (uchar *) &proc_offset[h], sizeof(short));
438 #endif
439 		write(svfd, (uchar *) &now, vprefix-4*sizeof(int)); /* pad */
440 }
441 #endif
442 now._nr_pr += 1;
443 if (fairness && ((int) now._nr_pr + 1 >= (8*NFAIR)/2))
444 { printf("pan: error: too many processes -- current");
445 printf(" max is %d procs (-DNFAIR=%d)\n",
446 (8*NFAIR)/2 - 2, NFAIR);
447 printf("\trecompile with -DNFAIR=%d\n",
448 NFAIR+1);
449 pan_exit(1);
450 }
451 vsize += j;
452 #ifndef NOVSZ
453 now._vsz = vsize;
454 #endif
455 #ifndef NOCOMP
456 for (k = 1; k <= Air[n]; k++)
457 Mask[vsize - k] = 1; /* pad */
458 Mask[vsize-j] = 1; /* _pid */
459 #endif
460 hmax = max(hmax, vsize);
461 if (vsize >= VECTORSZ)
462 { printf("pan: error, VECTORSZ too small, recompile pan.c");
463 printf(" with -DVECTORSZ=N with N>%d\n", (int) vsize);
464 Uerror("aborting");
465 }
466 memset((char *)pptr(h), 0, j);
467 this = pptr(h);
468 if (BASE > 0 && h > 0)
469 ((P0 *)this)->_pid = h-BASE;
470 else
471 ((P0 *)this)->_pid = h;
472 switch (n) {
473 case 5: /* np_ */
474 ((P5 *)pptr(h))->_t = 5;
475 ((P5 *)pptr(h))->_p = 0;
476 reached5[0] = 1;
477 accpstate[5][1] = 1;
478 break;
479 case 4: /* :init: */
480 ((P4 *)pptr(h))->_t = 4;
481 ((P4 *)pptr(h))->_p = 41; reached4[41]=1;
482 /* params: */
483 /* locals: */
484 ((P4 *)pptr(h))->i = 0;
485 ((P4 *)pptr(h))->j = 0;
486 ((P4 *)pptr(h))->sum = 0;
487 ((P4 *)pptr(h))->commit_sum = 0;
488 #ifdef VAR_RANGES
489 logval(":init::i", ((P4 *)pptr(h))->i);
490 logval(":init::j", ((P4 *)pptr(h))->j);
491 logval(":init::sum", ((P4 *)pptr(h))->sum);
492 logval(":init::commit_sum", ((P4 *)pptr(h))->commit_sum);
493 #endif
494 #ifdef HAS_CODE
495 locinit4(h);
496 #endif
497 break;
498 case 3: /* cleaner */
499 ((P3 *)pptr(h))->_t = 3;
500 ((P3 *)pptr(h))->_p = 8; reached3[8]=1;
501 /* params: */
502 /* locals: */
503 #ifdef VAR_RANGES
504 #endif
505 #ifdef HAS_CODE
506 locinit3(h);
507 #endif
508 break;
509 case 2: /* reader */
510 ((P2 *)pptr(h))->_t = 2;
511 ((P2 *)pptr(h))->_p = 26; reached2[26]=1;
512 /* params: */
513 /* locals: */
514 ((P2 *)pptr(h))->i = 0;
515 ((P2 *)pptr(h))->j = 0;
516 #ifdef VAR_RANGES
517 logval("reader:i", ((P2 *)pptr(h))->i);
518 logval("reader:j", ((P2 *)pptr(h))->j);
519 #endif
520 #ifdef HAS_CODE
521 locinit2(h);
522 #endif
523 break;
524 case 1: /* tracer */
525 ((P1 *)pptr(h))->_t = 1;
526 ((P1 *)pptr(h))->_p = 3; reached1[3]=1;
527 /* params: */
528 /* locals: */
529 ((P1 *)pptr(h))->size = 1;
530 ((P1 *)pptr(h))->prev_off = 0;
531 ((P1 *)pptr(h))->new_off = 0;
532 ((P1 *)pptr(h))->tmp_commit = 0;
533 ((P1 *)pptr(h))->i = 0;
534 ((P1 *)pptr(h))->j = 0;
535 #ifdef VAR_RANGES
536 logval("tracer:size", ((P1 *)pptr(h))->size);
537 logval("tracer:prev_off", ((P1 *)pptr(h))->prev_off);
538 logval("tracer:new_off", ((P1 *)pptr(h))->new_off);
539 logval("tracer:tmp_commit", ((P1 *)pptr(h))->tmp_commit);
540 logval("tracer:i", ((P1 *)pptr(h))->i);
541 logval("tracer:j", ((P1 *)pptr(h))->j);
542 #endif
543 #ifdef HAS_CODE
544 locinit1(h);
545 #endif
546 break;
547 case 0: /* switcher */
548 ((P0 *)pptr(h))->_t = 0;
549 ((P0 *)pptr(h))->_p = 11; reached0[11]=1;
550 /* params: */
551 /* locals: */
552 ((P0 *)pptr(h))->prev_off = 0;
553 ((P0 *)pptr(h))->new_off = 0;
554 ((P0 *)pptr(h))->tmp_commit = 0;
555 ((P0 *)pptr(h))->size = 0;
556 #ifdef VAR_RANGES
557 logval("switcher:prev_off", ((P0 *)pptr(h))->prev_off);
558 logval("switcher:new_off", ((P0 *)pptr(h))->new_off);
559 logval("switcher:tmp_commit", ((P0 *)pptr(h))->tmp_commit);
560 logval("switcher:size", ((P0 *)pptr(h))->size);
561 #endif
562 #ifdef HAS_CODE
563 locinit0(h);
564 #endif
565 break;
566 }
567 this = o_this;
568 return h-BASE;
569 #ifndef NOBOUNDCHECK
570 #undef Index
571 #define Index(x, y) Boundcheck(x, y, II, tt, t)
572 #endif
573 }
574
575 #if defined(BITSTATE) && defined(COLLAPSE)
576 /* just to allow compilation, to generate the error */
577 long col_p(int i, char *z) { return 0; }
578 long col_q(int i, char *z) { return 0; }
579 #endif
580 #ifndef BITSTATE
581 #ifdef COLLAPSE
582 long
583 col_p(int i, char *z)
584 { int j, k; unsigned long ordinal(char *, long, short);
585 char *x, *y;
586 P0 *ptr = (P0 *) pptr(i);
587 switch (ptr->_t) {
588 case 0: j = sizeof(P0); break;
589 case 1: j = sizeof(P1); break;
590 case 2: j = sizeof(P2); break;
591 case 3: j = sizeof(P3); break;
592 case 4: j = sizeof(P4); break;
593 case 5: j = sizeof(P5); break;
594 default: Uerror("bad proctype - collapse");
595 }
596 if (z) x = z; else x = scratch;
597 y = (char *) ptr; k = proc_offset[i];
598 for ( ; j > 0; j--, y++)
599 if (!Mask[k++]) *x++ = *y;
600 for (j = 0; j < WS-1; j++)
601 *x++ = 0;
602 x -= j;
603 if (z) return (long) (x - z);
604 return ordinal(scratch, x-scratch, (short) (2+ptr->_t));
605 }
606 #endif
607 #endif
608 void
609 run(void)
610 { /* int i; */
611 memset((char *)&now, 0, sizeof(State));
612 vsize = (unsigned long) (sizeof(State) - VECTORSZ);
613 #ifndef NOVSZ
614 now._vsz = vsize;
615 #endif
616 /* optional provisioning statements, e.g. to */
617 /* set hidden variables, used as constants */
618 #ifdef PROV
619 #include PROV
620 #endif
621 settable();
622 Maxbody = max(Maxbody, ((int) sizeof(P0)));
623 Maxbody = max(Maxbody, ((int) sizeof(P1)));
624 Maxbody = max(Maxbody, ((int) sizeof(P2)));
625 Maxbody = max(Maxbody, ((int) sizeof(P3)));
626 Maxbody = max(Maxbody, ((int) sizeof(P4)));
627 Maxbody = max(Maxbody, ((int) sizeof(P5)));
628 reached[0] = reached0;
629 reached[1] = reached1;
630 reached[2] = reached2;
631 reached[3] = reached3;
632 reached[4] = reached4;
633 reached[5] = reached5;
634 accpstate[0] = (uchar *) emalloc(nstates0);
635 accpstate[1] = (uchar *) emalloc(nstates1);
636 accpstate[2] = (uchar *) emalloc(nstates2);
637 accpstate[3] = (uchar *) emalloc(nstates3);
638 accpstate[4] = (uchar *) emalloc(nstates4);
639 accpstate[5] = (uchar *) emalloc(nstates5);
640 progstate[0] = (uchar *) emalloc(nstates0);
641 progstate[1] = (uchar *) emalloc(nstates1);
642 progstate[2] = (uchar *) emalloc(nstates2);
643 progstate[3] = (uchar *) emalloc(nstates3);
644 progstate[4] = (uchar *) emalloc(nstates4);
645 progstate[5] = (uchar *) emalloc(nstates5);
646 loopstate0 = loopstate[0] = (uchar *) emalloc(nstates0);
647 loopstate1 = loopstate[1] = (uchar *) emalloc(nstates1);
648 loopstate2 = loopstate[2] = (uchar *) emalloc(nstates2);
649 loopstate3 = loopstate[3] = (uchar *) emalloc(nstates3);
650 loopstate4 = loopstate[4] = (uchar *) emalloc(nstates4);
651 loopstate5 = loopstate[5] = (uchar *) emalloc(nstates5);
652 stopstate[0] = (uchar *) emalloc(nstates0);
653 stopstate[1] = (uchar *) emalloc(nstates1);
654 stopstate[2] = (uchar *) emalloc(nstates2);
655 stopstate[3] = (uchar *) emalloc(nstates3);
656 stopstate[4] = (uchar *) emalloc(nstates4);
657 stopstate[5] = (uchar *) emalloc(nstates5);
658 visstate[0] = (uchar *) emalloc(nstates0);
659 visstate[1] = (uchar *) emalloc(nstates1);
660 visstate[2] = (uchar *) emalloc(nstates2);
661 visstate[3] = (uchar *) emalloc(nstates3);
662 visstate[4] = (uchar *) emalloc(nstates4);
663 visstate[5] = (uchar *) emalloc(nstates5);
664 mapstate[0] = (short *) emalloc(nstates0 * sizeof(short));
665 mapstate[1] = (short *) emalloc(nstates1 * sizeof(short));
666 mapstate[2] = (short *) emalloc(nstates2 * sizeof(short));
667 mapstate[3] = (short *) emalloc(nstates3 * sizeof(short));
668 mapstate[4] = (short *) emalloc(nstates4 * sizeof(short));
669 mapstate[5] = (short *) emalloc(nstates5 * sizeof(short));
670 #ifdef HAS_CODE
671 #ifdef HAS_CODE
672 #ifdef HAS_CODE
673 #ifdef HAS_CODE
674 #ifdef HAS_CODE
675 #ifdef HAS_CODE
676 NrStates[0] = nstates0;
677 NrStates[1] = nstates1;
678 NrStates[2] = nstates2;
679 NrStates[3] = nstates3;
680 NrStates[4] = nstates4;
681 NrStates[5] = nstates5;
682 #endif
683 #endif
684 #endif
685 #endif
686 #endif
687 #endif
688 stopstate[0][endstate0] = 1;
689 stopstate[1][endstate1] = 1;
690 stopstate[2][endstate2] = 1;
691 stopstate[3][endstate3] = 1;
692 stopstate[4][endstate4] = 1;
693 stopstate[5][endstate5] = 1;
694 stopstate[1][48] = 1;
695 retrans(0, nstates0, start0, src_ln0, reached0, loopstate0);
696 retrans(1, nstates1, start1, src_ln1, reached1, loopstate1);
697 retrans(2, nstates2, start2, src_ln2, reached2, loopstate2);
698 retrans(3, nstates3, start3, src_ln3, reached3, loopstate3);
699 retrans(4, nstates4, start4, src_ln4, reached4, loopstate4);
700 if (state_tables)
701 { printf("\nTransition Type: ");
702 printf("A=atomic; D=d_step; L=local; G=global\n");
703 printf("Source-State Labels: ");
704 printf("p=progress; e=end; a=accept;\n");
705 #ifdef MERGED
706 printf("Note: statement merging was used. Only the first\n");
707 printf(" stmnt executed in each merge sequence is shown\n");
708 printf(" (use spin -a -o3 to disable statement merging)\n");
709 #endif
710 pan_exit(0);
711 }
712 iniglobals();
713 #if defined(VERI) && !defined(NOREDUCE) && !defined(NP)
714 if (!state_tables
715 #ifdef HAS_CODE
716 && !readtrail
717 #endif
718 #if NCORE>1
719 && core_id == 0
720 #endif
721 )
722 { printf("warning: for p.o. reduction to be valid ");
723 printf("the never claim must be stutter-invariant\n");
724 printf("(never claims generated from LTL ");
725 printf("formulae are stutter-invariant)\n");
726 }
727 #endif
728 UnBlock; /* disable rendez-vous */
729 #ifdef BITSTATE
730 if (udmem)
731 { udmem *= 1024L*1024L;
732 #if NCORE>1
733 if (!readtrail)
734 { void init_SS(unsigned long);
735 init_SS((unsigned long) udmem);
736 } else
737 #endif
738 SS = (uchar *) emalloc(udmem);
739 bstore = bstore_mod;
740 } else
741 #if NCORE>1
742 { void init_SS(unsigned long);
743 init_SS(ONE_L<<(ssize-3));
744 }
745 #else
746 SS = (uchar *) emalloc(ONE_L<<(ssize-3));
747 #endif
748 #else
749 hinit();
750 #endif
751 #if defined(FULLSTACK) && defined(BITSTATE)
752 onstack_init();
753 #endif
754 #if defined(CNTRSTACK) && !defined(BFS)
755 LL = (uchar *) emalloc(ONE_L<<(ssize-3));
756 #endif
757 stack = ( Stack *) emalloc(sizeof(Stack));
758 svtack = (Svtack *) emalloc(sizeof(Svtack));
759 /* a place to point for Pptr of non-running procs: */
760 noptr = (uchar *) emalloc(Maxbody * sizeof(char));
761 #ifdef SVDUMP
762 if (vprefix > 0)
763 write(svfd, (uchar *) &vprefix, sizeof(int));
764 #endif
765 #ifdef VERI
766 Addproc(VERI); /* never - pid = 0 */
767 #endif
768 active_procs(); /* started after never */
769 #ifdef EVENT_TRACE
770 now._event = start_event;
771 reached[EVENT_TRACE][start_event] = 1;
772 #endif
773 #ifdef HAS_CODE
774 globinit();
775 #endif
776 #ifdef BITSTATE
777 go_again:
778 #endif
779 do_the_search();
780 #ifdef BITSTATE
781 if (--Nrun > 0 && HASH_CONST[++HASH_NR])
782 { printf("Run %d:\n", HASH_NR);
783 wrap_stats();
784 printf("\n");
785 memset(SS, 0, ONE_L<<(ssize-3));
786 #ifdef CNTRSTACK
787 memset(LL, 0, ONE_L<<(ssize-3));
788 #endif
789 #ifdef FULLSTACK
790 memset((uchar *) S_Tab, 0,
791 maxdepth*sizeof(struct H_el *));
792 #endif
793 nstates=nlinks=truncs=truncs2=ngrabs = 0;
794 nlost=nShadow=hcmp = 0;
795 Fa=Fh=Zh=Zn = 0;
796 PUT=PROBE=ZAPS=Ccheck=Cholds = 0;
797 goto go_again;
798 }
799 #endif
800 }
801 #ifdef HAS_PROVIDED
802 int provided(int, uchar, int, Trans *);
803 #endif
804 #if NCORE>1
805 #define GLOBAL_LOCK (0)
806 #ifndef CS_N
807 #define CS_N (256*NCORE)
808 #endif
809 #ifdef NGQ
810 #define NR_QS (NCORE)
811 #define CS_NR (CS_N+1) /* 2^N + 1, nr critical sections */
812 #define GQ_RD GLOBAL_LOCK
813 #define GQ_WR GLOBAL_LOCK
814 #define CS_ID (1 + (int) (j1 & (CS_N-1))) /* mask: 2^N - 1, zero reserved */
815 #define QLOCK(n) (1+n)
816 #else
817 #define NR_QS (NCORE+1)
818 #define CS_NR (CS_N+3)
819 #define GQ_RD (1)
820 #define GQ_WR (2)
821 #define CS_ID (3 + (int) (j1 & (CS_N-1)))
822 #define QLOCK(n) (3+n)
823 #endif
824
825 void e_critical(int);
826 void x_critical(int);
827
828 #ifndef SEP_STATE
829 #define enter_critical(w) e_critical(w)
830 #define leave_critical(w) x_critical(w)
831 #else
832 #ifdef NGQ
833 #define enter_critical(w) { if (w < 1+NCORE) e_critical(w); }
834 #define leave_critical(w) { if (w < 1+NCORE) x_critical(w); }
835 #else
836 #define enter_critical(w) { if (w < 3+NCORE) e_critical(w); }
837 #define leave_critical(w) { if (w < 3+NCORE) x_critical(w); }
838 #endif
839 #endif
840
841 int
842 cpu_printf(const char *fmt, ...)
843 { va_list args;
844 enter_critical(GLOBAL_LOCK); /* printing */
845 printf("cpu%d: ", core_id);
846 fflush(stdout);
847 va_start(args, fmt);
848 vprintf(fmt, args);
849 va_end(args);
850 fflush(stdout);
851 leave_critical(GLOBAL_LOCK);
852 return 1;
853 }
854 #else
855 int
856 cpu_printf(const char *fmt, ...)
857 { va_list args;
858 va_start(args, fmt);
859 vprintf(fmt, args);
860 va_end(args);
861 return 1;
862 }
863 #endif
864 int
865 Printf(const char *fmt, ...)
866 {	/* Make sure the args to Printf
867 	 * are always evaluated (e.g., they
868 	 * could contain a run stmnt),
869 	 * but do not generate the output
870 	 * during verification runs
871 	 * unless explicitly wanted.
872 	 * If this fails on your system,
873 	 * compile SPIN itself with -DPRINTF,
874 	 * and this code is not generated.
875 	 */
876 #ifdef HAS_CODE
877 if (readtrail)
878 { va_list args;
879 va_start(args, fmt);
880 vprintf(fmt, args);
881 va_end(args);
882 return 1;
883 }
884 #endif
885 #ifdef PRINTF
886 va_list args;
887 va_start(args, fmt);
888 vprintf(fmt, args);
889 va_end(args);
890 #endif
891 return 1;
892 }
893 extern void printm(int);
894 #ifndef SC
895 #define getframe(i)	(&trail[i])
896 #else
897 static long HHH, DDD, hiwater;
898 static long CNT1, CNT2;
899 static int stackwrite;
900 static int stackread;
901 static Trail frameptr;
902 Trail *
903 getframe(int d)
904 {
905 if (CNT1 == CNT2)
906 return &trail[d];
907
908 if (d >= (CNT1-CNT2)*DDD)
909 return &trail[d - (CNT1-CNT2)*DDD];
910
911 if (!stackread
912 && (stackread = open(stackfile, 0)) < 0)
913 { printf("getframe: cannot open %s\n", stackfile);
914 wrapup();
915 }
916 if (lseek(stackread, d* (off_t) sizeof(Trail), SEEK_SET) == -1
917 || read(stackread, &frameptr, sizeof(Trail)) != sizeof(Trail))
918 { printf("getframe: frame read error\n");
919 wrapup();
920 }
921 return &frameptr;
922 }
923 #endif
924 #if !defined(SAFETY) && !defined(BITSTATE)
925 #if !defined(FULLSTACK) || defined(MA)
926 #define depth_of(x) A_depth /* an estimate */
927 #else
928 int
929 depth_of(struct H_el *s)
930 { Trail *t; int d;
931 for (d = 0; d <= A_depth; d++)
932 { t = getframe(d);
933 if (s == t->ostate)
934 return d;
935 }
936 printf("pan: cannot happen, depth_of\n");
937 return depthfound;
938 }
939 #endif
940 #endif
941 #if NCORE>1
942 extern void cleanup_shm(int);
943 volatile unsigned int *search_terminated; /* to signal early termination */
944 #endif
945 void
946 pan_exit(int val)
947 { void stop_timer(void);
948 if (signoff)
949 { printf("--end of output--\n");
950 }
951 #if NCORE>1
952 if (search_terminated != NULL)
953 { *search_terminated |= 1; /* pan_exit */
954 }
955 #ifdef USE_DISK
956 { void dsk_stats(void);
957 dsk_stats();
958 }
959 #endif
960 if (!state_tables && !readtrail)
961 { cleanup_shm(1);
962 }
963 #endif
964 if (val == 2)
965 { val = 0;
966 } else
967 { stop_timer();
968 }
969 exit(val);
970 }
971 #ifdef HAS_CODE
972 char *
973 transmognify(char *s)
974 { char *v, *w;
975 static char buf[2][2048];
976 int i, toggle = 0;
977 if (!s || strlen(s) > 2047) return s;
978 memset(buf[0], 0, 2048);
979 memset(buf[1], 0, 2048);
980 strcpy(buf[toggle], s);
981 while ((v = strstr(buf[toggle], "{c_code")))
982 { *v = '\0'; v++;
983 strcpy(buf[1-toggle], buf[toggle]);
984 for (w = v; *w != '}' && *w != '\0'; w++) /* skip */;
985 if (*w != '}') return s;
986 *w = '\0'; w++;
987 for (i = 0; code_lookup[i].c; i++)
988 if (strcmp(v, code_lookup[i].c) == 0
989 && strlen(v) == strlen(code_lookup[i].c))
990 { if (strlen(buf[1-toggle])
991 + strlen(code_lookup[i].t)
992 + strlen(w) > 2047)
993 return s;
994 strcat(buf[1-toggle], code_lookup[i].t);
995 break;
996 }
997 strcat(buf[1-toggle], w);
998 toggle = 1 - toggle;
999 }
1000 buf[toggle][2047] = '\0';
1001 return buf[toggle];
1002 }
1003 #else
1004 char * transmognify(char *s) { return s; }
1005 #endif
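/* transmognify() rewrites embedded "{c_code...}" fragments in a transition's
 * source text using the code_lookup table, alternating between two static
 * buffers; if a fragment is unterminated or the result would not fit, the
 * original string is returned unchanged.
 */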
1006 #ifdef HAS_CODE
1007 void
1008 add_src_txt(int ot, int tt)
1009 { Trans *t;
1010 char *q;
1011
1012 for (t = trans[ot][tt]; t; t = t->nxt)
1013 { printf("\t\t");
1014 q = transmognify(t->tp);
1015 for ( ; q && *q; q++)
1016 if (*q == '\n')
1017 printf("\\n");
1018 else
1019 putchar(*q);
1020 printf("\n");
1021 }
1022 }
1023 void
1024 wrap_trail(void)
1025 { static int wrap_in_progress = 0;
1026 int i; short II;
1027 P0 *z;
1028
1029 if (wrap_in_progress++) return;
1030
1031 printf("spin: trail ends after %ld steps\n", depth);
1032 if (onlyproc >= 0)
1033 { if (onlyproc >= now._nr_pr) { pan_exit(0); }
1034 II = onlyproc;
1035 z = (P0 *)pptr(II);
1036 printf("%3ld: proc %d (%s) ",
1037 depth, II, procname[z->_t]);
1038 for (i = 0; src_all[i].src; i++)
1039 if (src_all[i].tp == (int) z->_t)
1040 { printf(" line %3d",
1041 src_all[i].src[z->_p]);
1042 break;
1043 }
1044 printf(" (state %2d)", z->_p);
1045 if (!stopstate[z->_t][z->_p])
1046 printf(" (invalid end state)");
1047 printf("\n");
1048 add_src_txt(z->_t, z->_p);
1049 pan_exit(0);
1050 }
1051 printf("#processes %d:\n", now._nr_pr);
1052 if (depth < 0) depth = 0;
1053 for (II = 0; II < now._nr_pr; II++)
1054 { z = (P0 *)pptr(II);
1055 printf("%3ld: proc %d (%s) ",
1056 depth, II, procname[z->_t]);
1057 for (i = 0; src_all[i].src; i++)
1058 if (src_all[i].tp == (int) z->_t)
1059 { printf(" line %3d",
1060 src_all[i].src[z->_p]);
1061 break;
1062 }
1063 printf(" (state %2d)", z->_p);
1064 if (!stopstate[z->_t][z->_p])
1065 printf(" (invalid end state)");
1066 printf("\n");
1067 add_src_txt(z->_t, z->_p);
1068 }
1069 c_globals();
1070 for (II = 0; II < now._nr_pr; II++)
1071 { z = (P0 *)pptr(II);
1072 c_locals(II, z->_t);
1073 }
1074 #ifdef ON_EXIT
1075 ON_EXIT;
1076 #endif
1077 pan_exit(0);
1078 }
1079 FILE *
1080 findtrail(void)
1081 { FILE *fd;
1082 char fnm[512], *q;
1083 char MyFile[512];
1084 char MySuffix[16];
1085 int try_core;
1086 int candidate_files;
1087
1088 if (trailfilename != NULL)
1089 { fd = fopen(trailfilename, "r");
1090 if (fd == NULL)
1091 { printf("pan: cannot find %s\n", trailfilename);
1092 pan_exit(1);
1093 } /* else */
1094 goto success;
1095 }
1096 talk:
1097 try_core = 1;
1098 candidate_files = 0;
1099 tprefix = "trail";
1100 strcpy(MyFile, TrailFile);
1101 do { /* see if there's more than one possible trailfile */
1102 if (whichtrail)
1103 { sprintf(fnm, "%s%d.%s",
1104 MyFile, whichtrail, tprefix);
1105 fd = fopen(fnm, "r");
1106 if (fd != NULL)
1107 { candidate_files++;
1108 if (verbose==100)
1109 printf("trail%d: %s\n",
1110 candidate_files, fnm);
1111 fclose(fd);
1112 }
1113 if ((q = strchr(MyFile, '.')) != NULL)
1114 { *q = '\0';
1115 sprintf(fnm, "%s%d.%s",
1116 MyFile, whichtrail, tprefix);
1117 *q = '.';
1118 fd = fopen(fnm, "r");
1119 if (fd != NULL)
1120 { candidate_files++;
1121 if (verbose==100)
1122 printf("trail%d: %s\n",
1123 candidate_files, fnm);
1124 fclose(fd);
1125 } }
1126 } else
1127 { sprintf(fnm, "%s.%s", MyFile, tprefix);
1128 fd = fopen(fnm, "r");
1129 if (fd != NULL)
1130 { candidate_files++;
1131 if (verbose==100)
1132 printf("trail%d: %s\n",
1133 candidate_files, fnm);
1134 fclose(fd);
1135 }
1136 if ((q = strchr(MyFile, '.')) != NULL)
1137 { *q = '\0';
1138 sprintf(fnm, "%s.%s", MyFile, tprefix);
1139 *q = '.';
1140 fd = fopen(fnm, "r");
1141 if (fd != NULL)
1142 { candidate_files++;
1143 if (verbose==100)
1144 printf("trail%d: %s\n",
1145 candidate_files, fnm);
1146 fclose(fd);
1147 } } }
1148 tprefix = MySuffix;
1149 sprintf(tprefix, "cpu%d_trail", try_core++);
1150 } while (try_core <= NCORE);
1151
1152 if (candidate_files != 1)
1153 { if (verbose != 100)
1154 { printf("error: there are %d trail files:\n",
1155 candidate_files);
1156 verbose = 100;
1157 goto talk;
1158 } else
1159 { printf("pan: rm or mv all except one\n");
1160 exit(1);
1161 } }
1162 try_core = 1;
1163 strcpy(MyFile, TrailFile); /* restore */
1164 tprefix = "trail";
1165 try_again:
1166 if (whichtrail)
1167 { sprintf(fnm, "%s%d.%s", MyFile, whichtrail, tprefix);
1168 fd = fopen(fnm, "r");
1169 if (fd == NULL && (q = strchr(MyFile, '.')))
1170 { *q = '\0';
1171 sprintf(fnm, "%s%d.%s",
1172 MyFile, whichtrail, tprefix);
1173 *q = '.';
1174 fd = fopen(fnm, "r");
1175 }
1176 } else
1177 { sprintf(fnm, "%s.%s", MyFile, tprefix);
1178 fd = fopen(fnm, "r");
1179 if (fd == NULL && (q = strchr(MyFile, '.')))
1180 { *q = '\0';
1181 sprintf(fnm, "%s.%s", MyFile, tprefix);
1182 *q = '.';
1183 fd = fopen(fnm, "r");
1184 } }
1185 if (fd == NULL)
1186 { if (try_core < NCORE)
1187 { tprefix = MySuffix;
1188 sprintf(tprefix, "cpu%d_trail", try_core++);
1189 goto try_again;
1190 }
1191 printf("pan: cannot find trailfile %s\n", fnm);
1192 pan_exit(1);
1193 }
1194 success:
1195 #if NCORE>1 && defined(SEP_STATE)
1196 { void set_root(void); /* for partial traces from local root */
1197 set_root();
1198 }
1199 #endif
1200 return fd;
1201 }
1202
1203 uchar do_transit(Trans *, short);
1204
1205 void
1206 getrail(void)
1207 { FILE *fd;
1208 char *q;
1209 int i, t_id, lastnever=-1; short II;
1210 Trans *t;
1211 P0 *z;
1212
1213 fd = findtrail(); /* exits if unsuccessful */
1214 while (fscanf(fd, "%ld:%d:%d\n", &depth, &i, &t_id) == 3)
1215 { if (depth == -1)
1216 printf("<<<<<START OF CYCLE>>>>>\n");
1217 if (depth < 0)
1218 continue;
1219 if (i > now._nr_pr)
1220 { printf("pan: Error, proc %d invalid pid ", i);
1221 printf("transition %d\n", t_id);
1222 break;
1223 }
1224 II = i;
1225 z = (P0 *)pptr(II);
1226 for (t = trans[z->_t][z->_p]; t; t = t->nxt)
1227 if (t->t_id == (T_ID) t_id)
1228 break;
1229 if (!t)
1230 { for (i = 0; i < NrStates[z->_t]; i++)
1231 { t = trans[z->_t][i];
1232 if (t && t->t_id == (T_ID) t_id)
1233 { printf("\tRecovered at state %d\n", i);
1234 z->_p = i;
1235 goto recovered;
1236 } }
1237 printf("pan: Error, proc %d type %d state %d: ",
1238 II, z->_t, z->_p);
1239 printf("transition %d not found\n", t_id);
1240 printf("pan: list of possible transitions in this process:\n");
1241 if (z->_t >= 0 && z->_t <= _NP_)
1242 for (t = trans[z->_t][z->_p]; t; t = t->nxt)
1243 printf(" t_id %d -- case %d, [%s]\n",
1244 t->t_id, t->forw, t->tp);
1245 break; /* pan_exit(1); */
1246 }
1247 recovered:
1248 q = transmognify(t->tp);
1249 if (gui) simvals[0] = '\0';
1250 this = pptr(II);
1251 trpt->tau |= 1;
1252 if (!do_transit(t, II))
1253 { if (onlyproc >= 0 && II != onlyproc)
1254 goto moveon;
1255 printf("pan: error, next transition UNEXECUTABLE on replay\n");
1256 printf(" most likely causes: missing c_track statements\n");
1257 printf(" or illegal side-effects in c_expr statements\n");
1258 }
1259 if (onlyproc >= 0 && II != onlyproc)
1260 goto moveon;
1261 if (verbose)
1262 { printf("%3ld: proc %2d (%s) ", depth, II, procname[z->_t]);
1263 for (i = 0; src_all[i].src; i++)
1264 if (src_all[i].tp == (int) z->_t)
1265 { printf(" line %3d \"%s\" ",
1266 src_all[i].src[z->_p], PanSource);
1267 break;
1268 }
1269 printf("(state %d) trans {%d,%d} [%s]\n",
1270 z->_p, t_id, t->forw, q?q:"");
1271 c_globals();
1272 for (i = 0; i < now._nr_pr; i++)
1273 { c_locals(i, ((P0 *)pptr(i))->_t);
1274 }
1275 } else
1276 if (strcmp(procname[z->_t], ":never:") == 0)
1277 { if (lastnever != (int) z->_p)
1278 { for (i = 0; src_all[i].src; i++)
1279 if (src_all[i].tp == (int) z->_t)
1280 { printf("MSC: ~G %d\n",
1281 src_all[i].src[z->_p]);
1282 break;
1283 }
1284 if (!src_all[i].src)
1285 printf("MSC: ~R %d\n", z->_p);
1286 }
1287 lastnever = z->_p;
1288 goto sameas;
1289 } else
1290 if (strcmp(procname[z->_t], ":np_:") != 0)
1291 {
1292 sameas: if (no_rck) goto moveon;
1293 if (coltrace)
1294 { printf("%ld: ", depth);
1295 for (i = 0; i < II; i++)
1296 printf("\t\t");
1297 printf("%s(%d):", procname[z->_t], II);
1298 printf("[%s]\n", q?q:"");
1299 } else if (!silent)
1300 { if (strlen(simvals) > 0) {
1301 printf("%3ld: proc %2d (%s)",
1302 depth, II, procname[z->_t]);
1303 for (i = 0; src_all[i].src; i++)
1304 if (src_all[i].tp == (int) z->_t)
1305 { printf(" line %3d \"%s\" ",
1306 src_all[i].src[z->_p], PanSource);
1307 break;
1308 }
1309 printf("(state %d) [values: %s]\n", z->_p, simvals);
1310 }
1311 printf("%3ld: proc %2d (%s)",
1312 depth, II, procname[z->_t]);
1313 for (i = 0; src_all[i].src; i++)
1314 if (src_all[i].tp == (int) z->_t)
1315 { printf(" line %3d \"%s\" ",
1316 src_all[i].src[z->_p], PanSource);
1317 break;
1318 }
1319 printf("(state %d) [%s]\n", z->_p, q?q:"");
1320 /* printf("\n"); */
1321 } }
1322 moveon: z->_p = t->st;
1323 }
1324 wrap_trail();
1325 }
1326 #endif
1327 int
1328 f_pid(int pt)
1329 { int i;
1330 P0 *z;
1331 for (i = 0; i < now._nr_pr; i++)
1332 { z = (P0 *)pptr(i);
1333 if (z->_t == (unsigned) pt)
1334 return BASE+z->_pid;
1335 }
1336 return -1;
1337 }
1338 #ifdef VERI
1339 void check_claim(int);
1340 #endif
1341
1342 #if !defined(HASH64) && !defined(HASH32)
1343 #define HASH32
1344 #endif
1345 #if defined(HASH32) && defined(SAFETY) && !defined(SFH) && !defined(SPACE)
1346 #define SFH
1347 #endif
1348 #if defined(SFH) && (defined(BITSTATE) || defined(COLLAPSE) || defined(HC) || defined(HASH64))
1349 #undef SFH
1350 #endif
1351 #if defined(SFH) && !defined(NOCOMP)
1352 #define NOCOMP /* go for speed */
1353 #endif
1354 #if NCORE>1 && !defined(GLOB_HEAP)
1355 #define SEP_HEAP /* version 5.1.2 */
1356 #endif
1357
1358 #ifdef BITSTATE
1359 int
1360 bstore_mod(char *v, int n) /* hasharray size not a power of two */
1361 { unsigned long x, y;
1362 unsigned int i = 1;
1363
1364 d_hash((uchar *) v, n); /* sets j3, j4, K1, K2 */
1365 x = K1; y = j3;
1366 for (;;)
1367 { if (!(SS[x%udmem]&(1<<y))) break;
1368 if (i == hfns) {
1369 #ifdef DEBUG
1370 printf("Old bitstate\n");
1371 #endif
1372 return 1;
1373 }
1374 x = (x + K2 + i);
1375 y = (y + j4) & 7;
1376 i++;
1377 }
1378 #ifdef RANDSTOR
1379 if (rand()%100 > RANDSTOR) return 0;
1380 #endif
1381 for (;;)
1382 { SS[x%udmem] |= (1<<y);
1383 if (i == hfns) break;
1384 x = (x + K2 + i);
1385 y = (y + j4) & 7;
1386 i++;
1387 }
1388 #ifdef DEBUG
1389 printf("New bitstate\n");
1390 #endif
1391 if (now._a_t&1)
1392 { nShadow++;
1393 }
1394 return 0;
1395 }
1396 int
1397 bstore_reg(char *v, int n) /* extended hashing, Peter Dillinger, 2004 */
1398 { unsigned long x, y;
1399 unsigned int i = 1;
1400
1401 d_hash((uchar *) v, n); /* sets j1-j4 */
1402 x = j2; y = j3;
1403 for (;;)
1404 { if (!(SS[x]&(1<<y))) break;
1405 if (i == hfns) {
1406 #ifdef DEBUG
1407 printf("Old bitstate\n");
1408 #endif
1409 return 1;
1410 }
1411 x = (x + j1 + i) & nmask;
1412 y = (y + j4) & 7;
1413 i++;
1414 }
1415 #ifdef RANDSTOR
1416 if (rand()%100 > RANDSTOR) return 0;
1417 #endif
1418 for (;;)
1419 { SS[x] |= (1<<y);
1420 if (i == hfns) break;
1421 x = (x + j1 + i) & nmask;
1422 y = (y + j4) & 7;
1423 i++;
1424 }
1425 #ifdef DEBUG
1426 printf("New bitstate\n");
1427 #endif
1428 if (now._a_t&1)
1429 { nShadow++;
1430 }
1431 return 0;
1432 }
1433 #endif
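/* Both bitstate variants above follow the same scheme: d_hash() derives
 * several independent hash values from the state vector, which are probed
 * at hfns bit positions (default 3).  The state counts as previously
 * visited only if every probed bit is already set; otherwise all hfns bits
 * are set and 0 is returned, so the state is explored.
 */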
1434 unsigned long TMODE = 0666; /* file permission bits for trail files */
1435
1436 int trcnt=1;
1437 char snap[64], fnm[512];
1438
1439 int
1440 make_trail(void)
1441 { int fd;
1442 char *q;
1443 char MyFile[512];
1444 int w_flags = O_CREAT|O_WRONLY|O_TRUNC;
1445
1446 if (exclusive == 1 && iterative == 0)
1447 { w_flags |= O_EXCL;
1448 }
1449
1450 q = strrchr(TrailFile, '/');
1451 if (q == NULL) q = TrailFile; else q++;
1452 strcpy(MyFile, q); /* TrailFile is not a writable string */
1453
1454 if (iterative == 0 && Nr_Trails++ > 0)
1455 { sprintf(fnm, "%s%d.%s",
1456 MyFile, Nr_Trails-1, tprefix);
1457 } else
1458 {
1459 #ifdef PUTPID
1460 sprintf(fnm, "%s%d.%s", MyFile, getpid(), tprefix);
1461 #else
1462 sprintf(fnm, "%s.%s", MyFile, tprefix);
1463 #endif
1464 }
1465 if ((fd = open(fnm, w_flags, TMODE)) < 0)
1466 { if ((q = strchr(MyFile, '.')))
1467 { *q = '\0';
1468 if (iterative == 0 && Nr_Trails-1 > 0)
1469 sprintf(fnm, "%s%d.%s",
1470 MyFile, Nr_Trails-1, tprefix);
1471 else
1472 sprintf(fnm, "%s.%s", MyFile, tprefix);
1473 *q = '.';
1474 fd = open(fnm, w_flags, TMODE);
1475 } }
1476 if (fd < 0)
1477 { printf("pan: cannot create %s\n", fnm);
1478 perror("cause");
1479 } else
1480 {
1481 #if NCORE>1 && (defined(SEP_STATE) || !defined(FULL_TRAIL))
1482 void write_root(void);
1483 write_root();
1484 #else
1485 printf("pan: wrote %s\n", fnm);
1486 #endif
1487 }
1488 return fd;
1489 }
1490
1491 #ifndef FREQ
1492 #define FREQ (1000000)
1493 #endif
1494 #ifdef BFS
1495 #define Q_PROVISO
1496 #ifndef INLINE_REV
1497 #define INLINE_REV
1498 #endif
1499
1500 typedef struct SV_Hold {
1501 State *sv;
1502 int sz;
1503 struct SV_Hold *nxt;
1504 } SV_Hold;
1505
1506 typedef struct EV_Hold {
1507 char *sv;
1508 int sz;
1509 int nrpr;
1510 int nrqs;
1511 char *po;
1512 char *qo;
1513 char *ps, *qs;
1514 struct EV_Hold *nxt;
1515 } EV_Hold;
1516
1517 typedef struct BFS_Trail {
1518 Trail *frame;
1519 SV_Hold *onow;
1520 EV_Hold *omask;
1521 #ifdef Q_PROVISO
1522 struct H_el *lstate;
1523 #endif
1524 short boq;
1525 struct BFS_Trail *nxt;
1526 } BFS_Trail;
1527
1528 BFS_Trail *bfs_trail, *bfs_bot, *bfs_free;
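/* bfs_trail is the head of the breadth-first work queue (next entry to
 * expand), bfs_bot its tail (push_bfs() appends there), and bfs_free a
 * recycle list of queue entries reused by get_bfs_frame().
 */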
1529
1530 SV_Hold *svhold, *svfree;
1531
1532 #ifdef BFS_DISK
1533 #ifndef BFS_LIMIT
1534 #define BFS_LIMIT 100000
1535 #endif
1536 #ifndef BFS_DSK_LIMIT
1537 #define BFS_DSK_LIMIT 1000000
1538 #endif
1539 #if defined(WIN32) || defined(WIN64)
1540 #define RFLAGS (O_RDONLY|O_BINARY)
1541 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)
1542 #else
1543 #define RFLAGS (O_RDONLY)
1544 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC)
1545 #endif
1546 long bfs_size_limit;
1547 int bfs_dsk_write = -1;
1548 int bfs_dsk_read = -1;
1549 long bfs_dsk_writes, bfs_dsk_reads;
1550 int bfs_dsk_seqno_w, bfs_dsk_seqno_r;
1551 #endif
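/* With -DBFS_DISK, once more than BFS_LIMIT state vectors are held in
 * memory, getsv() spills further vectors to pan_bfs_<n>.tmp files (rotated
 * every BFS_DSK_LIMIT writes) and pop_bfs() streams them back in queue
 * order, unlinking each temporary file after it has been consumed.
 */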
1552
1553 uchar do_reverse(Trans *, short, uchar);
1554 void snapshot(void);
1555
1556 SV_Hold *
1557 getsv(int n)
1558 { SV_Hold *h = (SV_Hold *) 0, *oh;
1559
1560 oh = (SV_Hold *) 0;
1561 for (h = svfree; h; oh = h, h = h->nxt)
1562 { if (n == h->sz)
1563 { if (!oh)
1564 svfree = h->nxt;
1565 else
1566 oh->nxt = h->nxt;
1567 h->nxt = (SV_Hold *) 0;
1568 break;
1569 }
1570 if (n < h->sz)
1571 { h = (SV_Hold *) 0;
1572 break;
1573 }
1574 /* else continue */
1575 }
1576
1577 if (!h)
1578 { h = (SV_Hold *) emalloc(sizeof(SV_Hold));
1579 h->sz = n;
1580 #ifdef BFS_DISK
1581 if (bfs_size_limit >= BFS_LIMIT)
1582 { h->sv = (State *) 0; /* means: read disk */
1583 bfs_dsk_writes++; /* count */
1584 if (bfs_dsk_write < 0 /* file descriptor */
1585 || bfs_dsk_writes%BFS_DSK_LIMIT == 0)
1586 { char dsk_nm[32];
1587 if (bfs_dsk_write >= 0)
1588 { (void) close(bfs_dsk_write);
1589 }
1590 sprintf(dsk_nm, "pan_bfs_%d.tmp", bfs_dsk_seqno_w++);
1591 bfs_dsk_write = open(dsk_nm, WFLAGS, 0644);
1592 if (bfs_dsk_write < 0)
1593 { Uerror("could not create tmp disk file");
1594 }
1595 printf("pan: created disk file %s\n", dsk_nm);
1596 }
1597 if (write(bfs_dsk_write, (char *) &now, n) != n)
1598 { Uerror("aborting -- disk write failed (disk full?)");
1599 }
1600 return h; /* no memcpy */
1601 }
1602 bfs_size_limit++;
1603 #endif
1604 h->sv = (State *) emalloc(sizeof(State) - VECTORSZ + n);
1605 }
1606
1607 memcpy((char *)h->sv, (char *)&now, n);
1608 return h;
1609 }
1610
1611 EV_Hold *
1612 getsv_mask(int n)
1613 { EV_Hold *h;
1614 static EV_Hold *kept = (EV_Hold *) 0;
1615
1616 for (h = kept; h; h = h->nxt)
1617 if (n == h->sz
1618 && (memcmp((char *) Mask, (char *) h->sv, n) == 0)
1619 && (now._nr_pr == h->nrpr)
1620 && (now._nr_qs == h->nrqs)
1621 #if VECTORSZ>32000
1622 && (memcmp((char *) proc_offset, (char *) h->po, now._nr_pr * sizeof(int)) == 0)
1623 && (memcmp((char *) q_offset, (char *) h->qo, now._nr_qs * sizeof(int)) == 0)
1624 #else
1625 && (memcmp((char *) proc_offset, (char *) h->po, now._nr_pr * sizeof(short)) == 0)
1626 && (memcmp((char *) q_offset, (char *) h->qo, now._nr_qs * sizeof(short)) == 0)
1627 #endif
1628 && (memcmp((char *) proc_skip, (char *) h->ps, now._nr_pr * sizeof(uchar)) == 0)
1629 && (memcmp((char *) q_skip, (char *) h->qs, now._nr_qs * sizeof(uchar)) == 0))
1630 break;
1631 if (!h)
1632 { h = (EV_Hold *) emalloc(sizeof(EV_Hold));
1633 h->sz = n;
1634 h->nrpr = now._nr_pr;
1635 h->nrqs = now._nr_qs;
1636
1637 h->sv = (char *) emalloc(n * sizeof(char));
1638 memcpy((char *) h->sv, (char *) Mask, n);
1639
1640 if (now._nr_pr > 0)
1641 { h->ps = (char *) emalloc(now._nr_pr * sizeof(int));
1642 memcpy((char *) h->ps, (char *) proc_skip, now._nr_pr * sizeof(uchar));
1643 #if VECTORSZ>32000
1644 h->po = (char *) emalloc(now._nr_pr * sizeof(int));
1645 memcpy((char *) h->po, (char *) proc_offset, now._nr_pr * sizeof(int));
1646 #else
1647 h->po = (char *) emalloc(now._nr_pr * sizeof(short));
1648 memcpy((char *) h->po, (char *) proc_offset, now._nr_pr * sizeof(short));
1649 #endif
1650 }
1651 if (now._nr_qs > 0)
1652 { h->qs = (char *) emalloc(now._nr_qs * sizeof(int));
1653 memcpy((char *) h->qs, (char *) q_skip, now._nr_qs * sizeof(uchar));
1654 #if VECTORSZ>32000
1655 h->qo = (char *) emalloc(now._nr_qs * sizeof(int));
1656 memcpy((char *) h->qo, (char *) q_offset, now._nr_qs * sizeof(int));
1657 #else
1658 h->qo = (char *) emalloc(now._nr_qs * sizeof(short));
1659 memcpy((char *) h->qo, (char *) q_offset, now._nr_qs * sizeof(short));
1660 #endif
1661 }
1662
1663 h->nxt = kept;
1664 kept = h;
1665 }
1666 return h;
1667 }
1668
1669 void
1670 freesv(SV_Hold *p)
1671 { SV_Hold *h, *oh;
1672
1673 oh = (SV_Hold *) 0;
1674 for (h = svfree; h; oh = h, h = h->nxt)
1675 if (h->sz >= p->sz)
1676 break;
1677
1678 if (!oh)
1679 { p->nxt = svfree;
1680 svfree = p;
1681 } else
1682 { p->nxt = h;
1683 oh->nxt = p;
1684 }
1685 }
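/* The svfree list is kept ordered by increasing vector size, so getsv()
 * can stop scanning as soon as it reaches a larger hold and only reuses
 * exact-size entries.
 */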
1686
1687 BFS_Trail *
1688 get_bfs_frame(void)
1689 { BFS_Trail *t;
1690
1691 if (bfs_free)
1692 { t = bfs_free;
1693 bfs_free = bfs_free->nxt;
1694 t->nxt = (BFS_Trail *) 0;
1695 } else
1696 { t = (BFS_Trail *) emalloc(sizeof(BFS_Trail));
1697 }
1698 t->frame = (Trail *) emalloc(sizeof(Trail));
1699 return t;
1700 }
1701
1702 void
1703 push_bfs(Trail *f, int d)
1704 { BFS_Trail *t;
1705
1706 t = get_bfs_frame();
1707 memcpy((char *)t->frame, (char *)f, sizeof(Trail));
1708 t->frame->o_tt = d; /* depth */
1709
1710 t->boq = boq;
1711 t->onow = getsv(vsize);
1712 t->omask = getsv_mask(vsize);
1713 #if defined(FULLSTACK) && defined(Q_PROVISO)
1714 t->lstate = Lstate;
1715 #endif
1716 if (!bfs_bot)
1717 { bfs_bot = bfs_trail = t;
1718 } else
1719 { bfs_bot->nxt = t;
1720 bfs_bot = t;
1721 }
1722 #ifdef CHECK
1723 printf("PUSH %u (%d)\n", t->frame, d);
1724 #endif
1725 }
1726
1727 Trail *
1728 pop_bfs(void)
1729 { BFS_Trail *t;
1730
1731 if (!bfs_trail)
1732 return (Trail *) 0;
1733
1734 t = bfs_trail;
1735 bfs_trail = t->nxt;
1736 if (!bfs_trail)
1737 bfs_bot = (BFS_Trail *) 0;
1738 #if defined(Q_PROVISO) && !defined(BITSTATE) && !defined(NOREDUCE)
1739 if (t->lstate) t->lstate->tagged = 0;
1740 #endif
1741
1742 t->nxt = bfs_free;
1743 bfs_free = t;
1744
1745 vsize = t->onow->sz;
1746 boq = t->boq;
1747 #ifdef BFS_DISK
1748 if (t->onow->sv == (State *) 0)
1749 { char dsk_nm[32];
1750 bfs_dsk_reads++; /* count */
1751 if (bfs_dsk_read >= 0 /* file descriptor */
1752 && bfs_dsk_reads%BFS_DSK_LIMIT == 0)
1753 { (void) close(bfs_dsk_read);
1754 sprintf(dsk_nm, "pan_bfs_%d.tmp", bfs_dsk_seqno_r-1);
1755 (void) unlink(dsk_nm);
1756 bfs_dsk_read = -1;
1757 }
1758 if (bfs_dsk_read < 0)
1759 { sprintf(dsk_nm, "pan_bfs_%d.tmp", bfs_dsk_seqno_r++);
1760 bfs_dsk_read = open(dsk_nm, RFLAGS);
1761 if (bfs_dsk_read < 0)
1762 { Uerror("could not open temp disk file");
1763 } }
1764 if (read(bfs_dsk_read, (char *) &now, vsize) != vsize)
1765 { Uerror("bad bfs disk file read");
1766 }
1767 #ifndef NOVSZ
1768 if (now._vsz != vsize)
1769 { Uerror("disk read vsz mismatch");
1770 }
1771 #endif
1772 } else
1773 #endif
1774 memcpy((uchar *) &now, (uchar *) t->onow->sv, vsize);
1775 memcpy((uchar *) Mask, (uchar *) t->omask->sv, vsize);
1776 if (now._nr_pr > 0)
1777 #if VECTORSZ>32000
1778 { memcpy((char *)proc_offset, (char *)t->omask->po, now._nr_pr * sizeof(int));
1779 #else
1780 { memcpy((char *)proc_offset, (char *)t->omask->po, now._nr_pr * sizeof(short));
1781 #endif
1782 memcpy((char *)proc_skip, (char *)t->omask->ps, now._nr_pr * sizeof(uchar));
1783 }
1784 if (now._nr_qs > 0)
1785 #if VECTORSZ>32000
1786 { memcpy((uchar *)q_offset, (uchar *)t->omask->qo, now._nr_qs * sizeof(int));
1787 #else
1788 { memcpy((uchar *)q_offset, (uchar *)t->omask->qo, now._nr_qs * sizeof(short));
1789 #endif
1790 memcpy((uchar *)q_skip, (uchar *)t->omask->qs, now._nr_qs * sizeof(uchar));
1791 }
1792 #ifdef BFS_DISK
1793 if (t->onow->sv != (State *) 0)
1794 #endif
1795 freesv(t->onow); /* omask not freed */
1796 #ifdef CHECK
1797 printf("POP %u (%d)\n", t->frame, t->frame->o_tt);
1798 #endif
1799 return t->frame;
1800 }
1801
1802 void
1803 store_state(Trail *ntrpt, int shortcut, short oboq)
1804 {
1805 #ifdef VERI
1806 Trans *t2 = (Trans *) 0;
1807 uchar ot; int tt, E_state;
1808 uchar o_opm = trpt->o_pm, *othis = this;
1809
1810 if (shortcut)
1811 {
1812 #ifdef VERBOSE
1813 printf("claim: shortcut\n");
1814 #endif
1815 goto store_it; /* no claim move */
1816 }
1817
1818 this = (((uchar *)&now)+proc_offset[0]); /* 0 = never claim */
1819 trpt->o_pm = 0;
1820
1821 tt = (int) ((P0 *)this)->_p;
1822 ot = (uchar) ((P0 *)this)->_t;
1823
1824 #ifdef HAS_UNLESS
1825 E_state = 0;
1826 #endif
1827 for (t2 = trans[ot][tt]; t2; t2 = t2?t2->nxt:(Trans *)0)
1828 {
1829 #ifdef HAS_UNLESS
1830 if (E_state > 0
1831 && E_state != t2->e_trans)
1832 break;
1833 #endif
1834 if (do_transit(t2, 0))
1835 {
1836 #ifdef VERBOSE
1837 if (!reached[ot][t2->st])
1838 printf("depth: %d -- claim move from %d -> %d\n",
1839 trpt->o_tt, ((P0 *)this)->_p, t2->st);
1840 #endif
1841 #ifdef HAS_UNLESS
1842 E_state = t2->e_trans;
1843 #endif
1844 if (t2->st > 0)
1845 { ((P0 *)this)->_p = t2->st;
1846 reached[ot][t2->st] = 1;
1847 #ifndef NOCLAIM
1848 check_claim(t2->st);
1849 #endif
1850 }
1851 if (now._nr_pr == 0) /* claim terminated */
1852 uerror("end state in claim reached");
1853
1854 #ifdef PEG
1855 peg[t2->forw]++;
1856 #endif
1857 trpt->o_pm |= 1;
1858 if (t2->atom&2)
1859 Uerror("atomic in claim not supported in BFS mode");
1860 store_it:
1861
1862 #endif
1863
1864 #ifdef BITSTATE
1865 if (!bstore((char *)&now, vsize))
1866 #else
1867 #ifdef MA
1868 if (!gstore((char *)&now, vsize, 0))
1869 #else
1870 if (!hstore((char *)&now, vsize))
1871 #endif
1872 #endif
1873 { static long sdone = (long) 0; long ndone;
1874 nstates++;
1875 #ifndef NOREDUCE
1876 trpt->tau |= 64;
1877 #endif
1878 ndone = (unsigned long) (nstates/((double) FREQ));
1879 if (ndone != sdone && mreached%10 != 0)
1880 { snapshot();
1881 sdone = ndone;
1882 #if defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(MA)
1883 if (nstates > ((double)(1<<(ssize+1))))
1884 { void resize_hashtable(void);
1885 resize_hashtable();
1886 }
1887 #endif
1888 }
1889 #if SYNC
1890 if (boq != -1)
1891 midrv++;
1892 else if (oboq != -1)
1893 { Trail *x;
1894 x = (Trail *) trpt->ostate; /* pre-rv state */
1895 if (x) x->o_pm |= 4; /* mark success */
1896 }
1897 #endif
1898 push_bfs(ntrpt, trpt->o_tt+1);
1899 } else
1900 { truncs++;
1901 #if !defined(NOREDUCE) && defined(FULLSTACK) && defined(Q_PROVISO)
1902 #if !defined(BITSTATE)
1903 if (Lstate && Lstate->tagged) trpt->tau |= 64;
1904 #else
1905 if (trpt->tau&32)
1906 { BFS_Trail *tprov;
1907 for (tprov = bfs_trail; tprov; tprov = tprov->nxt)
1908 if (tprov->onow->sv != (State *) 0
1909 && memcmp((uchar *)&now, (uchar *)tprov->onow->sv, vsize) == 0)
1910 { trpt->tau |= 64;
1911 break; /* state is in queue */
1912 } }
1913 #endif
1914 #endif
1915 }
1916 #ifdef VERI
1917 ((P0 *)this)->_p = tt; /* reset claim */
1918 if (t2)
1919 do_reverse(t2, 0, 0);
1920 else
1921 break;
1922 } }
1923 this = othis;
1924 trpt->o_pm = o_opm;
1925 #endif
1926 }
1927
1928 Trail *ntrpt;
1929
1930 void
1931 bfs(void)
1932 { Trans *t; Trail *otrpt, *x;
1933 uchar _n, _m, ot, nps = 0;
1934 int tt, E_state;
1935 short II, From = (short) (now._nr_pr-1), To = BASE;
1936 short oboq = boq;
1937
1938 ntrpt = (Trail *) emalloc(sizeof(Trail));
1939 trpt->ostate = (struct H_el *) 0;
1940 trpt->tau = 0;
1941
1942 trpt->o_tt = -1;
1943 store_state(ntrpt, 0, oboq); /* initial state */
1944
1945 while ((otrpt = pop_bfs())) /* also restores now */
1946 { memcpy((char *) trpt, (char *) otrpt, sizeof(Trail));
1947 #if defined(C_States) && (HAS_TRACK==1)
1948 c_revert((uchar *) &(now.c_state[0]));
1949 #endif
1950 if (trpt->o_pm & 4)
1951 {
1952 #ifdef VERBOSE
1953 printf("Revisit of atomic not needed (%d)\n",
1954 trpt->o_pm);
1955 #endif
1956 continue;
1957 }
1958 #ifndef NOREDUCE
1959 nps = 0;
1960 #endif
1961 if (trpt->o_pm == 8)
1962 { revrv++;
1963 if (trpt->tau&8)
1964 {
1965 #ifdef VERBOSE
1966 printf("Break atomic (pm:%d,tau:%d)\n",
1967 trpt->o_pm, trpt->tau);
1968 #endif
1969 trpt->tau &= ~8;
1970 }
1971 #ifndef NOREDUCE
1972 else if (trpt->tau&32)
1973 {
1974 #ifdef VERBOSE
1975 printf("Void preselection (pm:%d,tau:%d)\n",
1976 trpt->o_pm, trpt->tau);
1977 #endif
1978 trpt->tau &= ~32;
1979 nps = 1; /* no preselection in repeat */
1980 }
1981 #endif
1982 }
1983 trpt->o_pm &= ~(4|8);
1984 if (trpt->o_tt > mreached)
1985 { mreached = trpt->o_tt;
1986 if (mreached%10 == 0)
1987 { snapshot();
1988 } }
1989 depth = trpt->o_tt;
1990 if (depth >= maxdepth)
1991 {
1992 #if SYNC
1993 Trail *x;
1994 if (boq != -1)
1995 { x = (Trail *) trpt->ostate;
1996 if (x) x->o_pm |= 4; /* not failing */
1997 }
1998 #endif
1999 truncs++;
2000 if (!warned)
2001 { warned = 1;
2002 printf("error: max search depth too small\n");
2003 }
2004 if (bounded)
2005 uerror("depth limit reached");
2006 continue;
2007 }
2008 #ifndef NOREDUCE
2009 if (boq == -1 && !(trpt->tau&8) && nps == 0)
2010 for (II = now._nr_pr-1; II >= BASE; II -= 1)
2011 {
2012 Pickup: this = pptr(II);
2013 tt = (int) ((P0 *)this)->_p;
2014 ot = (uchar) ((P0 *)this)->_t;
2015 if (trans[ot][tt]->atom & 8)
2016 { t = trans[ot][tt];
2017 if (t->qu[0] != 0)
2018 { Ccheck++;
2019 if (!q_cond(II, t))
2020 continue;
2021 Cholds++;
2022 }
2023 From = To = II;
2024 trpt->tau |= 32; /* preselect marker */
2025 #ifdef DEBUG
2026 printf("%3d: proc %d PreSelected (tau=%d)\n",
2027 depth, II, trpt->tau);
2028 #endif
2029 goto MainLoop;
2030 } }
2031 trpt->tau &= ~32;
2032 #endif
2033 Repeat:
2034 if (trpt->tau&8) /* atomic */
2035 { From = To = (short ) trpt->pr;
2036 nlinks++;
2037 } else
2038 { From = now._nr_pr-1;
2039 To = BASE;
2040 }
2041 MainLoop:
2042 _n = _m = 0;
2043 for (II = From; II >= To; II -= 1)
2044 {
2045 this = (((uchar *)&now)+proc_offset[II]);
2046 tt = (int) ((P0 *)this)->_p;
2047 ot = (uchar) ((P0 *)this)->_t;
2048 #if SYNC
2049 /* no rendezvous with same proc */
2050 if (boq != -1 && trpt->pr == II) continue;
2051 #endif
2052 ntrpt->pr = (uchar) II;
2053 ntrpt->st = tt;
2054 trpt->o_pm &= ~1; /* no move yet */
2055 #ifdef EVENT_TRACE
2056 trpt->o_event = now._event;
2057 #endif
2058 #ifdef HAS_PROVIDED
2059 if (!provided(II, ot, tt, t)) continue;
2060 #endif
2061 #ifdef HAS_UNLESS
2062 E_state = 0;
2063 #endif
2064 for (t = trans[ot][tt]; t; t = t->nxt)
2065 {
2066 #ifdef HAS_UNLESS
2067 if (E_state > 0
2068 && E_state != t->e_trans)
2069 break;
2070 #endif
2071 ntrpt->o_t = t;
2072
2073 oboq = boq;
2074
2075 if (!(_m = do_transit(t, II)))
2076 continue;
2077
2078 trpt->o_pm |= 1; /* we moved */
2079 (trpt+1)->o_m = _m; /* for unsend */
2080 #ifdef PEG
2081 peg[t->forw]++;
2082 #endif
2083 #ifdef CHECK
2084 printf("%3d: proc %d exec %d, ",
2085 depth, II, t->forw);
2086 printf("%d to %d, %s %s %s",
2087 tt, t->st, t->tp,
2088 (t->atom&2)?"atomic":"",
2089 (boq != -1)?"rendez-vous":"");
2090 #ifdef HAS_UNLESS
2091 if (t->e_trans)
2092 printf(" (escapes to state %d)", t->st);
2093 #endif
2094 printf(" %saccepting [tau=%d]\n",
2095 (trpt->o_pm&2)?"":"non-", trpt->tau);
2096 #endif
2097 #ifdef HAS_UNLESS
2098 E_state = t->e_trans;
2099 #if SYNC>0
2100 if (t->e_trans > 0 && (boq != -1 /* || oboq != -1 */))
2101 { fprintf(efd, "error: the use of rendezvous stmnt in the escape clause\n");
2102 fprintf(efd, " of an unless stmnt is not compatible with -DBFS\n");
2103 pan_exit(1);
2104 }
2105 #endif
2106 #endif
2107 if (t->st > 0) ((P0 *)this)->_p = t->st;
2108
2109 /* ptr to pred: */ ntrpt->ostate = (struct H_el *) otrpt;
2110 ntrpt->st = tt;
2111 if (boq == -1 && (t->atom&2)) /* atomic */
2112 ntrpt->tau = 8; /* record for next move */
2113 else
2114 ntrpt->tau = 0;
2115
2116 store_state(ntrpt, (boq != -1 || (t->atom&2)), oboq);
2117 #ifdef EVENT_TRACE
2118 now._event = trpt->o_event;
2119 #endif
2120
2121 /* undo move and continue */
2122 trpt++; /* this is where ovals and ipt are set */
2123 do_reverse(t, II, _m); /* restore now. */
2124 trpt--;
2125 #ifdef CHECK
2126 #if NCORE>1
2127 enter_critical(GLOBAL_LOCK); /* in verbose mode only */
2128 printf("cpu%d: ", core_id);
2129 #endif
2130 printf("%3d: proc %d ", depth, II);
2131 printf("reverses %d, %d to %d,",
2132 t->forw, tt, t->st);
2133 printf(" %s [abit=%d,adepth=%d,",
2134 t->tp, now._a_t, A_depth);
2135 printf("tau=%d,%d]\n",
2136 trpt->tau, (trpt-1)->tau);
2137 #if NCORE>1
2138 leave_critical(GLOBAL_LOCK);
2139 #endif
2140 #endif
2141 reached[ot][t->st] = 1;
2142 reached[ot][tt] = 1;
2143
2144 ((P0 *)this)->_p = tt;
2145 _n |= _m;
2146 } }
2147 #ifndef NOREDUCE
2148 /* preselected - no succ definitely outside stack */
2149 if ((trpt->tau&32) && !(trpt->tau&64))
2150 { From = now._nr_pr-1; To = BASE;
2151 #ifdef DEBUG
2152 cpu_printf("%3d: proc %d UnSelected (_n=%d, tau=%d)\n",
2153 depth, II+1, (int) _n, trpt->tau);
2154 #endif
2155 _n = 0; trpt->tau &= ~32;
2156 if (II >= BASE)
2157 goto Pickup;
2158 goto MainLoop;
2159 }
2160 trpt->tau &= ~(32|64);
2161 #endif
2162 if (_n != 0)
2163 continue;
2164 #ifdef DEBUG
2165 printf("%3d: no move [II=%d, tau=%d, boq=%d, _nr_pr=%d]\n",
2166 depth, II, trpt->tau, boq, now._nr_pr);
2167 #endif
2168 if (boq != -1)
2169 { failedrv++;
2170 x = (Trail *) trpt->ostate; /* pre-rv state */
2171 if (!x) continue; /* root state */
2172 if ((x->tau&8) || (x->tau&32)) /* break atomic or preselect at parent */
2173 { x->o_pm |= 8; /* mark failure */
2174 this = (((uchar *)&now)+proc_offset[otrpt->pr]);
2175 #ifdef VERBOSE
2176 printf("\treset state of %d from %d to %d\n",
2177 otrpt->pr, ((P0 *)this)->_p, otrpt->st);
2178 #endif
2179 ((P0 *)this)->_p = otrpt->st;
2180 unsend(boq); /* retract rv offer */
2181 boq = -1;
2182 push_bfs(x, x->o_tt);
2183 #ifdef VERBOSE
2184 printf("failed rv, repush with %d\n", x->o_pm);
2185 #endif
2186 }
2187 #ifdef VERBOSE
2188 else printf("failed rv, tau at parent: %d\n", x->tau);
2189 #endif
2190 } else if (now._nr_pr > 0)
2191 {
2192 if ((trpt->tau&8)) /* atomic */
2193 { trpt->tau &= ~(1|8); /* 1=timeout, 8=atomic */
2194 #ifdef DEBUG
2195 printf("%3d: atomic step proc %d blocks\n",
2196 depth, II+1);
2197 #endif
2198 goto Repeat;
2199 }
2200
2201 if (!(trpt->tau&1)) /* didn't try timeout yet */
2202 { trpt->tau |= 1;
2203 #ifdef DEBUG
2204 printf("%d: timeout\n", depth);
2205 #endif
2206 goto MainLoop;
2207 }
2208 #ifndef VERI
2209 if (!noends && !a_cycles && !endstate())
2210 uerror("invalid end state");
2211 #endif
2212 } }
2213 }
2214
2215 void
2216 putter(Trail *trpt, int fd)
2217 { long j;
2218
2219 if (!trpt) return;
2220
2221 if (trpt != (Trail *) trpt->ostate)
2222 putter((Trail *) trpt->ostate, fd);
2223
2224 if (trpt->o_t)
2225 { sprintf(snap, "%d:%d:%d\n",
2226 trcnt++, trpt->pr, trpt->o_t->t_id);
2227 j = strlen(snap);
2228 if (write(fd, snap, j) != j)
2229 { printf("pan: error writing %s\n", fnm);
2230 pan_exit(1);
2231 } }
2232 }
2233
2234 void
2235 nuerror(char *str)
2236 { int fd = make_trail();
2237 int j;
2238
2239 if (fd < 0) return;
2240 #ifdef VERI
2241 sprintf(snap, "-2:%d:-2\n", VERI);
2242 write(fd, snap, strlen(snap));
2243 #endif
2244 #ifdef MERGED
2245 sprintf(snap, "-4:-4:-4\n");
2246 write(fd, snap, strlen(snap));
2247 #endif
2248 trcnt = 1;
2249 putter(trpt, fd);
2250 if (ntrpt->o_t)
2251 { sprintf(snap, "%d:%d:%d\n",
2252 trcnt++, ntrpt->pr, ntrpt->o_t->t_id);
2253 j = strlen(snap);
2254 if (write(fd, snap, j) != j)
2255 { printf("pan: error writing %s\n", fnm);
2256 pan_exit(1);
2257 } }
2258 close(fd);
2259 if (errors >= upto && upto != 0)
2260 { wrapup();
2261 }
2262 }
2263 #endif
2264 #if NCORE>1
2265 #if defined(WIN32) || defined(WIN64)
2266 #ifndef _CONSOLE
2267 #define _CONSOLE
2268 #endif
2269 #ifdef WIN64
2270 #undef long
2271 #endif
2272 #include <windows.h>
2273
2274 #ifdef WIN64
2275 #define long long long
2276 #endif
2277 #else
2278 #include <sys/ipc.h>
2279 #include <sys/sem.h>
2280 #include <sys/shm.h>
2281 #endif
2282
2283 /* code common to cygwin/linux and win32/win64: */
2284
2285 #ifdef VERBOSE
2286 #define VVERBOSE (1)
2287 #else
2288 #define VVERBOSE (0)
2289 #endif
2290
2291 /* the following values must be larger than 256 and must fit in an int */
2292 #define QUIT 1024 /* terminate now command */
2293 #define QUERY 512 /* termination status query message */
2294 #define QUERY_F 513 /* query failed, cannot quit */
2295
2296 #define GN_FRAMES (int) (GWQ_SIZE / (double) sizeof(SM_frame))
2297 #define LN_FRAMES (int) (LWQ_SIZE / (double) sizeof(SM_frame))
2298
2299 #ifndef VMAX
2300 #define VMAX VECTORSZ
2301 #endif
2302 #ifndef PMAX
2303 #define PMAX 64
2304 #endif
2305 #ifndef QMAX
2306 #define QMAX 64
2307 #endif
2308
2309 #if VECTORSZ>32000
2310 #define OFFT int
2311 #else
2312 #define OFFT short
2313 #endif
2314
2315 #ifdef SET_SEG_SIZE
2316 /* no longer useful -- being recomputed for local heap size anyway */
2317 double SEG_SIZE = (((double) SET_SEG_SIZE) * 1048576.);
2318 #else
2319 double SEG_SIZE = (1048576.*1024.); /* 1GB default shared memory pool segments */
2320 #endif
2321
2322 double LWQ_SIZE = 0.; /* initialized in main */
2323
2324 #ifdef SET_WQ_SIZE
2325 #ifdef NGQ
2326 #warning SET_WQ_SIZE applies to global queue -- ignored
2327 double GWQ_SIZE = 0.;
2328 #else
2329 double GWQ_SIZE = (((double) SET_WQ_SIZE) * 1048576.);
2330 /* must match the value in pan_proxy.c, if used */
2331 #endif
2332 #else
2333 #ifdef NGQ
2334 double GWQ_SIZE = 0.;
2335 #else
2336 double GWQ_SIZE = (128.*1048576.); /* 128 MB default queue sizes */
2337 #endif
2338 #endif
2339
2340 /* Crash Detection Parameters */
2341 #ifndef ONESECOND
2342 #define ONESECOND (1<<25)
2343 #endif
2344 #ifndef SHORT_T
2345 #define SHORT_T (0.1)
2346 #endif
2347 #ifndef LONG_T
2348 #define LONG_T (600)
2349 #endif
2350
2351 double OneSecond = (double) (ONESECOND); /* waiting for a free slot -- checks crash */
2352 double TenSeconds = 10. * (ONESECOND); /* waiting for a lock -- check for a crash */
2353
2354 /* Termination Detection Params -- waiting for new state input in Get_Full_Frame */
2355 double Delay = ((double) SHORT_T) * (ONESECOND); /* termination detection trigger */
2356 double OneHour = ((double) LONG_T) * (ONESECOND); /* timeout termination detection */
2357
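/*
 * An SM_frame is one slot of a shared work queue. It carries a complete
 * packed state (state vector, mask bits, per-process and per-channel
 * offsets) plus the control fields needed to resume exploration on
 * another cpu. A slot with m_vsize == 0 is free; m_boq values above 500
 * carry control messages (QUERY, QUIT) instead of states.
 */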
2358 typedef struct SM_frame SM_frame;
2359 typedef struct SM_results SM_results;
2360 typedef struct sh_Allocater sh_Allocater;
2361
2362 struct SM_frame { /* about 6K per slot */
2363 volatile int m_vsize; /* 0 means free slot */
2364 volatile int m_boq; /* >500 is a control message */
2365 #ifdef FULL_TRAIL
2366 volatile struct Stack_Tree *m_stack; /* ptr to previous state */
2367 #endif
2368 volatile uchar m_tau;
2369 volatile uchar m_o_pm;
2370 volatile int nr_handoffs; /* to compute real_depth */
2371 volatile char m_now [VMAX];
2372 volatile char m_Mask [(VMAX + 7)/8];
2373 volatile OFFT m_p_offset[PMAX];
2374 volatile OFFT m_q_offset[QMAX];
2375 volatile uchar m_p_skip [PMAX];
2376 volatile uchar m_q_skip [QMAX];
2377 #if defined(C_States) && (HAS_TRACK==1) && (HAS_STACK==1)
2378 volatile uchar m_c_stack [StackSize];
2379 #endif
2380 };
2381
2382 int proxy_pid; /* id of proxy if nonzero -- receive half */
2383 int store_proxy_pid;
2384 short remote_party;
2385 int proxy_pid_snd; /* id of proxy if nonzero -- send half */
2386 char o_cmdline[512]; /* to pass options to children */
2387
2388 int iamin[CS_NR+NCORE]; /* non-shared */
2389
2390 #if defined(WIN32) || defined(WIN64)
2391 int tas(volatile LONG *);
2392
2393 HANDLE proxy_handle_snd; /* for Windows Create and Terminate */
2394
2395 struct sh_Allocater { /* shared memory for states */
2396 volatile char *dc_arena; /* to allocate states from */
2397 volatile long pattern; /* to detect overruns */
2398 volatile long dc_size; /* nr of bytes left */
2399 volatile void *dc_start; /* where memory segment starts */
2400 volatile void *dc_id; /* to attach, detach, remove shared memory segments */
2401 volatile sh_Allocater *nxt; /* linked list of pools */
2402 };
2403 DWORD worker_pids[NCORE]; /* root mem of pids of all workers created */
2404 HANDLE worker_handles[NCORE]; /* for windows Create and Terminate */
2405 void * shmid [NR_QS]; /* return value from CreateFileMapping */
2406 void * shmid_M; /* shared mem for state allocation in hashtable */
2407
2408 #ifdef SEP_STATE
2409 void *shmid_X;
2410 #else
2411 void *shmid_S; /* shared bitstate arena or hashtable */
2412 #endif
2413 #else
2414 int tas(volatile int *);
2415
2416 struct sh_Allocater { /* shared memory for states */
2417 volatile char *dc_arena; /* to allocate states from */
2418 volatile long pattern; /* to detect overruns */
2419 volatile long dc_size; /* nr of bytes left */
2420 volatile char *dc_start; /* where memory segment starts */
2421 volatile int dc_id; /* to attach, detach, remove shared memory segments */
2422 volatile sh_Allocater *nxt; /* linked list of pools */
2423 };
2424
2425 int worker_pids[NCORE]; /* root mem of pids of all workers created */
2426 int shmid [NR_QS]; /* return value from shmget */
2427 int nibis = 0; /* set after shared mem has been released */
2428 int shmid_M; /* shared mem for state allocation in hashtable */
2429 #ifdef SEP_STATE
2430 long shmid_X;
2431 #else
2432 int shmid_S; /* shared bitstate arena or hashtable */
2433 volatile sh_Allocater *first_pool; /* of shared state memory */
2434 volatile sh_Allocater *last_pool;
2435 #endif
2436 #endif
2437
2438 struct SM_results { /* for shuttling back final stats */
2439 volatile int m_vsize; /* avoid conflicts with frames */
2440 volatile int m_boq; /* these 2 fields are not written in record_info */
2441 /* probably not all fields really need to be volatile */
2442 volatile double m_memcnt;
2443 volatile double m_nstates;
2444 volatile double m_truncs;
2445 volatile double m_truncs2;
2446 volatile double m_nShadow;
2447 volatile double m_nlinks;
2448 volatile double m_ngrabs;
2449 volatile double m_nlost;
2450 volatile double m_hcmp;
2451 volatile double m_frame_wait;
2452 volatile int m_hmax;
2453 volatile int m_svmax;
2454 volatile int m_smax;
2455 volatile int m_mreached;
2456 volatile int m_errors;
2457 volatile int m_VMAX;
2458 volatile short m_PMAX;
2459 volatile short m_QMAX;
2460 volatile uchar m_R; /* reached info for all proctypes */
2461 };
2462
2463 int core_id = 0; /* internal process nr, to know which q to use */
2464 unsigned long nstates_put = 0; /* statistics */
2465 unsigned long nstates_get = 0;
2466 int query_in_progress = 0; /* termination detection */
2467
2468 double free_wait = 0.; /* waiting for a free frame */
2469 double frame_wait = 0.; /* waiting for a full frame */
2470 double lock_wait = 0.; /* waiting for access to cs */
2471 double glock_wait[3]; /* waiting for access to global lock */
2472
2473 char *sprefix = "rst";
2474 uchar was_interrupted, issued_kill, writing_trail;
2475
2476 static SM_frame cur_Root; /* current root, to be safe with error trails */
2477
2478 SM_frame *m_workq [NR_QS]; /* per cpu work queues + global q */
2479 char *shared_mem[NR_QS]; /* return value from shmat */
2480 #ifdef SEP_HEAP
2481 char *my_heap;
2482 long my_size;
2483 #endif
2484 volatile sh_Allocater *dc_shared; /* assigned at initialization */
2485
2486 static int vmax_seen, pmax_seen, qmax_seen;
2487 static double gq_tries, gq_hasroom, gq_hasnoroom;
2488
2489 volatile int *prfree;
2490 volatile int *prfull;
2491 volatile int *prcnt;
2492 volatile int *prmax;
2493
2494 volatile int *sh_lock; /* mutual exclusion locks - in shared memory */
2495 volatile double *is_alive; /* to detect when processes crash */
2496 volatile int *grfree, *grfull, *grcnt, *grmax; /* access to shared global q */
2497 volatile double *gr_readmiss, *gr_writemiss;
2498 static int lrfree; /* used for temporary recording of slot */
2499 static int dfs_phase2;
2500
2501 void mem_put(int); /* handoff state to other cpu */
2502 void mem_put_acc(void); /* liveness mode */
2503 void mem_get(void); /* get state from work queue */
2504 void sudden_stop(char *);
2505 #if 0
2506 void enter_critical(int);
2507 void leave_critical(int);
2508 #endif
2509
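/*
 * Each worker packs its local statistics and reached-state information
 * into an SM_results record; the root cpu later folds these numbers
 * into its own counters in retrieve_info().
 */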
2510 void
2511 record_info(SM_results *r)
2512 { int i;
2513 uchar *ptr;
2514
2515 #ifdef SEP_STATE
2516 if (0)
2517 { cpu_printf("nstates %g nshadow %g -- memory %-6.3f Mb\n",
2518 nstates, nShadow, memcnt/(1048576.));
2519 }
2520 r->m_memcnt = 0;
2521 #else
2522 #ifdef BITSTATE
2523 r->m_memcnt = 0; /* it's shared */
2524 #endif
2525 r->m_memcnt = memcnt;
2526 #endif
2527 if (a_cycles && core_id == 1)
2528 { r->m_nstates = nstates;
2529 r->m_nShadow = nstates;
2530 } else
2531 { r->m_nstates = nstates;
2532 r->m_nShadow = nShadow;
2533 }
2534 r->m_truncs = truncs;
2535 r->m_truncs2 = truncs2;
2536 r->m_nlinks = nlinks;
2537 r->m_ngrabs = ngrabs;
2538 r->m_nlost = nlost;
2539 r->m_hcmp = hcmp;
2540 r->m_frame_wait = frame_wait;
2541 r->m_hmax = hmax;
2542 r->m_svmax = svmax;
2543 r->m_smax = smax;
2544 r->m_mreached = mreached;
2545 r->m_errors = errors;
2546 r->m_VMAX = vmax_seen;
2547 r->m_PMAX = (short) pmax_seen;
2548 r->m_QMAX = (short) qmax_seen;
2549 ptr = (uchar *) &(r->m_R);
2550 for (i = 0; i <= _NP_; i++) /* all proctypes */
2551 { memcpy(ptr, reached[i], NrStates[i]*sizeof(uchar));
2552 ptr += NrStates[i]*sizeof(uchar);
2553 }
2554 if (verbose>1)
2555 { cpu_printf("Put Results nstates %g (sz %d)\n", nstates, ptr - &(r->m_R));
2556 }
2557 }
2558
2559 void snapshot(void);
2560
2561 void
2562 retrieve_info(SM_results *r)
2563 { int i, j;
2564 volatile uchar *ptr;
2565
2566 snapshot(); /* for a final report */
2567
2568 enter_critical(GLOBAL_LOCK);
2569 #ifdef SEP_HEAP
2570 if (verbose)
2571 { printf("cpu%d: local heap-left %ld KB (%d MB)\n",
2572 core_id, (long) (my_size/1024), (int) (my_size/1048576));
2573 }
2574 #endif
2575 if (verbose && core_id == 0)
2576 { printf("qmax: ");
2577 for (i = 0; i < NCORE; i++)
2578 { printf("%d ", prmax[i]);
2579 }
2580 #ifndef NGQ
2581 printf("G: %d", *grmax);
2582 #endif
2583 printf("\n");
2584 }
2585 leave_critical(GLOBAL_LOCK);
2586
2587 memcnt += r->m_memcnt;
2588 nstates += r->m_nstates;
2589 nShadow += r->m_nShadow;
2590 truncs += r->m_truncs;
2591 truncs2 += r->m_truncs2;
2592 nlinks += r->m_nlinks;
2593 ngrabs += r->m_ngrabs;
2594 nlost += r->m_nlost;
2595 hcmp += r->m_hcmp;
2596 /* frame_wait += r->m_frame_wait; */
2597 errors += r->m_errors;
2598
2599 if (hmax < r->m_hmax) hmax = r->m_hmax;
2600 if (svmax < r->m_svmax) svmax = r->m_svmax;
2601 if (smax < r->m_smax) smax = r->m_smax;
2602 if (mreached < r->m_mreached) mreached = r->m_mreached;
2603
2604 if (vmax_seen < r->m_VMAX) vmax_seen = r->m_VMAX;
2605 if (pmax_seen < (int) r->m_PMAX) pmax_seen = (int) r->m_PMAX;
2606 if (qmax_seen < (int) r->m_QMAX) qmax_seen = (int) r->m_QMAX;
2607
2608 ptr = &(r->m_R);
2609 for (i = 0; i <= _NP_; i++) /* all proctypes */
2610 { for (j = 0; j < NrStates[i]; j++)
2611 { if (*(ptr + j) != 0)
2612 { reached[i][j] = 1;
2613 } }
2614 ptr += NrStates[i]*sizeof(uchar);
2615 }
2616 if (verbose>1)
2617 { cpu_printf("Got Results (%d)\n", ptr - &(r->m_R));
2618 snapshot();
2619 }
2620 }
2621
2622 #if !defined(WIN32) && !defined(WIN64)
2623 static void
2624 rm_shared_segments(void)
2625 { int m;
2626 volatile sh_Allocater *nxt_pool;
2627 /*
2628 * mark all shared memory segments for removal
2629 * the actual removes won't happen until the last process dies or detaches
2630 * the shmctl calls can return -1 if not all procs have detached yet
2631 */
2632 for (m = 0; m < NR_QS; m++) /* +1 for global q */
2633 { if (shmid[m] != -1)
2634 { (void) shmctl(shmid[m], IPC_RMID, NULL);
2635 } }
2636 #ifdef SEP_STATE
2637 if (shmid_M != -1)
2638 { (void) shmctl(shmid_M, IPC_RMID, NULL);
2639 }
2640 #else
2641 if (shmid_S != -1)
2642 { (void) shmctl(shmid_S, IPC_RMID, NULL);
2643 }
2644 for (last_pool = first_pool; last_pool != NULL; last_pool = nxt_pool)
2645 { shmid_M = (int) (last_pool->dc_id);
2646 nxt_pool = last_pool->nxt; /* as a precaution only */
2647 if (shmid_M != -1)
2648 { (void) shmctl(shmid_M, IPC_RMID, NULL);
2649 } }
2650 #endif
2651 }
2652 #endif
2653
2654 void
2655 sudden_stop(char *s)
2656 { char b[64];
2657 int i;
2658
2659 printf("cpu%d: stop - %s\n", core_id, s);
2660 #if !defined(WIN32) && !defined(WIN64)
2661 if (proxy_pid != 0)
2662 { rm_shared_segments();
2663 }
2664 #endif
2665 if (search_terminated != NULL)
2666 { if (*search_terminated != 0)
2667 { if (verbose)
2668 { printf("cpu%d: termination initiated (%d)\n",
2669 core_id, *search_terminated);
2670 }
2671 } else
2672 { if (verbose)
2673 { printf("cpu%d: initiated termination\n", core_id);
2674 }
2675 *search_terminated |= 8; /* sudden_stop */
2676 }
2677 if (core_id == 0)
2678 { if (((*search_terminated) & 4) /* uerror in one of the cpus */
2679 && !((*search_terminated) & (8|32|128|256))) /* abnormal stop */
2680 { if (errors == 0) errors++; /* we know there is at least 1 */
2681 }
2682 wrapup(); /* incomplete stats, but at least something */
2683 }
2684 return;
2685 } /* else: should rarely happen, take more drastic measures */
2686
2687 if (core_id == 0) /* local root process */
2688 { for (i = 1; i < NCORE; i++) /* not for 0 of course */
2689 {
2690 #if defined(WIN32) || defined(WIN64)
2691 DWORD dwExitCode = 0;
2692 GetExitCodeProcess(worker_handles[i], &dwExitCode);
2693 if (dwExitCode == STILL_ACTIVE)
2694 { TerminateProcess(worker_handles[i], 0);
2695 }
2696 printf("cpu0: terminate %d %d\n",
2697 worker_pids[i], (dwExitCode == STILL_ACTIVE));
2698 #else
2699 sprintf(b, "kill -%d %d", SIGKILL, worker_pids[i]);
2700 system(b); /* if this is a proxy: receive half */
2701 printf("cpu0: %s\n", b);
2702 #endif
2703 }
2704 issued_kill++;
2705 } else
2706 { /* on WIN32/WIN64 -- this merely kills the root process... */
2707 if (was_interrupted == 0)
2708 { sprintf(b, "kill -%d %d", SIGINT, worker_pids[0]);
2709 system(b); /* warn the root process */
2710 printf("cpu%d: %s\n", core_id, b);
2711 issued_kill++;
2712 } }
2713 }
2714
2715 #define iam_alive() is_alive[core_id]++
2716
2717 extern int crash_test(double);
2718 extern void crash_reset(void);
2719
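/*
 * Crash detection: every cpu periodically increments its own entry in
 * is_alive[] (via iam_alive). someone_crashed() watches only the left
 * neighbor in the ring and reports a crash when that counter has not
 * changed over repeated checks.
 */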
2720 int
2721 someone_crashed(int wait_type)
2722 { static double last_value = 0.0;
2723 static int count = 0;
2724
2725 if (search_terminated == NULL
2726 || *search_terminated != 0)
2727 {
2728 if (!(*search_terminated & (8|32|128|256)))
2729 { if (count++ < 100*NCORE)
2730 { return 0;
2731 } }
2732 return 1;
2733 }
2734 /* check left neighbor only */
2735 if (last_value == is_alive[(core_id + NCORE - 1) % NCORE])
2736 { if (count++ >= 100) /* to avoid unnecessary checks */
2737 { return 1;
2738 }
2739 return 0;
2740 }
2741 last_value = is_alive[(core_id + NCORE - 1) % NCORE];
2742 count = 0;
2743 crash_reset();
2744 return 0;
2745 }
2746
2747 void
2748 sleep_report(void)
2749 {
2750 enter_critical(GLOBAL_LOCK);
2751 if (verbose)
2752 {
2753 #ifdef NGQ
2754 printf("cpu%d: locks: global %g\tother %g\t",
2755 core_id, glock_wait[0], lock_wait - glock_wait[0]);
2756 #else
2757 printf("cpu%d: locks: GL %g, RQ %g, WQ %g, HT %g\t",
2758 core_id, glock_wait[0], glock_wait[1], glock_wait[2],
2759 lock_wait - glock_wait[0] - glock_wait[1] - glock_wait[2]);
2760 #endif
2761 printf("waits: states %g slots %g\n", frame_wait, free_wait);
2762 #ifndef NGQ
2763 printf("cpu%d: gq [tries %g, room %g, noroom %g]\n", core_id, gq_tries, gq_hasroom, gq_hasnoroom);
2764 if (core_id == 0 && (*gr_readmiss >= 1.0 || *gr_writemiss >= 1.0 || *grcnt != 0))
2765 printf("cpu0: gq [readmiss: %g, writemiss: %g cnt %d]\n", *gr_readmiss, *gr_writemiss, *grcnt);
2766 #endif
2767 }
2768 if (free_wait > 1000000.)
2769 #ifndef NGQ
2770 if (!a_cycles)
2771 { printf("hint: this search may be faster with a larger work-queue\n");
2772 printf(" (-DSET_WQ_SIZE=N with N>%g), and/or with -DUSE_DISK\n",
2773 GWQ_SIZE/sizeof(SM_frame));
2774 printf(" or with a larger value for -zN (N>%d)\n", z_handoff);
2775 #else
2776 { printf("hint: this search may be faster if compiled without -DNGQ, with -DUSE_DISK, ");
2777 printf("or with a larger -zN (N>%d)\n", z_handoff);
2778 #endif
2779 }
2780 leave_critical(GLOBAL_LOCK);
2781 }
2782
2783 #ifndef MAX_DSK_FILE
2784 #define MAX_DSK_FILE 1000000 /* default is max 1M states per file */
2785 #endif
2786
2787 void
2788 multi_usage(FILE *fd)
2789 { static int warned = 0;
2790 if (warned > 0) { return; } else { warned++; }
2791 fprintf(fd, "\n");
2792 fprintf(fd, "Defining multi-core mode:\n\n");
2793 fprintf(fd, " -DDUAL_CORE --> same as -DNCORE=2\n");
2794 fprintf(fd, " -DQUAD_CORE --> same as -DNCORE=4\n");
2795 fprintf(fd, " -DNCORE=N --> enables multi_core verification if N>1\n");
2796 fprintf(fd, "\n");
2797 fprintf(fd, "Additional directives supported in multi-core mode:\n\n");
2798 fprintf(fd, " -DSEP_STATE --> forces separate statespaces instead of a single shared state space\n");
2799 fprintf(fd, " -DUSE_DISK --> use disk for storing states when a work queue overflows\n");
2800 fprintf(fd, " -DMAX_DSK_FILE --> max nr of states per diskfile (%d)\n", MAX_DSK_FILE);
2801 fprintf(fd, " -DFULL_TRAIL --> support full error trails (increases memory use)\n");
2802 fprintf(fd, "\n");
2803 fprintf(fd, "More advanced use (should rarely need changing):\n\n");
2804 fprintf(fd, " To change the nr of states that can be stored in the global queue\n");
2805 fprintf(fd, " (lower numbers allow for more states to be stored, prefer multiples of 8):\n");
2806 fprintf(fd, " -DVMAX=N --> upperbound on statevector for handoffs (N=%d)\n", VMAX);
2807 fprintf(fd, " -DPMAX=N --> upperbound on nr of procs (default: N=%d)\n", PMAX);
2808 fprintf(fd, " -DQMAX=N --> upperbound on nr of channels (default: N=%d)\n", QMAX);
2809 fprintf(fd, "\n");
2810 fprintf(fd, " To set the total amount of memory reserved for the global workqueue:\n");
2811 fprintf(fd, " -DSET_WQ_SIZE=N --> default: N=128 (defined in MBytes)\n\n");
2812 fprintf(fd, " To force the use of a single global heap, instead of separate heaps:\n");
2813 fprintf(fd, " -DGLOB_HEAP\n");
2814 fprintf(fd, "\n");
2815 fprintf(fd, " To define a fct to initialize data before spawning processes (use quotes):\n");
2816 fprintf(fd, " \"-DC_INIT=fct()\"\n");
2817 fprintf(fd, "\n");
2818 fprintf(fd, " Timer settings for termination and crash detection:\n");
2819 fprintf(fd, " -DSHORT_T=N --> timeout for termination detection trigger (N=%g)\n", (double) SHORT_T);
2820 fprintf(fd, " -DLONG_T=N --> timeout for giving up on termination detection (N=%g)\n", (double) LONG_T);
2821 fprintf(fd, " -DONESECOND --> (1<<25) --> timeout waiting for a free slot -- to check for crash\n");
2822 fprintf(fd, " -DT_ALERT --> collect stats on crash alert timeouts\n\n");
2823 fprintf(fd, "Help with Linux/Windows/Cygwin configuration for multi-core:\n");
2824 fprintf(fd, " http://spinroot.com/spin/multicore/V5_Readme.html\n");
2825 fprintf(fd, "\n");
2826 }
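/*
 * Illustrative multi-core build and run (the model name and flag values
 * below are examples only, not taken from this verifier):
 *	spin -a model.pml
 *	cc -O2 -DNCORE=4 -DMEMLIM=2048 -o pan pan.c
 *	./pan
 */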
2827 #if NCORE>1 && defined(FULL_TRAIL)
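/*
 * With FULL_TRAIL each executed step is recorded as a Stack_Tree node in
 * shared memory: the process number and transition id plus a pointer to
 * the predecessor node, so that an error trail can be reconstructed by
 * walking the prv links back towards the root.
 */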
2828 typedef struct Stack_Tree {
2829 uchar pr; /* process that made transition */
2830 T_ID t_id; /* id of transition */
2831 volatile struct Stack_Tree *prv; /* backward link towards root */
2832 } Stack_Tree;
2833
2834 struct H_el *grab_shared(int);
2835 volatile Stack_Tree **stack_last; /* in shared memory */
2836 char *stack_cache = NULL; /* local */
2837 int nr_cached = 0; /* local */
2838
2839 #ifndef CACHE_NR
2840 #define CACHE_NR 1024
2841 #endif
2842
2843 volatile Stack_Tree *
2844 stack_prefetch(void)
2845 { volatile Stack_Tree *st;
2846
2847 if (nr_cached == 0)
2848 { stack_cache = (char *) grab_shared(CACHE_NR * sizeof(Stack_Tree));
2849 nr_cached = CACHE_NR;
2850 }
2851 st = (volatile Stack_Tree *) stack_cache;
2852 stack_cache += sizeof(Stack_Tree);
2853 nr_cached--;
2854 return st;
2855 }
2856
2857 void
2858 Push_Stack_Tree(short II, T_ID t_id)
2859 { volatile Stack_Tree *st;
2860
2861 st = (volatile Stack_Tree *) stack_prefetch();
2862 st->pr = II;
2863 st->t_id = t_id;
2864 st->prv = (Stack_Tree *) stack_last[core_id];
2865 stack_last[core_id] = st;
2866 }
2867
2868 void
2869 Pop_Stack_Tree(void)
2870 { volatile Stack_Tree *cf = stack_last[core_id];
2871
2872 if (cf)
2873 { stack_last[core_id] = cf->prv;
2874 } else if (nr_handoffs * z_handoff + depth > 0)
2875 { printf("cpu%d: error pop_stack_tree (depth %d)\n",
2876 core_id, depth);
2877 }
2878 }
2879 #endif
2880
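/*
 * Locking: sh_lock[] holds one test-and-set spinlock per critical-section
 * id in shared memory; iamin[] is a per-process (non-shared) re-entrancy
 * counter, so nested e_critical/x_critical pairs on the same id do not
 * deadlock. While spinning, the caller keeps its heartbeat alive and
 * checks whether another cpu has crashed.
 */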
2881 void
2882 e_critical(int which)
2883 { double cnt_start;
2884
2885 if (readtrail || iamin[which] > 0)
2886 { if (!readtrail && verbose)
2887 { printf("cpu%d: Double Lock on %d (now %d)\n",
2888 core_id, which, iamin[which]+1);
2889 fflush(stdout);
2890 }
2891 iamin[which]++; /* local variable */
2892 return;
2893 }
2894
2895 cnt_start = lock_wait;
2896
2897 while (sh_lock != NULL) /* as long as we have shared memory */
2898 { int r = tas(&sh_lock[which]);
2899 if (r == 0)
2900 { iamin[which] = 1;
2901 return; /* locked */
2902 }
2903
2904 lock_wait++;
2905 #ifndef NGQ
2906 if (which < 3) { glock_wait[which]++; }
2907 #else
2908 if (which == 0) { glock_wait[which]++; }
2909 #endif
2910 iam_alive();
2911
2912 if (lock_wait - cnt_start > TenSeconds)
2913 { printf("cpu%d: lock timeout on %d\n", core_id, which);
2914 cnt_start = lock_wait;
2915 if (someone_crashed(1))
2916 { sudden_stop("lock timeout");
2917 pan_exit(1);
2918 } } }
2919 }
2920
2921 void
2922 x_critical(int which)
2923 {
2924 if (iamin[which] != 1)
2925 { if (iamin[which] > 1)
2926 { iamin[which]--; /* this is thread-local - no races on this one */
2927 if (!readtrail && verbose)
2928 { printf("cpu%d: Partial Unlock on %d (%d more needed)\n",
2929 core_id, which, iamin[which]);
2930 fflush(stdout);
2931 }
2932 return;
2933 } else /* iamin[which] <= 0 */
2934 { if (!readtrail)
2935 { printf("cpu%d: Invalid Unlock iamin[%d] = %d\n",
2936 core_id, which, iamin[which]);
2937 fflush(stdout);
2938 }
2939 return;
2940 } }
2941
2942 if (sh_lock != NULL)
2943 { iamin[which] = 0;
2944 sh_lock[which] = 0; /* unlock */
2945 }
2946 }
2947
2948 void
2949 #if defined(WIN32) || defined(WIN64)
2950 start_proxy(char *s, DWORD r_pid)
2951 #else
2952 start_proxy(char *s, int r_pid)
2953 #endif
2954 { char Q_arg[16], Z_arg[16], Y_arg[16];
2955 char *args[32], *ptr;
2956 int argcnt = 0;
2957
2958 sprintf(Q_arg, "-Q%d", getpid());
2959 sprintf(Y_arg, "-Y%d", r_pid);
2960 sprintf(Z_arg, "-Z%d", proxy_pid /* core_id */);
2961
2962 args[argcnt++] = "proxy";
2963 args[argcnt++] = s; /* -r or -s */
2964 args[argcnt++] = Q_arg;
2965 args[argcnt++] = Z_arg;
2966 args[argcnt++] = Y_arg;
2967
2968 if (strlen(o_cmdline) > 0)
2969 { ptr = o_cmdline; /* assume args separated by spaces */
2970 do { args[argcnt++] = ptr++;
2971 if ((ptr = strchr(ptr, ' ')) != NULL)
2972 { while (*ptr == ' ')
2973 { *ptr++ = '\0';
2974 }
2975 } else
2976 { break;
2977 }
2978 } while (argcnt < 31);
2979 }
2980 args[argcnt] = NULL;
2981 #if defined(WIN32) || defined(WIN64)
2982 execvp("pan_proxy", args); /* no return */
2983 #else
2984 execvp("./pan_proxy", args); /* no return */
2985 #endif
2986 Uerror("pan_proxy exec failed");
2987 }
2988 /*** end of common code fragment ***/
2989
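/*
 * init_shm() creates the NR_QS shared work queues -- one local queue per
 * cpu, with the global queue as the last entry -- using ftok/shmget/shmat.
 * The root process creates and initializes all segments before fork(),
 * so the workers simply inherit the attachments.
 */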
2990 #if !defined(WIN32) && !defined(WIN64)
2991 void
2992 init_shm(void) /* initialize shared work-queues - linux/cygwin */
2993 { key_t key[NR_QS];
2994 int n, m;
2995 int must_exit = 0;
2996
2997 if (core_id == 0 && verbose)
2998 { printf("cpu0: step 3: allocate shared workqueues %g MB\n",
2999 ((double) NCORE * LWQ_SIZE + GWQ_SIZE) / (1048576.) );
3000 }
3001 for (m = 0; m < NR_QS; m++) /* last q is the global q */
3002 { double qsize = (m == NCORE) ? GWQ_SIZE : LWQ_SIZE;
3003 key[m] = ftok(PanSource, m+1);
3004 if (key[m] == -1)
3005 { perror("ftok shared queues"); must_exit = 1; break;
3006 }
3007
3008 if (core_id == 0) /* root creates */
3009 { /* check for stale copy */
3010 shmid[m] = shmget(key[m], (size_t) qsize, 0600);
3011 if (shmid[m] != -1) /* yes there is one; remove it */
3012 { printf("cpu0: removing stale q%d, status: %d\n",
3013 m, shmctl(shmid[m], IPC_RMID, NULL));
3014 }
3015 shmid[m] = shmget(key[m], (size_t) qsize, 0600|IPC_CREAT|IPC_EXCL);
3016 memcnt += qsize;
3017 } else /* workers attach */
3018 { shmid[m] = shmget(key[m], (size_t) qsize, 0600);
3019 /* never called, since we create shm *before* we fork */
3020 }
3021 if (shmid[m] == -1)
3022 { perror("shmget shared queues"); must_exit = 1; break;
3023 }
3024
3025 shared_mem[m] = (char *) shmat(shmid[m], (void *) 0, 0); /* attach */
3026 if (shared_mem[m] == (char *) -1)
3027 { fprintf(stderr, "error: cannot attach shared wq %d (%d Mb)\n",
3028 m+1, (int) (qsize/(1048576.)));
3029 perror("shmat shared queues"); must_exit = 1; break;
3030 }
3031
3032 m_workq[m] = (SM_frame *) shared_mem[m];
3033 if (core_id == 0)
3034 { int nframes = (m == NCORE) ? GN_FRAMES : LN_FRAMES;
3035 for (n = 0; n < nframes; n++)
3036 { m_workq[m][n].m_vsize = 0;
3037 m_workq[m][n].m_boq = 0;
3038 } } }
3039
3040 if (must_exit)
3041 { rm_shared_segments();
3042 fprintf(stderr, "pan: check './pan --' for usage details\n");
3043 pan_exit(1); /* calls cleanup_shm */
3044 }
3045 }
3046
3047 static uchar *
3048 prep_shmid_S(size_t n) /* either sets SS or H_tab, linux/cygwin */
3049 { char *rval;
3050 #ifndef SEP_STATE
3051 key_t key;
3052
3053 if (verbose && core_id == 0)
3054 {
3055 #ifdef BITSTATE
3056 printf("cpu0: step 1: allocate shared bitstate %g Mb\n",
3057 (double) n / (1048576.));
3058 #else
3059 printf("cpu0: step 1: allocate shared hashtable %g Mb\n",
3060 (double) n / (1048576.));
3061 #endif
3062 }
3063 #ifdef MEMLIM
3064 if (memcnt + (double) n > memlim)
3065 { printf("cpu0: S %8g + %d Kb exceeds memory limit of %8g Mb\n",
3066 memcnt/1024., (int) (n/1024), memlim/(1048576.));
3067 printf("cpu0: insufficient memory -- aborting\n");
3068 exit(1);
3069 }
3070 #endif
3071
3072 key = ftok(PanSource, NCORE+2); /* different from queues */
3073 if (key == -1)
3074 { perror("ftok shared bitstate or hashtable");
3075 fprintf(stderr, "pan: check './pan --' for usage details\n");
3076 pan_exit(1);
3077 }
3078
3079 if (core_id == 0) /* root */
3080 { shmid_S = shmget(key, n, 0600);
3081 if (shmid_S != -1)
3082 { printf("cpu0: removing stale segment, status: %d\n",
3083 shmctl(shmid_S, IPC_RMID, NULL));
3084 }
3085 shmid_S = shmget(key, n, 0600 | IPC_CREAT | IPC_EXCL);
3086 memcnt += (double) n;
3087 } else /* worker */
3088 { shmid_S = shmget(key, n, 0600);
3089 }
3090 if (shmid_S == -1)
3091 { perror("shmget shared bitstate or hashtable too large?");
3092 fprintf(stderr, "pan: check './pan --' for usage details\n");
3093 pan_exit(1);
3094 }
3095
3096 rval = (char *) shmat(shmid_S, (void *) 0, 0); /* attach */
3097 if ((char *) rval == (char *) -1)
3098 { perror("shmat shared bitstate or hashtable");
3099 fprintf(stderr, "pan: check './pan --' for usage details\n");
3100 pan_exit(1);
3101 }
3102 #else
3103 rval = (char *) emalloc(n);
3104 #endif
3105 return (uchar *) rval;
3106 }
3107
3108 #define TRY_AGAIN 1
3109 #define NOT_AGAIN 0
3110
3111 static char shm_prep_result;
3112
3113 static uchar *
3114 prep_state_mem(size_t n) /* sets memory arena for states linux/cygwin */
3115 { char *rval;
3116 key_t key;
3117 static int cnt = 3; /* start larger than earlier ftok calls */
3118
3119 shm_prep_result = NOT_AGAIN; /* default */
3120 if (verbose && core_id == 0)
3121 { printf("cpu0: step 2+: pre-allocate memory arena %d of %6.2g Mb\n",
3122 cnt-3, (double) n / (1048576.));
3123 }
3124 #ifdef MEMLIM
3125 if (memcnt + (double) n > memlim)
3126 { printf("cpu0: error: M %.0f + %.0f Kb exceeds memory limit of %.0f Mb\n",
3127 memcnt/1024.0, (double) n/1024.0, memlim/(1048576.));
3128 return NULL;
3129 }
3130 #endif
3131
3132 key = ftok(PanSource, NCORE+cnt); cnt++;
3133 if (key == -1)
3134 { perror("ftok T");
3135 printf("pan: check './pan --' for usage details\n");
3136 pan_exit(1);
3137 }
3138
3139 if (core_id == 0)
3140 { shmid_M = shmget(key, n, 0600);
3141 if (shmid_M != -1)
3142 { printf("cpu0: removing stale memory segment %d, status: %d\n",
3143 cnt-3, shmctl(shmid_M, IPC_RMID, NULL));
3144 }
3145 shmid_M = shmget(key, n, 0600 | IPC_CREAT | IPC_EXCL);
3146 /* memcnt += (double) n; -- only amount actually used is counted */
3147 } else
3148 { shmid_M = shmget(key, n, 0600);
3149
3150 }
3151 if (shmid_M == -1)
3152 { if (verbose)
3153 { printf("error: failed to get pool of shared memory %d of %.0f Mb\n",
3154 cnt-3, ((double)n)/(1048576.));
3155 perror("state mem");
3156 printf("pan: check './pan --' for usage details\n");
3157 }
3158 shm_prep_result = TRY_AGAIN;
3159 return NULL;
3160 }
3161 rval = (char *) shmat(shmid_M, (void *) 0, 0); /* attach */
3162
3163 if ((char *) rval == (char *) -1)
3164 { printf("cpu%d error: failed to attach pool of shared memory %d of %.0f Mb\n",
3165 core_id, cnt-3, ((double)n)/(1048576.));
3166 perror("state mem");
3167 return NULL;
3168 }
3169 return (uchar *) rval;
3170 }
3171
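/*
 * init_HT() reserves the shared-memory pools and lays out the shared
 * bookkeeping data at the start of the first pool: the termination flag,
 * the is_alive[] heartbeats, the sh_lock[] spinlocks, the queue counters
 * and, with FULL_TRAIL, the stack_last[] pointers. The remainder of the
 * pool(s) is handed out by grab_shared(); with -DSEP_STATE the hashtable
 * itself stays private to each cpu.
 */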
3172 void
3173 init_HT(unsigned long n) /* cygwin/linux version */
3174 { volatile char *x;
3175 double get_mem;
3176 #ifndef SEP_STATE
3177 volatile char *dc_mem_start;
3178 double need_mem, got_mem = 0.;
3179 #endif
3180
3181 #ifdef SEP_STATE
3182 #ifndef MEMLIM
3183 if (verbose)
3184 { printf("cpu0: steps 0,1: no -DMEMLIM set\n");
3185 }
3186 #else
3187 if (verbose)
3188 { printf("cpu0: steps 0,1: -DMEMLIM=%d Mb - (hashtable %g Mb + workqueues %g Mb)\n",
3189 MEMLIM, ((double)n/(1048576.)), (((double) NCORE * LWQ_SIZE) + GWQ_SIZE) /(1048576.) );
3190 }
3191 #endif
3192 get_mem = NCORE * sizeof(double) + (1 + CS_NR) * sizeof(void *) + 4*sizeof(void *) + 2*sizeof(double);
3193 /* NCORE * is_alive + search_terminated + CS_NR * sh_lock + 6 gr vars */
3194 get_mem += 4 * NCORE * sizeof(void *); /* prfree, prfull, prcnt, prmax */
3195 #ifdef FULL_TRAIL
3196 get_mem += (NCORE) * sizeof(Stack_Tree *); /* NCORE * stack_last */
3197 #endif
3198 x = (volatile char *) prep_state_mem((size_t) get_mem); /* work queues and basic structs */
3199 shmid_X = (long) x;
3200 if (x == NULL)
3201 { printf("cpu0: could not allocate shared memory, see ./pan --\n");
3202 exit(1);
3203 }
3204 search_terminated = (volatile unsigned int *) x; /* comes first */
3205 x += sizeof(void *); /* maintain alignment */
3206
3207 is_alive = (volatile double *) x;
3208 x += NCORE * sizeof(double);
3209
3210 sh_lock = (volatile int *) x;
3211 x += CS_NR * sizeof(void *);
3212
3213 grfree = (volatile int *) x;
3214 x += sizeof(void *);
3215 grfull = (volatile int *) x;
3216 x += sizeof(void *);
3217 grcnt = (volatile int *) x;
3218 x += sizeof(void *);
3219 grmax = (volatile int *) x;
3220 x += sizeof(void *);
3221 prfree = (volatile int *) x;
3222 x += NCORE * sizeof(void *);
3223 prfull = (volatile int *) x;
3224 x += NCORE * sizeof(void *);
3225 prcnt = (volatile int *) x;
3226 x += NCORE * sizeof(void *);
3227 prmax = (volatile int *) x;
3228 x += NCORE * sizeof(void *);
3229 gr_readmiss = (volatile double *) x;
3230 x += sizeof(double);
3231 gr_writemiss = (volatile double *) x;
3232 x += sizeof(double);
3233
3234 #ifdef FULL_TRAIL
3235 stack_last = (volatile Stack_Tree **) x;
3236 x += NCORE * sizeof(Stack_Tree *);
3237 #endif
3238
3239 #ifndef BITSTATE
3240 H_tab = (struct H_el **) emalloc(n);
3241 #endif
3242 #else
3243 #ifndef MEMLIM
3244 #warning MEMLIM not set
3245 #define MEMLIM (2048)
3246 #endif
3247
3248 if (core_id == 0 && verbose)
3249 { printf("cpu0: step 0: -DMEMLIM=%d Mb minus hashtable+workqs (%g + %g Mb) leaves %g Mb\n",
3250 MEMLIM, ((double)n/(1048576.)), (NCORE * LWQ_SIZE + GWQ_SIZE)/(1048576.),
3251 (memlim - memcnt - (double) n - (NCORE * LWQ_SIZE + GWQ_SIZE))/(1048576.));
3252 }
3253 #ifndef BITSTATE
3254 H_tab = (struct H_el **) prep_shmid_S((size_t) n); /* hash_table */
3255 #endif
3256 need_mem = memlim - memcnt - ((double) NCORE * LWQ_SIZE) - GWQ_SIZE;
3257 if (need_mem <= 0.)
3258 { Uerror("internal error -- shared state memory");
3259 }
3260
3261 if (core_id == 0 && verbose)
3262 { printf("cpu0: step 2: pre-allocate shared state memory %g Mb\n",
3263 need_mem/(1048576.));
3264 }
3265 #ifdef SEP_HEAP
3266 SEG_SIZE = need_mem / NCORE;
3267 if (verbose && core_id == 0)
3268 { printf("cpu0: setting segsize to %6g MB\n",
3269 SEG_SIZE/(1048576.));
3270 }
3271 #if defined(CYGWIN) || defined(__CYGWIN__)
3272 if (SEG_SIZE > 512.*1024.*1024.)
3273 { printf("warning: reducing SEG_SIZE of %g MB to 512MB (exceeds max for Cygwin)\n",
3274 SEG_SIZE/(1024.*1024.));
3275 SEG_SIZE = 512.*1024.*1024.;
3276 }
3277 #endif
3278 #endif
3279 mem_reserved = need_mem;
3280 while (need_mem > 1024.)
3281 { get_mem = need_mem;
3282 shm_more:
3283 if (get_mem > (double) SEG_SIZE)
3284 { get_mem = (double) SEG_SIZE;
3285 }
3286 if (get_mem <= 0.0) break;
3287
3288 /* for allocating states: */
3289 x = dc_mem_start = (volatile char *) prep_state_mem((size_t) get_mem);
3290 if (x == NULL)
3291 { if (shm_prep_result == NOT_AGAIN
3292 || first_pool != NULL
3293 || SEG_SIZE < (16. * 1048576.))
3294 { break;
3295 }
3296 SEG_SIZE /= 2.;
3297 if (verbose)
3298 { printf("pan: lowered segsize to 0.000000\n", SEG_SIZE);
3299 }
3300 if (SEG_SIZE >= 1024.)
3301 { goto shm_more;
3302 }
3303 break;
3304 }
3305
3306 need_mem -= get_mem;
3307 got_mem += get_mem;
3308 if (first_pool == NULL)
3309 { search_terminated = (volatile unsigned int *) x; /* comes first */
3310 x += sizeof(void *); /* maintain alignment */
3311
3312 is_alive = (volatile double *) x;
3313 x += NCORE * sizeof(double);
3314
3315 sh_lock = (volatile int *) x;
3316 x += CS_NR * sizeof(void *);
3317
3318 grfree = (volatile int *) x;
3319 x += sizeof(void *);
3320 grfull = (volatile int *) x;
3321 x += sizeof(void *);
3322 grcnt = (volatile int *) x;
3323 x += sizeof(void *);
3324 grmax = (volatile int *) x;
3325 x += sizeof(void *);
3326 prfree = (volatile int *) x;
3327 x += NCORE * sizeof(void *);
3328 prfull = (volatile int *) x;
3329 x += NCORE * sizeof(void *);
3330 prcnt = (volatile int *) x;
3331 x += NCORE * sizeof(void *);
3332 prmax = (volatile int *) x;
3333 x += NCORE * sizeof(void *);
3334 gr_readmiss = (volatile double *) x;
3335 x += sizeof(double);
3336 gr_writemiss = (volatile double *) x;
3337 x += sizeof(double);
3338 #ifdef FULL_TRAIL
3339 stack_last = (volatile Stack_Tree **) x;
3340 x += NCORE * sizeof(Stack_Tree *);
3341 #endif
3342 if (((long)x)&(sizeof(void *)-1)) /* 64-bit word alignment */
3343 { x += sizeof(void *)-(((long)x)&(sizeof(void *)-1));
3344 }
3345
3346 #ifdef COLLAPSE
3347 ncomps = (unsigned long *) x;
3348 x += (256+2) * sizeof(unsigned long);
3349 #endif
3350 }
3351
3352 dc_shared = (sh_Allocater *) x; /* must be in shared memory */
3353 x += sizeof(sh_Allocater);
3354
3355 if (core_id == 0) /* root only */
3356 { dc_shared->dc_id = shmid_M;
3357 dc_shared->dc_start = dc_mem_start;
3358 dc_shared->dc_arena = x;
3359 dc_shared->pattern = 1234567; /* protection */
3360 dc_shared->dc_size = (long) get_mem - (long) (x - dc_mem_start);
3361 dc_shared->nxt = (long) 0;
3362
3363 if (last_pool == NULL)
3364 { first_pool = last_pool = dc_shared;
3365 } else
3366 { last_pool->nxt = dc_shared;
3367 last_pool = dc_shared;
3368 }
3369 } else if (first_pool == NULL)
3370 { first_pool = dc_shared;
3371 } }
3372
3373 if (need_mem > 1024.)
3374 { printf("cpu0: could allocate only %g Mb of shared memory (wanted %g more)\n",
3375 got_mem/(1048576.), need_mem/(1048576.));
3376 }
3377
3378 if (!first_pool)
3379 { printf("cpu0: insufficient memory -- aborting.\n");
3380 exit(1);
3381 }
3382 /* we are still single-threaded at this point, with core_id 0 */
3383 dc_shared = first_pool;
3384
3385 #endif
3386 }
3387
3388 /* Test and Set assembly code */
3389
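/*
 * Each tas() variant atomically sets the lock word to a nonzero value and
 * returns its previous contents: a return value of 0 means the lock was
 * free and is now held by the caller.
 */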
3390 #if defined(i386) || defined(__i386__) || defined(__x86_64__)
3391 int
3392 tas(volatile int *s) /* tested */
3393 { int r;
3394 __asm__ __volatile__(
3395 "xchgl %0, %1 \n\t"
3396 : "=r"(r), "=m"(*s)
3397 : "0"(1), "m"(*s)
3398 : "memory");
3399
3400 return r;
3401 }
3402 #elif defined(__arm__)
3403 int
3404 tas(volatile int *s) /* not tested */
3405 { int r = 1;
3406 __asm__ __volatile__(
3407 "swpb %0, %0, [%3] \n"
3408 : "=r"(r), "=m"(*s)
3409 : "0"(r), "r"(s));
3410
3411 return r;
3412 }
3413 #elif defined(sparc) || defined(__sparc__)
3414 int
3415 tas(volatile int *s) /* not tested */
3416 { int r = 1;
3417 __asm__ __volatile__(
3418 " ldstub [%2], %0 \n"
3419 : "=r"(r), "=m"(*s)
3420 : "r"(s));
3421
3422 return r;
3423 }
3424 #elif defined(ia64) || defined(__ia64__)
3425 /* Intel Itanium */
3426 int
3427 tas(volatile int *s) /* tested */
3428 { long int r;
3429 __asm__ __volatile__(
3430 " xchg4 %0=%1,%2 \n"
3431 : "=r"(r), "+m"(*s)
3432 : "r"(1)
3433 : "memory");
3434 return (int) r;
3435 }
3436 #else
3437 #error missing definition of test and set operation for this platform
3438 #endif
3439
3440 void
3441 cleanup_shm(int val)
3442 { volatile sh_Allocater *nxt_pool;
3443 unsigned long cnt = 0;
3444 int m;
3445
3446 if (nibis != 0)
3447 { printf("cpu%d: Redundant call to cleanup_shm(%d)\n", core_id, val);
3448 return;
3449 } else
3450 { nibis = 1;
3451 }
3452 if (search_terminated != NULL)
3453 { *search_terminated |= 16; /* cleanup_shm */
3454 }
3455
3456 for (m = 0; m < NR_QS; m++)
3457 { if (shmdt((void *) shared_mem[m]) != 0)
3458 { perror("shmdt detaching from shared queues");
3459 } }
3460
3461 #ifdef SEP_STATE
3462 if (shmdt((void *) shmid_X) != 0)
3463 { perror("shmdt detaching from shared state memory");
3464 }
3465 #else
3466 #ifdef BITSTATE
3467 if (SS > 0 && shmdt((void *) SS) != 0)
3468 { if (verbose)
3469 { perror("shmdt detaching from shared bitstate arena");
3470 } }
3471 #else
3472 if (core_id == 0)
3473 { /* before detaching: */
3474 for (nxt_pool = dc_shared; nxt_pool != NULL; nxt_pool = nxt_pool->nxt)
3475 { cnt += nxt_pool->dc_size;
3476 }
3477 if (verbose)
3478 { printf("cpu0: done, %ld Mb of shared state memory left\n",
3479 cnt / (long)(1048576));
3480 } }
3481
3482 if (shmdt((void *) H_tab) != 0)
3483 { perror("shmdt detaching from shared hashtable");
3484 }
3485
3486 for (last_pool = first_pool; last_pool != NULL; last_pool = nxt_pool)
3487 { nxt_pool = last_pool->nxt;
3488 if (shmdt((void *) last_pool->dc_start) != 0)
3489 { perror("shmdt detaching from shared state memory");
3490 } }
3491 first_pool = last_pool = NULL; /* precaution */
3492 #endif
3493 #endif
3494 /* detached from shared memory - so cannot use cpu_printf */
3495 if (verbose)
3496 { printf("cpu%d: done -- got %d states from queue\n",
3497 core_id, nstates_get);
3498 }
3499 }
3500
3501 extern void give_up(int);
3502 extern void Read_Queue(int);
3503
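/*
 * mem_get() is the multi-core entry point: after init_shm() the root
 * process fork()s the NCORE-1 workers (and, when configured, the
 * pan_proxy send/receive halves), cpu0 explores from the initial state,
 * and every cpu then enters Read_Queue() to process handed-off states.
 */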
3504 void
3505 mem_get(void)
3506 { SM_frame *f;
3507 int is_parent;
3508
3509 #if defined(MA) && !defined(SEP_STATE)
3510 #error MA without SEP_STATE is not supported with multi-core
3511 #endif
3512 #ifdef BFS
3513 #error BFS is not supported with multi-core
3514 #endif
3515 #ifdef SC
3516 #error SC is not supported with multi-core
3517 #endif
3518 init_shm(); /* we are single threaded when this starts */
3519
3520 if (core_id == 0 && verbose)
3521 { printf("cpu0: step 4: calling fork()\n");
3522 }
3523 fflush(stdout);
3524
3525 /* if NCORE > 1 the child or the parent should fork N-1 more times
3526 * the parent is the only process with core_id == 0 and is_parent > 0
3527 * the workers have is_parent = 0 and core_id = 1..NCORE-1
3528 */
3529 if (core_id == 0)
3530 { worker_pids[0] = getpid(); /* for completeness */
3531 while (++core_id < NCORE) /* first worker sees core_id = 1 */
3532 { is_parent = fork();
3533 if (is_parent == -1)
3534 { Uerror("fork failed");
3535 }
3536 if (is_parent == 0) /* this is a worker process */
3537 { if (proxy_pid == core_id) /* always non-zero */
3538 { start_proxy("-r", 0); /* no return */
3539 }
3540 goto adapt; /* root process continues spawning */
3541 }
3542 worker_pids[core_id] = is_parent;
3543 }
3544 /* note that core_id is now NCORE */
3545 if (proxy_pid > 0 && proxy_pid < NCORE)
3546 { proxy_pid_snd = fork();
3547 if (proxy_pid_snd == -1)
3548 { Uerror("proxy fork failed");
3549 }
3550 if (proxy_pid_snd == 0)
3551 { start_proxy("-s", worker_pids[proxy_pid]); /* no return */
3552 } } /* else continue */
3553 if (is_parent > 0)
3554 { core_id = 0; /* reset core_id for root process */
3555 }
3556 } else /* worker */
3557 { static char db0[16]; /* good for up to 10^6 cores */
3558 static char db1[16];
3559 adapt: tprefix = db0; sprefix = db1;
3560 sprintf(tprefix, "cpu%d_trail", core_id);
3561 sprintf(sprefix, "cpu%d_rst", core_id);
3562 memcnt = 0; /* count only additionally allocated memory */
3563 }
3564 signal(SIGINT, give_up);
3565
3566 if (proxy_pid == 0) /* not in a cluster setup, pan_proxy must attach */
3567 { rm_shared_segments(); /* mark all shared segments for removal on exit */
3568 }
3569 if (verbose)
3570 { cpu_printf("starting core_id %d -- pid %d\n", core_id, getpid());
3571 }
3572 #if defined(SEP_HEAP) && !defined(SEP_STATE)
3573 { int i;
3574 volatile sh_Allocater *ptr;
3575 ptr = first_pool;
3576 for (i = 0; i < NCORE && ptr != NULL; i++)
3577 { if (i == core_id)
3578 { my_heap = (char *) ptr->dc_arena;
3579 my_size = (long) ptr->dc_size;
3580 if (verbose)
3581 cpu_printf("local heap %ld MB\n", my_size/(1048576));
3582 break;
3583 }
3584 ptr = ptr->nxt; /* local */
3585 }
3586 if (my_heap == NULL)
3587 { printf("cpu%d: no local heap\n", core_id);
3588 pan_exit(1);
3589 } /* else */
3590 #if defined(CYGWIN) || defined(__CYGWIN__)
3591 ptr = first_pool;
3592 for (i = 0; i < NCORE && ptr != NULL; i++)
3593 { ptr = ptr->nxt; /* local */
3594 }
3595 dc_shared = ptr; /* any remainder */
3596 #else
3597 dc_shared = NULL; /* used all mem for local heaps */
3598 #endif
3599 }
3600 #endif
3601 if (core_id == 0 && !remote_party)
3602 { new_state(); /* cpu0 explores root */
3603 if (verbose)
3604 cpu_printf("done with 1st dfs, nstates %g (put %d states), read q\n",
3605 nstates, nstates_put);
3606 dfs_phase2 = 1;
3607 }
3608 Read_Queue(core_id); /* all cores */
3609
3610 if (verbose)
3611 { cpu_printf("put %6d states into queue -- got %6d\n",
3612 nstates_put, nstates_get);
3613 }
3614 if (proxy_pid != 0)
3615 { rm_shared_segments();
3616 }
3617 done = 1;
3618 wrapup();
3619 exit(0);
3620 }
3621
3622 #else
3623 int unpack_state(SM_frame *, int);
3624 #endif
3625
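/*
 * grab_shared() allocates zero-filled state memory: with -DSEP_HEAP it
 * first bump-allocates from the cpu's private heap without locking;
 * otherwise, or when that heap is exhausted, it takes GLOBAL_LOCK and
 * carves the request out of the current shared pool, moving on to the
 * next pool when the current one runs out.
 */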
3626 struct H_el *
3627 grab_shared(int n)
3628 {
3629 #ifndef SEP_STATE
3630 char *rval = (char *) 0;
3631
3632 if (n == 0)
3633 { printf("cpu%d: grab shared zero\n", core_id); fflush(stdout);
3634 return (struct H_el *) rval;
3635 } else if (n&(sizeof(void *)-1))
3636 { n += sizeof(void *)-(n&(sizeof(void *)-1)); /* alignment */
3637 }
3638
3639 #ifdef SEP_HEAP
3640 /* no locking */
3641 if (my_heap != NULL && my_size > n)
3642 { rval = my_heap;
3643 my_heap += n;
3644 my_size -= n;
3645 goto done;
3646 }
3647 #endif
3648
3649 if (!dc_shared)
3650 { sudden_stop("pan: out of memory");
3651 }
3652
3653 /* another lock is always already in effect when this is called */
3654 /* but not always the same lock -- i.e., on different parts of the hashtable */
3655 enter_critical(GLOBAL_LOCK); /* this must be independently mutex */
3656 #if defined(SEP_HEAP) && !defined(WIN32) && !defined(WIN64)
3657 { static int noted = 0;
3658 if (!noted)
3659 { noted = 1;
3660 printf("cpu%d: global heap has %ld bytes left, needed %d\n",
3661 core_id, dc_shared?dc_shared->dc_size:0, n);
3662 } }
3663 #endif
3664 #if 0
3665 if (dc_shared->pattern != 1234567)
3666 { leave_critical(GLOBAL_LOCK);
3667 Uerror("overrun -- memory corruption");
3668 }
3669 #endif
3670 if (dc_shared->dc_size < n)
3671 { if (verbose)
3672 { printf("Next Pool %g Mb + %d\n", memcnt/(1048576.), n);
3673 }
3674 if (dc_shared->nxt == NULL
3675 || dc_shared->nxt->dc_arena == NULL
3676 || dc_shared->nxt->dc_size < n)
3677 { printf("cpu%d: memcnt %g Mb + wanted %d bytes more\n",
3678 core_id, memcnt / (1048576.), n);
3679 leave_critical(GLOBAL_LOCK);
3680 sudden_stop("out of memory -- aborting");
3681 wrapup(); /* exits */
3682 } else
3683 { dc_shared = (sh_Allocater *) dc_shared->nxt;
3684 } }
3685
3686 rval = (char *) dc_shared->dc_arena;
3687 dc_shared->dc_arena += n;
3688 dc_shared->dc_size -= (long) n;
3689 #if 0
3690 if (VVERBOSE)
3691 printf("cpu%d grab shared (%d bytes) -- %ld left\n",
3692 core_id, n, dc_shared->dc_size);
3693 #endif
3694 leave_critical(GLOBAL_LOCK);
3695 done:
3696 memset(rval, 0, n);
3697 memcnt += (double) n;
3698
3699 return (struct H_el *) rval;
3700 #else
3701 return (struct H_el *) emalloc(n);
3702 #endif
3703 }
3704
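/*
 * Get_Full_Frame(n) busy-waits on the next slot of queue n until a writer
 * has filled it (m_vsize != 0). While waiting it may take a state from
 * the global queue instead, and it gives up after 'Delay' ticks so that
 * the caller can run termination detection.
 */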
3705 SM_frame *
3706 Get_Full_Frame(int n)
3707 { SM_frame *f;
3708 double cnt_start = frame_wait;
3709
3710 f = &m_workq[n][prfull[n]];
3711 while (f->m_vsize == 0) /* await full slot LOCK : full frame */
3712 { iam_alive();
3713 #ifndef NGQ
3714 #ifndef SAFETY
3715 if (!a_cycles || core_id != 0)
3716 #endif
3717 if (*grcnt > 0) /* accessed outside lock, but safe even if wrong */
3718 { enter_critical(GQ_RD); /* gq - read access */
3719 if (*grcnt > 0) /* could have changed */
3720 { f = &m_workq[NCORE][*grfull]; /* global q */
3721 if (f->m_vsize == 0)
3722 { /* writer is still filling the slot */
3723 (*gr_writemiss)++;
3724 f = &m_workq[n][prfull[n]]; /* reset */
3725 } else
3726 { *grfull = (*grfull+1) % (GN_FRAMES);
3727 enter_critical(GQ_WR);
3728 *grcnt = *grcnt - 1;
3729 leave_critical(GQ_WR);
3730 leave_critical(GQ_RD);
3731 return f;
3732 } }
3733 leave_critical(GQ_RD);
3734 }
3735 #endif
3736 if (frame_wait++ - cnt_start > Delay)
3737 { if (0)
3738 { cpu_printf("timeout on q%d -- %u -- query %d\n",
3739 n, f, query_in_progress);
3740 }
3741 return (SM_frame *) 0; /* timeout */
3742 } }
3743 iam_alive();
3744 if (VVERBOSE) cpu_printf("got frame from q%d\n", n);
3745 prfull[n] = (prfull[n] + 1) % (LN_FRAMES);
3746 enter_critical(QLOCK(n));
3747 prcnt[n]--; /* lock out increments */
3748 leave_critical(QLOCK(n));
3749 return f;
3750 }
3751
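/*
 * Get_Free_Frame(n) busy-waits for the next free slot of queue n
 * (m_vsize == 0). For local queues it advances prfree[n] and bumps the
 * slot count under the queue lock; for the global queue the slot index
 * was already reserved in lrfree by GlobalQ_HasRoom().
 */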
3752 SM_frame *
3753 Get_Free_Frame(int n)
3754 { SM_frame *f;
3755 double cnt_start = free_wait;
3756
3757 if (VVERBOSE) { cpu_printf("get free frame from q%d\n", n); }
3758
3759 if (n == NCORE) /* global q */
3760 { f = &(m_workq[n][lrfree]);
3761 } else
3762 { f = &(m_workq[n][prfree[n]]);
3763 }
3764 while (f->m_vsize != 0) /* await free slot LOCK : free slot */
3765 { iam_alive();
3766 if (free_wait++ - cnt_start > OneSecond)
3767 { if (verbose)
3768 { cpu_printf("timeout waiting for free slot q%d\n", n);
3769 }
3770 cnt_start = free_wait;
3771 if (someone_crashed(1))
3772 { printf("cpu%d: search terminated\n", core_id);
3773 sudden_stop("get free frame");
3774 pan_exit(1);
3775 } } }
3776 if (n != NCORE)
3777 { prfree[n] = (prfree[n] + 1) % (LN_FRAMES);
3778 enter_critical(QLOCK(n));
3779 prcnt[n]++; /* lock out decrements */
3780 if (prmax[n] < prcnt[n])
3781 { prmax[n] = prcnt[n];
3782 }
3783 leave_critical(QLOCK(n));
3784 }
3785 return f;
3786 }
3787 #ifndef NGQ
3788 int
3789 GlobalQ_HasRoom(void)
3790 { int rval = 0;
3791
3792 gq_tries++;
3793 if (*grcnt < GN_FRAMES) /* there seems to be room */
3794 { enter_critical(GQ_WR); /* gq write access */
3795 if (*grcnt < GN_FRAMES)
3796 { if (m_workq[NCORE][*grfree].m_vsize != 0)
3797 { /* can happen if reader is slow emptying slot */
3798 (*gr_readmiss)++;
3799 goto out; /* dont wait: release lock and return */
3800 }
3801 lrfree = *grfree; /* Get_Free_Frame uses lrfree in this mode */
3802 *grfree = (*grfree + 1) % GN_FRAMES;
3803 *grcnt = *grcnt + 1; /* count nr of slots filled -- no additional lock needed */
3804 if (*grmax < *grcnt) *grmax = *grcnt;
3805 leave_critical(GQ_WR); /* for short lock duration */
3806 gq_hasroom++;
3807 mem_put(NCORE); /* copy state into reserved slot */
3808 rval = 1; /* successful handoff */
3809 } else
3810 { gq_hasnoroom++;
3811 out: leave_critical(GQ_WR);
3812 } }
3813 return rval;
3814 }
3815 #endif
3816
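/*
 * unpack_state() copies a received SM_frame back into the live state:
 * it restores the state vector 'now', the Mask bits, the per-process and
 * per-channel offsets, resets the search depth and trail stubs, and with
 * FULL_TRAIL re-attaches the sender's Stack_Tree pointer.
 */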
3817 int
3818 unpack_state(SM_frame *f, int from_q)
3819 { int i, j;
3820 static struct H_el D_State;
3821
3822 if (f->m_vsize > 0)
3823 { boq = f->m_boq;
3824 if (boq > 256)
3825 { cpu_printf("saw control %d, expected state\n", boq);
3826 return 0;
3827 }
3828 vsize = f->m_vsize;
3829 correct:
3830 memcpy((uchar *) &now, (uchar *) f->m_now, vsize);
3831 for (i = j = 0; i < VMAX; i++, j = (j+1)%8)
3832 { Mask[i] = (f->m_Mask[i/8] & (1<<j)) ? 1 : 0;
3833 }
3834 if (now._nr_pr > 0)
3835 { memcpy((uchar *) proc_offset, (uchar *) f->m_p_offset, now._nr_pr * sizeof(OFFT));
3836 memcpy((uchar *) proc_skip, (uchar *) f->m_p_skip, now._nr_pr * sizeof(uchar));
3837 }
3838 if (now._nr_qs > 0)
3839 { memcpy((uchar *) q_offset, (uchar *) f->m_q_offset, now._nr_qs * sizeof(OFFT));
3840 memcpy((uchar *) q_skip, (uchar *) f->m_q_skip, now._nr_qs * sizeof(uchar));
3841 }
3842 #ifndef NOVSZ
3843 if (vsize != now._vsz)
3844 { cpu_printf("vsize %d != now._vsz %d (type %d) %d\n",
3845 vsize, now._vsz, f->m_boq, f->m_vsize);
3846 vsize = now._vsz;
3847 goto correct; /* rare event: a race */
3848 }
3849 #endif
3850 hmax = max(hmax, vsize);
3851
3852 if (f != &cur_Root)
3853 { memcpy((uchar *) &cur_Root, (uchar *) f, sizeof(SM_frame));
3854 }
3855
3856 if (((now._a_t) & 1) == 1) /* i.e., when starting nested DFS */
3857 { A_depth = depthfound = 0;
3858 memcpy((uchar *)&A_Root, (uchar *)&now, vsize);
3859 }
3860 nr_handoffs = f->nr_handoffs;
3861 } else
3862 { cpu_printf("pan: state empty\n");
3863 }
3864
3865 depth = 0;
3866 trpt = &trail[1];
3867 trpt->tau = f->m_tau;
3868 trpt->o_pm = f->m_o_pm;
3869
3870 (trpt-1)->ostate = &D_State; /* stub */
3871 trpt->ostate = &D_State;
3872
3873 #ifdef FULL_TRAIL
3874 if (upto > 0)
3875 { stack_last[core_id] = (Stack_Tree *) f->m_stack;
3876 }
3877 #if defined(VERBOSE)
3878 if (stack_last[core_id])
3879 { cpu_printf("%d: UNPACK -- SET m_stack %u (%d,%d)\n",
3880 depth, stack_last[core_id], stack_last[core_id]->pr,
3881 stack_last[core_id]->t_id);
3882 }
3883 #endif
3884 #endif
3885
3886 if (!trpt->o_t)
3887 { static Trans D_Trans;
3888 trpt->o_t = &D_Trans;
3889 }
3890
3891 #ifdef VERI
3892 if ((trpt->tau & 4) != 4)
3893 { trpt->tau |= 4; /* the claim moves first */
3894 cpu_printf("warning: trpt was not up to date\n");
3895 }
3896 #endif
3897
3898 for (i = 0; i < (int) now._nr_pr; i++)
3899 { P0 *ptr = (P0 *) pptr(i);
3900 #ifndef NP
3901 if (accpstate[ptr->_t][ptr->_p])
3902 { trpt->o_pm |= 2;
3903 }
3904 #else
3905 if (progstate[ptr->_t][ptr->_p])
3906 { trpt->o_pm |= 4;
3907 }
3908 #endif
3909 }
3910
3911 #ifdef EVENT_TRACE
3912 #ifndef NP
3913 if (accpstate[EVENT_TRACE][now._event])
3914 { trpt->o_pm |= 2;
3915 }
3916 #else
3917 if (progstate[EVENT_TRACE][now._event])
3918 { trpt->o_pm |= 4;
3919 }
3920 #endif
3921 #endif
3922
3923 #if defined(C_States) && (HAS_TRACK==1)
3924 /* restore state of tracked C objects */
3925 c_revert((uchar *) &(now.c_state[0]));
3926 #if (HAS_STACK==1)
3927 c_unstack((uchar *) f->m_c_stack); /* unmatched tracked data */
3928 #endif
3929 #endif
3930 return 1;
3931 }
3932
3933 void
3934 write_root(void) /* for trail file */
3935 { int fd;
3936
3937 if (iterative == 0 && Nr_Trails > 1)
3938 sprintf(fnm, "%s%d.%s", TrailFile, Nr_Trails-1, sprefix);
3939 else
3940 sprintf(fnm, "%s.%s", TrailFile, sprefix);
3941
3942 if (cur_Root.m_vsize == 0)
3943 { (void) unlink(fnm); /* remove possible old copy */
3944 return; /* its the default initial state */
3945 }
3946
3947 if ((fd = creat(fnm, TMODE)) < 0)
3948 { char *q;
3949 if ((q = strchr(TrailFile, '.')))
3950 { *q = '\0'; /* strip .pml */
3951 if (iterative == 0 && Nr_Trails-1 > 0)
3952 sprintf(fnm, "%s%d.%s", TrailFile, Nr_Trails-1, sprefix);
3953 else
3954 sprintf(fnm, "%s.%s", TrailFile, sprefix);
3955 *q = '.';
3956 fd = creat(fnm, TMODE);
3957 }
3958 if (fd < 0)
3959 { cpu_printf("pan: cannot create %s\n", fnm);
3960 perror("cause");
3961 return;
3962 } }
3963
3964 if (write(fd, &cur_Root, sizeof(SM_frame)) != sizeof(SM_frame))
3965 { cpu_printf("pan: error writing %s\n", fnm);
3966 } else
3967 { cpu_printf("pan: wrote %s\n", fnm);
3968 }
3969 close(fd);
3970 }
3971
3972 void
3973 set_root(void)
3974 { int fd;
3975 char *q;
3976 char MyFile[512];
3977 char MySuffix[16];
3978 char *ssuffix = "rst";
3979 int try_core = 1;
3980
3981 strcpy(MyFile, TrailFile);
3982 try_again:
3983 if (whichtrail > 0)
3984 { sprintf(fnm, "%s%d.%s", MyFile, whichtrail, ssuffix);
3985 fd = open(fnm, O_RDONLY, 0);
3986 if (fd < 0 && (q = strchr(MyFile, '.')))
3987 { *q = '\0'; /* strip .pml */
3988 sprintf(fnm, "%s%d.%s", MyFile, whichtrail, ssuffix);
3989 *q = '.';
3990 fd = open(fnm, O_RDONLY, 0);
3991 }
3992 } else
3993 { sprintf(fnm, "%s.%s", MyFile, ssuffix);
3994 fd = open(fnm, O_RDONLY, 0);
3995 if (fd < 0 && (q = strchr(MyFile, '.')))
3996 { *q = '\0'; /* strip .pml */
3997 sprintf(fnm, "%s.%s", MyFile, ssuffix);
3998 *q = '.';
3999 fd = open(fnm, O_RDONLY, 0);
4000 } }
4001
4002 if (fd < 0)
4003 { if (try_core < NCORE)
4004 { ssuffix = MySuffix;
4005 sprintf(ssuffix, "cpu%d_rst", try_core++);
4006 goto try_again;
4007 }
4008 cpu_printf("no file '%s.rst' or '%s' (not an error)\n", MyFile, fnm);
4009 } else
4010 { if (read(fd, &cur_Root, sizeof(SM_frame)) != sizeof(SM_frame))
4011 { cpu_printf("read error %s\n", fnm);
4012 close(fd);
4013 pan_exit(1);
4014 }
4015 close(fd);
4016 (void) unpack_state(&cur_Root, -2);
4017 #ifdef SEP_STATE
4018 cpu_printf("partial trail -- last few steps only\n");
4019 #endif
4020 cpu_printf("restored root from '%s'\n", fnm);
4021 printf("=====State:=====\n");
4022 { int i, j; P0 *z;
4023 for (i = 0; i < now._nr_pr; i++)
4024 { z = (P0 *)pptr(i);
4025 printf("proc %2d (%s) ", i, procname[z->_t]);
4026 for (j = 0; src_all[j].src; j++)
4027 if (src_all[j].tp == (int) z->_t)
4028 { printf(" line %3d \"%s\" ",
4029 src_all[j].src[z->_p], PanSource);
4030 break;
4031 }
4032 printf("(state %d)\n", z->_p);
4033 c_locals(i, z->_t);
4034 }
4035 c_globals();
4036 }
4037 printf("================\n");
4038 }
4039 }
4040
4041 #ifdef USE_DISK
4042 unsigned long dsk_written, dsk_drained;
4043 void mem_drain(void);
4044 #endif
4045
4046 void
4047 m_clear_frame(SM_frame *f)
4048 { int i, clr_sz = sizeof(SM_results);
4049
4050 for (i = 0; i <= _NP_; i++) /* all proctypes */
4051 { clr_sz += NrStates[i]*sizeof(uchar);
4052 }
4053 memset(f, 0, clr_sz);
4054 /* caution if sizeof(SM_results) > sizeof(SM_frame) */
4055 }
4056
4057 #define TargetQ_Full(n) (m_workq[n][prfree[n]].m_vsize != 0)
4058 #define TargetQ_NotFull(n) (m_workq[n][prfree[n]].m_vsize == 0)
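/*
 * A work-queue slot is free iff its m_vsize field is zero; mem_put() below
 * therefore fills in m_vsize last, so that a receiving cpu never sees a
 * partially written frame.
 */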
4059
4060 int
4061 AllQueuesEmpty(void)
4062 { int q;
4063 #ifndef NGQ
4064 if (*grcnt != 0)
4065 { return 0;
4066 }
4067 #endif
4068 for (q = 0; q < NCORE; q++)
4069 { if (prcnt[q] != 0)
4070 { return 0;
4071 } }
4072 return 1;
4073 }
4074
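/*
 * Read_Queue: main receive loop of each cpu.  Regular frames carry states
 * to be explored (new_state() is called on them).  Control frames implement
 * distributed termination detection: a QUERY frame circulates along the
 * ring of cpus and, if it returns to the root with the global queue
 * unchanged, the root sends a QUIT frame that collects the combined
 * statistics; a QUERY_F frame reports that some cpu was still busy.
 */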
4075 void
4076 Read_Queue(int q)
4077 { SM_frame *f, *of;
4078 int remember, target_q;
4079 SM_results *r;
4080 double patience = 0.0;
4081
4082 target_q = (q + 1) % NCORE;
4083
4084 for (;;)
4085 { f = Get_Full_Frame(q);
4086 if (!f) /* 1 second timeout -- and trigger for Query */
4087 { if (someone_crashed(2))
4088 { printf("cpu%d: search terminated [code %d]\n",
4089 core_id, search_terminated?*search_terminated:-1);
4090 sudden_stop("");
4091 pan_exit(1);
4092 }
4093 #ifdef TESTING
4094 /* to profile with cc -pg and gprof pan.exe -- set handoff depth beyond maxdepth */
4095 exit(0);
4096 #endif
4097 remember = *grfree;
4098 if (core_id == 0 /* root can initiate termination */
4099 && remote_party == 0 /* and only the original root */
4100 		  && query_in_progress == 0 	/* unless it's already in progress */
4101 && AllQueuesEmpty())
4102 { f = Get_Free_Frame(target_q);
4103 query_in_progress = 1; /* only root process can do this */
4104 if (!f) { Uerror("Fatal1: no free slot"); }
4105 f->m_boq = QUERY; /* initiate Query */
4106 if (verbose)
4107 { cpu_printf("snd QUERY to q%d (%d) into slot %d\n",
4108 target_q, nstates_get + 1, prfree[target_q]-1);
4109 }
4110 f->m_vsize = remember + 1;
4111 /* number will not change unless we receive more states */
4112 } else if (patience++ > OneHour) /* one hour watchdog timer */
4113 { cpu_printf("timeout -- giving up\n");
4114 sudden_stop("queue timeout");
4115 pan_exit(1);
4116 }
4117 if (0) cpu_printf("timed out -- try again\n");
4118 continue;
4119 }
4120 patience = 0.0; /* reset watchdog */
4121
4122 if (f->m_boq == QUERY)
4123 { if (verbose)
4124 { cpu_printf("got QUERY on q%d (%d <> %d) from slot %d\n",
4125 q, f->m_vsize, nstates_put + 1, prfull[q]-1);
4126 snapshot();
4127 }
4128 remember = f->m_vsize;
4129 f->m_vsize = 0; /* release slot */
4130
4131 if (core_id == 0 && remote_party == 0) /* original root cpu0 */
4132 { if (query_in_progress == 1 /* didn't send more states in the interim */
4133 && *grfree + 1 == remember) /* no action on global queue meanwhile */
4134 { if (verbose) cpu_printf("Termination detected\n");
4135 if (TargetQ_Full(target_q))
4136 { if (verbose)
4137 cpu_printf("warning: target q is full\n");
4138 }
4139 f = Get_Free_Frame(target_q);
4140 if (!f) { Uerror("Fatal2: no free slot"); }
4141 m_clear_frame(f);
4142 f->m_boq = QUIT; /* send final Quit, collect stats */
4143 f->m_vsize = 111; /* anything non-zero will do */
4144 if (verbose)
4145 cpu_printf("put QUIT on q%d\n", target_q);
4146 } else
4147 { if (verbose) cpu_printf("Stale Query\n");
4148 #ifdef USE_DISK
4149 mem_drain();
4150 #endif
4151 }
4152 query_in_progress = 0;
4153 } else
4154 { if (TargetQ_Full(target_q))
4155 { if (verbose)
4156 cpu_printf("warning: forward query - target q full\n");
4157 }
4158 f = Get_Free_Frame(target_q);
4159 if (verbose)
4160 cpu_printf("snd QUERY response to q%d (%d <> %d) in slot %d\n",
4161 target_q, remember, *grfree + 1, prfree[target_q]-1);
4162 if (!f) { Uerror("Fatal4: no free slot"); }
4163
4164 if (*grfree + 1 == remember) /* no action on global queue */
4165 { f->m_boq = QUERY; /* forward query, to root */
4166 f->m_vsize = remember;
4167 } else
4168 { f->m_boq = QUERY_F; /* no match -- busy */
4169 f->m_vsize = 112; /* anything non-zero */
4170 #ifdef USE_DISK
4171 if (dsk_written != dsk_drained)
4172 { mem_drain();
4173 }
4174 #endif
4175 } }
4176 continue;
4177 }
4178
4179 if (f->m_boq == QUERY_F)
4180 { if (verbose)
4181 { cpu_printf("got QUERY_F on q%d from slot %d\n", q, prfull[q]-1);
4182 }
4183 f->m_vsize = 0; /* release slot */
4184
4185 if (core_id == 0 && remote_party == 0) /* original root cpu0 */
4186 { if (verbose) cpu_printf("No Match on Query\n");
4187 query_in_progress = 0;
4188 } else
4189 { if (TargetQ_Full(target_q))
4190 { if (verbose) cpu_printf("warning: forwarding query_f, target queue full\n");
4191 }
4192 f = Get_Free_Frame(target_q);
4193 if (verbose) cpu_printf("forward QUERY_F to q%d into slot %d\n",
4194 target_q, prfree[target_q]-1);
4195 if (!f) { Uerror("Fatal5: no free slot"); }
4196 f->m_boq = QUERY_F; /* cannot terminate yet */
4197 f->m_vsize = 113; /* anything non-zero */
4198 }
4199 #ifdef USE_DISK
4200 if (dsk_written != dsk_drained)
4201 { mem_drain();
4202 }
4203 #endif
4204 continue;
4205 }
4206
4207 if (f->m_boq == QUIT)
4208 { if (0) cpu_printf("done -- local memcnt %g Mb\n", memcnt/(1048576.));
4209 retrieve_info((SM_results *) f); /* collect and combine stats */
4210 if (verbose)
4211 { cpu_printf("received Quit\n");
4212 snapshot();
4213 }
4214 f->m_vsize = 0; /* release incoming slot */
4215 if (core_id != 0)
4216 { f = Get_Free_Frame(target_q); /* new outgoing slot */
4217 if (!f) { Uerror("Fatal6: no free slot"); }
4218 m_clear_frame(f); /* start with zeroed stats */
4219 record_info((SM_results *) f);
4220 f->m_boq = QUIT; /* forward combined results */
4221 f->m_vsize = 114; /* anything non-zero */
4222 if (verbose>1)
4223 cpu_printf("fwd Results to q%d\n", target_q);
4224 }
4225 break; /* successful termination */
4226 }
4227
4228 /* else: 0<= boq <= 255, means STATE transfer */
4229 if (unpack_state(f, q) != 0)
4230 { nstates_get++;
4231 f->m_vsize = 0; /* release slot */
4232 if (VVERBOSE) cpu_printf("Got state\n");
4233
4234 if (search_terminated != NULL
4235 && *search_terminated == 0)
4236 { new_state(); /* explore successors */
4237 memset((uchar *) &cur_Root, 0, sizeof(SM_frame)); /* avoid confusion */
4238 } else
4239 { pan_exit(0);
4240 }
4241 } else
4242 { pan_exit(0);
4243 } }
4244 if (verbose) cpu_printf("done got %d put %d\n", nstates_get, nstates_put);
4245 sleep_report();
4246 }
4247
4248 void
4249 give_up(int unused_x)
4250 {
4251 if (search_terminated != NULL)
4252 { *search_terminated |= 32; /* give_up */
4253 }
4254 if (!writing_trail)
4255 { was_interrupted = 1;
4256 snapshot();
4257 cpu_printf("Give Up\n");
4258 sleep_report();
4259 pan_exit(1);
4260 } else /* we are already terminating */
4261 { cpu_printf("SIGINT\n");
4262 }
4263 }
4264
4265 void
4266 check_overkill(void)
4267 {
4268 vmax_seen = (vmax_seen + 7)/ 8;
4269 vmax_seen *= 8; /* round up to a multiple of 8 */
4270
4271 if (core_id == 0
4272 && !remote_party
4273 && nstates_put > 0
4274 && VMAX - vmax_seen > 8)
4275 {
4276 #ifdef BITSTATE
4277 printf("cpu0: max VMAX value seen in this run: ");
4278 #else
4279 printf("cpu0: recommend recompiling with ");
4280 #endif
4281 printf("-DVMAX=%d\n", vmax_seen);
4282 }
4283 }
4284
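/*
 * mem_put: hand the current state off to another cpu by copying the state
 * vector, the Mask bits, the process/queue offsets and (with FULL_TRAIL)
 * the stack-tree pointer into a free frame of work queue q.  The frame
 * becomes visible to the receiver only when m_vsize is set, which is
 * done last.
 */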
4285 void
4286 mem_put(int q) /* handoff state to other cpu, workq q */
4287 { SM_frame *f;
4288 int i, j;
4289
4290 if (vsize > VMAX)
4291 { vsize = (vsize + 7)/8; vsize *= 8; /* round up */
4292 printf("pan: recompile with -DVMAX=N with N >= %d\n", vsize);
4293 Uerror("aborting");
4294 }
4295 if (now._nr_pr > PMAX)
4296 { printf("pan: recompile with -DPMAX=N with N >= %d\n", now._nr_pr);
4297 Uerror("aborting");
4298 }
4299 if (now._nr_qs > QMAX)
4300 { printf("pan: recompile with -DQMAX=N with N >= %d\n", now._nr_qs);
4301 Uerror("aborting");
4302 }
4303 if (vsize > vmax_seen) vmax_seen = vsize;
4304 if (now._nr_pr > pmax_seen) pmax_seen = now._nr_pr;
4305 if (now._nr_qs > qmax_seen) qmax_seen = now._nr_qs;
4306
4307 f = Get_Free_Frame(q); /* not called in likely deadlock states */
4308 if (!f) { Uerror("Fatal3: no free slot"); }
4309
4310 if (VVERBOSE) cpu_printf("putting state into q%d\n", q);
4311
4312 memcpy((uchar *) f->m_now, (uchar *) &now, vsize);
4313 memset((uchar *) f->m_Mask, 0, (VMAX+7)/8 * sizeof(char));
4314 for (i = j = 0; i < VMAX; i++, j = (j+1)%8)
4315 { if (Mask[i])
4316 { f->m_Mask[i/8] |= (1<<j);
4317 } }
4318
4319 if (now._nr_pr > 0)
4320 { memcpy((uchar *) f->m_p_offset, (uchar *) proc_offset, now._nr_pr * sizeof(OFFT));
4321 memcpy((uchar *) f->m_p_skip, (uchar *) proc_skip, now._nr_pr * sizeof(uchar));
4322 }
4323 if (now._nr_qs > 0)
4324 { memcpy((uchar *) f->m_q_offset, (uchar *) q_offset, now._nr_qs * sizeof(OFFT));
4325 memcpy((uchar *) f->m_q_skip, (uchar *) q_skip, now._nr_qs * sizeof(uchar));
4326 }
4327 #if defined(C_States) && (HAS_TRACK==1) && (HAS_STACK==1)
4328 c_stack((uchar *) f->m_c_stack); /* save unmatched tracked data */
4329 #endif
4330 #ifdef FULL_TRAIL
4331 f->m_stack = stack_last[core_id];
4332 #endif
4333 f->nr_handoffs = nr_handoffs+1;
4334 f->m_tau = trpt->tau;
4335 f->m_o_pm = trpt->o_pm;
4336 f->m_boq = boq;
4337 f->m_vsize = vsize; /* must come last - now the other cpu can see it */
4338
4339 if (query_in_progress == 1)
4340 query_in_progress = 2; /* make sure we know, if a query makes the rounds */
4341 nstates_put++;
4342 }
4343
4344 #ifdef USE_DISK
4345 int Dsk_W_Nr, Dsk_R_Nr;
4346 int dsk_file = -1, dsk_read = -1;
4347 unsigned long dsk_written, dsk_drained;
4348 char dsk_name[512];
4349
4350 #ifndef BFS_DISK
4351 #if defined(WIN32) || defined(WIN64)
4352 #define RFLAGS (O_RDONLY|O_BINARY)
4353 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)
4354 #else
4355 #define RFLAGS (O_RDONLY)
4356 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC)
4357 #endif
4358 #endif
4359
4360 void
4361 dsk_stats(void)
4362 { int i;
4363
4364 if (dsk_written > 0)
4365 	{	cpu_printf("dsk_written %lu states in %d files\ncpu%d: dsk_drained %6lu states\n",
4366 dsk_written, Dsk_W_Nr, core_id, dsk_drained);
4367 close(dsk_read);
4368 close(dsk_file);
4369 for (i = 0; i < Dsk_W_Nr; i++)
4370 { sprintf(dsk_name, "Q%.3d_%.3d.tmp", i, core_id);
4371 unlink(dsk_name);
4372 } }
4373 }
4374
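/*
 * mem_drain: move states that were spilled to temporary disk files by
 * mem_file() back into the target work queue, for as long as that queue
 * has free slots.  A read file is closed and deleted once a full file
 * (MAX_DSK_FILE states) has been drained, and the next one is opened.
 */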
4375 void
4376 mem_drain(void)
4377 { SM_frame *f, g;
4378 int q = (core_id + 1) % NCORE; /* target q */
4379 int sz;
4380
4381 if (dsk_read < 0
4382 || dsk_written <= dsk_drained)
4383 { return;
4384 }
4385
4386 while (dsk_written > dsk_drained
4387 && TargetQ_NotFull(q))
4388 { f = Get_Free_Frame(q);
4389 if (!f) { Uerror("Fatal: unhandled condition"); }
4390
4391 if ((dsk_drained+1)%MAX_DSK_FILE == 0) /* 100K states max per file */
4392 { (void) close(dsk_read); /* close current read handle */
4393 sprintf(dsk_name, "Q%.3d_%.3d.tmp", Dsk_R_Nr++, core_id);
4394 (void) unlink(dsk_name); /* remove current file */
4395 sprintf(dsk_name, "Q%.3d_%.3d.tmp", Dsk_R_Nr, core_id);
4396 cpu_printf("reading %s\n", dsk_name);
4397 dsk_read = open(dsk_name, RFLAGS); /* open next file */
4398 if (dsk_read < 0)
4399 { Uerror("could not open dsk file");
4400 } }
4401 if (read(dsk_read, &g, sizeof(SM_frame)) != sizeof(SM_frame))
4402 { Uerror("bad dsk file read");
4403 }
4404 sz = g.m_vsize;
4405 g.m_vsize = 0;
4406 memcpy(f, &g, sizeof(SM_frame));
4407 f->m_vsize = sz; /* last */
4408
4409 dsk_drained++;
4410 }
4411 }
4412
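/*
 * mem_file: like mem_put(), but used when no queue slot is available --
 * the frame is appended to a temporary disk file (Q<nr>_<cpu>.tmp) instead,
 * to be fed back into the queues later by mem_drain().  A new file is
 * started every MAX_DSK_FILE states.
 */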
4413 void
4414 mem_file(void)
4415 { SM_frame f;
4416 int i, j, q = (core_id + 1) % NCORE; /* target q */
4417
4418 if (vsize > VMAX)
4419 { printf("pan: recompile with -DVMAX=N with N >= %d\n", vsize);
4420 Uerror("aborting");
4421 }
4422 if (now._nr_pr > PMAX)
4423 { printf("pan: recompile with -DPMAX=N with N >= %d\n", now._nr_pr);
4424 Uerror("aborting");
4425 }
4426 if (now._nr_qs > QMAX)
4427 { printf("pan: recompile with -DQMAX=N with N >= %d\n", now._nr_qs);
4428 Uerror("aborting");
4429 }
4430
4431 if (VVERBOSE) cpu_printf("filing state for q%d\n", q);
4432
4433 memcpy((uchar *) f.m_now, (uchar *) &now, vsize);
4434 memset((uchar *) f.m_Mask, 0, (VMAX+7)/8 * sizeof(char));
4435 for (i = j = 0; i < VMAX; i++, j = (j+1)%8)
4436 { if (Mask[i])
4437 { f.m_Mask[i/8] |= (1<<j);
4438 } }
4439
4440 if (now._nr_pr > 0)
4441 { memcpy((uchar *)f.m_p_offset, (uchar *)proc_offset, now._nr_pr*sizeof(OFFT));
4442 memcpy((uchar *)f.m_p_skip, (uchar *)proc_skip, now._nr_pr*sizeof(uchar));
4443 }
4444 if (now._nr_qs > 0)
4445 { memcpy((uchar *) f.m_q_offset, (uchar *) q_offset, now._nr_qs*sizeof(OFFT));
4446 memcpy((uchar *) f.m_q_skip, (uchar *) q_skip, now._nr_qs*sizeof(uchar));
4447 }
4448 #if defined(C_States) && (HAS_TRACK==1) && (HAS_STACK==1)
4449 c_stack((uchar *) f.m_c_stack); /* save unmatched tracked data */
4450 #endif
4451 #ifdef FULL_TRAIL
4452 f.m_stack = stack_last[core_id];
4453 #endif
4454 f.nr_handoffs = nr_handoffs+1;
4455 f.m_tau = trpt->tau;
4456 f.m_o_pm = trpt->o_pm;
4457 f.m_boq = boq;
4458 f.m_vsize = vsize;
4459
4460 if (query_in_progress == 1)
4461 { query_in_progress = 2;
4462 }
4463 if (dsk_file < 0)
4464 { sprintf(dsk_name, "Q%.3d_%.3d.tmp", Dsk_W_Nr, core_id);
4465 dsk_file = open(dsk_name, WFLAGS, 0644);
4466 dsk_read = open(dsk_name, RFLAGS);
4467 if (dsk_file < 0 || dsk_read < 0)
4468 { cpu_printf("File: <%s>\n", dsk_name);
4469 Uerror("cannot open diskfile");
4470 }
4471 Dsk_W_Nr++; /* nr of next file to open */
4472 cpu_printf("created temporary diskfile %s\n", dsk_name);
4473 } else if ((dsk_written+1)%MAX_DSK_FILE == 0)
4474 { close(dsk_file); /* close write handle */
4475 sprintf(dsk_name, "Q%.3d_%.3d.tmp", Dsk_W_Nr++, core_id);
4476 dsk_file = open(dsk_name, WFLAGS, 0644);
4477 if (dsk_file < 0)
4478 { cpu_printf("File: <%s>\n", dsk_name);
4479 Uerror("aborting: cannot open new diskfile");
4480 }
4481 cpu_printf("created temporary diskfile %s\n", dsk_name);
4482 }
4483 if (write(dsk_file, &f, sizeof(SM_frame)) != sizeof(SM_frame))
4484 { Uerror("aborting -- disk write failed (disk full?)");
4485 }
4486 nstates_put++;
4487 dsk_written++;
4488 }
4489 #endif
4490
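/*
 * mem_hand_off: called on every transition during the search; decides
 * whether the current state should be handed to the next cpu.  Handoff
 * only happens above the handoff depth (z_handoff) and never in the middle
 * of an atomic sequence or rendezvous (or, with VERI, on a stutter move).
 * Returns 1 if the state was handed off (to a local queue, the global
 * queue, or disk), 0 otherwise.
 */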
4491 int
4492 mem_hand_off(void)
4493 {
4494 if (search_terminated == NULL
4495 || *search_terminated != 0) /* not a full crash check */
4496 { pan_exit(0);
4497 }
4498 iam_alive(); /* on every transition of Down */
4499 #ifdef USE_DISK
4500 mem_drain(); /* maybe call this also on every Up */
4501 #endif
4502 if (depth > z_handoff /* above handoff limit */
4503 #ifndef SAFETY
4504 && !a_cycles /* not in liveness mode */
4505 #endif
4506 #if SYNC
4507 && boq == -1 /* not mid-rv */
4508 #endif
4509 #ifdef VERI
4510 && (trpt->tau&4) /* claim moves first */
4511 && !((trpt-1)->tau&128) /* not a stutter move */
4512 #endif
4513 && !(trpt->tau&8)) /* not an atomic move */
4514 { int q = (core_id + 1) % NCORE; /* circular handoff */
4515 #ifdef GENEROUS
4516 if (prcnt[q] < LN_FRAMES)
4517 #else
4518 if (TargetQ_NotFull(q)
4519 && (dfs_phase2 == 0 || prcnt[core_id] > 0))
4520 #endif
4521 { mem_put(q);
4522 return 1;
4523 }
4524 { int rval;
4525 #ifndef NGQ
4526 rval = GlobalQ_HasRoom();
4527 #else
4528 rval = 0;
4529 #endif
4530 #ifdef USE_DISK
4531 if (rval == 0)
4532 { void mem_file(void);
4533 mem_file();
4534 rval = 1;
4535 }
4536 #endif
4537 return rval;
4538 }
4539 }
4540 return 0; /* i.e., no handoff */
4541 }
4542
4543 void
4544 mem_put_acc(void) /* liveness mode */
4545 { int q = (core_id + 1) % NCORE;
4546
4547 if (search_terminated == NULL
4548 || *search_terminated != 0)
4549 { pan_exit(0);
4550 }
4551 #ifdef USE_DISK
4552 mem_drain();
4553 #endif
4554 /* some tortured use of preprocessing: */
4555 #if !defined(NGQ) || defined(USE_DISK)
4556 if (TargetQ_Full(q))
4557 {
4558 #endif
4559 #ifndef NGQ
4560 if (GlobalQ_HasRoom())
4561 { return;
4562 }
4563 #endif
4564 #ifdef USE_DISK
4565 mem_file();
4566 } else
4567 #else
4568 #if !defined(NGQ) || defined(USE_DISK)
4569 }
4570 #endif
4571 #endif
4572 { mem_put(q);
4573 }
4574 }
4575
4576 #if defined(WIN32) || defined(WIN64)
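/*
 * init_shm (WIN32/WIN64 version): create or open the NCORE local work
 * queues plus one global queue as named file mappings, attach them, and
 * (on the root cpu only) clear all frames.
 */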
4577 void
4578 init_shm(void) /* initialize shared work-queues */
4579 { char key[512];
4580 int n, m;
4581 int must_exit = 0;
4582
4583 if (core_id == 0 && verbose)
4584 { printf("cpu0: step 3: allocate shared work-queues %g Mb\n",
4585 ((double) NCORE * LWQ_SIZE + GWQ_SIZE) / (1048576.));
4586 }
4587 for (m = 0; m < NR_QS; m++) /* last q is global 1 */
4588 { double qsize = (m == NCORE) ? GWQ_SIZE : LWQ_SIZE;
4589 sprintf(key, "Global\\pan_%s_%.3d", PanSource, m);
4590 if (core_id == 0)
4591 { shmid[m] = CreateFileMapping(
4592 INVALID_HANDLE_VALUE, /* use paging file */
4593 NULL, /* default security */
4594 PAGE_READWRITE, /* access permissions */
4595 0, /* high-order 4 bytes */
4596 qsize, /* low-order bytes, size in bytes */
4597 key); /* name */
4598 } else /* worker nodes just open these segments */
4599 { shmid[m] = OpenFileMapping(
4600 FILE_MAP_ALL_ACCESS, /* read/write access */
4601 FALSE, /* children do not inherit handle */
4602 key);
4603 }
4604 if (shmid[m] == NULL)
4605 { fprintf(stderr, "cpu%d: could not create or open shared queues\n",
4606 core_id);
4607 must_exit = 1;
4608 break;
4609 }
4610 /* attach: */
4611 shared_mem[m] = (char *) MapViewOfFile(shmid[m], FILE_MAP_ALL_ACCESS, 0, 0, 0);
4612 if (shared_mem[m] == NULL)
4613 { fprintf(stderr, "cpu%d: cannot attach shared q%d (%d Mb)\n",
4614 core_id, m+1, (int) (qsize/(1048576.)));
4615 must_exit = 1;
4616 break;
4617 }
4618
4619 memcnt += qsize;
4620
4621 m_workq[m] = (SM_frame *) shared_mem[m];
4622 if (core_id == 0)
4623 { int nframes = (m == NCORE) ? GN_FRAMES : LN_FRAMES;
4624 for (n = 0; n < nframes; n++)
4625 { m_workq[m][n].m_vsize = 0;
4626 m_workq[m][n].m_boq = 0;
4627 } } }
4628
4629 if (must_exit)
4630 { fprintf(stderr, "pan: check './pan --' for usage details\n");
4631 pan_exit(1); /* calls cleanup_shm */
4632 }
4633 }
4634
4635 static uchar *
4636 prep_shmid_S(size_t n) /* either sets SS or H_tab, WIN32/WIN64 */
4637 { char *rval;
4638 #ifndef SEP_STATE
4639 char key[512];
4640
4641 if (verbose && core_id == 0)
4642 {
4643 #ifdef BITSTATE
4644 printf("cpu0: step 1: allocate shared bitstate %g Mb\n",
4645 (double) n / (1048576.));
4646 #else
4647 		printf("cpu0: step 1: allocate shared hashtable %g Mb\n",
4648 (double) n / (1048576.));
4649 #endif
4650 }
4651 #ifdef MEMLIM
4652 if (memcnt + (double) n > memlim)
4653 { printf("cpu%d: S %8g + %d Kb exceeds memory limit of %8g Mb\n",
4654 core_id, memcnt/1024., n/1024, memlim/(1048576.));
4655 printf("cpu%d: insufficient memory -- aborting\n", core_id);
4656 exit(1);
4657 }
4658 #endif
4659
4660 /* make key different from queues: */
4661 sprintf(key, "Global\\pan_%s_%.3d", PanSource, NCORE+2); /* different from qs */
4662
4663 if (core_id == 0) /* root */
4664 { shmid_S = CreateFileMapping(INVALID_HANDLE_VALUE, NULL,
4665 #ifdef WIN64
4666 PAGE_READWRITE, (n>>32), (n & 0xffffffff), key);
4667 #else
4668 PAGE_READWRITE, 0, n, key);
4669 #endif
4670 memcnt += (double) n;
4671 } else /* worker */
4672 { shmid_S = OpenFileMapping(FILE_MAP_ALL_ACCESS, FALSE, key);
4673 }
4674 if (shmid_S == NULL)
4675 {
4676 #ifdef BITSTATE
4677 fprintf(stderr, "cpu%d: cannot %s shared bitstate",
4678 core_id, core_id?"open":"create");
4679 #else
4680 fprintf(stderr, "cpu%d: cannot %s shared hashtable",
4681 core_id, core_id?"open":"create");
4682 #endif
4683 fprintf(stderr, "pan: check './pan --' for usage details\n");
4684 pan_exit(1);
4685 }
4686
4687 rval = (char *) MapViewOfFile(shmid_S, FILE_MAP_ALL_ACCESS, 0, 0, 0); /* attach */
4688 if ((char *) rval == NULL)
4689 { fprintf(stderr, "cpu%d: cannot attach shared bitstate or hashtable\n", core_id);
4690 fprintf(stderr, "pan: check './pan --' for usage details\n");
4691 pan_exit(1);
4692 }
4693 #else
4694 rval = (char *) emalloc(n);
4695 #endif
4696 return (uchar *) rval;
4697 }
4698
4699 static uchar *
4700 prep_state_mem(size_t n) /* WIN32/WIN64 sets memory arena for states */
4701 { char *rval;
4702 char key[512];
4703 static int cnt = 3; /* start larger than earlier ftok calls */
4704
4705 if (verbose && core_id == 0)
4706 { printf("cpu0: step 2+: pre-allocate memory arena %d of %g Mb\n",
4707 cnt-3, (double) n / (1048576.));
4708 }
4709 #ifdef MEMLIM
4710 if (memcnt + (double) n > memlim)
4711 { printf("cpu%d: error: M %.0f + %.0f exceeds memory limit of %.0f Kb\n",
4712 core_id, memcnt/1024.0, (double) n/1024.0, memlim/1024.0);
4713 return NULL;
4714 }
4715 #endif
4716
4717 sprintf(key, "Global\\pan_%s_%.3d", PanSource, NCORE+cnt); cnt++;
4718
4719 if (core_id == 0)
4720 { shmid_M = CreateFileMapping(INVALID_HANDLE_VALUE, NULL,
4721 #ifdef WIN64
4722 PAGE_READWRITE, (n>>32), (n & 0xffffffff), key);
4723 #else
4724 PAGE_READWRITE, 0, n, key);
4725 #endif
4726 } else
4727 { shmid_M = OpenFileMapping(FILE_MAP_ALL_ACCESS, FALSE, key);
4728 }
4729 if (shmid_M == NULL)
4730 { printf("cpu%d: failed to get pool of shared memory nr %d of size %d\n",
4731 core_id, cnt-3, n);
4732 printf("pan: check './pan --' for usage details\n");
4733 return NULL;
4734 }
4735 rval = (char *) MapViewOfFile(shmid_M, FILE_MAP_ALL_ACCESS, 0, 0, 0); /* attach */
4736
4737 if (rval == NULL)
4738 { printf("cpu%d: failed to attach pool of shared memory nr %d of size %d\n",
4739 core_id, cnt-3, n);
4740 return NULL;
4741 }
4742 return (uchar *) rval;
4743 }
4744
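/*
 * init_HT (WIN32/WIN64 version): lay out the shared memory region used by
 * all cpus: the search_terminated flag, the per-cpu liveness stamps, the
 * lock array, the global/local queue counters and, unless SEP_STATE is
 * defined, the shared hash table and the arena (dc_shared) from which
 * state memory is allocated.
 */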
4745 void
4746 init_HT(unsigned long n) /* WIN32/WIN64 version */
4747 { volatile char *x;
4748 double get_mem;
4749 #ifndef SEP_STATE
4750 char *dc_mem_start;
4751 #endif
4752 if (verbose) printf("cpu%d: initialization for Windows\n", core_id);
4753
4754 #ifdef SEP_STATE
4755 #ifndef MEMLIM
4756 if (verbose)
4757 { printf("cpu0: steps 0,1: no -DMEMLIM set\n");
4758 }
4759 #else
4760 if (verbose)
4761 printf("cpu0: steps 0,1: -DMEMLIM=%d Mb - (hashtable %g Mb + workqueues %g Mb)\n",
4762 MEMLIM, ((double)n/(1048576.)), ((double) NCORE * LWQ_SIZE + GWQ_SIZE)/(1048576.));
4763 #endif
4764 get_mem = NCORE * sizeof(double) + (1 + CS_NR) * sizeof(void *)+ 4*sizeof(void *) + 2*sizeof(double);
4765 /* NCORE * is_alive + search_terminated + CS_NR * sh_lock + 6 gr vars */
4766 get_mem += 4 * NCORE * sizeof(void *);
4767 #ifdef FULL_TRAIL
4768 get_mem += (NCORE) * sizeof(Stack_Tree *);
4769 /* NCORE * stack_last */
4770 #endif
4771 x = (volatile char *) prep_state_mem((size_t) get_mem);
4772 shmid_X = (void *) x;
4773 if (x == NULL)
4774 { printf("cpu0: could not allocate shared memory, see ./pan --\n");
4775 exit(1);
4776 }
4777 search_terminated = (volatile unsigned int *) x; /* comes first */
4778 x += sizeof(void *); /* maintain alignment */
4779
4780 is_alive = (volatile double *) x;
4781 x += NCORE * sizeof(double);
4782
4783 sh_lock = (volatile int *) x;
4784 x += CS_NR * sizeof(void *); /* allow 1 word per entry */
4785
4786 grfree = (volatile int *) x;
4787 x += sizeof(void *);
4788 grfull = (volatile int *) x;
4789 x += sizeof(void *);
4790 grcnt = (volatile int *) x;
4791 x += sizeof(void *);
4792 grmax = (volatile int *) x;
4793 x += sizeof(void *);
4794 prfree = (volatile int *) x;
4795 x += NCORE * sizeof(void *);
4796 prfull = (volatile int *) x;
4797 x += NCORE * sizeof(void *);
4798 prcnt = (volatile int *) x;
4799 x += NCORE * sizeof(void *);
4800 prmax = (volatile int *) x;
4801 x += NCORE * sizeof(void *);
4802 gr_readmiss = (volatile double *) x;
4803 x += sizeof(double);
4804 gr_writemiss = (volatile double *) x;
4805 x += sizeof(double);
4806
4807 #ifdef FULL_TRAIL
4808 stack_last = (volatile Stack_Tree **) x;
4809 x += NCORE * sizeof(Stack_Tree *);
4810 #endif
4811
4812 #ifndef BITSTATE
4813 H_tab = (struct H_el **) emalloc(n);
4814 #endif
4815 #else
4816 #ifndef MEMLIM
4817 #warning MEMLIM not set
4818 #define MEMLIM (2048)
4819 #endif
4820
4821 if (core_id == 0 && verbose)
4822 printf("cpu0: step 0: -DMEMLIM=%d Mb - (hashtable %g Mb + workqueues %g Mb) = %g Mb for state storage\n",
4823 MEMLIM, ((double)n/(1048576.)), ((double) NCORE * LWQ_SIZE + GWQ_SIZE)/(1048576.),
4824 (memlim - memcnt - (double) n - ((double) NCORE * LWQ_SIZE + GWQ_SIZE))/(1048576.));
4825 #ifndef BITSTATE
4826 H_tab = (struct H_el **) prep_shmid_S((size_t) n); /* hash_table */
4827 #endif
4828 get_mem = memlim - memcnt - ((double) NCORE) * LWQ_SIZE - GWQ_SIZE;
4829 if (get_mem <= 0)
4830 { Uerror("internal error -- shared state memory");
4831 }
4832
4833 if (core_id == 0 && verbose)
4834 { printf("cpu0: step 2: shared state memory %g Mb\n",
4835 get_mem/(1048576.));
4836 }
4837 x = dc_mem_start = (char *) prep_state_mem((size_t) get_mem); /* for states */
4838 if (x == NULL)
4839 { printf("cpu%d: insufficient memory -- aborting\n", core_id);
4840 exit(1);
4841 }
4842
4843 search_terminated = (volatile unsigned int *) x; /* comes first */
4844 x += sizeof(void *); /* maintain alignment */
4845
4846 is_alive = (volatile double *) x;
4847 x += NCORE * sizeof(double);
4848
4849 sh_lock = (volatile int *) x;
4850 x += CS_NR * sizeof(int);
4851
4852 grfree = (volatile int *) x;
4853 x += sizeof(void *);
4854 grfull = (volatile int *) x;
4855 x += sizeof(void *);
4856 grcnt = (volatile int *) x;
4857 x += sizeof(void *);
4858 grmax = (volatile int *) x;
4859 x += sizeof(void *);
4860 prfree = (volatile int *) x;
4861 x += NCORE * sizeof(void *);
4862 prfull = (volatile int *) x;
4863 x += NCORE * sizeof(void *);
4864 prcnt = (volatile int *) x;
4865 x += NCORE * sizeof(void *);
4866 prmax = (volatile int *) x;
4867 x += NCORE * sizeof(void *);
4868 gr_readmiss = (volatile double *) x;
4869 x += sizeof(double);
4870 gr_writemiss = (volatile double *) x;
4871 x += sizeof(double);
4872
4873 #ifdef FULL_TRAIL
4874 stack_last = (volatile Stack_Tree **) x;
4875 x += NCORE * sizeof(Stack_Tree *);
4876 #endif
4877 if (((long)x)&(sizeof(void *)-1)) /* word alignment */
4878 { x += sizeof(void *)-(((long)x)&(sizeof(void *)-1)); /* 64-bit align */
4879 }
4880
4881 #ifdef COLLAPSE
4882 ncomps = (unsigned long *) x;
4883 x += (256+2) * sizeof(unsigned long);
4884 #endif
4885
4886 dc_shared = (sh_Allocater *) x; /* in shared memory */
4887 x += sizeof(sh_Allocater);
4888
4889 if (core_id == 0) /* root only */
4890 { dc_shared->dc_id = shmid_M;
4891 dc_shared->dc_start = (void *) dc_mem_start;
4892 dc_shared->dc_arena = x;
4893 dc_shared->pattern = 1234567;
4894 dc_shared->dc_size = (long) get_mem - (long) (x - dc_mem_start);
4895 dc_shared->nxt = NULL;
4896 }
4897 #endif
4898 }
4899
4900 #if defined(WIN32) || defined(WIN64) || defined(__i386__) || defined(__x86_64__)
4901 extern BOOLEAN InterlockedBitTestAndSet(LONG volatile* Base, LONG Bit);
4902 int
4903 tas(volatile LONG *s)
4904 { return InterlockedBitTestAndSet(s, 1);
4905 }
4906 #else
4907 #error missing definition of test and set operation for this platform
4908 #endif
4909
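/*
 * cleanup_shm: detach and close all shared memory mappings (work queues,
 * hash table or bitstate array, state arena).  Meant to be called once;
 * a repeated call is reported and ignored.
 */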
4910 void
4911 cleanup_shm(int val)
4912 { int m;
4913 static int nibis = 0;
4914
4915 if (nibis != 0)
4916 { printf("cpu%d: Redundant call to cleanup_shm(%d)\n", core_id, val);
4917 return;
4918 } else
4919 { nibis = 1;
4920 }
4921 if (search_terminated != NULL)
4922 { *search_terminated |= 16; /* cleanup_shm */
4923 }
4924
4925 for (m = 0; m < NR_QS; m++)
4926 { if (shmid[m] != NULL)
4927 { UnmapViewOfFile((char *) shared_mem[m]);
4928 CloseHandle(shmid[m]);
4929 } }
4930 #ifdef SEP_STATE
4931 UnmapViewOfFile((void *) shmid_X);
4932 CloseHandle((void *) shmid_M);
4933 #else
4934 #ifdef BITSTATE
4935 if (shmid_S != NULL)
4936 { UnmapViewOfFile(SS);
4937 CloseHandle(shmid_S);
4938 }
4939 #else
4940 if (core_id == 0 && verbose)
4941 { printf("cpu0: done, %ld Mb of shared state memory left\n",
4942 dc_shared->dc_size / (long)(1048576));
4943 }
4944 if (shmid_S != NULL)
4945 { UnmapViewOfFile(H_tab);
4946 CloseHandle(shmid_S);
4947 }
4948 shmid_M = (void *) (dc_shared->dc_id);
4949 UnmapViewOfFile((char *) dc_shared->dc_start);
4950 CloseHandle(shmid_M);
4951 #endif
4952 #endif
4953 /* detached from shared memory - so cannot use cpu_printf */
4954 if (verbose)
4955 { printf("cpu%d: done -- got %d states from queue\n",
4956 core_id, nstates_get);
4957 }
4958 }
4959
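/*
 * mem_get (WIN32/WIN64 version): multi-core startup.  The root process
 * (core_id 0) creates the shared queues, spawns the NCORE-1 worker
 * processes (and the proxy, if any), runs the first dfs itself and then,
 * like every worker, enters Read_Queue() to process handed-off states.
 */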
4960 void
4961 mem_get(void)
4962 { SM_frame *f;
4963 int is_parent;
4964
4965 #if defined(MA) && !defined(SEP_STATE)
4966 #error MA requires SEP_STATE in multi-core mode
4967 #endif
4968 #ifdef BFS
4969 #error BFS is not supported in multi-core mode
4970 #endif
4971 #ifdef SC
4972 #error SC is not supported in multi-core mode
4973 #endif
4974 init_shm(); /* we are single threaded when this starts */
4975 signal(SIGINT, give_up); /* windows control-c interrupt */
4976
4977 if (core_id == 0 && verbose)
4978 { printf("cpu0: step 4: creating additional workers (proxy %d)\n",
4979 proxy_pid);
4980 }
4981 #if 0
4982 if NCORE > 1 the child or the parent should fork N-1 more times
4983 the parent is the only process with core_id == 0 and is_parent > 0
4984 the others (workers) have is_parent = 0 and core_id = 1..NCORE-1
4985 #endif
4986 if (core_id == 0) /* root starts up the workers */
4987 { worker_pids[0] = (DWORD) getpid(); /* for completeness */
4988 while (++core_id < NCORE) /* first worker sees core_id = 1 */
4989 { char cmdline[64];
4990 STARTUPINFO si = { sizeof(si) };
4991 PROCESS_INFORMATION pi;
4992
4993 if (proxy_pid == core_id) /* always non-zero */
4994 { sprintf(cmdline, "pan_proxy.exe -r %s-Q%d -Z%d",
4995 o_cmdline, getpid(), core_id);
4996 } else
4997 { sprintf(cmdline, "pan.exe %s-Q%d -Z%d",
4998 o_cmdline, getpid(), core_id);
4999 }
5000 if (verbose) printf("cpu%d: spawn %s\n", core_id, cmdline);
5001
5002 is_parent = CreateProcess(0, cmdline, 0, 0, FALSE, 0, 0, 0, &si, &pi);
5003 if (is_parent == 0)
5004 { Uerror("fork failed");
5005 }
5006 worker_pids[core_id] = pi.dwProcessId;
5007 worker_handles[core_id] = pi.hProcess;
5008 if (verbose)
5009 { cpu_printf("created core %d, pid %d\n",
5010 core_id, pi.dwProcessId);
5011 }
5012 if (proxy_pid == core_id) /* we just created the receive half */
5013 { /* add proxy send, store pid in proxy_pid_snd */
5014 sprintf(cmdline, "pan_proxy.exe -s %s-Q%d -Z%d -Y%d",
5015 o_cmdline, getpid(), core_id, worker_pids[proxy_pid]);
5016 if (verbose) printf("cpu%d: spawn %s\n", core_id, cmdline);
5017 is_parent = CreateProcess(0, cmdline, 0,0, FALSE, 0,0,0, &si, &pi);
5018 if (is_parent == 0)
5019 { Uerror("fork failed");
5020 }
5021 proxy_pid_snd = pi.dwProcessId;
5022 proxy_handle_snd = pi.hProcess;
5023 if (verbose)
5024 { cpu_printf("created core %d, pid %d (send proxy)\n",
5025 core_id, pi.dwProcessId);
5026 } } }
5027 core_id = 0; /* reset core_id for root process */
5028 } else /* worker */
5029 { static char db0[16]; /* good for up to 10^6 cores */
5030 static char db1[16];
5031 tprefix = db0; sprefix = db1;
5032 sprintf(tprefix, "cpu%d_trail", core_id); /* avoid conflicts on file access */
5033 sprintf(sprefix, "cpu%d_rst", core_id);
5034 memcnt = 0; /* count only additionally allocated memory */
5035 }
5036 if (verbose)
5037 { cpu_printf("starting core_id %d -- pid %d\n", core_id, getpid());
5038 }
5039 if (core_id == 0 && !remote_party)
5040 { new_state(); /* root starts the search */
5041 if (verbose)
5042 cpu_printf("done with 1st dfs, nstates %g (put %d states), start reading q\n",
5043 nstates, nstates_put);
5044 dfs_phase2 = 1;
5045 }
5046 Read_Queue(core_id); /* all cores */
5047
5048 if (verbose)
5049 { cpu_printf("put %6d states into queue -- got %6d\n",
5050 nstates_put, nstates_get);
5051 }
5052 done = 1;
5053 wrapup();
5054 exit(0);
5055 }
5056 #endif
5057
5058 #ifdef BITSTATE
5059 void
5060 init_SS(unsigned long n)
5061 {
5062 SS = (uchar *) prep_shmid_S((size_t) n);
5063 init_HT(0L);
5064 }
5065 #endif
5066
5067 #endif
5068 clock_t start_time;
5069 #if NCORE>1
5070 clock_t crash_stamp;
5071 #endif
5072 #if !defined(WIN32) && !defined(WIN64)
5073 struct tms start_tm;
5074 #endif
5075
5076 void
5077 start_timer(void)
5078 {
5079 #if defined(WIN32) || defined(WIN64)
5080 start_time = clock();
5081 #else
5082 start_time = times(&start_tm);
5083 #endif
5084 }
5085
5086 void
5087 stop_timer(void)
5088 { clock_t stop_time;
5089 double delta_time;
5090 #if !defined(WIN32) && !defined(WIN64)
5091 struct tms stop_tm;
5092 stop_time = times(&stop_tm);
5093 delta_time = ((double) (stop_time - start_time)) / ((double) sysconf(_SC_CLK_TCK));
5094 #else
5095 stop_time = clock();
5096 delta_time = ((double) (stop_time - start_time)) / ((double) CLOCKS_PER_SEC);
5097 #endif
5098 if (readtrail || delta_time < 0.00) return;
5099 #if NCORE>1
5100 if (core_id == 0 && nstates > (double) 0)
5101 { printf("\ncpu%d: elapsed time %.3g seconds (%g states visited)\n", core_id, delta_time, nstates);
5102 if (delta_time > 0.01)
5103 { printf("cpu%d: rate %g states/second\n", core_id, nstates/delta_time);
5104 }
5105 { void check_overkill(void);
5106 check_overkill();
5107 } }
5108 #else
5109 printf("\npan: elapsed time %.3g seconds\n", delta_time);
5110 if (delta_time > 0.01)
5111 { printf("pan: rate %9.8g states/second\n", nstates/delta_time);
5112 if (verbose)
5113 { printf("pan: avg transition delay %.5g usec\n",
5114 delta_time/(nstates+truncs));
5115 } }
5116 #endif
5117 }
5118
5119 #if NCORE>1
5120 #ifdef T_ALERT
5121 double t_alerts[17];
5122
5123 void
5124 crash_report(void)
5125 { int i;
5126 printf("crash alert intervals:\n");
5127 for (i = 0; i < 17; i++)
5128 { printf("%d\t%g\n", i, t_alerts[i]);
5129 } }
5130 #endif
5131
5132 void
5133 crash_reset(void)
5134 { /* false alarm */
5135 if (crash_stamp != (clock_t) 0)
5136 {
5137 #ifdef T_ALERT
5138 double delta_time;
5139 int i;
5140 #if defined(WIN32) || defined(WIN64)
5141 delta_time = ((double) (clock() - crash_stamp)) / ((double) CLOCKS_PER_SEC);
5142 #else
5143 delta_time = ((double) (times(&start_tm) - crash_stamp)) / ((double) sysconf(_SC_CLK_TCK));
5144 #endif
5145 for (i = 0; i < 16; i++)
5146 { if (delta_time <= (i*30))
5147 { t_alerts[i] = delta_time;
5148 break;
5149 } }
5150 if (i == 16) t_alerts[i] = delta_time;
5151 #endif
5152 if (verbose)
5153 printf("cpu%d: crash alert off\n", core_id);
5154 }
5155 crash_stamp = (clock_t) 0;
5156 }
5157
5158 int
5159 crash_test(double maxtime)
5160 { double delta_time;
5161 if (crash_stamp == (clock_t) 0)
5162 { /* start timing */
5163 #if defined(WIN32) || defined(WIN64)
5164 crash_stamp = clock();
5165 #else
5166 crash_stamp = times(&start_tm);
5167 #endif
5168 if (verbose)
5169 { printf("cpu%d: crash detection\n", core_id);
5170 }
5171 return 0;
5172 }
5173 #if defined(WIN32) || defined(WIN64)
5174 delta_time = ((double) (clock() - crash_stamp)) / ((double) CLOCKS_PER_SEC);
5175 #else
5176 delta_time = ((double) (times(&start_tm) - crash_stamp)) / ((double) sysconf(_SC_CLK_TCK));
5177 #endif
5178 return (delta_time >= maxtime);
5179 }
5180 #endif
5181
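/*
 * do_the_search: set up the first trail entry, the accept/progress bits of
 * the initial state, the compression Mask and the fairness counters, then
 * start the timer and launch the actual search (bfs(), mem_get() for
 * multi-core runs, or new_state() for a single-core dfs).
 */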
5182 void
5183 do_the_search(void)
5184 { int i;
5185 depth = mreached = 0;
5186 trpt = &trail[0];
5187 #ifdef VERI
5188 trpt->tau |= 4; /* the claim moves first */
5189 #endif
5190 for (i = 0; i < (int) now._nr_pr; i++)
5191 { P0 *ptr = (P0 *) pptr(i);
5192 #ifndef NP
5193 if (!(trpt->o_pm&2)
5194 && accpstate[ptr->_t][ptr->_p])
5195 { trpt->o_pm |= 2;
5196 }
5197 #else
5198 if (!(trpt->o_pm&4)
5199 && progstate[ptr->_t][ptr->_p])
5200 { trpt->o_pm |= 4;
5201 }
5202 #endif
5203 }
5204 #ifdef EVENT_TRACE
5205 #ifndef NP
5206 if (accpstate[EVENT_TRACE][now._event])
5207 { trpt->o_pm |= 2;
5208 }
5209 #else
5210 if (progstate[EVENT_TRACE][now._event])
5211 { trpt->o_pm |= 4;
5212 }
5213 #endif
5214 #endif
5215 #ifndef NOCOMP
5216 Mask[0] = Mask[1] = 1; /* _nr_pr, _nr_qs */
5217 if (!a_cycles)
5218 { i = &(now._a_t) - (uchar *) &now;
5219 Mask[i] = 1; /* _a_t */
5220 }
5221 #ifndef NOFAIR
5222 if (!fairness)
5223 { int j = 0;
5224 i = &(now._cnt[0]) - (uchar *) &now;
5225 while (j++ < NFAIR)
5226 Mask[i++] = 1; /* _cnt[] */
5227 }
5228 #endif
5229 #endif
5230 #ifndef NOFAIR
5231 if (fairness
5232 && (a_cycles && (trpt->o_pm&2)))
5233 { now._a_t = 2; /* set the A-bit */
5234 now._cnt[0] = now._nr_pr + 1;
5235 #ifdef VERBOSE
5236 printf("%3d: fairness Rule 1, cnt=%d, _a_t=%d\n",
5237 depth, now._cnt[now._a_t&1], now._a_t);
5238 #endif
5239 }
5240 #endif
5241 c_stack_start = (char *) &i; /* meant to be read-only */
5242 #if defined(HAS_CODE) && defined (C_INIT)
5243 C_INIT; /* initialization of data that must precede fork() */
5244 c_init_done++;
5245 #endif
5246 #if defined(C_States) && (HAS_TRACK==1)
5247 /* capture initial state of tracked C objects */
5248 c_update((uchar *) &(now.c_state[0]));
5249 #endif
5250 #ifdef HAS_CODE
5251 if (readtrail) getrail(); /* no return */
5252 #endif
5253 start_timer();
5254 #ifdef BFS
5255 bfs();
5256 #else
5257 #if defined(C_States) && defined(HAS_STACK) && (HAS_TRACK==1)
5258 /* initial state of tracked & unmatched objects */
5259 c_stack((uchar *) &(svtack->c_stack[0]));
5260 #endif
5261 #ifdef RANDOMIZE
5262 #if RANDOMIZE>0
5263 srand(RANDOMIZE);
5264 #else
5265 srand(123);
5266 #endif
5267 #endif
5268 #if NCORE>1
5269 mem_get();
5270 #else
5271 new_state(); /* start 1st DFS */
5272 #endif
5273 #endif
5274 }
5275 #ifdef INLINE_REV
5276 uchar
5277 do_reverse(Trans *t, short II, uchar M)
5278 { uchar _m = M;
5279 int tt = (int) ((P0 *)this)->_p;
5280 #include REVERSE_MOVES
5281 R999: return _m;
5282 }
5283 #endif
5284 #ifndef INLINE
5285 #ifdef EVENT_TRACE
5286 static char _tp = 'n'; static int _qid = 0;
5287 #endif
5288 uchar
5289 do_transit(Trans *t, short II)
5290 { uchar _m = 0;
5291 int tt = (int) ((P0 *)this)->_p;
5292 #ifdef M_LOSS
5293 uchar delta_m = 0;
5294 #endif
5295 #ifdef EVENT_TRACE
5296 short oboq = boq;
5297 uchar ot = (uchar) ((P0 *)this)->_t;
5298 if (ot == EVENT_TRACE) boq = -1;
5299 #define continue { boq = oboq; return 0; }
5300 #else
5301 #define continue return 0
5302 #ifdef SEPARATE
5303 uchar ot = (uchar) ((P0 *)this)->_t;
5304 #endif
5305 #endif
5306 #include FORWARD_MOVES
5307 P999:
5308 #ifdef EVENT_TRACE
5309 if (ot == EVENT_TRACE) boq = oboq;
5310 #endif
5311 return _m;
5312 #undef continue
5313 }
5314 #ifdef EVENT_TRACE
5315 void
5316 require(char tp, int qid)
5317 { Trans *t;
5318 _tp = tp; _qid = qid;
5319
5320 if (now._event != endevent)
5321 for (t = trans[EVENT_TRACE][now._event]; t; t = t->nxt)
5322 { if (do_transit(t, EVENT_TRACE))
5323 { now._event = t->st;
5324 reached[EVENT_TRACE][t->st] = 1;
5325 #ifdef VERBOSE
5326 printf(" event_trace move to -> %d\n", t->st);
5327 #endif
5328 #ifndef BFS
5329 #ifndef NP
5330 if (accpstate[EVENT_TRACE][now._event])
5331 (trpt+1)->o_pm |= 2;
5332 #else
5333 if (progstate[EVENT_TRACE][now._event])
5334 (trpt+1)->o_pm |= 4;
5335 #endif
5336 #endif
5337 #ifdef NEGATED_TRACE
5338 if (now._event == endevent)
5339 {
5340 #ifndef BFS
5341 depth++; trpt++;
5342 #endif
5343 uerror("event_trace error (all events matched)");
5344 #ifndef BFS
5345 trpt--; depth--;
5346 #endif
5347 break;
5348 }
5349 #endif
5350 for (t = t->nxt; t; t = t->nxt)
5351 { if (do_transit(t, EVENT_TRACE))
5352 Uerror("non-determinism in event-trace");
5353 }
5354 return;
5355 }
5356 #ifdef VERBOSE
5357 else
5358 printf(" event_trace miss '%c' -- %d, %d, %d\n",
5359 tp, qid, now._event, t->forw);
5360 #endif
5361 }
5362 #ifdef NEGATED_TRACE
5363 now._event = endevent; /* only 1st try will count -- fixed 4.2.6 */
5364 #else
5365 #ifndef BFS
5366 depth++; trpt++;
5367 #endif
5368 uerror("event_trace error (no matching event)");
5369 #ifndef BFS
5370 trpt--; depth--;
5371 #endif
5372 #endif
5373 }
5374 #endif
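/*
 * enabled: support for the enabled(p) predicate -- temporarily make process
 * 'pid' the active process and use TstOnly to test whether any of its
 * outgoing transitions is executable, without actually executing one.
 */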
5375 int
5376 enabled(int iam, int pid)
5377 { Trans *t; uchar *othis = this;
5378 int res = 0; int tt; uchar ot;
5379 #ifdef VERI
5380 /* if (pid > 0) */ pid++;
5381 #endif
5382 if (pid == iam)
5383 Uerror("used: enabled(pid=thisproc)");
5384 if (pid < 0 || pid >= (int) now._nr_pr)
5385 return 0;
5386 this = pptr(pid);
5387 TstOnly = 1;
5388 tt = (int) ((P0 *)this)->_p;
5389 ot = (uchar) ((P0 *)this)->_t;
5390 for (t = trans[ot][tt]; t; t = t->nxt)
5391 if (do_transit(t, (short) pid))
5392 { res = 1;
5393 break;
5394 }
5395 TstOnly = 0;
5396 this = othis;
5397 return res;
5398 }
5399 #endif
5400 void
5401 snap_time(void)
5402 { clock_t stop_time;
5403 double delta_time;
5404 #if !defined(WIN32) && !defined(WIN64)
5405 struct tms stop_tm;
5406 stop_time = times(&stop_tm);
5407 delta_time = ((double) (stop_time - start_time)) / ((double) sysconf(_SC_CLK_TCK));
5408 #else
5409 stop_time = clock();
5410 delta_time = ((double) (stop_time - start_time)) / ((double) CLOCKS_PER_SEC);
5411 #endif
5412 if (delta_time > 0.01)
5413 { printf("t= %6.3g ", delta_time);
5414 printf("R= %7.0g", nstates/delta_time);
5415 }
5416 printf("\n");
5417 if (quota > 0.1 && delta_time > quota)
5418 { printf("Time limit of %6.3g minutes exceeded\n", quota/60.0);
5419 #if NCORE>1
5420 fflush(stdout);
5421 leave_critical(GLOBAL_LOCK);
5422 sudden_stop("time-limit");
5423 exit(1);
5424 #endif
5425 wrapup();
5426 }
5427 }
5428 void
5429 snapshot(void)
5430 {
5431 #if NCORE>1
5432 enter_critical(GLOBAL_LOCK); /* snapshot */
5433 printf("cpu%d: ", core_id);
5434 #endif
5435 printf("Depth= %7ld States= %8.3g ",
5436 #if NCORE>1
5437 (long) (nr_handoffs * z_handoff) +
5438 #endif
5439 mreached, nstates);
5440 printf("Transitions= %8.3g ", nstates+truncs);
5441 #ifdef MA
5442 printf("Nodes= %7d ", nr_states);
5443 #endif
5444 printf("Memory= %9.3f\t", memcnt/1048576.);
5445 snap_time();
5446 fflush(stdout);
5447 #if NCORE>1
5448 leave_critical(GLOBAL_LOCK);
5449 #endif
5450 }
5451 #ifdef SC
5452 void
5453 stack2disk(void)
5454 {
5455 if (!stackwrite
5456 && (stackwrite = creat(stackfile, TMODE)) < 0)
5457 Uerror("cannot create stackfile");
5458
5459 if (write(stackwrite, trail, DDD*sizeof(Trail))
5460 != DDD*sizeof(Trail))
5461 Uerror("stackfile write error -- disk is full?");
5462
5463 memmove(trail, &trail[DDD], (HHH-DDD+2)*sizeof(Trail));
5464 memset(&trail[HHH-DDD+2], 0, (omaxdepth - HHH + DDD - 2)*sizeof(Trail));
5465 CNT1++;
5466 }
5467 void
5468 disk2stack(void)
5469 { long have;
5470
5471 CNT2++;
5472 memmove(&trail[DDD], trail, (HHH-DDD+2)*sizeof(Trail));
5473
5474 if (!stackwrite
5475 || lseek(stackwrite, -DDD* (off_t) sizeof(Trail), SEEK_CUR) == -1)
5476 Uerror("disk2stack lseek error");
5477
5478 if (!stackread
5479 && (stackread = open(stackfile, 0)) < 0)
5480 Uerror("cannot open stackfile");
5481
5482 if (lseek(stackread, (CNT1-CNT2)*DDD* (off_t) sizeof(Trail), SEEK_SET) == -1)
5483 Uerror("disk2stack lseek error");
5484
5485 have = read(stackread, trail, DDD*sizeof(Trail));
5486 if (have != DDD*sizeof(Trail))
5487 Uerror("stackfile read error");
5488 }
5489 #endif
5490 uchar *
5491 Pptr(int x)
5492 { if (x < 0 || x >= MAXPROC || !proc_offset[x])
5493 return noptr;
5494 else
5495 return (uchar *) pptr(x);
5496 }
5497 int qs_empty(void);
5498 /*
5499 * new_state() is the main DFS search routine in the verifier
5500 * it has a lot of code ifdef-ed together to support
5501 * different search modes, which makes it quite unreadable.
5502 * if you are studying the code, first use the C preprocessor
5503 * to generate a specific version from the pan.c source,
5504 * e.g. by saying:
5505 * gcc -E -DNOREDUCE -DBITSTATE pan.c > ppan.c
5506 * and then study the resulting file, rather than this one
5507 */
5508 #if !defined(BFS) && (!defined(BITSTATE) || !defined(MA))
5509
5510 #ifdef NSUCC
5511 int N_succ[512];
5512 void
5513 tally_succ(int cnt)
5514 { if (cnt < 512) N_succ[cnt]++;
5515 else printf("tally_succ: cnt %d exceeds range\n", cnt);
5516 }
5517
5518 void
5519 dump_succ(void)
5520 { int i; double sum = 0.0;
5521 double w_avg = 0.0;
5522 printf("Successor counts:\n");
5523 for (i = 0; i < 512; i++)
5524 { sum += (double) N_succ[i];
5525 }
5526 for (i = 0; i < 512; i++)
5527 { if (N_succ[i] > 0)
5528 { printf("%3d %10d (%.4g %% of total)\n",
5529 i, N_succ[i], (100.0 * (double) N_succ[i])/sum);
5530 w_avg += (double) i * (double) N_succ[i];
5531 } }
5532 if (sum > N_succ[0])
5533 printf("mean %.4g (without 0: %.4g)\n", w_avg / sum, w_avg / (sum - (double) N_succ[0]));
5534 }
5535 #endif
5536
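/*
 * new_state: the dfs below is organized around the labels Down (push a new
 * state: store it in the hash/bit table and pick the processes to expand),
 * AllOver/Again (the expansion loop over processes and their transitions)
 * and Up (backtracking, further down in this file).  See the note above on
 * generating a readable single-configuration version with the C
 * preprocessor.
 */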
5537 void
5538 new_state(void)
5539 { Trans *t;
5540 uchar _n, _m, ot;
5541 #ifdef RANDOMIZE
5542 short ooi, eoi;
5543 #endif
5544 #ifdef M_LOSS
5545 uchar delta_m = 0;
5546 #endif
5547 short II, JJ = 0, kk;
5548 int tt;
5549 #ifdef REVERSE
5550 short From = BASE, To = now._nr_pr-1;
5551 #else
5552 short From = now._nr_pr-1, To = BASE;
5553 #endif
5554 Down:
5555 #ifdef CHECK
5556 cpu_printf("%d: Down - %s %saccepting [pids %d-%d]\n",
5557 depth, (trpt->tau&4)?"claim":"program",
5558 (trpt->o_pm&2)?"":"non-", From, To);
5559 #endif
5560 #ifdef SCHED
5561 if (depth > 0)
5562 { trpt->sched_limit = (trpt-1)->sched_limit;
5563 } else
5564 { trpt->sched_limit = 0;
5565 }
5566 #endif
5567 #ifdef SC
5568 if (depth > hiwater)
5569 { stack2disk();
5570 maxdepth += DDD;
5571 hiwater += DDD;
5572 trpt -= DDD;
5573 if(verbose)
5574 printf("zap %d: %d (maxdepth now %d)\n",
5575 CNT1, hiwater, maxdepth);
5576 }
5577 #endif
5578 trpt->tau &= ~(16|32|64); /* make sure these are off */
5579 #if defined(FULLSTACK) && defined(MA)
5580 trpt->proviso = 0;
5581 #endif
5582 #ifdef NSUCC
5583 trpt->n_succ = 0;
5584 #endif
5585 #if NCORE>1
5586 if (mem_hand_off())
5587 {
5588 #if SYNC
5589 (trpt+1)->o_n = 1; /* not a deadlock: as below */
5590 #endif
5591 #ifndef LOOPSTATE
5592 (trpt-1)->tau |= 16; /* worstcase guess: as below */
5593 #endif
5594 #if NCORE>1 && defined(FULL_TRAIL)
5595 if (upto > 0)
5596 { Pop_Stack_Tree();
5597 }
5598 #endif
5599 goto Up;
5600 }
5601 #endif
5602 if (depth >= maxdepth)
5603 { if (!warned)
5604 { warned = 1;
5605 printf("error: max search depth too small\n");
5606 }
5607 if (bounded)
5608 { uerror("depth limit reached");
5609 }
5610 truncs++;
5611 #if SYNC
5612 (trpt+1)->o_n = 1; /* not a deadlock */
5613 #endif
5614 #ifndef LOOPSTATE
5615 (trpt-1)->tau |= 16; /* worstcase guess */
5616 #endif
5617 #if NCORE>1 && defined(FULL_TRAIL)
5618 if (upto > 0)
5619 { Pop_Stack_Tree();
5620 }
5621 #endif
5622 goto Up;
5623 }
5624 AllOver:
5625 #if (defined(FULLSTACK) && !defined(MA)) || NCORE>1
5626 /* if atomic or rv move, carry forward previous state */
5627 trpt->ostate = (trpt-1)->ostate;
5628 #endif
5629 #ifdef VERI
5630 if ((trpt->tau&4) || ((trpt-1)->tau&128))
5631 #endif
5632 if (boq == -1) { /* if not mid-rv */
5633 #ifndef SAFETY
5634 /* this check should now be redundant
5635 * because the seed state also appears
5636 * on the 1st dfs stack and would be
5637 * matched in hstore below
5638 */
5639 if ((now._a_t&1) && depth > A_depth)
5640 { if (!memcmp((char *)&A_Root,
5641 (char *)&now, vsize))
5642 {
5643 depthfound = A_depth;
5644 #ifdef CHECK
5645 printf("matches seed\n");
5646 #endif
5647 #ifdef NP
5648 uerror("non-progress cycle");
5649 #else
5650 uerror("acceptance cycle");
5651 #endif
5652 #if NCORE>1 && defined(FULL_TRAIL)
5653 if (upto > 0)
5654 { Pop_Stack_Tree();
5655 }
5656 #endif
5657 goto Up;
5658 }
5659 #ifdef CHECK
5660 printf("not seed\n");
5661 #endif
5662 }
5663 #endif
5664 if (!(trpt->tau&8)) /* if no atomic move */
5665 {
5666 #ifdef BITSTATE
5667 #ifdef CNTRSTACK
5668 II = bstore((char *)&now, vsize);
5669 trpt->j6 = j1; trpt->j7 = j2;
5670 JJ = LL[j1] && LL[j2];
5671 #else
5672 #ifdef FULLSTACK
5673 JJ = onstack_now();
5674 #else
5675 #ifndef NOREDUCE
5676 JJ = II; /* worstcase guess for p.o. */
5677 #endif
5678 #endif
5679 II = bstore((char *)&now, vsize);
5680 #endif
5681 #else
5682 #ifdef MA
5683 II = gstore((char *)&now, vsize, 0);
5684 #ifndef FULLSTACK
5685 JJ = II;
5686 #else
5687 JJ = (II == 2)?1:0;
5688 #endif
5689 #else
5690 II = hstore((char *)&now, vsize);
5691 #ifdef FULLSTACK
5692 JJ = (II == 2)?1:0;
5693 #endif
5694 #endif
5695 #endif
5696 kk = (II == 1 || II == 2);
5697 #ifndef SAFETY
5698 #if NCORE==1 || defined (SEP_STATE)
5699 if (II == 2 && ((trpt->o_pm&2) || ((trpt-1)->o_pm&2)))
5700 #ifndef NOFAIR
5701 #if 0
5702 if (!fairness || ((now._a_t&1) && now._cnt[1] == 1)) /* 5.1.4 */
5703 #else
5704 if (a_cycles && !fairness) /* 5.1.6 -- example by Hirofumi Watanabe */
5705 #endif
5706 #endif
5707 {
5708 II = 3; /* Schwoon & Esparza 2005, Gastin&Moro 2004 */
5709 #ifdef VERBOSE
5710 printf("state match on dfs stack\n");
5711 #endif
5712 goto same_case;
5713 }
5714 #endif
5715 #if defined(FULLSTACK) && defined(BITSTATE)
5716 if (!JJ && (now._a_t&1) && depth > A_depth)
5717 { int oj1 = j1;
5718 uchar o_a_t = now._a_t;
5719 now._a_t &= ~(1|16|32);
5720 if (onstack_now())
5721 { II = 3;
5722 #ifdef VERBOSE
5723 printf("state match on 1st dfs stack\n");
5724 #endif
5725 }
5726 now._a_t = o_a_t;
5727 j1 = oj1;
5728 }
5729 #endif
5730 if (II == 3 && a_cycles && (now._a_t&1))
5731 {
5732 #ifndef NOFAIR
5733 if (fairness && now._cnt[1] > 1) /* was != 0 */
5734 {
5735 #ifdef VERBOSE
5736 printf(" fairness count non-zero\n");
5737 #endif
5738 II = 0;
5739 } else
5740 #endif
5741 {
5742 #ifndef BITSTATE
5743 nShadow--;
5744 #endif
5745 same_case: if (Lstate) depthfound = Lstate->D;
5746 #ifdef NP
5747 uerror("non-progress cycle");
5748 #else
5749 uerror("acceptance cycle");
5750 #endif
5751 #if NCORE>1 && defined(FULL_TRAIL)
5752 if (upto > 0)
5753 { Pop_Stack_Tree();
5754 }
5755 #endif
5756 goto Up;
5757 }
5758 }
5759 #endif
5760 #ifndef NOREDUCE
5761 #ifndef SAFETY
5762 #if NCORE>1 && !defined(SEP_STATE) && defined(V_PROVISO)
5763 if (II != 0 && (!Lstate || Lstate->cpu_id < core_id))
5764 { (trpt-1)->tau |= 16;
5765 }
5766 #endif
5767 if ((II && JJ) || (II == 3))
5768 { /* marker for liveness proviso */
5769 #ifndef LOOPSTATE
5770 (trpt-1)->tau |= 16;
5771 #endif
5772 truncs2++;
5773 }
5774 #else
5775 #if NCORE>1 && !defined(SEP_STATE) && defined(V_PROVISO)
5776 if (!(II != 0 && (!Lstate || Lstate->cpu_id < core_id)))
5777 { /* treat as stack state */
5778 (trpt-1)->tau |= 16;
5779 } else
5780 { /* treat as non-stack state */
5781 (trpt-1)->tau |= 64;
5782 }
5783 #endif
5784 if (!II || !JJ)
5785 { /* successor outside stack */
5786 (trpt-1)->tau |= 64;
5787 }
5788 #endif
5789 #endif
5790 if (II)
5791 { truncs++;
5792 #if NCORE>1 && defined(FULL_TRAIL)
5793 if (upto > 0)
5794 { Pop_Stack_Tree();
5795 if (depth == 0)
5796 { return;
5797 } }
5798 #endif
5799 goto Up;
5800 }
5801 if (!kk)
5802 { static long sdone = (long) 0; long ndone;
5803 nstates++;
5804 #if defined(ZAPH) && defined(BITSTATE)
5805 zstates += (double) hfns;
5806 #endif
5807 ndone = (unsigned long) (nstates/((double) FREQ));
5808 if (ndone != sdone)
5809 { snapshot();
5810 sdone = ndone;
5811 #if defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(MA)
5812 if (nstates > ((double)(ONE_L<<(ssize+1))))
5813 { void resize_hashtable(void);
5814 resize_hashtable();
5815 }
5816 #endif
5817 #if defined(ZAPH) && defined(BITSTATE)
5818 if (zstates > ((double)(ONE_L<<(ssize-2))))
5819 { /* more than half the bits set */
5820 void zap_hashtable(void);
5821 zap_hashtable();
5822 zstates = 0;
5823 }
5824 #endif
5825 }
5826 #ifdef SVDUMP
5827 if (vprefix > 0)
5828 if (write(svfd, (uchar *) &now, vprefix) != vprefix)
5829 { fprintf(efd, "writing %s.svd failed\n", PanSource);
5830 wrapup();
5831 }
5832 #endif
5833 #if defined(MA) && defined(W_XPT)
5834 if ((unsigned long) nstates%W_XPT == 0)
5835 { void w_xpoint(void);
5836 w_xpoint();
5837 }
5838 #endif
5839 }
5840 #if defined(FULLSTACK) || defined(CNTRSTACK)
5841 onstack_put();
5842 #ifdef DEBUG2
5843 #if defined(FULLSTACK) && !defined(MA)
5844 printf("%d: putting %u (%d)\n", depth,
5845 trpt->ostate,
5846 (trpt->ostate)?trpt->ostate->tagged:0);
5847 #else
5848 printf("%d: putting\n", depth);
5849 #endif
5850 #endif
5851 #else
5852 #if NCORE>1
5853 trpt->ostate = Lstate;
5854 #endif
5855 #endif
5856 } }
5857 if (depth > mreached)
5858 mreached = depth;
5859 #ifdef VERI
5860 if (trpt->tau&4)
5861 #endif
5862 trpt->tau &= ~(1|2); /* timeout and -request off */
5863 _n = 0;
5864 #if SYNC
5865 (trpt+1)->o_n = 0;
5866 #endif
5867 #ifdef VERI
5868 if (now._nr_pr == 0) /* claim terminated */
5869 uerror("end state in claim reached");
5870 check_claim(((P0 *)pptr(0))->_p);
5871 Stutter:
5872 if (trpt->tau&4) /* must make a claimmove */
5873 {
5874 #ifndef NOFAIR
5875 if ((now._a_t&2) /* A-bit set */
5876 && now._cnt[now._a_t&1] == 1)
5877 { now._a_t &= ~2;
5878 now._cnt[now._a_t&1] = 0;
5879 trpt->o_pm |= 16;
5880 #ifdef DEBUG
5881 printf("%3d: fairness Rule 3.: _a_t = %d\n",
5882 depth, now._a_t);
5883 #endif
5884 }
5885 #endif
5886 II = 0; /* never */
5887 goto Veri0;
5888 }
5889 #endif
5890 #ifndef NOREDUCE
5891 /* Look for a process with only safe transitions */
5892 /* (special rules apply in the 2nd dfs) */
5893 if (boq == -1 && From != To
5894
5895 #ifdef SAFETY
5896 #if NCORE>1
5897 && (depth < z_handoff)
5898 #endif
5899 )
5900 #else
5901 #if NCORE>1
5902 && ((a_cycles) || (!a_cycles && depth < z_handoff))
5903 #endif
5904 && (!(now._a_t&1)
5905 || (a_cycles &&
5906 #ifndef BITSTATE
5907 #ifdef MA
5908 #ifdef VERI
5909 !((trpt-1)->proviso))
5910 #else
5911 !(trpt->proviso))
5912 #endif
5913 #else
5914 #ifdef VERI
5915 (trpt-1)->ostate &&
5916 !(((char *)&((trpt-1)->ostate->state))[0] & 128))
5917 #else
5918 !(((char *)&(trpt->ostate->state))[0] & 128))
5919 #endif
5920 #endif
5921 #else
5922 #ifdef VERI
5923 (trpt-1)->ostate &&
5924 (trpt-1)->ostate->proviso == 0)
5925 #else
5926 trpt->ostate->proviso == 0)
5927 #endif
5928 #endif
5929 ))
5930 #endif
5931
5932 #ifdef REVERSE
5933 for (II = From; II <= To; II++)
5934 #else
5935 for (II = From; II >= To; II--)
5936 #endif
5937 {
5938 Resume: /* pick up here if preselect fails */
5939 this = pptr(II);
5940 tt = (int) ((P0 *)this)->_p;
5941 ot = (uchar) ((P0 *)this)->_t;
5942 if (trans[ot][tt]->atom & 8)
5943 { t = trans[ot][tt];
5944 if (t->qu[0] != 0)
5945 { Ccheck++;
5946 if (!q_cond(II, t))
5947 continue;
5948 Cholds++;
5949 }
5950 From = To = II; /* the process preselected */
5951 #ifdef NIBIS
5952 t->om = 0;
5953 #endif
5954 trpt->tau |= 32; /* preselect marker */
5955 #ifdef DEBUG
5956 #ifdef NIBIS
5957 printf("%3d: proc %d Pre", depth, II);
5958 printf("Selected (om=%d, tau=%d)\n",
5959 t->om, trpt->tau);
5960 #else
5961 printf("%3d: proc %d PreSelected (tau=%d)\n",
5962 depth, II, trpt->tau);
5963 #endif
5964 #endif
5965 goto Again;
5966 }
5967 }
5968 trpt->tau &= ~32;
5969 #endif
5970 #if !defined(NOREDUCE) || (defined(ETIM) && !defined(VERI))
5971 Again:
5972 #endif
5973 /* The Main Expansion Loop over Processes */
5974 trpt->o_pm &= ~(8|16|32|64); /* fairness-marks */
5975 #ifndef NOFAIR
5976 if (fairness && boq == -1
5977 #ifdef VERI
5978 && (!(trpt->tau&4) && !((trpt-1)->tau&128))
5979 #endif
5980 && !(trpt->tau&8))
5981 { /* A_bit = 1; Cnt = N in acc states with A_bit 0 */
5982 if (!(now._a_t&2))
5983 {
5984 if (a_cycles && (trpt->o_pm&2))
5985 { /* Accepting state */
5986 now._a_t |= 2;
5987 now._cnt[now._a_t&1] = now._nr_pr + 1;
5988 trpt->o_pm |= 8;
5989 #ifdef DEBUG
5990 printf("%3d: fairness Rule 1: cnt=%d, _a_t=%d\n",
5991 depth, now._cnt[now._a_t&1], now._a_t);
5992 #endif
5993 }
5994 } else
5995 { /* A_bit = 0 when Cnt 0 */
5996 if (now._cnt[now._a_t&1] == 1)
5997 { now._a_t &= ~2;
5998 now._cnt[now._a_t&1] = 0;
5999 trpt->o_pm |= 16;
6000 #ifdef DEBUG
6001 printf("%3d: fairness Rule 3: _a_t = %d\n",
6002 depth, now._a_t);
6003 #endif
6004 } } }
6005 #endif
6006
6007 #ifdef REVERSE
6008 for (II = From; II <= To; II++)
6009 #else
6010 for (II = From; II >= To; II--)
6011 #endif
6012 {
6013 #if SYNC
6014 /* no rendezvous with same proc */
6015 if (boq != -1 && trpt->pr == II) continue;
6016 #endif
6017 #ifdef SCHED
6018 /* limit max nr of interleavings */
6019 if (From != To
6020 && depth > 0
6021 #ifdef VERI
6022 && II != 0
6023 #endif
6024 && (trpt-1)->pr != II
6025 && trpt->sched_limit >= sched_max)
6026 { continue;
6027 }
6028 #endif
6029 #ifdef VERI
6030 Veri0:
6031 #endif
6032 this = pptr(II);
6033 tt = (int) ((P0 *)this)->_p;
6034 ot = (uchar) ((P0 *)this)->_t;
6035 #ifdef NIBIS
6036 /* don't repeat a previous preselected expansion */
6037 /* could hit this if reduction proviso was false */
6038 t = trans[ot][tt];
6039 if (!(trpt->tau&4)
6040 && !(trpt->tau&1)
6041 && !(trpt->tau&32)
6042 && (t->atom & 8)
6043 && boq == -1
6044 && From != To)
6045 { if (t->qu[0] == 0
6046 || q_cond(II, t))
6047 { _m = t->om;
6048 if (_m>_n||(_n>3&&_m!=0)) _n=_m;
6049 continue; /* did it before */
6050 } }
6051 #endif
6052 trpt->o_pm &= ~1; /* no move in this pid yet */
6053 #ifdef EVENT_TRACE
6054 (trpt+1)->o_event = now._event;
6055 #endif
6056 /* Fairness: Cnt++ when Cnt == II */
6057 #ifndef NOFAIR
6058 trpt->o_pm &= ~64; /* didn't apply rule 2 */
6059 if (fairness
6060 && boq == -1
6061 && !(trpt->o_pm&32)
6062 && (now._a_t&2)
6063 && now._cnt[now._a_t&1] == II+2)
6064 { now._cnt[now._a_t&1] -= 1;
6065 #ifdef VERI
6066 /* claim need not participate */
6067 if (II == 1)
6068 now._cnt[now._a_t&1] = 1;
6069 #endif
6070 #ifdef DEBUG
6071 printf("%3d: proc %d fairness ", depth, II);
6072 printf("Rule 2: --cnt to %d (%d)\n",
6073 now._cnt[now._a_t&1], now._a_t);
6074 #endif
6075 trpt->o_pm |= (32|64);
6076 }
6077 #endif
6078 #ifdef HAS_PROVIDED
6079 if (!provided(II, ot, tt, t)) continue;
6080 #endif
6081 /* check all trans of proc II - escapes first */
6082 #ifdef HAS_UNLESS
6083 trpt->e_state = 0;
6084 #endif
6085 (trpt+1)->pr = (uchar) II;
6086 (trpt+1)->st = tt;
6087 #ifdef RANDOMIZE
6088 for (ooi = eoi = 0, t = trans[ot][tt]; t; t = t->nxt, ooi++)
6089 { if (strcmp(t->tp, "else") == 0)
6090 { eoi++;
6091 break;
6092 } }
6093 if (eoi > 0)
6094 { t = trans[ot][tt];
6095 #ifdef VERBOSE
6096 printf("randomizer: suppressed, saw else\n");
6097 #endif
6098 } else
6099 { eoi = rand()%ooi;
6100 #ifdef VERBOSE
6101 printf("randomizer: skip %d in %d\n", eoi, ooi);
6102 #endif
6103 for (t = trans[ot][tt]; t; t = t->nxt)
6104 if (eoi-- <= 0) break;
6105 }
6106 domore:
6107 for ( ; t && ooi > 0; t = t->nxt, ooi--)
6108 #else
6109 for (t = trans[ot][tt]; t; t = t->nxt)
6110 #endif
6111 {
6112 #ifdef HAS_UNLESS
6113 /* exploring all transitions from
6114 * a single escape state suffices
6115 */
6116 if (trpt->e_state > 0
6117 && trpt->e_state != t->e_trans)
6118 {
6119 #ifdef DEBUG
6120 printf("skip 2nd escape %d (did %d before)\n",
6121 t->e_trans, trpt->e_state);
6122 #endif
6123 break;
6124 }
6125 #endif
6126 (trpt+1)->o_t = t;
6127 #ifdef INLINE
6128 #include FORWARD_MOVES
6129 P999: /* jumps here when move succeeds */
6130 #else
6131 if (!(_m = do_transit(t, II))) continue;
6132 #endif
6133 #ifdef SCHED
6134 if (depth > 0
6135 #ifdef VERI
6136 && II != 0
6137 #endif
6138 && (trpt-1)->pr != II)
6139 { trpt->sched_limit = 1 + (trpt-1)->sched_limit;
6140 }
6141 #endif
6142 if (boq == -1)
6143 #ifdef CTL
6144 /* for branching-time, can accept reduction only if */
6145 /* the persistent set contains just 1 transition */
6146 { if ((trpt->tau&32) && (trpt->o_pm&1))
6147 trpt->tau |= 16;
6148 trpt->o_pm |= 1; /* we moved */
6149 }
6150 #else
6151 trpt->o_pm |= 1; /* we moved */
6152 #endif
6153 #ifdef LOOPSTATE
6154 if (loopstate[ot][tt])
6155 {
6156 #ifdef VERBOSE
6157 printf("exiting from loopstate:\n");
6158 #endif
6159 trpt->tau |= 16;
6160 cnt_loops++;
6161 }
6162 #endif
6163 #ifdef PEG
6164 peg[t->forw]++;
6165 #endif
6166 #if defined(VERBOSE) || defined(CHECK)
6167 #if defined(SVDUMP)
6168 cpu_printf("%3d: proc %d exec %d \n", depth, II, t->t_id);
6169 #else
6170 cpu_printf("%3d: proc %d exec %d, %d to %d, %s %s %s %saccepting [tau=%d]\n",
6171 depth, II, t->forw, tt, t->st, t->tp,
6172 (t->atom&2)?"atomic":"",
6173 (boq != -1)?"rendez-vous":"",
6174 (trpt->o_pm&2)?"":"non-", trpt->tau);
6175 #ifdef HAS_UNLESS
6176 if (t->e_trans)
6177 cpu_printf("\t(escape to state %d)\n", t->st);
6178 #endif
6179 #endif
6180 #ifdef RANDOMIZE
6181 cpu_printf("\t(randomizer %d)\n", ooi);
6182 #endif
6183 #endif
6184 #ifdef HAS_LAST
6185 #ifdef VERI
6186 if (II != 0)
6187 #endif
6188 now._last = II - BASE;
6189 #endif
6190 #ifdef HAS_UNLESS
6191 trpt->e_state = t->e_trans;
6192 #endif
6193 depth++; trpt++;
6194 trpt->pr = (uchar) II;
6195 trpt->st = tt;
6196 trpt->o_pm &= ~(2|4);
6197 if (t->st > 0)
6198 { ((P0 *)this)->_p = t->st;
6199 /* moved down reached[ot][t->st] = 1; */
6200 }
6201 #ifndef SAFETY
6202 if (a_cycles)
6203 {
6204 #if (ACCEPT_LAB>0 && !defined(NP)) || (PROG_LAB>0 && defined(HAS_NP))
6205 int ii;
6206 #endif
6207 #define P__Q ((P0 *)pptr(ii))
6208 #if ACCEPT_LAB>0
6209 #ifdef NP
6210 /* state 1 of np_ claim is accepting */
6211 if (((P0 *)pptr(0))->_p == 1)
6212 trpt->o_pm |= 2;
6213 #else
6214 for (ii = 0; ii < (int) now._nr_pr; ii++)
6215 { if (accpstate[P__Q->_t][P__Q->_p])
6216 { trpt->o_pm |= 2;
6217 break;
6218 } }
6219 #endif
6220 #endif
6221 #if defined(HAS_NP) && PROG_LAB>0
6222 for (ii = 0; ii < (int) now._nr_pr; ii++)
6223 { if (progstate[P__Q->_t][P__Q->_p])
6224 { trpt->o_pm |= 4;
6225 break;
6226 } }
6227 #endif
6228 #undef P__Q
6229 }
6230 #endif
6231 trpt->o_t = t; trpt->o_n = _n;
6232 trpt->o_ot = ot; trpt->o_tt = tt;
6233 trpt->o_To = To; trpt->o_m = _m;
6234 trpt->tau = 0;
6235 #ifdef RANDOMIZE
6236 trpt->oo_i = ooi;
6237 #endif
6238 if (boq != -1 || (t->atom&2))
6239 { trpt->tau |= 8;
6240 #ifdef VERI
6241 /* atomic sequence in claim */
6242 if((trpt-1)->tau&4)
6243 trpt->tau |= 4;
6244 else
6245 trpt->tau &= ~4;
6246 } else
6247 { if ((trpt-1)->tau&4)
6248 trpt->tau &= ~4;
6249 else
6250 trpt->tau |= 4;
6251 }
6252 /* if claim allowed timeout, so */
6253 /* does the next program-step: */
6254 if (((trpt-1)->tau&1) && !(trpt->tau&4))
6255 trpt->tau |= 1;
6256 #else
6257 } else
6258 trpt->tau &= ~8;
6259 #endif
6260 if (boq == -1 && (t->atom&2))
6261 { From = To = II; nlinks++;
6262 } else
6263 #ifdef REVERSE
6264 { From = BASE; To = now._nr_pr-1;
6265 #else
6266 { From = now._nr_pr-1; To = BASE;
6267 #endif
6268 }
6269 #if NCORE>1 && defined(FULL_TRAIL)
6270 if (upto > 0)
6271 { Push_Stack_Tree(II, t->t_id);
6272 }
6273 #endif
6274 goto Down; /* pseudo-recursion */
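	/* control comes back to the Up: label below once the successor
	 * pushed above has been fully explored (or was already visited);
	 * the move is then reversed and the frame's bookkeeping restored */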
6275 Up:
6276 #ifdef CHECK
6277 cpu_printf("%d: Up - %s\n", depth,
6278 (trpt->tau&4)?"claim":"program");
6279 #endif
6280 #if NCORE>1
6281 iam_alive();
6282 #ifdef USE_DISK
6283 mem_drain();
6284 #endif
6285 #endif
6286 #if defined(MA) || NCORE>1
6287 if (depth <= 0) return;
6288 /* e.g., if first state is old, after a restart */
6289 #endif
6290 #ifdef SC
6291 if (CNT1 > CNT2
6292 && depth < hiwater - (HHH-DDD) + 2)
6293 {
6294 trpt += DDD;
6295 disk2stack();
6296 maxdepth -= DDD;
6297 hiwater -= DDD;
6298 if(verbose)
6299 printf("unzap %d: %d\n", CNT2, hiwater);
6300 }
6301 #endif
6302 #ifndef NOFAIR
6303 if (trpt->o_pm&128) /* fairness alg */
6304 { now._cnt[now._a_t&1] = trpt->bup.oval;
6305 _n = 1; trpt->o_pm &= ~128;
6306 depth--; trpt--;
6307 #if defined(VERBOSE) || defined(CHECK)
6308 printf("%3d: reversed fairness default move\n", depth);
6309 #endif
6310 goto Q999;
6311 }
6312 #endif
6313 #ifdef HAS_LAST
6314 #ifdef VERI
6315 { int d; Trail *trl;
6316 now._last = 0;
6317 for (d = 1; d < depth; d++)
6318 { trl = getframe(depth-d); /* was (trpt-d) */
6319 if (trl->pr != 0)
6320 { now._last = trl->pr - BASE;
6321 break;
6322 } } }
6323 #else
6324 now._last = (depth<1)?0:(trpt-1)->pr;
6325 #endif
6326 #endif
6327 #ifdef EVENT_TRACE
6328 now._event = trpt->o_event;
6329 #endif
6330 #ifndef SAFETY
6331 if ((now._a_t&1) && depth <= A_depth)
6332 return; /* to checkcycles() */
6333 #endif
6334 t = trpt->o_t; _n = trpt->o_n;
6335 ot = trpt->o_ot; II = trpt->pr;
6336 tt = trpt->o_tt; this = pptr(II);
6337 To = trpt->o_To; _m = trpt->o_m;
6338 #ifdef RANDOMIZE
6339 ooi = trpt->oo_i;
6340 #endif
6341 #ifdef INLINE_REV
6342 _m = do_reverse(t, II, _m);
6343 #else
6344 #include REVERSE_MOVES
6345 R999: /* jumps here when done */
6346 #endif
6347 #ifdef VERBOSE
6348 cpu_printf("%3d: proc %d reverses %d, %d to %d\n",
6349 depth, II, t->forw, tt, t->st);
6350 cpu_printf("\t%s [abit=%d,adepth=%d,tau=%d,%d]\n",
6351 t->tp, now._a_t, A_depth, trpt->tau, (trpt-1)->tau);
6352 #endif
6353 #ifndef NOREDUCE
6354 /* pass the proviso tags */
6355 if ((trpt->tau&8) /* rv or atomic */
6356 && (trpt->tau&16))
6357 (trpt-1)->tau |= 16;
6358 #ifdef SAFETY
6359 if ((trpt->tau&8) /* rv or atomic */
6360 && (trpt->tau&64))
6361 (trpt-1)->tau |= 64;
6362 #endif
6363 #endif
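	/* the code above copies tau bit 16 (successor truncated / reduction
	 * proviso not met) from the completed rv/atomic frame to its parent,
	 * so that after backtracking the preselected-process reduction can
	 * be abandoned (see the "UnSelected" / full-search code below) */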
6364 depth--; trpt--;
6365
6366 #ifdef NSUCC
6367 trpt->n_succ++;
6368 #endif
6369 #ifdef NIBIS
6370 (trans[ot][tt])->om = _m; /* head of list */
6371 #endif
6372 /* i.e., not set if rv fails */
6373 if (_m)
6374 {
6375 #if defined(VERI) && !defined(NP)
6376 if (II == 0 && verbose && !reached[ot][t->st])
6377 {
6378 printf("depth %d: Claim reached state %d (line %d)\n",
6379 depth, t->st, src_claim [t->st]);
6380 fflush(stdout);
6381 }
6382 #endif
6383 reached[ot][t->st] = 1;
6384 reached[ot][tt] = 1;
6385 }
6386 #ifdef HAS_UNLESS
6387 else trpt->e_state = 0; /* undo */
6388 #endif
6389 if (_m>_n||(_n>3&&_m!=0)) _n=_m;
6390 ((P0 *)this)->_p = tt;
6391 } /* all options */
6392 #ifdef RANDOMIZE
6393 if (!t && ooi > 0)
6394 { t = trans[ot][tt];
6395 #ifdef VERBOSE
6396 printf("randomizer: continue for %d more\n", ooi);
6397 #endif
6398 goto domore;
6399 }
6400 #ifdef VERBOSE
6401 else
6402 printf("randomizer: done\n");
6403 #endif
6404 #endif
6405 #ifndef NOFAIR
6406 /* Fairness: undo Rule 2 */
6407 if ((trpt->o_pm&32)
6408 && (trpt->o_pm&64))
6409 { if (trpt->o_pm&1)
6410 {
6411 #ifdef VERI
6412 if (now._cnt[now._a_t&1] == 1)
6413 now._cnt[now._a_t&1] = 2;
6414 #endif
6415 now._cnt[now._a_t&1] += 1;
6416 #ifdef VERBOSE
6417 printf("%3d: proc %d fairness ", depth, II);
6418 printf("undo Rule 2, cnt=%d, _a_t=%d\n",
6419 now._cnt[now._a_t&1], now._a_t);
6420 #endif
6421 trpt->o_pm &= ~(32|64);
6422 } else
6423 { if (_n > 0)
6424 {
6425 trpt->o_pm &= ~64;
6426 #ifdef REVERSE
6427 II = From-1;
6428 #else
6429 II = From+1;
6430 #endif
6431 } } }
6432 #endif
6433 #ifdef VERI
6434 if (II == 0) break; /* never claim */
6435 #endif
6436 } /* all processes */
6437 #ifdef NSUCC
6438 tally_succ(trpt->n_succ);
6439 #endif
6440 #ifdef SCHED
6441 if (_n == 0 /* no process could move */
6442 #ifdef VERI
6443 && II != 0
6444 #endif
6445 && depth > 0
6446 && trpt->sched_limit >= sched_max)
6447 { _n = 1; /* not a deadlock */
6448 }
6449 #endif
6450 #ifndef NOFAIR
6451 /* Fairness: undo Rule 2 */
6452 if (trpt->o_pm&32) /* remains if proc blocked */
6453 {
6454 #ifdef VERI
6455 if (now._cnt[now._a_t&1] == 1)
6456 now._cnt[now._a_t&1] = 2;
6457 #endif
6458 now._cnt[now._a_t&1] += 1;
6459 #ifdef VERBOSE
6460 printf("%3d: proc -- fairness ", depth);
6461 printf("undo Rule 2, cnt=%d, _a_t=%d\n",
6462 now._cnt[now._a_t&1], now._a_t);
6463 #endif
6464 trpt->o_pm &= ~32;
6465 }
6466 #ifndef NP
6467 if (fairness
6468 && _n == 0 /* nobody moved */
6469 #ifdef VERI
6470 && !(trpt->tau&4) /* in program move */
6471 #endif
6472 && !(trpt->tau&8) /* not an atomic one */
6473 #ifdef OTIM
6474 && ((trpt->tau&1) || endstate())
6475 #else
6476 #ifdef ETIM
6477 && (trpt->tau&1) /* already tried timeout */
6478 #endif
6479 #endif
6480 #ifndef NOREDUCE
6481 /* see below */
6482 && !((trpt->tau&32) && (_n == 0 || (trpt->tau&16)))
6483 #endif
6484 && now._cnt[now._a_t&1] > 0) /* needed more procs */
6485 { depth++; trpt++;
6486 trpt->o_pm |= 128 | ((trpt-1)->o_pm&(2|4));
6487 trpt->bup.oval = now._cnt[now._a_t&1];
6488 now._cnt[now._a_t&1] = 1;
6489 #ifdef VERI
6490 trpt->tau = 4;
6491 #else
6492 trpt->tau = 0;
6493 #endif
6494 #ifdef REVERSE
6495 From = BASE; To = now._nr_pr-1;
6496 #else
6497 From = now._nr_pr-1; To = BASE;
6498 #endif
6499 #if defined(VERBOSE) || defined(CHECK)
6500 printf("%3d: fairness default move ", depth);
6501 printf("(all procs block)\n");
6502 #endif
6503 goto Down;
6504 }
6505 #endif
6506 Q999: /* returns here with _n>0 when done */;
6507 if (trpt->o_pm&8)
6508 { now._a_t &= ~2;
6509 now._cnt[now._a_t&1] = 0;
6510 trpt->o_pm &= ~8;
6511 #ifdef VERBOSE
6512 printf("%3d: fairness undo Rule 1, _a_t=%d\n",
6513 depth, now._a_t);
6514 #endif
6515 }
6516 if (trpt->o_pm&16)
6517 { now._a_t |= 2;
6518 now._cnt[now._a_t&1] = 1;
6519 trpt->o_pm &= ~16;
6520 #ifdef VERBOSE
6521 printf("%3d: fairness undo Rule 3, _a_t=%d\n",
6522 depth, now._a_t);
6523 #endif
6524 }
6525 #endif
6526 #ifndef NOREDUCE
6527 #ifdef SAFETY
6528 #ifdef LOOPSTATE
6529 /* at least one move that was preselected at this */
6530 /* level, blocked or was a loop control flow point */
6531 if ((trpt->tau&32) && (_n == 0 || (trpt->tau&16)))
6532 #else
6533 /* preselected move - no successors outside stack */
6534 if ((trpt->tau&32) && !(trpt->tau&64))
6535 #endif
6536 #ifdef REVERSE
6537 { From = BASE; To = now._nr_pr-1;
6538 #else
6539 { From = now._nr_pr-1; To = BASE;
6540 #endif
6541 #ifdef DEBUG
6542 printf("%3d: proc %d UnSelected (_n=%d, tau=%d)\n",
6543 depth, II+1, _n, trpt->tau);
6544 #endif
6545 _n = 0; trpt->tau &= ~(16|32|64);
6546 #ifdef REVERSE
6547 if (II <= To) /* II already decremented */
6548 #else
6549 if (II >= BASE) /* II already decremented */
6550 #endif
6551 goto Resume;
6552 else
6553 goto Again;
6554 }
6555 #else
6556 /* at least one move that was preselected at this */
6557 /* level, blocked or truncated at the next level */
6558 /* implied: #ifdef FULLSTACK */
6559 if ((trpt->tau&32) && (_n == 0 || (trpt->tau&16)))
6560 {
6561 #ifdef DEBUG
6562 printf("%3d: proc %d UnSelected (_n=%d, tau=%d)\n",
6563 depth, II+1, (int) _n, trpt->tau);
6564 #endif
6565 if (a_cycles && (trpt->tau&16))
6566 { if (!(now._a_t&1))
6567 {
6568 #ifdef DEBUG
6569 printf("%3d: setting proviso bit\n", depth);
6570 #endif
6571 #ifndef BITSTATE
6572 #ifdef MA
6573 #ifdef VERI
6574 (trpt-1)->proviso = 1;
6575 #else
6576 trpt->proviso = 1;
6577 #endif
6578 #else
6579 #ifdef VERI
6580 if ((trpt-1)->ostate)
6581 ((char *)&((trpt-1)->ostate->state))[0] |= 128;
6582 #else
6583 ((char *)&(trpt->ostate->state))[0] |= 128;
6584 #endif
6585 #endif
6586 #else
6587 #ifdef VERI
6588 if ((trpt-1)->ostate)
6589 (trpt-1)->ostate->proviso = 1;
6590 #else
6591 trpt->ostate->proviso = 1;
6592 #endif
6593 #endif
6594 #ifdef REVERSE
6595 From = BASE; To = now._nr_pr-1;
6596 #else
6597 From = now._nr_pr-1; To = BASE;
6598 #endif
6599 _n = 0; trpt->tau &= ~(16|32|64);
6600 goto Again; /* do full search */
6601 } /* else accept reduction */
6602 } else
6603 #ifdef REVERSE
6604 { From = BASE; To = now._nr_pr-1;
6605 #else
6606 { From = now._nr_pr-1; To = BASE;
6607 #endif
6608 _n = 0; trpt->tau &= ~(16|32|64);
6609 #ifdef REVERSE
6610 if (II <= To) /* already decremented */
6611 #else
6612 if (II >= BASE) /* already decremented */
6613 #endif
6614 goto Resume;
6615 else
6616 goto Again;
6617 } }
6618 /* #endif */
6619 #endif
6620 #endif
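	/* _n == 0 here means no process had an executable transition in this
	 * state; the code below distinguishes valid end states, timeout and
	 * claim-stutter extensions, and genuine invalid end states (deadlock).
	 * The second disjunct (tau&4 with tau&2) is a claim move with a
	 * pending timeout request. */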
6621 if (_n == 0 || ((trpt->tau&4) && (trpt->tau&2)))
6622 {
6623 #ifdef DEBUG
6624 cpu_printf("%3d: no move [II=%d, tau=%d, boq=%d]\n",
6625 depth, II, trpt->tau, boq);
6626 #endif
6627 #if SYNC
6628 /* ok if a rendez-vous fails: */
6629 if (boq != -1) goto Done;
6630 #endif
6631 /* ok if no procs or we're at maxdepth */
6632 if ((now._nr_pr == 0 && (!strict || qs_empty()))
6633 #ifdef OTIM
6634 || endstate()
6635 #endif
6636 || depth >= maxdepth-1) goto Done;
6637 if ((trpt->tau&8) && !(trpt->tau&4))
6638 { trpt->tau &= ~(1|8);
6639 /* 1=timeout, 8=atomic */
6640 #ifdef REVERSE
6641 From = BASE; To = now._nr_pr-1;
6642 #else
6643 From = now._nr_pr-1; To = BASE;
6644 #endif
6645 #ifdef DEBUG
6646 cpu_printf("%3d: atomic step proc %d unexecutable\n", depth, II+1);
6647 #endif
6648 #ifdef VERI
6649 trpt->tau |= 4; /* switch to claim */
6650 #endif
6651 goto AllOver;
6652 }
6653 #ifdef ETIM
6654 if (!(trpt->tau&1)) /* didn't try timeout yet */
6655 {
6656 #ifdef VERI
6657 if (trpt->tau&4)
6658 {
6659 #ifndef NTIM
6660 if (trpt->tau&2) /* requested */
6661 #endif
6662 { trpt->tau |= 1;
6663 trpt->tau &= ~2;
6664 #ifdef DEBUG
6665 cpu_printf("%d: timeout\n", depth);
6666 #endif
6667 goto Stutter;
6668 } }
6669 else
6670 { /* only claim can enable timeout */
6671 if ((trpt->tau&8)
6672 && !((trpt-1)->tau&4))
6673 /* blocks inside an atomic */ goto BreakOut;
6674 #ifdef DEBUG
6675 cpu_printf("%d: req timeout\n",
6676 depth);
6677 #endif
6678 (trpt-1)->tau |= 2; /* request */
6679 #if NCORE>1 && defined(FULL_TRAIL)
6680 if (upto > 0)
6681 { Pop_Stack_Tree();
6682 }
6683 #endif
6684 goto Up;
6685 }
6686 #else
6687 #ifdef DEBUG
6688 cpu_printf("%d: timeout\n", depth);
6689 #endif
6690 trpt->tau |= 1;
6691 goto Again;
6692 #endif
6693 }
6694 #endif
6695 #ifdef VERI
6696 BreakOut:
6697 #ifndef NOSTUTTER
6698 if (!(trpt->tau&4))
6699 { trpt->tau |= 4; /* claim stuttering */
6700 trpt->tau |= 128; /* stutter mark */
6701 #ifdef DEBUG
6702 cpu_printf("%d: claim stutter\n", depth);
6703 #endif
6704 goto Stutter;
6705 }
6706 #else
6707 ;
6708 #endif
6709 #else
6710 if (!noends && !a_cycles && !endstate())
6711 { depth--; trpt--; /* new 4.2.3 */
6712 uerror("invalid end state");
6713 depth++; trpt++;
6714 }
6715 #ifndef NOSTUTTER
6716 else if (a_cycles && (trpt->o_pm&2)) /* new 4.2.4 */
6717 { depth--; trpt--;
6718 uerror("accept stutter");
6719 depth++; trpt++;
6720 }
6721 #endif
6722 #endif
6723 }
6724 Done:
6725 if (!(trpt->tau&8)) /* not in atomic seqs */
6726 {
6727 #ifndef SAFETY
6728 if (_n != 0
6729 #ifdef VERI
6730 /* --after-- a program-step, i.e., */
6731 /* after backtracking a claim-step */
6732 && (trpt->tau&4)
6733 /* with at least one running process */
6734 /* unless in a stuttered accept state */
6735 && ((now._nr_pr > 1) || (trpt->o_pm&2))
6736 #endif
6737 && !(now._a_t&1))
6738 {
6739 #ifndef NOFAIR
6740 if (fairness)
6741 {
6742 #ifdef VERBOSE
6743 cpu_printf("Consider check %d %d...\n",
6744 now._a_t, now._cnt[0]);
6745 #endif
6746 if ((now._a_t&2) /* A-bit */
6747 && (now._cnt[0] == 1))
6748 checkcycles();
6749 } else
6750 #endif
6751 if (a_cycles && (trpt->o_pm&2))
6752 checkcycles();
6753 }
6754 #endif
6755 #ifndef MA
6756 #if defined(FULLSTACK) || defined(CNTRSTACK)
6757 #ifdef VERI
6758 if (boq == -1
6759 && (((trpt->tau&4) && !(trpt->tau&128))
6760 || ( (trpt-1)->tau&128)))
6761 #else
6762 if (boq == -1)
6763 #endif
6764 {
6765 #ifdef DEBUG2
6766 #if defined(FULLSTACK)
6767 printf("%d: zapping %u (%d)\n",
6768 depth, trpt->ostate,
6769 (trpt->ostate)?trpt->ostate->tagged:0);
6770 #endif
6771 #endif
6772 onstack_zap();
6773 }
6774 #endif
6775 #else
6776 #ifdef VERI
6777 if (boq == -1
6778 && (((trpt->tau&4) && !(trpt->tau&128))
6779 || ( (trpt-1)->tau&128)))
6780 #else
6781 if (boq == -1)
6782 #endif
6783 {
6784 #ifdef DEBUG
6785 printf("%d: zapping\n", depth);
6786 #endif
6787 onstack_zap();
6788 #ifndef NOREDUCE
6789 if (trpt->proviso)
6790 gstore((char *) &now, vsize, 1);
6791 #endif
6792 }
6793 #endif
6794 }
6795 if (depth > 0)
6796 {
6797 #if NCORE>1 && defined(FULL_TRAIL)
6798 if (upto > 0)
6799 { Pop_Stack_Tree();
6800 }
6801 #endif
6802 goto Up;
6803 }
6804 }
6805
6806 #else
6807 void new_state(void) { /* place holder */ }
6808 #endif
6809
6810 void
6811 assert(int a, char *s, int ii, int tt, Trans *t)
6812 {
6813 if (!a && !noasserts)
6814 { char bad[1024];
6815 strcpy(bad, "assertion violated ");
6816 if (strlen(s) > 1000)
6817 { strncpy(&bad[19], (const char *) s, 1000);
6818 bad[1019] = '\0';
6819 } else
6820 strcpy(&bad[19], s);
6821 uerror(bad);
6822 }
6823 }
6824 #ifndef NOBOUNDCHECK
6825 int
6826 Boundcheck(int x, int y, int a1, int a2, Trans *a3)
6827 {
6828 assert((x >= 0 && x < y), "- invalid array index",
6829 a1, a2, a3);
6830 return x;
6831 }
6832 #endif
6833 void
6834 wrap_stats(void)
6835 {
6836 if (nShadow>0)
6837 printf("%9.8g states, stored (%g visited)\n",
6838 nstates - nShadow, nstates);
6839 else
6840 printf("%9.8g states, stored\n", nstates);
6841 #ifdef BFS
6842 #if SYNC
6843 printf(" %8g nominal states (- rv and atomic)\n", nstates-midrv-nlinks+revrv);
6844 printf(" %8g rvs succeeded\n", midrv-failedrv);
6845 #else
6846 printf(" %8g nominal states (stored-atomic)\n", nstates-nlinks);
6847 #endif
6848 #ifdef DEBUG
6849 printf(" %8g midrv\n", midrv);
6850 printf(" %8g failedrv\n", failedrv);
6851 printf(" %8g revrv\n", revrv);
6852 #endif
6853 #endif
6854 printf("%9.8g states, matched\n", truncs);
6855 #ifdef CHECK
6856 printf("%9.8g matches within stack\n",truncs2);
6857 #endif
6858 if (nShadow>0)
6859 printf("%9.8g transitions (= visited+matched)\n",
6860 nstates+truncs);
6861 else
6862 printf("%9.8g transitions (= stored+matched)\n",
6863 nstates+truncs);
6864 printf("%9.8g atomic steps\n", nlinks);
6865 if (nlost) printf("%g lost messages\n", (double) nlost);
6866
6867 #ifndef BITSTATE
6868 printf("hash conflicts: %9.8g (resolved)\n", hcmp);
6869 #ifndef AUTO_RESIZE
6870 if (hcmp > (double) (1<<ssize))
6871 { printf("hint: increase hashtable-size (-w) to reduce runtime\n");
6872 } /* in multi-core: also reduces lock delays on access to hashtable */
6873 #endif
6874 #else
6875 #ifdef CHECK
6876 printf("%8g states allocated for dfs stack\n", ngrabs);
6877 #endif
6878 if (udmem)
6879 printf("\nhash factor: %4g (best if > 100.)\n\n",
6880 (double)(((double) udmem) * 8.0) / (double) nstates);
6881 else
6882 printf("\nhash factor: %4g (best if > 100.)\n\n",
6883 (double)(1<<(ssize-8)) / (double) nstates * 256.0);
6884 printf("bits set per state: %u (-k%u)\n", hfns, hfns);
6885 #if 0
6886 if (udmem)
6887 { printf("total bits available: %8g (-M%ld)\n",
6888 ((double) udmem) * 8.0, udmem/(1024L*1024L));
6889 } else
6890 printf("total bits available: %8g (-w%d)\n",
6891 ((double) (ONE_L << (ssize-4)) * 16.0), ssize);
6892 #endif
6893 #endif
6894 #ifdef BFS_DISK
6895 printf("bfs disk reads: %ld writes %ld -- diff %ld\n",
6896 bfs_dsk_reads, bfs_dsk_writes, bfs_dsk_writes-bfs_dsk_reads);
6897 if (bfs_dsk_read >= 0) (void) close(bfs_dsk_read);
6898 if (bfs_dsk_write >= 0) (void) close(bfs_dsk_write);
6899 (void) unlink("pan_bfs_dsk.tmp");
6900 #endif
6901 }
6902
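/* wrapup: final report for the run -- the compile-time options in effect,
 * the search statistics from wrap_stats(), and a breakdown of memory use;
 * it then runs the unreached-state listing via do_reach() and exits
 * through pan_exit(). */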
6903 void
6904 wrapup(void)
6905 {
6906 #if defined(BITSTATE) || !defined(NOCOMP)
6907 double nr1, nr2, nr3 = 0.0, nr4, nr5 = 0.0;
6908 #if !defined(MA) && (defined(MEMCNT) || defined(MEMLIM))
6909 int mverbose = 1;
6910 #else
6911 int mverbose = verbose;
6912 #endif
6913 #endif
6914 #if NCORE>1
6915 if (verbose) cpu_printf("wrapup -- %d error(s)\n", errors);
6916 if (core_id != 0)
6917 {
6918 #ifdef USE_DISK
6919 void dsk_stats(void);
6920 dsk_stats();
6921 #endif
6922 if (search_terminated != NULL)
6923 { *search_terminated |= 2; /* wrapup */
6924 }
6925 exit(0); /* normal termination, not an error */
6926 }
6927 #endif
6928 #if !defined(WIN32) && !defined(WIN64)
6929 signal(SIGINT, SIG_DFL);
6930 #endif
6931 printf("\n(%s)\n", SpinVersion);
6932 if (!done) printf("Warning: Search not completed\n");
6933 #ifdef SC
6934 (void) unlink((const char *)stackfile);
6935 #endif
6936 #if NCORE>1
6937 if (a_cycles)
6938 { printf(" + Multi-Core (NCORE=%d)\n", NCORE);
6939 } else
6940 { printf(" + Multi-Core (NCORE=%d -z%d)\n", NCORE, z_handoff);
6941 }
6942 #endif
6943 #ifdef BFS
6944 printf(" + Using Breadth-First Search\n");
6945 #endif
6946 #ifndef NOREDUCE
6947 printf(" + Partial Order Reduction\n");
6948 #endif
6949 #ifdef REVERSE
6950 printf(" + Reverse Depth-First Search Order\n");
6951 #endif
6952 #ifdef T_REVERSE
6953 printf(" + Reverse Transition Ordering\n");
6954 #endif
6955 #ifdef RANDOMIZE
6956 printf(" + Randomized Transition Ordering\n");
6957 #endif
6958 #ifdef SCHED
6959 printf(" + Scheduling Restriction (-DSCHED=%d)\n", sched_max);
6960 #endif
6961 #ifdef COLLAPSE
6962 printf(" + Compression\n");
6963 #endif
6964 #ifdef MA
6965 printf(" + Graph Encoding (-DMA=%d)\n", MA);
6966 #ifdef R_XPT
6967 printf(" Restarted from checkpoint %s.xpt\n", PanSource);
6968 #endif
6969 #endif
6970 #ifdef CHECK
6971 #ifdef FULLSTACK
6972 printf(" + FullStack Matching\n");
6973 #endif
6974 #ifdef CNTRSTACK
6975 printf(" + CntrStack Matching\n");
6976 #endif
6977 #endif
6978 #ifdef BITSTATE
6979 printf("\nBit statespace search for:\n");
6980 #else
6981 #ifdef HC
6982 printf("\nHash-Compact %d search for:\n", HC);
6983 #else
6984 printf("\nFull statespace search for:\n");
6985 #endif
6986 #endif
6987 #ifdef EVENT_TRACE
6988 #ifdef NEGATED_TRACE
6989 printf(" notrace assertion +\n");
6990 #else
6991 printf(" trace assertion +\n");
6992 #endif
6993 #endif
6994 #ifdef VERI
6995 printf(" never claim +\n");
6996 printf(" assertion violations ");
6997 if (noasserts)
6998 printf("- (disabled by -A flag)\n");
6999 else
7000 printf("+ (if within scope of claim)\n");
7001 #else
7002 #ifdef NOCLAIM
7003 printf(" never claim - (not selected)\n");
7004 #else
7005 printf(" never claim - (none specified)\n");
7006 #endif
7007 printf(" assertion violations ");
7008 if (noasserts)
7009 printf("- (disabled by -A flag)\n");
7010 else
7011 printf("+\n");
7012 #endif
7013 #ifndef SAFETY
7014 #ifdef NP
7015 printf(" non-progress cycles ");
7016 #else
7017 printf(" acceptance cycles ");
7018 #endif
7019 if (a_cycles)
7020 printf("+ (fairness %sabled)\n",
7021 fairness?"en":"dis");
7022 else printf("- (not selected)\n");
7023 #else
7024 printf(" cycle checks - (disabled by -DSAFETY)\n");
7025 #endif
7026 #ifdef VERI
7027 printf(" invalid end states - ");
7028 printf("(disabled by ");
7029 if (noends)
7030 printf("-E flag)\n\n");
7031 else
7032 printf("never claim)\n\n");
7033 #else
7034 printf(" invalid end states ");
7035 if (noends)
7036 printf("- (disabled by -E flag)\n\n");
7037 else
7038 printf("+\n\n");
7039 #endif
7040 printf("State-vector %d byte, depth reached %ld", hmax,
7041 #if NCORE>1
7042 (nr_handoffs * z_handoff) +
7043 #endif
7044 mreached);
7045 printf(", errors: %d\n", errors);
7046 fflush(stdout);
7047 #ifdef MA
7048 if (done)
7049 { extern void dfa_stats(void);
7050 if (maxgs+a_cycles+2 < MA)
7051 printf("MA stats: -DMA=%d is sufficient\n",
7052 maxgs+a_cycles+2);
7053 dfa_stats();
7054 }
7055 #endif
7056 wrap_stats();
7057 #ifdef CHECK
7058 printf("stackframes: %d/%d\n\n", smax, svmax);
7059 printf("stats: fa %d, fh %d, zh %d, zn %d - ",
7060 Fa, Fh, Zh, Zn);
7061 printf("check %d holds %d\n", Ccheck, Cholds);
7062 printf("stack stats: puts %d, probes %d, zaps %d\n",
7063 PUT, PROBE, ZAPS);
7064 #else
7065 printf("\n");
7066 #endif
7067
7068 #if defined(BITSTATE) || !defined(NOCOMP)
7069 nr1 = (nstates-nShadow)*
7070 (double)(hmax+sizeof(struct H_el)-sizeof(unsigned));
7071 #ifdef BFS
7072 nr2 = 0.0;
7073 #else
7074 nr2 = (double) ((maxdepth+3)*sizeof(Trail));
7075 #endif
7076 #ifndef BITSTATE
7077 #if !defined(MA) || defined(COLLAPSE)
7078 nr3 = (double) (ONE_L<<ssize)*sizeof(struct H_el *);
7079 #endif
7080 #else
7081 if (udmem)
7082 nr3 = (double) (udmem);
7083 else
7084 nr3 = (double) (ONE_L<<(ssize-3));
7085 #ifdef CNTRSTACK
7086 nr5 = (double) (ONE_L<<(ssize-3));
7087 #endif
7088 #ifdef FULLSTACK
7089 nr5 = (double) (maxdepth*sizeof(struct H_el *));
7090 #endif
7091 #endif
7092 nr4 = (double) (svmax * (sizeof(Svtack) + hmax))
7093 + (double) (smax * (sizeof(Stack) + Maxbody));
7094 #ifndef MA
7095 if (mverbose || memcnt < nr1+nr2+nr3+nr4+nr5)
7096 #endif
7097 { double remainder = memcnt;
7098 double tmp_nr = memcnt-nr3-nr4-(nr2-fragment)-nr5;
7099 #if NCORE>1 && !defined(SEP_STATE)
7100 tmp_nr -= ((double) NCORE * LWQ_SIZE) + GWQ_SIZE;
7101 #endif
7102 if (tmp_nr < 0.0) tmp_nr = 0.;
7103 printf("Stats on memory usage (in Megabytes):\n");
7104 printf("%9.3f equivalent memory usage for states",
7105 nr1/1048576.); /* 1024*1024=1048576 */
7106 printf(" (stored*(State-vector + overhead))\n");
7107 #if NCORE>1 && !defined(WIN32) && !defined(WIN64)
7108 printf("%9.3f shared memory reserved for state storage\n",
7109 mem_reserved/1048576.);
7110 #ifdef SEP_HEAP
7111 printf(" in %d local heaps of %7.3f MB each\n",
7112 NCORE, mem_reserved/(NCORE*1048576.));
7113 #endif
7114 printf("\n");
7115 #endif
7116 #ifdef BITSTATE
7117 if (udmem)
7118 printf("%9.3f memory used for hash array (-M%ld)\n",
7119 nr3/1048576., udmem/(1024L*1024L));
7120 else
7121 printf("%9.3f memory used for hash array (-w%d)\n",
7122 nr3/1048576., ssize);
7123 if (nr5 > 0.0)
7124 printf("%9.3f memory used for bit stack\n",
7125 nr5/1048576.);
7126 remainder = remainder - nr3 - nr5;
7127 #else
7128 printf("%9.3f actual memory usage for states",
7129 tmp_nr/1048576.);
7130 remainder -= tmp_nr;
7131 printf(" (");
7132 if (tmp_nr > 0.)
7133 { if (tmp_nr > nr1) printf("unsuccessful ");
7134 printf("compression: %.2f%%)\n",
7135 (100.0*tmp_nr)/nr1);
7136 } else
7137 printf("less than 1k)\n");
7138 #ifndef MA
7139 if (tmp_nr > 0.)
7140 { printf(" state-vector as stored = %.0f byte",
7141 (tmp_nr)/(nstates-nShadow) -
7142 (double) (sizeof(struct H_el) - sizeof(unsigned)));
7143 printf(" + %ld byte overhead\n",
7144 (long int) sizeof(struct H_el)-sizeof(unsigned));
7145 }
7146 #endif
7147 #if !defined(MA) || defined(COLLAPSE)
7148 printf("%9.3f memory used for hash table (-w%d)\n",
7149 nr3/1048576., ssize);
7150 remainder -= nr3;
7151 #endif
7152 #endif
7153 #ifndef BFS
7154 printf("%9.3f memory used for DFS stack (-m%ld)\n",
7155 nr2/1048576., maxdepth);
7156 remainder -= nr2;
7157 #endif
7158 #if NCORE>1
7159 remainder -= ((double) NCORE * LWQ_SIZE) + GWQ_SIZE;
7160 printf("%9.3f shared memory used for work-queues\n",
7161 (GWQ_SIZE + (double) NCORE * LWQ_SIZE) /1048576.);
7162 printf(" in %d queues of %7.3f MB each",
7163 NCORE, (double) LWQ_SIZE /1048576.);
7164 #ifndef NGQ
7165 printf(" + a global q of %7.3f MB\n",
7166 (double) GWQ_SIZE / 1048576.);
7167 #else
7168 printf("\n");
7169 #endif
7170 #endif
7171 if (remainder - fragment > 1048576.)
7172 printf("%9.3f other (proc and chan stacks)\n",
7173 (remainder-fragment)/1048576.);
7174 if (fragment > 1048576.)
7175 printf("%9.3f memory lost to fragmentation\n",
7176 fragment/1048576.);
7177 printf("%9.3f total actual memory usage\n\n",
7178 memcnt/1048576.);
7179 }
7180 #ifndef MA
7181 else
7182 #endif
7183 #endif
7184 #ifndef MA
7185 printf("%9.3f memory usage (Mbyte)\n\n",
7186 memcnt/1048576.);
7187 #endif
7188 #ifdef COLLAPSE
7189 printf("nr of templates: [ globals chans procs ]\n");
7190 printf("collapse counts: [ ");
7191 { int i; for (i = 0; i < 256+2; i++)
7192 if (ncomps[i] != 0)
7193 printf("%d ", ncomps[i]);
7194 printf("]\n");
7195 }
7196 #endif
7197 if ((done || verbose) && !no_rck) do_reach();
7198 #ifdef PEG
7199 { int i;
7200 printf("\nPeg Counts (transitions executed):\n");
7201 for (i = 1; i < NTRANS; i++)
7202 { if (peg[i]) putpeg(i, peg[i]);
7203 } }
7204 #endif
7205 #ifdef VAR_RANGES
7206 dumpranges();
7207 #endif
7208 #ifdef SVDUMP
7209 if (vprefix > 0) close(svfd);
7210 #endif
7211 #ifdef LOOPSTATE
7212 printf("%g loopstates hit\n", cnt_loops);
7213 #endif
7214 #ifdef NSUCC
7215 dump_succ();
7216 #endif
7217 #if NCORE>1 && defined(T_ALERT)
7218 crash_report();
7219 #endif
7220 pan_exit(0);
7221 }
7222
7223 void
7224 stopped(int arg)
7225 { printf("Interrupted\n");
7226 #if NCORE>1
7227 was_interrupted = 1;
7228 #endif
7229 wrapup();
7230 pan_exit(0);
7231 }
7232
7233 #ifdef SFH
7234 /*
7235 * super fast hash, based on Paul Hsieh's function
7236 * http://www.azillionmonkeys.com/qed/hash.html
7237 */
7238 #include <stdint.h>
7239 #undef get16bits
7240 #if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
7241 || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
7242 #define get16bits(d) (*((const uint16_t *) (d)))
7243 #endif
7244
7245 #ifndef get16bits
7246 #define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8)\
7247 +(uint32_t)(((const uint8_t *)(d))[0]) )
7248 #endif
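/* the byte-oriented get16bits() fallback above avoids unaligned 16-bit
 * loads on targets that do not allow them */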
7249
7250 void
7251 d_sfh(const char *s, int len)
7252 { uint32_t h = len, tmp;
7253 int rem;
7254
7255 rem = len & 3;
7256 len >>= 2;
7257
7258 for ( ; len > 0; len--)
7259 { h += get16bits(s);
7260 tmp = (get16bits(s+2) << 11) ^ h;
7261 h = (h << 16) ^ tmp;
7262 s += 2*sizeof(uint16_t);
7263 h += h >> 11;
7264 }
7265 switch (rem) {
7266 case 3: h += get16bits(s);
7267 h ^= h << 16;
7268 h ^= s[sizeof(uint16_t)] << 18;
7269 h += h >> 11;
7270 break;
7271 case 2: h += get16bits(s);
7272 h ^= h << 11;
7273 h += h >> 17;
7274 break;
7275 case 1: h += *s;
7276 h ^= h << 10;
7277 h += h >> 1;
7278 break;
7279 }
7280 h ^= h << 3;
7281 h += h >> 5;
7282 h ^= h << 4;
7283 h += h >> 17;
7284 h ^= h << 25;
7285 h += h >> 6;
7286
7287 K1 = h;
7288 }
7289 #endif
7290
7291 #include <stdint.h>
7292 #if defined(HASH64) || defined(WIN64)
7293 /* 64-bit Jenkins hash, 1997
7294 * http://burtleburtle.net/bob/c/lookup8.c
7295 */
7296 #define mix(a,b,c) \
7297 { a -= b; a -= c; a ^= (c>>43); \
7298 b -= c; b -= a; b ^= (a<<9); \
7299 c -= a; c -= b; c ^= (b>>8); \
7300 a -= b; a -= c; a ^= (c>>38); \
7301 b -= c; b -= a; b ^= (a<<23); \
7302 c -= a; c -= b; c ^= (b>>5); \
7303 a -= b; a -= c; a ^= (c>>35); \
7304 b -= c; b -= a; b ^= (a<<49); \
7305 c -= a; c -= b; c ^= (b>>11); \
7306 a -= b; a -= c; a ^= (c>>12); \
7307 b -= c; b -= a; b ^= (a<<18); \
7308 c -= a; c -= b; c ^= (b>>22); \
7309 }
7310 #else
7311 /* 32-bit Jenkins hash, 2006
7312 * http://burtleburtle.net/bob/c/lookup3.c
7313 */
7314 #define rot(x,k) (((x)<<(k))|((x)>>(32-(k))))
7315
7316 #define mix(a,b,c) \
7317 { a -= c; a ^= rot(c, 4); c += b; \
7318 b -= a; b ^= rot(a, 6); a += c; \
7319 c -= b; c ^= rot(b, 8); b += a; \
7320 a -= c; a ^= rot(c,16); c += b; \
7321 b -= a; b ^= rot(a,19); a += c; \
7322 c -= b; c ^= rot(b, 4); b += a; \
7323 }
7324
7325 #define final(a,b,c) \
7326 { c ^= b; c -= rot(b,14); \
7327 a ^= c; a -= rot(c,11); \
7328 b ^= a; b -= rot(a,25); \
7329 c ^= b; c -= rot(b,16); \
7330 a ^= c; a -= rot(c,4); \
7331 b ^= a; b -= rot(a,14); \
7332 c ^= b; c -= rot(b,24); \
7333 }
7334 #endif
7335
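/* d_hash: hash the state vector with the Jenkins mix/final rounds defined
 * above (lookup8-style for 64-bit, lookup3-style for 32-bit).  It sets
 * K1/K2 and the index/bit values j1..j4 used by the state-storage code,
 * e.g. for double bit-state hashing under -DBITSTATE. */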
7336 void
7337 d_hash(uchar *kb, int nbytes)
7338 { uint8_t *bp;
7339 #if defined(HASH64) || defined(WIN64)
7340 uint64_t a = 0, b, c, n;
7341 uint64_t *k = (uint64_t *) kb;
7342 #else
7343 uint32_t a, b, c, n;
7344 uint32_t *k = (uint32_t *) kb;
7345 #endif
7346 /* extend to multiple of words, if needed */
7347 n = nbytes/WS; /* nr of words */
7348 a = nbytes - (n*WS);
7349 if (a > 0)
7350 { n++;
7351 bp = kb + nbytes;
7352 switch (a) {
7353 case 3: *bp++ = 0; /* fall thru */
7354 case 2: *bp++ = 0; /* fall thru */
7355 case 1: *bp = 0;
7356 case 0: break;
7357 } }
7358 #if defined(HASH64) || defined(WIN64)
7359 b = HASH_CONST[HASH_NR];
7360 c = 0x9e3779b97f4a7c13LL; /* arbitrary value */
7361 while (n >= 3)
7362 { a += k[0];
7363 b += k[1];
7364 c += k[2];
7365 mix(a,b,c);
7366 n -= 3;
7367 k += 3;
7368 }
7369 c += (((uint64_t) nbytes)<<3);
7370 switch (n) {
7371 case 2: b += k[1];
7372 case 1: a += k[0];
7373 case 0: break;
7374 }
7375 mix(a,b,c);
7376 #else
7377 a = c = 0xdeadbeef + (n<<2);
7378 b = HASH_CONST[HASH_NR];
7379 while (n > 3)
7380 { a += k[0];
7381 b += k[1];
7382 c += k[2];
7383 mix(a,b,c);
7384 n -= 3;
7385 k += 3;
7386 }
7387 switch (n) {
7388 case 3: c += k[2];
7389 case 2: b += k[1];
7390 case 1: a += k[0];
7391 case 0: break;
7392 }
7393 final(a,b,c);
7394 #endif
7395 j1 = c&nmask; j3 = a&7; /* 1st bit */
7396 j2 = b&nmask; j4 = (a>>3)&7; /* 2nd bit */
7397 K1 = c; K2 = b;
7398 }
7399
7400 void
7401 s_hash(uchar *cp, int om)
7402 {
7403 #if defined(SFH)
7404 d_sfh((const char *) cp, om); /* sets K1 */
7405 #else
7406 d_hash(cp, om); /* sets K1 etc */
7407 #endif
7408 #ifdef BITSTATE
7409 if (S_Tab == H_tab)
7410 j1 = K1 % omaxdepth;
7411 else
7412 #endif
7413 if (ssize < 8*WS)
7414 j1 = K1&mask;
7415 else
7416 j1 = K1;
7417 }
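/* without -DRANDSTOR, pan_rand() returns a value that depends only on the
 * current search depth: the table is filled once from the fixed seed 123,
 * so successive runs see the same pseudo-random sequence. */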
7418 #ifndef RANDSTOR
7419 int *prerand;
7420 void
7421 inirand(void)
7422 { int i;
7423 srand(123); /* fixed startpoint */
7424 prerand = (int *) emalloc((omaxdepth+3)*sizeof(int));
7425 for (i = 0; i < omaxdepth+3; i++)
7426 prerand[i] = rand();
7427 }
7428 int
7429 pan_rand(void)
7430 { if (!prerand) inirand();
7431 return prerand[depth];
7432 }
7433 #endif
7434
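/* set_masks: 'mask' limits a hash value to the usable index range (at most
 * ssize bits); under -DBITSTATE, 'nmask' is shifted right by 3 because the
 * hash then selects a byte of the bit array rather than a table slot. */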
7435 void
7436 set_masks(void) /* 4.2.5 */
7437 {
7438 if (WS == 4 && ssize >= 32)
7439 { mask = 0xffffffff;
7440 #ifdef BITSTATE
7441 switch (ssize) {
7442 case 34: nmask = (mask>>1); break;
7443 case 33: nmask = (mask>>2); break;
7444 default: nmask = (mask>>3); break;
7445 }
7446 #else
7447 nmask = mask;
7448 #endif
7449 } else if (WS == 8)
7450 { mask = ((ONE_L<<ssize)-1); /* hash init */
7451 #ifdef BITSTATE
7452 nmask = mask>>3;
7453 #else
7454 nmask = mask;
7455 #endif
7456 } else if (WS != 4)
7457 { fprintf(stderr, "pan: wordsize %ld not supported\n", (long int) WS);
7458 exit(1);
7459 } else /* WS == 4 and ssize < 32 */
7460 { mask = ((ONE_L<<ssize)-1); /* hash init */
7461 nmask = (mask>>3);
7462 }
7463 }
7464
7465 static long reclaim_size;
7466 static char *reclaim_mem;
7467 #if defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(MA)
7468 #if NCORE>1
7469 #error cannot combine AUTO_RESIZE with NCORE>1 yet
7470 #endif
7471 static struct H_el **N_tab;
7472 void
7473 reverse_capture(struct H_el *p)
7474 { if (!p) return;
7475 reverse_capture(p->nxt);
7476 /* last element of list moves first */
7477 /* to preserve list-order */
7478 j2 = p->m_K1;
7479 if (ssize < 8*WS) /* probably always true */
7480 { j2 &= mask;
7481 }
7482 p->nxt = N_tab[j2];
7483 N_tab[j2] = p;
7484 }
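/* resize_hashtable (-DAUTO_RESIZE): grow the table 4x (ssize += 2) and
 * re-insert every chain through reverse_capture(), re-using the m_K1
 * value stored with each state instead of re-hashing the vectors.
 * The old table is not freed here; it is only recorded in
 * reclaim_mem / reclaim_size. */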
7485 void
7486 resize_hashtable(void)
7487 {
7488 if (WS == 4 && ssize >= 27 - 1)
7489	{ return; /* cannot increase further */
7490 }
7491
7492 ssize += 2; /* 4x size */
7493
7494 printf("pan: resizing hashtable to -w%d.. ", ssize);
7495
7496 N_tab = (struct H_el **)
7497 emalloc((ONE_L<<ssize)*sizeof(struct H_el *));
7498
7499 set_masks(); /* they changed */
7500
7501 for (j1 = 0; j1 < (ONE_L << (ssize - 2)); j1++)
7502 { reverse_capture(H_tab[j1]);
7503 }
7504 reclaim_mem = (char *) H_tab;
7505 reclaim_size = (ONE_L << (ssize - 2));
7506 H_tab = N_tab;
7507
7508 printf(" done\n");
7509 }
7510 #endif
7511 #if defined(ZAPH) && defined(BITSTATE)
7512 void
7513 zap_hashtable(void)
7514 { cpu_printf("pan: resetting hashtable\n");
7515 if (udmem)
7516 { memset(SS, 0, udmem);
7517 } else
7518 { memset(SS, 0, ONE_L<<(ssize-3));
7519 }
7520 }
7521 #endif
7522
7523 int
7524 main(int argc, char *argv[])
7525 { void to_compile(void);
7526
7527 efd = stderr; /* default */
7528 #ifdef BITSTATE
7529 bstore = bstore_reg; /* default */
7530 #endif
7531 #if NCORE>1
7532 { int i, j;
7533 strcpy(o_cmdline, "");
7534 for (j = 1; j < argc; j++)
7535 { strcat(o_cmdline, argv[j]);
7536 strcat(o_cmdline, " ");
7537 }
7538 /* printf("Command Line: %s\n", o_cmdline); */
7539 if (strlen(o_cmdline) >= sizeof(o_cmdline))
7540 { Uerror("option list too long");
7541 } }
7542 #endif
7543 while (argc > 1 && argv[1][0] == '-')
7544 { switch (argv[1][1]) {
7545 #ifndef SAFETY
7546 #ifdef NP
7547 case 'a': fprintf(efd, "error: -a disabled");
7548 usage(efd); break;
7549 #else
7550 case 'a': a_cycles = 1; break;
7551 #endif
7552 #endif
7553 case 'A': noasserts = 1; break;
7554 case 'b': bounded = 1; break;
7555 #ifdef HAS_CODE
7556 case 'C': coltrace = 1; goto samething;
7557 #endif
7558 case 'c': upto = atoi(&argv[1][2]); break;
7559 case 'd': state_tables++; break;
7560 case 'e': every_error = 1; Nr_Trails = 1; break;
7561 case 'E': noends = 1; break;
7562 #ifdef SC
7563 case 'F': if (strlen(argv[1]) > 2)
7564 stackfile = &argv[1][2];
7565 break;
7566 #endif
7567 #if !defined(SAFETY) && !defined(NOFAIR)
7568 case 'f': fairness = 1; break;
7569 #endif
7570 #ifdef HAS_CODE
7571 case 'g': gui = 1; goto samething;
7572 #endif
7573 case 'h': if (!argv[1][2]) usage(efd); else
7574 HASH_NR = atoi(&argv[1][2])%33; break;
7575 case 'I': iterative = 2; every_error = 1; break;
7576 case 'i': iterative = 1; every_error = 1; break;
7577 case 'J': like_java = 1; break; /* Klaus Havelund */
7578 #ifdef BITSTATE
7579 case 'k': hfns = atoi(&argv[1][2]); break;
7580 #endif
7581 #ifdef SCHED
7582 case 'L': sched_max = atoi(&argv[1][2]); break;
7583 #endif
7584 #ifndef SAFETY
7585 #ifdef NP
7586 case 'l': a_cycles = 1; break;
7587 #else
7588 case 'l': fprintf(efd, "error: -l disabled");
7589 usage(efd); break;
7590 #endif
7591 #endif
7592 #ifdef BITSTATE
7593 case 'M': udmem = atoi(&argv[1][2]); break;
7594 case 'G': udmem = atoi(&argv[1][2]); udmem *= 1024; break;
7595 #else
7596 case 'M': case 'G':
7597 fprintf(stderr, "-M and -G affect only -DBITSTATE\n");
7598 break;
7599 #endif
7600 case 'm': maxdepth = atoi(&argv[1][2]); break;
7601 case 'n': no_rck = 1; break;
7602 case 'P': readtrail = 1; onlyproc = atoi(&argv[1][2]);
7603			  if (argc > 2 && argv[2][0] != '-') /* check next arg */
7604 { trailfilename = argv[2];
7605 argc--; argv++; /* skip next arg */
7606 }
7607 break;
7608 #ifdef SVDUMP
7609 case 'p': vprefix = atoi(&argv[1][2]); break;
7610 #endif
7611 #if NCORE==1
7612 case 'Q': quota = (double) 60.0 * (double) atoi(&argv[1][2]); break;
7613 #endif
7614 case 'q': strict = 1; break;
7615 case 'R': Nrun = atoi(&argv[1][2]); break;
7616 #ifdef HAS_CODE
7617 case 'r':
7618 samething: readtrail = 1;
7619 if (isdigit(argv[1][2]))
7620 whichtrail = atoi(&argv[1][2]);
7621 else if (argc > 2 && argv[2][0] != '-') /* check next arg */
7622 { trailfilename = argv[2];
7623 argc--; argv++; /* skip next arg */
7624 }
7625 break;
7626 case 'S': silent = 1; goto samething;
7627 #endif
7628 #ifdef BITSTATE
7629 case 's': hfns = 1; break;
7630 #endif
7631 case 'T': TMODE = 0444; break;
7632 case 't': if (argv[1][2]) tprefix = &argv[1][2]; break;
7633 case 'V': start_timer(); printf("Generated by %s\n", SpinVersion);
7634 to_compile(); pan_exit(2); break;
7635 case 'v': verbose++; break;
7636 case 'w': ssize = atoi(&argv[1][2]); break;
7637 case 'Y': signoff = 1; break;
7638 case 'X': efd = stdout; break;
7639 case 'x': exclusive = 1; break;
7640 #if NCORE>1
7641 /* -B ip is passthru to proxy of remote ip address: */
7642 case 'B': argc--; argv++; break;
7643 case 'Q': worker_pids[0] = atoi(&argv[1][2]); break;
7644 /* -Un means that the nth worker should be instantiated as a proxy */
7645 case 'U': proxy_pid = atoi(&argv[1][2]); break;
7646 /* -W means that this copy is started by a cluster-server as a remote */
7647 /* this flag is passed to ./pan_proxy, which interprets it */
7648 case 'W': remote_party++; break;
7649 case 'Z': core_id = atoi(&argv[1][2]);
7650 if (verbose)
7651 { printf("cpu%d: pid %d parent %d\n",
7652 core_id, getpid(), worker_pids[0]);
7653 }
7654 break;
7655 case 'z': z_handoff = atoi(&argv[1][2]); break;
7656 #else
7657 case 'z': break; /* ignored for single-core */
7658 #endif
7659 default : fprintf(efd, "saw option -%c\n", argv[1][1]); usage(efd); break;
7660 }
7661 argc--; argv++;
7662 }
7663 if (iterative && TMODE != 0666)
7664 { TMODE = 0666;
7665 fprintf(efd, "warning: -T ignored when -i or -I is used\n");
7666 }
7667 #if defined(HASH32) && !defined(SFH)
7668 if (WS > 4)
7669 { fprintf(efd, "strong warning: compiling -DHASH32 on a 64-bit machine\n");
7670 fprintf(efd, " without -DSFH can slow down performance a lot\n");
7671 }
7672 #endif
7673 #if defined(WIN32) || defined(WIN64)
7674 if (TMODE == 0666)
7675 TMODE = _S_IWRITE | _S_IREAD;
7676 else
7677 TMODE = _S_IREAD;
7678 #endif
7679 #if NCORE>1
7680 store_proxy_pid = proxy_pid; /* for checks in mem_file() and someone_crashed() */
7681 if (core_id != 0) { proxy_pid = 0; }
7682 #ifndef SEP_STATE
7683 if (core_id == 0 && a_cycles)
7684 { fprintf(efd, "hint: this search may be more efficient ");
7685 fprintf(efd, "if pan.c is compiled -DSEP_STATE\n");
7686 }
7687 #endif
7688 if (z_handoff < 0)
7689 { z_handoff = 20; /* conservative default - for non-liveness checks */
7690 }
7691 #if defined(NGQ) || defined(LWQ_FIXED)
7692 LWQ_SIZE = (double) (128.*1048576.);
7693 #else
7694 LWQ_SIZE = (double) ( z_handoff + 2.) * (double) sizeof(SM_frame);
7695 #endif
7696 #if NCORE>2
7697 if (a_cycles)
7698 { fprintf(efd, "warning: the intended nr of cores to be used in liveness mode is 2\n");
7699 #ifndef SEP_STATE
7700 fprintf(efd, "warning: without -DSEP_STATE there is no guarantee that all liveness violations are found\n");
7701 #endif
7702 }
7703 #endif
7704 #ifdef HAS_HIDDEN
7705 #error cannot use hidden variables when compiling multi-core
7706 #endif
7707 #endif
7708 #ifdef BITSTATE
7709 if (hfns <= 0)
7710 { hfns = 1;
7711 fprintf(efd, "warning: using -k%d as minimal usable value\n", hfns);
7712 }
7713 #endif
7714 omaxdepth = maxdepth;
7715 #ifdef BITSTATE
7716 if (WS == 4 && ssize > 34)
7717 { ssize = 34;
7718 fprintf(efd, "warning: using -w%d as max usable value\n", ssize);
7719 /*
7720	 * -w35 would not work: 35-3 = 32 but 1<<31 is the largest
7721	 * power of 2 that can be represented in an unsigned long
7722 */
7723 }
7724 #else
7725 if (WS == 4 && ssize > 27)
7726 { ssize = 27;
7727 fprintf(efd, "warning: using -w%d as max usable value\n", ssize);
7728 /*
7729 * for emalloc, the lookup table size multiplies by 4 for the pointers
7730	 * the largest power of 2 that can be represented in a ulong is 1<<31
7731 * hence the largest number of lookup table slots is 31-4 = 27
7732 */
7733 }
7734 #endif
7735 #ifdef SC
7736 hiwater = HHH = maxdepth-10;
7737 DDD = HHH/2;
7738 if (!stackfile)
7739 { stackfile = (char *) emalloc(strlen(PanSource)+4+1);
7740 sprintf(stackfile, "%s._s_", PanSource);
7741 }
7742 if (iterative)
7743 { fprintf(efd, "error: cannot use -i or -I with -DSC\n");
7744 pan_exit(1);
7745 }
7746 #endif
7747 #if (defined(R_XPT) || defined(W_XPT)) && !defined(MA)
7748 #warning -DR_XPT and -DW_XPT assume -DMA (ignored)
7749 #endif
7750 if (iterative && a_cycles)
7751 fprintf(efd, "warning: -i or -I work for safety properties only\n");
7752 #ifdef BFS
7753 #ifdef SC
7754 #error -DBFS not compatible with -DSC
7755 #endif
7756 #ifdef HAS_LAST
7757 #error -DBFS not compatible with _last
7758 #endif
7759 #ifdef HAS_STACK
7760 #error cannot use c_track UnMatched with BFS
7761 #endif
7762 #ifdef REACH
7763 #warning -DREACH is redundant when -DBFS is used
7764 #endif
7765 #endif
7766 #if defined(MERGED) && defined(PEG)
7767 #error to use -DPEG use: spin -o3 -a
7768 #endif
7769 #ifdef HC
7770 #ifdef SFH
7771 #error cannot combine -DHC and -DSFH
7772 /* use of NOCOMP is the real reason */
7773 #else
7774 #ifdef NOCOMP
7775 #error cannot combine -DHC and -DNOCOMP
7776 #endif
7777 #endif
7778 #ifdef BITSTATE
7779 #error cannot combine -DHC and -DBITSTATE
7780 #endif
7781 #endif
7782 #if defined(SAFETY) && defined(NP)
7783 #error cannot combine -DNP and -DBFS or -DSAFETY
7784 #endif
7785 #ifdef MA
7786 #ifdef BITSTATE
7787 #error cannot combine -DMA and -DBITSTATE
7788 #endif
7789 #if MA <= 0
7790 #error usage: -DMA=N with N > 0 and N < VECTORSZ
7791 #endif
7792 #endif
7793 #ifdef COLLAPSE
7794 #ifdef BITSTATE
7795 #error cannot combine -DBITSTATE and -DCOLLAPSE
7796 #endif
7797 #ifdef SFH
7798 #error cannot combine -DCOLLAPSE and -DSFH
7799 /* use of NOCOMP is the real reason */
7800 #else
7801 #ifdef NOCOMP
7802 #error cannot combine -DCOLLAPSE and -DNOCOMP
7803 #endif
7804 #endif
7805 #endif
7806 if (maxdepth <= 0 || ssize <= 1) usage(efd);
7807 #if SYNC>0 && !defined(NOREDUCE)
7808 if (a_cycles && fairness)
7809 { fprintf(efd, "error: p.o. reduction not compatible with ");
7810 fprintf(efd, "fairness (-f) in models\n");
7811 fprintf(efd, " with rendezvous operations: ");
7812 fprintf(efd, "recompile with -DNOREDUCE\n");
7813 pan_exit(1);
7814 }
7815 #endif
7816 #if defined(REM_VARS) && !defined(NOREDUCE)
7817 #warning p.o. reduction not compatible with remote varrefs (use -DNOREDUCE)
7818 #endif
7819 #if defined(NOCOMP) && !defined(BITSTATE)
7820 if (a_cycles)
7821 { fprintf(efd, "error: use of -DNOCOMP voids -l and -a\n");
7822 pan_exit(1);
7823 }
7824 #endif
7825 #ifdef MEMLIM
7826 memlim = ((double) MEMLIM) * (double) (1<<20); /* size in Mbyte */
7827 #endif
7828 #ifndef BITSTATE
7829 if (Nrun > 1) HASH_NR = Nrun - 1;
7830 #endif
7831 if (Nrun < 1 || Nrun > 32)
7832 { fprintf(efd, "error: invalid arg for -R\n");
7833 usage(efd);
7834 }
7835 #ifndef SAFETY
7836 if (fairness && !a_cycles)
7837 { fprintf(efd, "error: -f requires -a or -l\n");
7838 usage(efd);
7839 }
7840 #if ACCEPT_LAB==0
7841 if (a_cycles)
7842 { fprintf(efd, "error: no accept labels defined ");
7843 fprintf(efd, "in model (for option -a)\n");
7844 usage(efd);
7845 }
7846 #endif
7847 #endif
7848 #ifndef NOREDUCE
7849 #ifdef HAS_ENABLED
7850 #error use of enabled() requires -DNOREDUCE
7851 #endif
7852 #ifdef HAS_PCVALUE
7853 #error use of pcvalue() requires -DNOREDUCE
7854 #endif
7855 #ifdef HAS_BADELSE
7856 #error use of 'else' combined with i/o stmnts requires -DNOREDUCE
7857 #endif
7858 #ifdef HAS_LAST
7859 #error use of _last requires -DNOREDUCE
7860 #endif
7861 #endif
7862 #if SYNC>0 && !defined(NOREDUCE)
7863 #ifdef HAS_UNLESS
7864	fprintf(efd, "warning: use of rendezvous stmnts in the escape\n");
7865 fprintf(efd, " of an unless clause, if present, could make p.o. reduction\n");
7866 fprintf(efd, " invalid (use -DNOREDUCE to avoid this)\n");
7867 #ifdef BFS
7868 fprintf(efd, " (this type of rv is also not compatible with -DBFS)\n");
7869 #endif
7870 #endif
7871 #endif
7872 #if SYNC>0 && defined(BFS)
7873 #warning use of rendezvous with BFS does not preserve all invalid endstates
7874 #endif
7875 #if !defined(REACH) && !defined(BITSTATE)
7876 if (iterative != 0 && a_cycles == 0)
7877 { fprintf(efd, "warning: -i and -I need -DREACH to work accurately\n");
7878 }
7879 #endif
7880 #if defined(BITSTATE) && defined(REACH)
7881 #warning -DREACH is voided by -DBITSTATE
7882 #endif
7883 #if defined(MA) && defined(REACH)
7884 #warning -DREACH is voided by -DMA
7885 #endif
7886 #if defined(FULLSTACK) && defined(CNTRSTACK)
7887 #error cannot combine -DFULLSTACK and -DCNTRSTACK
7888 #endif
7889 #if defined(VERI)
7890 #if ACCEPT_LAB>0
7891 #ifndef BFS
7892 if (!a_cycles
7893 #ifdef HAS_CODE
7894 && !readtrail
7895 #endif
7896 #if NCORE>1
7897 && core_id == 0
7898 #endif
7899 && !state_tables)
7900 { fprintf(efd, "warning: never claim + accept labels ");
7901 fprintf(efd, "requires -a flag to fully verify\n");
7902 }
7903 #else
7904 if (!state_tables
7905 #ifdef HAS_CODE
7906 && !readtrail
7907 #endif
7908 )
7909 { fprintf(efd, "warning: verification in BFS mode ");
7910 fprintf(efd, "is restricted to safety properties\n");
7911 }
7912 #endif
7913 #endif
7914 #endif
7915 #ifndef SAFETY
7916 if (!a_cycles
7917 #ifdef HAS_CODE
7918 && !readtrail
7919 #endif
7920 #if NCORE>1
7921 && core_id == 0
7922 #endif
7923 && !state_tables)
7924 { fprintf(efd, "hint: this search is more efficient ");
7925 fprintf(efd, "if pan.c is compiled -DSAFETY\n");
7926 }
7927 #ifndef NOCOMP
7928 if (!a_cycles)
7929 { S_A = 0;
7930 } else
7931 { if (!fairness)
7932 { S_A = 1; /* _a_t */
7933 #ifndef NOFAIR
7934 } else /* _a_t and _cnt[NFAIR] */
7935 { S_A = (&(now._cnt[0]) - (uchar *) &now) + NFAIR - 2;
7936 /* -2 because first two uchars in now are masked */
7937 #endif
7938 } }
7939 #endif
7940 #endif
7941 signal(SIGINT, stopped);
7942 set_masks();
7943 #ifdef BFS
7944 trail = (Trail *) emalloc(6*sizeof(Trail));
7945 trail += 3;
7946 #else
7947 trail = (Trail *) emalloc((maxdepth+3)*sizeof(Trail));
7948 trail++; /* protect trpt-1 refs at depth 0 */
7949 #endif
7950 #ifdef SVDUMP
7951 if (vprefix > 0)
7952 { char nm[64];
7953 sprintf(nm, "%s.svd", PanSource);
7954 if ((svfd = creat(nm, TMODE)) < 0)
7955 { fprintf(efd, "couldn't create %s\n", nm);
7956 vprefix = 0;
7957 } }
7958 #endif
7959 #ifdef RANDSTOR
7960 srand(123);
7961 #endif
7962 #if SYNC>0 && ASYNC==0
7963 set_recvs();
7964 #endif
7965 run();
7966 done = 1;
7967 wrapup();
7968 return 0;
7969 }
7970
7971 void
7972 usage(FILE *fd)
7973 {
7974 fprintf(fd, "%s\n", SpinVersion);
7975 fprintf(fd, "Valid Options are:\n");
7976 #ifndef SAFETY
7977 #ifdef NP
7978 fprintf(fd, " -a -> is disabled by -DNP ");
7979 fprintf(fd, "(-DNP compiles for -l only)\n");
7980 #else
7981 fprintf(fd, " -a find acceptance cycles\n");
7982 #endif
7983 #else
7984 fprintf(fd, " -a,-l,-f -> are disabled by -DSAFETY\n");
7985 #endif
7986 fprintf(fd, " -A ignore assert() violations\n");
7987 fprintf(fd, " -b consider it an error to exceed the depth-limit\n");
7988 fprintf(fd, " -cN stop at Nth error ");
7989 fprintf(fd, "(defaults to -c1)\n");
7990 fprintf(fd, " -d print state tables and stop\n");
7991 fprintf(fd, " -e create trails for all errors\n");
7992 fprintf(fd, " -E ignore invalid end states\n");
7993 #ifdef SC
7994 fprintf(fd, " -Ffile use 'file' to store disk-stack\n");
7995 #endif
7996 #ifndef NOFAIR
7997 fprintf(fd, " -f add weak fairness (to -a or -l)\n");
7998 #endif
7999 fprintf(fd, " -hN use different hash-seed N:1..32\n");
8000 fprintf(fd, " -i search for shortest path to error\n");
8001 fprintf(fd, " -I like -i, but approximate and faster\n");
8002 fprintf(fd, " -J reverse eval order of nested unlesses\n");
8003 #ifdef BITSTATE
8004 fprintf(fd, " -kN set N bits per state (defaults to 3)\n");
8005 #endif
8006 #ifdef SCHED
8007 fprintf(fd, " -LN set scheduling restriction to N (default 10)\n");
8008 #endif
8009 #ifndef SAFETY
8010 #ifdef NP
8011 fprintf(fd, " -l find non-progress cycles\n");
8012 #else
8013 fprintf(fd, " -l find non-progress cycles -> ");
8014 fprintf(fd, "disabled, requires ");
8015 fprintf(fd, "compilation with -DNP\n");
8016 #endif
8017 #endif
8018 #ifdef BITSTATE
8019 fprintf(fd, " -MN use N Megabytes for bitstate hash array\n");
8020 fprintf(fd, " -GN use N Gigabytes for bitstate hash array\n");
8021 #endif
8022 fprintf(fd, " -mN max depth N steps (default=10k)\n");
8023 fprintf(fd, " -n no listing of unreached states\n");
8024 #ifdef SVDUMP
8025 fprintf(fd, " -pN create svfile (save N bytes per state)\n");
8026 #endif
8027 fprintf(fd, " -QN set time-limit on execution of N minutes\n");
8028 fprintf(fd, " -q require empty chans in valid end states\n");
8029 #ifdef HAS_CODE
8030 fprintf(fd, " -r read and execute trail - can add -v,-n,-PN,-g,-C\n");
8031 fprintf(fd, " -rN read and execute N-th error trail\n");
8032 fprintf(fd, " -C read and execute trail - columnated output (can add -v,-n)\n");
8033 fprintf(fd, " -PN read and execute trail - restrict trail output to proc N\n");
8034 fprintf(fd, " -g read and execute trail + msc gui support\n");
8035 fprintf(fd, " -S silent replay: only user defined printfs show\n");
8036 #endif
8037 #ifdef BITSTATE
8038 fprintf(fd, " -RN repeat run Nx with N ");
8039 fprintf(fd, "[1..32] independent hash functions\n");
8040 fprintf(fd, " -s same as -k1 (single bit per state)\n");
8041 #endif
8042 fprintf(fd, " -T create trail files in read-only mode\n");
8043 fprintf(fd, " -tsuf replace .trail with .suf on trailfiles\n");
8044 fprintf(fd, " -V print SPIN version number\n");
8045 fprintf(fd, " -v verbose -- filenames in unreached state listing\n");
8046 fprintf(fd, " -wN hashtable of 2^N entries ");
8047 fprintf(fd, "(defaults to -w%d)\n", ssize);
8048 fprintf(fd, " -x do not overwrite an existing trail file\n");
8049 #if NCORE>1
8050 fprintf(fd, " -zN handoff states below depth N to 2nd cpu (multi_core)\n");
8051 #endif
8052 #ifdef HAS_CODE
8053 fprintf(fd, "\n options -r, -C, -PN, -g, and -S can optionally be followed by\n");
8054 fprintf(fd, " a filename argument, as in '-r filename', naming the trailfile\n");
8055 #endif
8056 #if NCORE>1
8057 multi_usage(fd);
8058 #endif
8059 exit(1);
8060 }
8061
8062 char *
8063 Malloc(unsigned long n)
8064 { char *tmp;
8065 #ifdef MEMLIM
8066 if (memcnt+ (double) n > memlim) goto err;
8067 #endif
8068 #if 1
8069 tmp = (char *) malloc(n);
8070 if (!tmp)
8071 #else
8072 tmp = (char *) sbrk(n);
8073 if (tmp == (char *) -ONE_L)
8074 #endif
8075 {
8076 #ifdef MEMLIM
8077 err:
8078 #endif
8079 printf("pan: out of memory\n");
8080 #ifdef MEMLIM
8081 printf(" %g bytes used\n", memcnt);
8082 printf(" %g bytes more needed\n", (double) n);
8083 printf(" %g bytes limit\n",
8084 memlim);
8085 #endif
8086 #ifdef COLLAPSE
8087 printf("hint: to reduce memory, recompile with\n");
8088 #ifndef MA
8089 printf(" -DMA=%d # better/slower compression, or\n", hmax);
8090 #endif
8091 printf(" -DBITSTATE # supertrace, approximation\n");
8092 #else
8093 #ifndef BITSTATE
8094 printf("hint: to reduce memory, recompile with\n");
8095 #ifndef HC
8096 printf(" -DCOLLAPSE # good, fast compression, or\n");
8097 #ifndef MA
8098 printf(" -DMA=%d # better/slower compression, or\n", hmax);
8099 #endif
8100 printf(" -DHC # hash-compaction, approximation\n");
8101 #endif
8102 printf(" -DBITSTATE # supertrace, approximation\n");
8103 #endif
8104 #endif
8105 #if NCORE>1
8106 #ifdef FULL_TRAIL
8107 printf(" omit -DFULL_TRAIL or use pan -c0 to reduce memory\n");
8108 #endif
8109 #ifdef SEP_STATE
8110 printf("hint: to reduce memory, recompile without\n");
8111 printf(" -DSEP_STATE # may be faster, but uses more memory\n");
8112 #endif
8113 #endif
8114 wrapup();
8115 }
8116 memcnt += (double) n;
8117 return tmp;
8118 }
8119
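/* emalloc: bump allocator layered on Malloc(); memory is taken from
 * CHUNK-sized blocks, pointer-aligned and zero-filled, and is never
 * released.  Bytes wasted at the end of a block are accounted for in
 * 'fragment'. */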
8120 #define CHUNK (100*VECTORSZ)
8121
8122 char *
8123 emalloc(unsigned long n) /* never released or reallocated */
8124 { char *tmp;
8125 if (n == 0)
8126 return (char *) NULL;
8127 if (n&(sizeof(void *)-1)) /* for proper alignment */
8128 n += sizeof(void *)-(n&(sizeof(void *)-1));
8129 if ((unsigned long) left < n)
8130 { grow = (n < CHUNK) ? CHUNK : n;
8131 have = Malloc(grow);
8132 fragment += (double) left;
8133 left = grow;
8134 }
8135 tmp = have;
8136 have += (long) n;
8137 left -= (long) n;
8138 memset(tmp, 0, n);
8139 return tmp;
8140 }
8141 void
8142 Uerror(char *str)
8143 { /* always fatal */
8144 uerror(str);
8145 #if NCORE>1
8146 sudden_stop("Uerror");
8147 #endif
8148 wrapup();
8149 }
8150
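/* Unwind (MA mode): undo moves down the trail until the state again
 * matches the snapshot in comp_now; this appears to be how the start of
 * an accept cycle is located when an error is reported under
 * minimized-automaton state storage (see its use in uerror below). */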
8151 #if defined(MA) && !defined(SAFETY)
8152 int
8153 Unwind(void)
8154 { Trans *t; uchar ot, _m; int tt; short II;
8155 #ifdef VERBOSE
8156 int i;
8157 #endif
8158 uchar oat = now._a_t;
8159 now._a_t &= ~(1|16|32);
8160 memcpy((char *) &comp_now, (char *) &now, vsize);
8161 now._a_t = oat;
8162 Up:
8163 #ifdef SC
8164 trpt = getframe(depth);
8165 #endif
8166 #ifdef VERBOSE
8167 printf("%d State: ", depth);
8168 for (i = 0; i < vsize; i++) printf("%d%s,",
8169 ((char *)&now)[i], Mask[i]?"*":"");
8170 printf("\n");
8171 #endif
8172 #ifndef NOFAIR
8173 if (trpt->o_pm&128) /* fairness alg */
8174 { now._cnt[now._a_t&1] = trpt->bup.oval;
8175 depth--;
8176 #ifdef SC
8177 trpt = getframe(depth);
8178 #else
8179 trpt--;
8180 #endif
8181 goto Q999;
8182 }
8183 #endif
8184 #ifdef HAS_LAST
8185 #ifdef VERI
8186 { int d; Trail *trl;
8187 now._last = 0;
8188 for (d = 1; d < depth; d++)
8189 { trl = getframe(depth-d); /* was trl = (trpt-d); */
8190 if (trl->pr != 0)
8191 { now._last = trl->pr - BASE;
8192 break;
8193 } } }
8194 #else
8195 now._last = (depth<1)?0:(trpt-1)->pr;
8196 #endif
8197 #endif
8198 #ifdef EVENT_TRACE
8199 now._event = trpt->o_event;
8200 #endif
8201 if ((now._a_t&1) && depth <= A_depth)
8202 { now._a_t &= ~(1|16|32);
8203 if (fairness) now._a_t |= 2; /* ? */
8204 A_depth = 0;
8205 goto CameFromHere; /* checkcycles() */
8206 }
8207 t = trpt->o_t;
8208 ot = trpt->o_ot; II = trpt->pr;
8209 tt = trpt->o_tt; this = pptr(II);
8210 _m = do_reverse(t, II, trpt->o_m);
8211 #ifdef VERBOSE
8212 printf("%3d: proc %d ", depth, II);
8213 printf("reverses %d, %d to %d,",
8214 t->forw, tt, t->st);
8215 printf(" %s [abit=%d,adepth=%d,",
8216 t->tp, now._a_t, A_depth);
8217 printf("tau=%d,%d] <unwind>\n",
8218 trpt->tau, (trpt-1)->tau);
8219 #endif
8220 depth--;
8221 #ifdef SC
8222 trpt = getframe(depth);
8223 #else
8224 trpt--;
8225 #endif
8226 /* reached[ot][t->st] = 1; 3.4.13 */
8227 ((P0 *)this)->_p = tt;
8228 #ifndef NOFAIR
8229 if ((trpt->o_pm&32))
8230 {
8231 #ifdef VERI
8232 if (now._cnt[now._a_t&1] == 0)
8233 now._cnt[now._a_t&1] = 1;
8234 #endif
8235 now._cnt[now._a_t&1] += 1;
8236 }
8237 Q999:
8238 if (trpt->o_pm&8)
8239 { now._a_t &= ~2;
8240 now._cnt[now._a_t&1] = 0;
8241 }
8242 if (trpt->o_pm&16)
8243 now._a_t |= 2;
8244 #endif
8245 CameFromHere:
8246 if (memcmp((char *) &now, (char *) &comp_now, vsize) == 0)
8247 return depth;
8248 if (depth > 0) goto Up;
8249 return 0;
8250 }
8251 #endif
8252 static char unwinding;
8253 void
8254 uerror(char *str)
8255 { static char laststr[256];
8256 int is_cycle;
8257
8258 if (unwinding) return; /* 1.4.2 */
8259 if (strncmp(str, laststr, 254))
8260 #if NCORE>1
8261 cpu_printf("pan: %s (at depth %ld)\n", str,
8262 #else
8263 printf("pan: %s (at depth %ld)\n", str,
8264 #endif
8265 #if NCORE>1
8266 (nr_handoffs * z_handoff) +
8267 #endif
8268 ((depthfound==-1)?depth:depthfound));
8269 strncpy(laststr, str, 254);
8270 errors++;
8271 #ifdef HAS_CODE
8272 if (readtrail) { wrap_trail(); return; }
8273 #endif
8274 is_cycle = (strstr(str, " cycle") != (char *) 0);
8275 if (!is_cycle)
8276 { depth++; trpt++;
8277 }
8278 if ((every_error != 0)
8279 || errors == upto)
8280 {
8281 #if defined(MA) && !defined(SAFETY)
8282 if (is_cycle)
8283 { int od = depth;
8284 unwinding = 1;
8285 depthfound = Unwind();
8286 unwinding = 0;
8287 depth = od;
8288 }
8289 #endif
8290 #if NCORE>1
8291 writing_trail = 1;
8292 #endif
8293 #ifdef BFS
8294 if (depth > 1) trpt--;
8295 nuerror(str);
8296 if (depth > 1) trpt++;
8297 #else
8298 putrail();
8299 #endif
8300 #if defined(MA) && !defined(SAFETY)
8301 if (strstr(str, " cycle"))
8302 { if (every_error)
8303 printf("sorry: MA writes 1 trail max\n");
8304 wrapup(); /* no recovery from unwind */
8305 }
8306 #endif
8307 #if NCORE>1
8308 if (search_terminated != NULL)
8309 { *search_terminated |= 4; /* uerror */
8310 }
8311 writing_trail = 0;
8312 #endif
8313 }
8314 if (!is_cycle)
8315 { depth--; trpt--; /* undo */
8316 }
8317 #ifndef BFS
8318 if (iterative != 0 && maxdepth > 0)
8319 { maxdepth = (iterative == 1)?(depth-1):(depth/2);
8320 warned = 1;
8321 printf("pan: reducing search depth to %ld\n",
8322 maxdepth);
8323 } else
8324 #endif
8325 if (errors >= upto && upto != 0)
8326 {
8327 #if NCORE>1
8328 sudden_stop("uerror");
8329 #endif
8330 wrapup();
8331 }
8332 depthfound = -1;
8333 }
8334
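/*
 * uerror() above is the central error handler: it prints the diagnostic
 * (suppressing an immediate repeat of the same message), counts the error,
 * writes a trail via putrail() -- or nuerror() in a BFS run -- when the error
 * is to be reported, shrinks maxdepth in iterative (-i/-I) mode, and calls
 * wrapup() once the requested number of errors (pan -cN) has been reached.
 */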
8335 int
8336 xrefsrc(int lno, S_F_MAP *mp, int M, int i)
8337 { Trans *T; int j, retval=1;
8338 for (T = trans[M][i]; T; T = T->nxt)
8339 if (T && T->tp)
8340 { if (strcmp(T->tp, ".(goto)") == 0
8341 || strncmp(T->tp, "goto :", 6) == 0)
8342 return 1; /* not reported */
8343
8344 printf("\tline %d", lno);
8345 if (verbose)
8346 	  for (j = 0; j < (int) sizeof(mp); j++)	/* caution: sizeof of the pointer, not the length of the map table */
8347 if (i >= mp[j].from && i <= mp[j].upto)
8348 { printf(", \"%s\"", mp[j].fnm);
8349 break;
8350 }
8351 printf(", state %d", i);
8352 if (strcmp(T->tp, "") != 0)
8353 { char *q;
8354 q = transmognify(T->tp);
8355 printf(", \"%s\"", q?q:"");
8356 } else if (stopstate[M][i])
8357 printf(", -end state-");
8358 printf("\n");
8359 retval = 0; /* reported */
8360 }
8361 return retval;
8362 }
8363
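/*
 * Unreachable-code report: r_ck() below is called once per proctype at the
 * end of a run; for every state that was never reached (and is not covered
 * through mapstate[] by a merged state that was reached) it lets xrefsrc()
 * above print the source line, file name (with -v) and transition text,
 * while pure goto transitions are not reported.
 */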
8364 void
8365 r_ck(uchar *which, int N, int M, short *src, S_F_MAP *mp)
8366 { int i, m=0;
8367
8368 #ifdef VERI
8369 if (M == VERI && !verbose) return;
8370 #endif
8371 printf("unreached in proctype %s\n", procname[M]);
8372 for (i = 1; i < N; i++)
8373 if (which[i] == 0
8374 && (mapstate[M][i] == 0
8375 || which[mapstate[M][i]] == 0))
8376 m += xrefsrc((int) src[i], mp, M, i);
8377 else
8378 m++;
8379 printf(" (%d of %d states)\n", N-1-m, N-1);
8380 }
8381 #if NCORE>1 && !defined(SEP_STATE)
8382 static long rev_trail_cnt;
8383
8384 #ifdef FULL_TRAIL
8385 void
8386 rev_trail(int fd, volatile Stack_Tree *st_tr)
8387 { long j; char snap[64];
8388
8389 if (!st_tr)
8390 { return;
8391 }
8392 rev_trail(fd, st_tr->prv);
8393 #ifdef VERBOSE
8394 	printf("%d (%ld) LRT [%d,%d] -- %p (root %p)\n",
8395 		depth, rev_trail_cnt, st_tr->pr, st_tr->t_id, (void *) st_tr, (void *) stack_last[core_id]);

8396 #endif
8397 if (st_tr->pr != 255)
8398 { sprintf(snap, "%ld:%d:%d\n",
8399 rev_trail_cnt++, st_tr->pr, st_tr->t_id);
8400 j = strlen(snap);
8401 if (write(fd, snap, j) != j)
8402 { printf("pan: error writing trailfile\n");
8403 close(fd);
8404 wrapup();
8405 return;
8406 }
8407 } else /* handoff point */
8408 { if (a_cycles)
8409 { write(fd, "-1:-1:-1\n", 9);
8410 } }
8411 }
8412 #endif
8413 #endif
8414
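/*
 * putrail() below writes the error trail, one "step:process:transition-id"
 * line per depth level, preceded by a "-2:VERI:-2" line when a never claim
 * is present and a "-4:-4:-4" line when transitions may have been merged;
 * a "-1:-1:-1" line marks the start of an accepted cycle.  In multi-core
 * runs with -DFULL_TRAIL the trail is instead reconstructed from the shared
 * Stack_Tree by rev_trail() above.
 */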
8415 void
8416 putrail(void)
8417 { int fd;
8418 #if defined(VERI) || defined(MERGED)
8419 char snap[64];
8420 #endif
8421 #if NCORE==1 || defined(SEP_STATE) || !defined(FULL_TRAIL)
8422 long i, j;
8423 Trail *trl;
8424 #endif
8425 fd = make_trail();
8426 if (fd < 0) return;
8427 #ifdef VERI
8428 sprintf(snap, "-2:%d:-2\n", VERI);
8429 write(fd, snap, strlen(snap));
8430 #endif
8431 #ifdef MERGED
8432 sprintf(snap, "-4:-4:-4\n");
8433 write(fd, snap, strlen(snap));
8434 #endif
8435 #if NCORE>1 && !defined(SEP_STATE) && defined(FULL_TRAIL)
8436 rev_trail_cnt = 1;
8437 enter_critical(GLOBAL_LOCK);
8438 rev_trail(fd, stack_last[core_id]);
8439 leave_critical(GLOBAL_LOCK);
8440 #else
8441 i = 1; /* trail starts at position 1 */
8442 #if NCORE>1 && defined(SEP_STATE)
8443 if (cur_Root.m_vsize > 0) { i++; depth++; }
8444 #endif
8445 for ( ; i <= depth; i++)
8446 { if (i == depthfound+1)
8447 write(fd, "-1:-1:-1\n", 9);
8448 trl = getframe(i);
8449 if (!trl->o_t) continue;
8450 if (trl->o_pm&128) continue;
8451 sprintf(snap, "%ld:%d:%d\n",
8452 i, trl->pr, trl->o_t->t_id);
8453 j = strlen(snap);
8454 if (write(fd, snap, j) != j)
8455 { printf("pan: error writing trailfile\n");
8456 close(fd);
8457 wrapup();
8458 } }
8459 #endif
8460 close(fd);
8461 #if NCORE>1
8462 cpu_printf("pan: wrote trailfile\n");
8463 #endif
8464 }
8465
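/*
 * sv_save()/sv_restor() below keep a small stack (svtack) of complete
 * state-vector snapshots: sv_save() pushes a byte-for-byte copy of "now"
 * (plus boq in rendezvous models), and sv_restor() copies it back, checking
 * that the vector size did not change in between.  This gives the engine a
 * way to undo a move exactly where the normal transition-reversal code does
 * not apply.
 */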
8466 void
8467 sv_save(void) /* push state vector onto save stack */
8468 { if (!svtack->nxt)
8469 { svtack->nxt = (Svtack *) emalloc(sizeof(Svtack));
8470 svtack->nxt->body = emalloc(vsize*sizeof(char));
8471 svtack->nxt->lst = svtack;
8472 svtack->nxt->m_delta = vsize;
8473 svmax++;
8474 } else if (vsize > svtack->nxt->m_delta)
8475 { svtack->nxt->body = emalloc(vsize*sizeof(char));
8476 svtack->nxt->lst = svtack;
8477 svtack->nxt->m_delta = vsize;
8478 svmax++;
8479 }
8480 svtack = svtack->nxt;
8481 #if SYNC
8482 svtack->o_boq = boq;
8483 #endif
8484 svtack->o_delta = vsize; /* don't compress */
8485 memcpy((char *)(svtack->body), (char *) &now, vsize);
8486 #if defined(C_States) && defined(HAS_STACK) && (HAS_TRACK==1)
8487 c_stack((uchar *) &(svtack->c_stack[0]));
8488 #endif
8489 #ifdef DEBUG
8490 cpu_printf("%d: sv_save\n", depth);
8491 #endif
8492 }
8493
8494 void
8495 sv_restor(void) /* pop state vector from save stack */
8496 {
8497 memcpy((char *)&now, svtack->body, svtack->o_delta);
8498 #if SYNC
8499 boq = svtack->o_boq;
8500 #endif
8501 #if defined(C_States) && (HAS_TRACK==1)
8502 #ifdef HAS_STACK
8503 c_unstack((uchar *) &(svtack->c_stack[0]));
8504 #endif
8505 c_revert((uchar *) &(now.c_state[0]));
8506 #endif
8507 if (vsize != svtack->o_delta)
8508 Uerror("sv_restor");
8509 if (!svtack->lst)
8510 		Uerror("error: sv_restor");
8511 svtack = svtack->lst;
8512 #ifdef DEBUG
8513 cpu_printf(" sv_restor\n");
8514 #endif
8515 }
8516
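/*
 * p_restor()/q_restor() below undo a process or queue deletion when the
 * search backtracks: the bytes saved by delproc()/delq() further down are
 * spliced back onto the tail of the state vector, _nr_pr/_nr_qs and vsize
 * are restored, and the Mask[] entries for alignment padding are re-set so
 * that compression keeps ignoring those bytes.
 */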
8517 void
8518 p_restor(int h)
8519 { int i; char *z = (char *) &now;
8520
8521 proc_offset[h] = stack->o_offset;
8522 proc_skip[h] = (uchar) stack->o_skip;
8523 #ifndef XUSAFE
8524 p_name[h] = stack->o_name;
8525 #endif
8526 #ifndef NOCOMP
8527 for (i = vsize + stack->o_skip; i > vsize; i--)
8528 Mask[i-1] = 1; /* align */
8529 #endif
8530 vsize += stack->o_skip;
8531 memcpy(z+vsize, stack->body, stack->o_delta);
8532 vsize += stack->o_delta;
8533 #ifndef NOVSZ
8534 now._vsz = vsize;
8535 #endif
8536 #ifndef NOCOMP
8537 for (i = 1; i <= Air[((P0 *)pptr(h))->_t]; i++)
8538 Mask[vsize - i] = 1; /* pad */
8539 Mask[proc_offset[h]] = 1; /* _pid */
8540 #endif
8541 if (BASE > 0 && h > 0)
8542 ((P0 *)pptr(h))->_pid = h-BASE;
8543 else
8544 ((P0 *)pptr(h))->_pid = h;
8545 i = stack->o_delqs;
8546 now._nr_pr += 1;
8547 if (!stack->lst) /* debugging */
8548 Uerror("error: p_restor");
8549 stack = stack->lst;
8550 this = pptr(h);
8551 while (i-- > 0)
8552 q_restor();
8553 }
8554
8555 void
8556 q_restor(void)
8557 { char *z = (char *) &now;
8558 #ifndef NOCOMP
8559 int k, k_end;
8560 #endif
8561 q_offset[now._nr_qs] = stack->o_offset;
8562 q_skip[now._nr_qs] = (uchar) stack->o_skip;
8563 #ifndef XUSAFE
8564 q_name[now._nr_qs] = stack->o_name;
8565 #endif
8566 vsize += stack->o_skip;
8567 memcpy(z+vsize, stack->body, stack->o_delta);
8568 vsize += stack->o_delta;
8569 #ifndef NOVSZ
8570 now._vsz = vsize;
8571 #endif
8572 now._nr_qs += 1;
8573 #ifndef NOCOMP
8574 k_end = stack->o_offset;
8575 k = k_end - stack->o_skip;
8576 #if SYNC
8577 #ifndef BFS
8578 if (q_zero(now._nr_qs)) k_end += stack->o_delta;
8579 #endif
8580 #endif
8581 for ( ; k < k_end; k++)
8582 Mask[k] = 1;
8583 #endif
8584 if (!stack->lst) /* debugging */
8585 Uerror("error: q_restor");
8586 stack = stack->lst;
8587 }
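/*
 * grab_ints()/ungrab_ints() below recycle integer arrays by exact size
 * through the filled_chunks[]/empty_chunks[] free lists; since emalloc()ed
 * memory is never returned, this is the only way such arrays can be reused
 * instead of leaking.
 */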
8588 typedef struct IntChunks {
8589 int *ptr;
8590 struct IntChunks *nxt;
8591 } IntChunks;
8592 IntChunks *filled_chunks[512];
8593 IntChunks *empty_chunks[512];
8594 int *
8595 grab_ints(int nr)
8596 { IntChunks *z;
8597 if (nr >= 512) Uerror("cannot happen grab_int");
8598 if (filled_chunks[nr])
8599 { z = filled_chunks[nr];
8600 filled_chunks[nr] = filled_chunks[nr]->nxt;
8601 } else
8602 { z = (IntChunks *) emalloc(sizeof(IntChunks));
8603 z->ptr = (int *) emalloc(nr * sizeof(int));
8604 }
8605 z->nxt = empty_chunks[nr];
8606 empty_chunks[nr] = z;
8607 return z->ptr;
8608 }
8609 void
8610 ungrab_ints(int *p, int nr)
8611 { IntChunks *z;
8612 if (!empty_chunks[nr]) Uerror("cannot happen ungrab_int");
8613 z = empty_chunks[nr];
8614 empty_chunks[nr] = empty_chunks[nr]->nxt;
8615 z->ptr = p;
8616 z->nxt = filled_chunks[nr];
8617 filled_chunks[nr] = z;
8618 }
8619 int
8620 delproc(int sav, int h)
8621 { int d, i=0;
8622 #ifndef NOCOMP
8623 int o_vsize = vsize;
8624 #endif
8625 if (h+1 != (int) now._nr_pr) return 0;
8626
8627 while (now._nr_qs
8628 && q_offset[now._nr_qs-1] > proc_offset[h])
8629 { delq(sav);
8630 i++;
8631 }
8632 d = vsize - proc_offset[h];
8633 if (sav)
8634 { if (!stack->nxt)
8635 { stack->nxt = (Stack *)
8636 emalloc(sizeof(Stack));
8637 stack->nxt->body =
8638 emalloc(Maxbody*sizeof(char));
8639 stack->nxt->lst = stack;
8640 smax++;
8641 }
8642 stack = stack->nxt;
8643 stack->o_offset = proc_offset[h];
8644 #if VECTORSZ>32000
8645 stack->o_skip = (int) proc_skip[h];
8646 #else
8647 stack->o_skip = (short) proc_skip[h];
8648 #endif
8649 #ifndef XUSAFE
8650 stack->o_name = p_name[h];
8651 #endif
8652 stack->o_delta = d;
8653 stack->o_delqs = i;
8654 memcpy(stack->body, (char *)pptr(h), d);
8655 }
8656 vsize = proc_offset[h];
8657 now._nr_pr = now._nr_pr - 1;
8658 memset((char *)pptr(h), 0, d);
8659 vsize -= (int) proc_skip[h];
8660 #ifndef NOVSZ
8661 now._vsz = vsize;
8662 #endif
8663 #ifndef NOCOMP
8664 for (i = vsize; i < o_vsize; i++)
8665 Mask[i] = 0; /* reset */
8666 #endif
8667 return 1;
8668 }
8669
8670 void
8671 delq(int sav)
8672 { int h = now._nr_qs - 1;
8673 int d = vsize - q_offset[now._nr_qs - 1];
8674 #ifndef NOCOMP
8675 int k, o_vsize = vsize;
8676 #endif
8677 if (sav)
8678 { if (!stack->nxt)
8679 { stack->nxt = (Stack *)
8680 emalloc(sizeof(Stack));
8681 stack->nxt->body =
8682 emalloc(Maxbody*sizeof(char));
8683 stack->nxt->lst = stack;
8684 smax++;
8685 }
8686 stack = stack->nxt;
8687 stack->o_offset = q_offset[h];
8688 #if VECTORSZ>32000
8689 stack->o_skip = (int) q_skip[h];
8690 #else
8691 stack->o_skip = (short) q_skip[h];
8692 #endif
8693 #ifndef XUSAFE
8694 stack->o_name = q_name[h];
8695 #endif
8696 stack->o_delta = d;
8697 memcpy(stack->body, (char *)qptr(h), d);
8698 }
8699 vsize = q_offset[h];
8700 now._nr_qs = now._nr_qs - 1;
8701 memset((char *)qptr(h), 0, d);
8702 vsize -= (int) q_skip[h];
8703 #ifndef NOVSZ
8704 now._vsz = vsize;
8705 #endif
8706 #ifndef NOCOMP
8707 for (k = vsize; k < o_vsize; k++)
8708 Mask[k] = 0; /* reset */
8709 #endif
8710 }
8711
8712 int
8713 qs_empty(void)
8714 { int i;
8715 for (i = 0; i < (int) now._nr_qs; i++)
8716 { if (q_sz(i) > 0)
8717 return 0;
8718 }
8719 return 1;
8720 }
8721
8722 int
8723 endstate(void)
8724 { int i; P0 *ptr;
8725 for (i = BASE; i < (int) now._nr_pr; i++)
8726 { ptr = (P0 *) pptr(i);
8727 if (!stopstate[ptr->_t][ptr->_p])
8728 return 0;
8729 }
8730 if (strict) return qs_empty();
8731 #if defined(EVENT_TRACE) && !defined(OTIM)
8732 if (!stopstate[EVENT_TRACE][now._event] && !a_cycles)
8733 { printf("pan: event_trace not completed\n");
8734 return 0;
8735 }
8736 #endif
8737 return 1;
8738 }
8739
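/*
 * checkcycles() below seeds the nested (second) depth-first search for
 * acceptance cycles: it sets the low bit of _a_t (plus bits 16|32 to help
 * the hash function), copies "now" into A_Root, records A_depth, and calls
 * new_state() to run the second DFS from this seed -- or, in multi-core
 * mode, hands the seed to another cpu via mem_put_acc().  Afterwards _a_t,
 * the fairness counter and A_depth are restored so the first DFS continues.
 */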
8740 #ifndef SAFETY
8741 void
8742 checkcycles(void)
8743 { uchar o_a_t = now._a_t;
8744 #ifdef SCHED
8745 int o_limit;
8746 #endif
8747 #ifndef NOFAIR
8748 uchar o_cnt = now._cnt[1];
8749 #endif
8750 #ifdef FULLSTACK
8751 #ifndef MA
8752 struct H_el *sv = trpt->ostate; /* save */
8753 #else
8754 uchar prov = trpt->proviso; /* save */
8755 #endif
8756 #endif
8757 #ifdef DEBUG
8758 { int i; uchar *v = (uchar *) &now;
8759 printf(" set Seed state ");
8760 #ifndef NOFAIR
8761 if (fairness) printf("(cnt = %d:%d, nrpr=%d) ",
8762 now._cnt[0], now._cnt[1], now._nr_pr);
8763 #endif
8764 /* for (i = 0; i < n; i++) printf("%d,", v[i]); */
8765 printf("\n");
8766 }
8767 printf("%d: cycle check starts\n", depth);
8768 #endif
8769 now._a_t |= (1|16|32);
8770 /* 1 = 2nd DFS; (16|32) to help hasher */
8771 #ifndef NOFAIR
8772 now._cnt[1] = now._cnt[0];
8773 #endif
8774 memcpy((char *)&A_Root, (char *)&now, vsize);
8775 A_depth = depthfound = depth;
8776 #if NCORE>1
8777 mem_put_acc();
8778 #else
8779 #ifdef SCHED
8780 o_limit = trpt->sched_limit;
8781 trpt->sched_limit = 0;
8782 #endif
8783 new_state(); /* start 2nd DFS */
8784 #ifdef SCHED
8785 trpt->sched_limit = o_limit;
8786 #endif
8787 #endif
8788 now._a_t = o_a_t;
8789 #ifndef NOFAIR
8790 now._cnt[1] = o_cnt;
8791 #endif
8792 A_depth = 0; depthfound = -1;
8793 #ifdef DEBUG
8794 printf("%d: cycle check returns\n", depth);
8795 #endif
8796 #ifdef FULLSTACK
8797 #ifndef MA
8798 trpt->ostate = sv; /* restore */
8799 #else
8800 trpt->proviso = prov;
8801 #endif
8802 #endif
8803 }
8804 #endif
8805
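/*
 * With -DBITSTATE the bit array cannot answer "is this state on the DFS
 * stack", so when FULLSTACK is also in effect the stack states are kept in
 * a separate conventional hash table S_Tab (set up by onstack_init()).
 * grab_state() first tries to reuse an entry of sufficient size from
 * Free_list -- entries are donated by onstack_zap() further down -- before
 * falling back to emalloc().
 */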
8806 #if defined(FULLSTACK) && defined(BITSTATE)
8807 struct H_el *Free_list = (struct H_el *) 0;
8808 void
8809 onstack_init(void) /* to store stack states in a bitstate search */
8810 { S_Tab = (struct H_el **) emalloc(maxdepth*sizeof(struct H_el *));
8811 }
8812 struct H_el *
8813 grab_state(int n)
8814 { struct H_el *v, *last = 0;
8815 if (H_tab == S_Tab)
8816 { for (v = Free_list; v && ((int) v->tagged >= n); v=v->nxt)
8817 { if ((int) v->tagged == n)
8818 { if (last)
8819 last->nxt = v->nxt;
8820 else
8821 gotcha: Free_list = v->nxt;
8822 v->tagged = 0;
8823 v->nxt = 0;
8824 #ifdef COLLAPSE
8825 v->ln = 0;
8826 #endif
8827 return v;
8828 }
8829 Fh++; last=v;
8830 }
8831 /* new: second try */
8832 v = Free_list;
8833 if (v && ((int) v->tagged >= n))
8834 goto gotcha;
8835 ngrabs++;
8836 }
8837 return (struct H_el *)
8838 emalloc(sizeof(struct H_el)+n-sizeof(unsigned));
8839 }
8840
8841 #else
8842 #if NCORE>1
8843 struct H_el *
8844 grab_state(int n)
8845 { struct H_el *grab_shared(int);
8846 return grab_shared(sizeof(struct H_el)+n-sizeof(unsigned));
8847 }
8848 #else
8849 #ifndef AUTO_RESIZE
8850 #define grab_state(n) (struct H_el *) \
8851 emalloc(sizeof(struct H_el)+n-sizeof(unsigned long));
8852 #else
8853 struct H_el *
8854 grab_state(int n)
8855 { struct H_el *p;
8856 int cnt = sizeof(struct H_el)+n-sizeof(unsigned long);
8857
8858 if (reclaim_size >= cnt+WS)
8859 { if ((cnt & (WS-1)) != 0) /* alignment */
8860 { cnt += WS - (cnt & (WS-1));
8861 }
8862 p = (struct H_el *) reclaim_mem;
8863 reclaim_mem += cnt;
8864 reclaim_size -= cnt;
8865 memset(p, 0, cnt);
8866 } else
8867 { p = (struct H_el *) emalloc(cnt);
8868 }
8869 return p;
8870 }
8871 #endif
8872 #endif
8873 #endif
8874 #ifdef COLLAPSE
8875 unsigned long
8876 ordinal(char *v, long n, short tp)
8877 { struct H_el *tmp, *ntmp; long m;
8878 struct H_el *olst = (struct H_el *) 0;
8879 s_hash((uchar *)v, n);
8880 #if NCORE>1 && !defined(SEP_STATE)
8881 enter_critical(CS_ID); /* uses spinlock - 1..128 */
8882 #endif
8883 tmp = H_tab[j1];
8884 if (!tmp)
8885 { tmp = grab_state(n);
8886 H_tab[j1] = tmp;
8887 } else
8888 for ( ;; olst = tmp, tmp = tmp->nxt)
8889 { m = memcmp(((char *)&(tmp->state)), v, n);
8890 if (n == tmp->ln)
8891 {
8892 if (m == 0)
8893 goto done;
8894 if (m < 0)
8895 {
8896 Insert: ntmp = grab_state(n);
8897 ntmp->nxt = tmp;
8898 if (!olst)
8899 H_tab[j1] = ntmp;
8900 else
8901 olst->nxt = ntmp;
8902 tmp = ntmp;
8903 break;
8904 } else if (!tmp->nxt)
8905 {
8906 Append: tmp->nxt = grab_state(n);
8907 tmp = tmp->nxt;
8908 break;
8909 }
8910 continue;
8911 }
8912 if (n < tmp->ln)
8913 goto Insert;
8914 else if (!tmp->nxt)
8915 goto Append;
8916 }
8917 m = ++ncomps[tp];
8918 #ifdef FULLSTACK
8919 tmp->tagged = m;
8920 #else
8921 tmp->st_id = m;
8922 #endif
8923 #if defined(AUTO_RESIZE) && !defined(BITSTATE)
8924 tmp->m_K1 = K1;
8925 #endif
8926 memcpy(((char *)&(tmp->state)), v, n);
8927 tmp->ln = n;
8928 done:
8929 #if NCORE>1 && !defined(SEP_STATE)
8930 leave_critical(CS_ID); /* uses spinlock */
8931 #endif
8932 #ifdef FULLSTACK
8933 return tmp->tagged;
8934 #else
8935 return tmp->st_id;
8936 #endif
8937 }
8938
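/*
 * Collapse compression, in outline: ordinal() above maps the byte string of
 * one component (a process, a queue, or the globals) to a small integer by
 * looking it up in -- or adding it to -- the length-ordered lists of H_tab;
 * compress() below then builds the stored vector out of these component
 * indices, emitting only as many bytes per index as its value needs and
 * packing the per-component byte counts two bits at a time, so that most
 * state vectors shrink to a few bytes per component.
 */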
8939 int
8940 compress(char *vin, int nin) /* collapse compression */
8941 { char *w, *v = (char *) &comp_now;
8942 int i, j;
8943 unsigned long n;
8944 static char *x;
8945 static uchar nbytes[513]; /* 1 + 256 + 256 */
8946 static unsigned short nbytelen;
8947 long col_q(int, char *);
8948 long col_p(int, char *);
8949 #ifndef SAFETY
8950 if (a_cycles)
8951 *v++ = now._a_t;
8952 #ifndef NOFAIR
8953 if (fairness)
8954 for (i = 0; i < NFAIR; i++)
8955 *v++ = now._cnt[i];
8956 #endif
8957 #endif
8958 nbytelen = 0;
8959 #ifndef JOINPROCS
8960 for (i = 0; i < (int) now._nr_pr; i++)
8961 { n = col_p(i, (char *) 0);
8962 #ifdef NOFIX
8963 nbytes[nbytelen] = 0;
8964 #else
8965 nbytes[nbytelen] = 1;
8966 *v++ = ((P0 *) pptr(i))->_t;
8967 #endif
8968 *v++ = n&255;
8969 if (n >= (1<<8))
8970 { nbytes[nbytelen]++;
8971 *v++ = (n>>8)&255;
8972 }
8973 if (n >= (1<<16))
8974 { nbytes[nbytelen]++;
8975 *v++ = (n>>16)&255;
8976 }
8977 if (n >= (1<<24))
8978 { nbytes[nbytelen]++;
8979 *v++ = (n>>24)&255;
8980 }
8981 nbytelen++;
8982 }
8983 #else
8984 x = scratch;
8985 for (i = 0; i < (int) now._nr_pr; i++)
8986 x += col_p(i, x);
8987 n = ordinal(scratch, x-scratch, 2); /* procs */
8988 *v++ = n&255;
8989 nbytes[nbytelen] = 0;
8990 if (n >= (1<<8))
8991 { nbytes[nbytelen]++;
8992 *v++ = (n>>8)&255;
8993 }
8994 if (n >= (1<<16))
8995 { nbytes[nbytelen]++;
8996 *v++ = (n>>16)&255;
8997 }
8998 if (n >= (1<<24))
8999 { nbytes[nbytelen]++;
9000 *v++ = (n>>24)&255;
9001 }
9002 nbytelen++;
9003 #endif
9004 #ifdef SEPQS
9005 for (i = 0; i < (int) now._nr_qs; i++)
9006 { n = col_q(i, (char *) 0);
9007 nbytes[nbytelen] = 0;
9008 *v++ = n&255;
9009 if (n >= (1<<8))
9010 { nbytes[nbytelen]++;
9011 *v++ = (n>>8)&255;
9012 }
9013 if (n >= (1<<16))
9014 { nbytes[nbytelen]++;
9015 *v++ = (n>>16)&255;
9016 }
9017 if (n >= (1<<24))
9018 { nbytes[nbytelen]++;
9019 *v++ = (n>>24)&255;
9020 }
9021 nbytelen++;
9022 }
9023 #endif
9024 #ifdef NOVSZ
9025 /* 3 = _a_t, _nr_pr, _nr_qs */
9026 w = (char *) &now + 3 * sizeof(uchar);
9027 #ifndef NOFAIR
9028 w += NFAIR;
9029 #endif
9030 #else
9031 #if VECTORSZ<65536
9032 w = (char *) &(now._vsz) + sizeof(unsigned short);
9033 #else
9034 w = (char *) &(now._vsz) + sizeof(unsigned long);
9035 #endif
9036 #endif
9037 x = scratch;
9038 *x++ = now._nr_pr;
9039 *x++ = now._nr_qs;
9040 if (now._nr_qs > 0 && qptr(0) < pptr(0))
9041 n = qptr(0) - (uchar *) w;
9042 else
9043 n = pptr(0) - (uchar *) w;
9044 j = w - (char *) &now;
9045 for (i = 0; i < (int) n; i++, w++)
9046 if (!Mask[j++]) *x++ = *w;
9047 #ifndef SEPQS
9048 for (i = 0; i < (int) now._nr_qs; i++)
9049 x += col_q(i, x);
9050 #endif
9051 x--;
9052 for (i = 0, j = 6; i < nbytelen; i++)
9053 { if (j == 6)
9054 { j = 0;
9055 *(++x) = 0;
9056 } else
9057 j += 2;
9058 *x |= (nbytes[i] << j);
9059 }
9060 x++;
9061 for (j = 0; j < WS-1; j++)
9062 *x++ = 0;
9063 x -= j; j = 0;
9064 n = ordinal(scratch, x-scratch, 0); /* globals */
9065 *v++ = n&255;
9066 if (n >= (1<< 8)) { *v++ = (n>> 8)&255; j++; }
9067 if (n >= (1<<16)) { *v++ = (n>>16)&255; j++; }
9068 if (n >= (1<<24)) { *v++ = (n>>24)&255; j++; }
9069 *v++ = j; /* add last count as a byte */
9070 for (i = 0; i < WS-1; i++)
9071 *v++ = 0;
9072 v -= i;
9073 #if 0
9074 printf("collapse %d -> %d\n",
9075 vsize, v - (char *)&comp_now);
9076 #endif
9077 return v - (char *)&comp_now;
9078 }
9079 #else
9080 #if !defined(NOCOMP)
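/*
 * Default compression: the state vector is copied into comp_now while the
 * bytes flagged in Mask[] (alignment padding for processes and queues) are
 * skipped, with the copy loop unrolled eight-fold, and the result is padded
 * up to a word boundary.  When compiled with -DHC the vector is not copied
 * at all: only the hash signatures are stored -- K1 (one machine word) and,
 * for HC>0, part of K2 -- which is the hash-compact approximation.
 */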
9081 int
9082 compress(char *vin, int n) /* default compression */
9083 {
9084 #ifdef HC
9085 int delta = 0;
9086 s_hash((uchar *)vin, n); /* sets K1 and K2 */
9087 #ifndef SAFETY
9088 if (S_A)
9089 { delta++; /* _a_t */
9090 #ifndef NOFAIR
9091 if (S_A > NFAIR)
9092 delta += NFAIR; /* _cnt[] */
9093 #endif
9094 }
9095 #endif
9096 memcpy((char *) &comp_now + delta, (char *) &K1, WS);
9097 delta += WS;
9098 #if HC>0
9099 memcpy((char *) &comp_now + delta, (char *) &K2, HC);
9100 delta += HC;
9101 #endif
9102 return delta;
9103 #else
9104 char *vv = vin;
9105 char *v = (char *) &comp_now;
9106 int i;
9107 #ifndef NO_FAST_C
9108 int r = 0, unroll = n/8;
9109 if (unroll > 0)
9110 { i = 0;
9111 while (r++ < unroll)
9112 { /* unroll 8 times, avoid ifs */
9113 /* 1 */ *v = *vv++;
9114 v += 1 - Mask[i++];
9115 /* 2 */ *v = *vv++;
9116 v += 1 - Mask[i++];
9117 /* 3 */ *v = *vv++;
9118 v += 1 - Mask[i++];
9119 /* 4 */ *v = *vv++;
9120 v += 1 - Mask[i++];
9121 /* 5 */ *v = *vv++;
9122 v += 1 - Mask[i++];
9123 /* 6 */ *v = *vv++;
9124 v += 1 - Mask[i++];
9125 /* 7 */ *v = *vv++;
9126 v += 1 - Mask[i++];
9127 /* 8 */ *v = *vv++;
9128 v += 1 - Mask[i++];
9129 }
9130 r = n - i; /* the rest, at most 7 */
9131 switch (r) {
9132 case 7: *v = *vv++; v += 1 - Mask[i++];
9133 case 6: *v = *vv++; v += 1 - Mask[i++];
9134 case 5: *v = *vv++; v += 1 - Mask[i++];
9135 case 4: *v = *vv++; v += 1 - Mask[i++];
9136 case 3: *v = *vv++; v += 1 - Mask[i++];
9137 case 2: *v = *vv++; v += 1 - Mask[i++];
9138 case 1: *v = *vv++; v += 1 - Mask[i++];
9139 case 0: break;
9140 }
9141 r = (n+WS-1)/WS; /* words rounded up */
9142 r *= WS; /* bytes */
9143 i = r - i; /* remainder */
9144 switch (i) {
9145 case 7: *v++ = 0; /* fall thru */
9146 case 6: *v++ = 0;
9147 case 5: *v++ = 0;
9148 case 4: *v++ = 0;
9149 case 3: *v++ = 0;
9150 case 2: *v++ = 0;
9151 case 1: *v++ = 0;
9152 case 0: break;
9153 default: Uerror("unexpected wordsize");
9154 }
9155 v -= i;
9156 } else
9157 #endif
9158 { for (i = 0; i < n; i++, vv++)
9159 if (!Mask[i]) *v++ = *vv;
9160 for (i = 0; i < WS-1; i++)
9161 *v++ = 0;
9162 v -= i;
9163 }
9164 #if 0
9165 printf("compress %d -> %d\n",
9166 n, v - (char *)&comp_now);
9167 #endif
9168 return v - (char *)&comp_now;
9169 #endif
9170 }
9171 #endif
9172 #endif
9173 #if defined(FULLSTACK) && defined(BITSTATE)
9174 #if defined(MA)
9175 #if !defined(onstack_now)
9176 int onstack_now(void) { return 0; }	/* MA keeps no explicit stack */
9177 #endif
9178 #if !defined(onstack_put)
9179 void onstack_put(void) {}
9180 #endif
9181 #if !defined(onstack_zap)
9182 void onstack_zap(void) {}
9183 #endif
9184 #else
9185 void
9186 onstack_zap(void)
9187 { struct H_el *v, *w, *last = 0;
9188 struct H_el **tmp = H_tab;
9189 char *nv; int n, m;
9190
9191 static char warned = 0;
9192
9193 H_tab = S_Tab;
9194 #ifndef NOCOMP
9195 nv = (char *) &comp_now;
9196 n = compress((char *)&now, vsize);
9197 #else
9198 #if defined(BITSTATE) && defined(LC)
9199 nv = (char *) &comp_now;
9200 n = compact_stack((char *)&now, vsize);
9201 #else
9202 nv = (char *) &now;
9203 n = vsize;
9204 #endif
9205 #endif
9206 #if !defined(HC) && !(defined(BITSTATE) && defined(LC))
9207 s_hash((uchar *)nv, n);
9208 #endif
9209 H_tab = tmp;
9210 for (v = S_Tab[j1]; v; Zh++, last=v, v=v->nxt)
9211 { m = memcmp(&(v->state), nv, n);
9212 if (m == 0)
9213 goto Found;
9214 if (m < 0)
9215 break;
9216 }
9217 /* NotFound: */
9218 #ifndef ZAPH
9219 #if defined(BITSTATE) && NCORE>1
9220 /* seen this happen, likely harmless, but not yet understood */
9221 if (warned == 0)
9222 #endif
9223 { /* Uerror("stack out of wack - zap"); */
9224 cpu_printf("pan: warning, stack incomplete\n");
9225 warned = 1;
9226 }
9227 #endif
9228 return;
9229 Found:
9230 ZAPS++;
9231 if (last)
9232 last->nxt = v->nxt;
9233 else
9234 S_Tab[j1] = v->nxt;
9235 v->tagged = (unsigned) n;
9236 #if !defined(NOREDUCE) && !defined(SAFETY)
9237 v->proviso = 0;
9238 #endif
9239 v->nxt = last = (struct H_el *) 0;
9240 for (w = Free_list; w; Fa++, last=w, w = w->nxt)
9241 { if ((int) w->tagged <= n)
9242 { if (last)
9243 { v->nxt = w;
9244 last->nxt = v;
9245 } else
9246 { v->nxt = Free_list;
9247 Free_list = v;
9248 }
9249 return;
9250 }
9251 if (!w->nxt)
9252 { w->nxt = v;
9253 return;
9254 } }
9255 Free_list = v;
9256 }
9257 void
9258 onstack_put(void)
9259 { struct H_el **tmp = H_tab;
9260 H_tab = S_Tab;
9261 if (hstore((char *)&now, vsize) != 0)
9262 #if defined(BITSTATE) && defined(LC)
9263 printf("pan: warning, double stack entry\n");
9264 #else
9265 #ifndef ZAPH
9266 	Uerror("cannot happen - onstack_put");
9267 #endif
9268 #endif
9269 H_tab = tmp;
9270 trpt->ostate = Lstate;
9271 PUT++;
9272 }
9273 int
9274 onstack_now(void)
9275 { struct H_el *tmp;
9276 struct H_el **tmp2 = H_tab;
9277 char *v; int n, m = 1;
9278
9279 H_tab = S_Tab;
9280 #ifdef NOCOMP
9281 #if defined(BITSTATE) && defined(LC)
9282 v = (char *) &comp_now;
9283 n = compact_stack((char *)&now, vsize);
9284 #else
9285 v = (char *) &now;
9286 n = vsize;
9287 #endif
9288 #else
9289 v = (char *) &comp_now;
9290 n = compress((char *)&now, vsize);
9291 #endif
9292 #if !defined(HC) && !(defined(BITSTATE) && defined(LC))
9293 s_hash((uchar *)v, n);
9294 #endif
9295 H_tab = tmp2;
9296 for (tmp = S_Tab[j1]; tmp; Zn++, tmp = tmp->nxt)
9297 { m = memcmp(((char *)&(tmp->state)),v,n);
9298 if (m <= 0)
9299 { Lstate = (struct H_el *) tmp;
9300 break;
9301 } }
9302 PROBE++;
9303 return (m == 0);
9304 }
9305 #endif
9306 #endif
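/*
 * The onstack_*() routines above implement explicit stack bookkeeping for
 * bitstate runs: onstack_put() enters the current (compressed) state into
 * S_Tab and remembers it in trpt->ostate, onstack_now() probes S_Tab to test
 * stack membership, and onstack_zap() removes the state when the search
 * backtracks, donating the table entry to Free_list (ordered by size) so
 * that grab_state() can reuse it.
 */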
9307 #ifndef BITSTATE
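/*
 * hinit() below allocates the main state store: a hash table with 2^ssize
 * bucket pointers (ssize is set with the -wN runtime option), or -- under
 * -DMA -- the minimized-automaton store via dfa_init(); in multi-core runs
 * the table is placed in shared memory through init_HT().
 */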
9308 void
9309 hinit(void)
9310 {
9311 #ifdef MA
9312 #ifdef R_XPT
9313 { void r_xpoint(void);
9314 r_xpoint();
9315 }
9316 #else
9317 dfa_init((unsigned short) (MA+a_cycles));
9318 #if NCORE>1 && !defined(COLLAPSE)
9319 if (!readtrail)
9320 { void init_HT(unsigned long);
9321 init_HT(0L);
9322 }
9323 #endif
9324 #endif
9325 #endif
9326 #if !defined(MA) || defined(COLLAPSE)
9327 #if NCORE>1
9328 if (!readtrail)
9329 { void init_HT(unsigned long);
9330 init_HT((unsigned long) (ONE_L<<ssize)*sizeof(struct H_el *));
9331 } else
9332 #endif
9333 H_tab = (struct H_el **)
9334 emalloc((ONE_L<<ssize)*sizeof(struct H_el *));
9335 #endif
9336 }
9337 #endif
9338
9339 #if !defined(BITSTATE) || defined(FULLSTACK)
9340 #ifdef DEBUG
9341 void
9342 dumpstate(int wasnew, char *v, int n, int tag)
9343 { int i;
9344 #ifndef SAFETY
9345 if (S_A)
9346 { printf(" state tags %d (%d::%d): ",
9347 V_A, wasnew, v[0]);
9348 #ifdef FULLSTACK
9349 printf(" %d ", tag);
9350 #endif
9351 printf("\n");
9352 }
9353 #endif
9354 #ifdef SDUMP
9355 #ifndef NOCOMP
9356 printf(" State: ");
9357 for (i = 0; i < vsize; i++) printf("%d%s,",
9358 ((char *)&now)[i], Mask[i]?"*":"");
9359 #endif
9360 printf("\n Vector: ");
9361 for (i = 0; i < n; i++) printf("%d,", v[i]);
9362 printf("\n");
9363 #endif
9364 }
9365 #endif
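/*
 * gstore() below is the -DMA state-store interface: the (compressed) vector
 * is widened to exactly MA bytes and inserted into the minimized DFA with
 * dfa_store(); byte Info[MA-1] carries the proviso/off-stack marker and, for
 * acceptance runs, _a_t is moved to the extra byte Info[MA].  The return
 * codes mirror hstore(): 0 = new state, 1 = old/off-stack, 2 = on-stack,
 * 3 = the nested search intersected the first-DFS stack.
 */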
9366 #ifdef MA
9367 int
9368 gstore(char *vin, int nin, uchar pbit)
9369 { int n, i;
9370 int ret_val = 1;
9371 uchar *v;
9372 static uchar Info[MA+1];
9373 #ifndef NOCOMP
9374 n = compress(vin, nin);
9375 v = (uchar *) &comp_now;
9376 #else
9377 n = nin;
9378 v = vin;
9379 #endif
9380 if (n >= MA)
9381 { printf("pan: error, MA too small, recompile pan.c");
9382 printf(" with -DMA=N with N>%d\n", n);
9383 Uerror("aborting");
9384 }
9385 if (n > (int) maxgs)
9386 { maxgs = (unsigned int) n;
9387 }
9388 for (i = 0; i < n; i++)
9389 { Info[i] = v[i];
9390 }
9391 for ( ; i < MA-1; i++)
9392 { Info[i] = 0;
9393 }
9394 Info[MA-1] = pbit;
9395 if (a_cycles) /* place _a_t at the end */
9396 { Info[MA] = Info[0];
9397 Info[0] = 0;
9398 }
9399
9400 #if NCORE>1 && !defined(SEP_STATE)
9401 enter_critical(GLOBAL_LOCK); /* crude, but necessary */
9402 /* to make this mode work, also replace emalloc with grab_shared inside store MA routines */
9403 #endif
9404
9405 if (!dfa_store(Info))
9406 { if (pbit == 0
9407 && (now._a_t&1)
9408 && depth > A_depth)
9409 { Info[MA] &= ~(1|16|32); /* _a_t */
9410 if (dfa_member(MA))
9411 { Info[MA-1] = 4; /* off-stack bit */
9412 nShadow++;
9413 if (!dfa_member(MA-1))
9414 { ret_val = 3;
9415 #ifdef VERBOSE
9416 printf("intersected 1st dfs stack\n");
9417 #endif
9418 goto done;
9419 } } }
9420 ret_val = 0;
9421 #ifdef VERBOSE
9422 printf("new state\n");
9423 #endif
9424 goto done;
9425 }
9426 #ifdef FULLSTACK
9427 if (pbit == 0)
9428 { Info[MA-1] = 1; /* proviso bit */
9429 #ifndef BFS
9430 trpt->proviso = dfa_member(MA-1);
9431 #endif
9432 Info[MA-1] = 4; /* off-stack bit */
9433 if (dfa_member(MA-1))
9434 { ret_val = 1; /* off-stack */
9435 #ifdef VERBOSE
9436 printf("old state\n");
9437 #endif
9438 } else
9439 { ret_val = 2; /* on-stack */
9440 #ifdef VERBOSE
9441 printf("on-stack\n");
9442 #endif
9443 }
9444 goto done;
9445 }
9446 #endif
9447 ret_val = 1;
9448 #ifdef VERBOSE
9449 printf("old state\n");
9450 #endif
9451 done:
9452 #if NCORE>1 && !defined(SEP_STATE)
9453 leave_critical(GLOBAL_LOCK);
9454 #endif
9455 return ret_val; /* old state */
9456 }
9457 #endif
9458 #if defined(BITSTATE) && defined(LC)
9459 int
9460 compact_stack(char *vin, int n)
9461 { int delta = 0;
9462 s_hash((uchar *)vin, n); /* sets K1 and K2 */
9463 #ifndef SAFETY
9464 delta++; /* room for state[0] |= 128 */
9465 #endif
9466 memcpy((char *) &comp_now + delta, (char *) &K1, WS);
9467 delta += WS;
9468 memcpy((char *) &comp_now + delta, (char *) &K2, WS);
9469 delta += WS; /* use all available bits */
9470 return delta;
9471 }
9472 #endif
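/*
 * compact_stack() above (bitstate + -DLC) stores only the hash signatures K1
 * and K2 for states on the DFS stack, instead of the full vector.  hstore()
 * below is the regular state-store lookup/insert; it returns 0 for a new
 * state, 1 for a match outside the stack, 2 for a match on the DFS stack,
 * and 3 when the nested search hits a state of the first-DFS stack (an
 * accepted cycle).  Bucket lists are kept sorted by memcmp order, and with
 * AUTO_RESIZE the hash value K1 is remembered per entry, apparently so the
 * table can later be resized without re-hashing every stored vector.
 */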
9473 int
9474 hstore(char *vin, int nin) /* hash table storage */
9475 { struct H_el *ntmp;
9476 struct H_el *tmp, *olst = (struct H_el *) 0;
9477 char *v; int n, m=0;
9478 #ifdef HC
9479 uchar rem_a;
9480 #endif
9481 #ifdef NOCOMP
9482 #if defined(BITSTATE) && defined(LC)
9483 if (S_Tab == H_tab)
9484 { v = (char *) &comp_now;
9485 n = compact_stack(vin, nin);
9486 } else
9487 { v = vin; n = nin;
9488 }
9489 #else
9490 v = vin; n = nin;
9491 #endif
9492 #else
9493 v = (char *) &comp_now;
9494 #ifdef HC
9495 rem_a = now._a_t;
9496 now._a_t = 0;
9497 #endif
9498 n = compress(vin, nin);
9499 #ifdef HC
9500 now._a_t = rem_a;
9501 #endif
9502 #ifndef SAFETY
9503 if (S_A)
9504 { v[0] = 0; /* _a_t */
9505 #ifndef NOFAIR
9506 if (S_A > NFAIR)
9507 for (m = 0; m < NFAIR; m++)
9508 v[m+1] = 0; /* _cnt[] */
9509 #endif
9510 m = 0;
9511 }
9512 #endif
9513 #endif
9514 #if !defined(HC) && !(defined(BITSTATE) && defined(LC))
9515 s_hash((uchar *)v, n);
9516 #endif
9517 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9518 enter_critical(CS_ID); /* uses spinlock */
9519 #endif
9520 tmp = H_tab[j1];
9521 if (!tmp)
9522 { tmp = grab_state(n);
9523 #if NCORE>1
9524 if (!tmp)
9525 { /* if we get here -- we've already issued a warning */
9526 /* but we want to allow the normal distributed termination */
9527 /* to collect the stats on all cpus in the wrapup */
9528 #if !defined(SEP_STATE) && !defined(BITSTATE)
9529 leave_critical(CS_ID);
9530 #endif
9531 return 1; /* allow normal termination */
9532 }
9533 #endif
9534 H_tab[j1] = tmp;
9535 } else
9536 { for (;; hcmp++, olst = tmp, tmp = tmp->nxt)
9537 { /* skip the _a_t and the _cnt bytes */
9538 #ifdef COLLAPSE
9539 if (tmp->ln != 0)
9540 { if (!tmp->nxt) goto Append;
9541 continue;
9542 }
9543 #endif
9544 m = memcmp(((char *)&(tmp->state)) + S_A,
9545 v + S_A, n - S_A);
9546 if (m == 0) {
9547 #ifdef SAFETY
9548 #define wasnew 0
9549 #else
9550 int wasnew = 0;
9551 #endif
9552 #ifndef SAFETY
9553 #ifndef NOCOMP
9554 if (S_A)
9555 { if ((((char *)&(tmp->state))[0] & V_A) != V_A)
9556 { wasnew = 1; nShadow++;
9557 ((char *)&(tmp->state))[0] |= V_A;
9558 }
9559 #ifndef NOFAIR
9560 if (S_A > NFAIR)
9561 { /* 0 <= now._cnt[now._a_t&1] < MAXPROC */
9562 unsigned ci, bp; /* index, bit pos */
9563 ci = (now._cnt[now._a_t&1] / 8);
9564 bp = (now._cnt[now._a_t&1] - 8*ci);
9565 if (now._a_t&1) /* use tail-bits in _cnt */
9566 { ci = (NFAIR - 1) - ci;
9567 bp = 7 - bp; /* bp = 0..7 */
9568 }
9569 ci++; /* skip over _a_t */
9570 bp = 1 << bp; /* the bit mask */
9571 if ((((char *)&(tmp->state))[ci] & bp)==0)
9572 { if (!wasnew)
9573 { wasnew = 1;
9574 nShadow++;
9575 }
9576 ((char *)&(tmp->state))[ci] |= bp;
9577 }
9578 }
9579 /* else: wasnew == 0, i.e., old state */
9580 #endif
9581 }
9582 #endif
9583 #endif
9584 #if NCORE>1
9585 Lstate = (struct H_el *) tmp;
9586 #endif
9587 #ifdef FULLSTACK
9588 #ifndef SAFETY
9589 if (wasnew)
9590 { Lstate = (struct H_el *) tmp;
9591 tmp->tagged |= V_A;
9592 if ((now._a_t&1)
9593 && (tmp->tagged&A_V)
9594 && depth > A_depth)
9595 {
9596 intersect:
9597 #ifdef CHECK
9598 #if NCORE>1
9599 printf("cpu%d: ", core_id);
9600 #endif
9601 printf("1st dfs-stack intersected on state %d+\n",
9602 (int) tmp->st_id);
9603 #endif
9604 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9605 leave_critical(CS_ID);
9606 #endif
9607 return 3;
9608 }
9609 #ifdef CHECK
9610 #if NCORE>1
9611 printf("cpu%d: ", core_id);
9612 #endif
9613 printf(" New state %d+\n", (int) tmp->st_id);
9614 #endif
9615 #ifdef DEBUG
9616 dumpstate(1, (char *)&(tmp->state),n,tmp->tagged);
9617 #endif
9618 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9619 leave_critical(CS_ID);
9620 #endif
9621 return 0;
9622 } else
9623 #endif
9624 if ((S_A)?(tmp->tagged&V_A):tmp->tagged)
9625 { Lstate = (struct H_el *) tmp;
9626 #ifndef SAFETY
9627 /* already on current dfs stack */
9628 /* but may also be on 1st dfs stack */
9629 if ((now._a_t&1)
9630 && (tmp->tagged&A_V)
9631 && depth > A_depth
9632 #ifndef NOFAIR
9633 && (!fairness || now._cnt[1] <= 1)
9634 #endif
9635 )
9636 goto intersect;
9637 #endif
9638 #ifdef CHECK
9639 #if NCORE>1
9640 printf("cpu%d: ", core_id);
9641 #endif
9642 printf(" Stack state %d\n", (int) tmp->st_id);
9643 #endif
9644 #ifdef DEBUG
9645 dumpstate(0, (char *)&(tmp->state),n,tmp->tagged);
9646 #endif
9647 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9648 leave_critical(CS_ID);
9649 #endif
9650 return 2; /* match on stack */
9651 }
9652 #else
9653 if (wasnew)
9654 {
9655 #ifdef CHECK
9656 #if NCORE>1
9657 printf("cpu%d: ", core_id);
9658 #endif
9659 printf(" New state %d+\n", (int) tmp->st_id);
9660 #endif
9661 #ifdef DEBUG
9662 dumpstate(1, (char *)&(tmp->state), n, 0);
9663 #endif
9664 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9665 leave_critical(CS_ID);
9666 #endif
9667 return 0;
9668 }
9669 #endif
9670 #ifdef CHECK
9671 #if NCORE>1
9672 printf("cpu%d: ", core_id);
9673 #endif
9674 printf(" Old state %d\n", (int) tmp->st_id);
9675 #endif
9676 #ifdef DEBUG
9677 dumpstate(0, (char *)&(tmp->state), n, 0);
9678 #endif
9679 #ifdef REACH
9680 if (tmp->D > depth)
9681 { tmp->D = depth;
9682 #ifdef CHECK
9683 #if NCORE>1
9684 printf("cpu%d: ", core_id);
9685 #endif
9686 printf(" ReVisiting (from smaller depth)\n");
9687 #endif
9688 nstates--;
9689 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9690 leave_critical(CS_ID);
9691 #endif
9692 return 0;
9693 }
9694 #endif
9695 #if (defined(BFS) && defined(Q_PROVISO)) || NCORE>1
9696 Lstate = (struct H_el *) tmp;
9697 #endif
9698 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9699 leave_critical(CS_ID);
9700 #endif
9701 return 1; /* match outside stack */
9702 } else if (m < 0)
9703 { /* insert state before tmp */
9704 ntmp = grab_state(n);
9705 #if NCORE>1
9706 if (!ntmp)
9707 {
9708 #if !defined(SEP_STATE) && !defined(BITSTATE)
9709 leave_critical(CS_ID);
9710 #endif
9711 return 1; /* allow normal termination */
9712 }
9713 #endif
9714 ntmp->nxt = tmp;
9715 if (!olst)
9716 H_tab[j1] = ntmp;
9717 else
9718 olst->nxt = ntmp;
9719 tmp = ntmp;
9720 break;
9721 } else if (!tmp->nxt)
9722 { /* append after tmp */
9723 #ifdef COLLAPSE
9724 Append:
9725 #endif
9726 tmp->nxt = grab_state(n);
9727 #if NCORE>1
9728 if (!tmp->nxt)
9729 {
9730 #if !defined(SEP_STATE) && !defined(BITSTATE)
9731 leave_critical(CS_ID);
9732 #endif
9733 return 1; /* allow normal termination */
9734 }
9735 #endif
9736 tmp = tmp->nxt;
9737 break;
9738 } }
9739 }
9740 #ifdef CHECK
9741 tmp->st_id = (unsigned) nstates;
9742 #if NCORE>1
9743 printf("cpu%d: ", core_id);
9744 #endif
9745 #ifdef BITSTATE
9746 printf(" Push state %d\n", ((int) nstates) - 1);
9747 #else
9748 printf(" New state %d\n", (int) nstates);
9749 #endif
9750 #endif
9751 #if !defined(SAFETY) || defined(REACH)
9752 tmp->D = depth;
9753 #endif
9754 #ifndef SAFETY
9755 #ifndef NOCOMP
9756 if (S_A)
9757 { v[0] = V_A;
9758 #ifndef NOFAIR
9759 if (S_A > NFAIR)
9760 { unsigned ci, bp; /* as above */
9761 ci = (now._cnt[now._a_t&1] / 8);
9762 bp = (now._cnt[now._a_t&1] - 8*ci);
9763 if (now._a_t&1)
9764 { ci = (NFAIR - 1) - ci;
9765 bp = 7 - bp; /* bp = 0..7 */
9766 }
9767 v[1+ci] = 1 << bp;
9768 }
9769 #endif
9770 }
9771 #endif
9772 #endif
9773 #if defined(AUTO_RESIZE) && !defined(BITSTATE)
9774 tmp->m_K1 = K1;
9775 #endif
9776 memcpy(((char *)&(tmp->state)), v, n);
9777 #ifdef FULLSTACK
9778 tmp->tagged = (S_A)?V_A:(depth+1);
9779 #ifdef DEBUG
9780 dumpstate(-1, v, n, tmp->tagged);
9781 #endif
9782 Lstate = (struct H_el *) tmp;
9783 #else
9784 #ifdef DEBUG
9785 dumpstate(-1, v, n, 0);
9786 #endif
9787 #if NCORE>1
9788 Lstate = (struct H_el *) tmp;
9789 #endif
9790 #endif
9791 /* #if NCORE>1 && !defined(SEP_STATE) */
9792 #if NCORE>1
9793 #ifdef V_PROVISO
9794 tmp->cpu_id = core_id;
9795 #endif
9796 #if !defined(SEP_STATE) && !defined(BITSTATE)
9797 leave_critical(CS_ID);
9798 #endif
9799 #endif
9800 return 0;
9801 }
9802 #endif
9803 #include TRANSITIONS
9804 void
9805 do_reach(void)
9806 {
9807 r_ck(reached0, nstates0, 0, src_ln0, src_file0);
9808 r_ck(reached1, nstates1, 1, src_ln1, src_file1);
9809 r_ck(reached2, nstates2, 2, src_ln2, src_file2);
9810 r_ck(reached3, nstates3, 3, src_ln3, src_file3);
9811 r_ck(reached4, nstates4, 4, src_ln4, src_file4);
9812 }
9813
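/*
 * iniglobals() below gives the model's global variables (the buffer_use[],
 * commit_count[], offset and counter fields generated from buffer.spin)
 * their initial values, optionally logging them for -DVAR_RANGES, and then
 * raises Maxbody to at least sizeof(State)-VECTORSZ.
 */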
9814 void
9815 iniglobals(void)
9816 {
9817 deliver = 0;
9818 { int l_in;
9819 for (l_in = 0; l_in < 4; l_in++)
9820 {
9821 now.buffer_use[l_in] = 0;
9822 }
9823 }
9824 now.write_off = 0;
9825 { int l_in;
9826 for (l_in = 0; l_in < 2; l_in++)
9827 {
9828 now.commit_count[l_in] = 0;
9829 }
9830 }
9831 now.read_off = 0;
9832 now.events_lost = 0;
9833 now.refcount = 0;
9834 #ifdef VAR_RANGES
9835 { int l_in;
9836 for (l_in = 0; l_in < 4; l_in++)
9837 {
9838 logval("buffer_use[l_in]", now.buffer_use[l_in]);
9839 }
9840 }
9841 logval("write_off", now.write_off);
9842 { int l_in;
9843 for (l_in = 0; l_in < 2; l_in++)
9844 {
9845 logval("commit_count[l_in]", now.commit_count[l_in]);
9846 }
9847 }
9848 logval("read_off", now.read_off);
9849 logval("events_lost", now.events_lost);
9850 logval("refcount", now.refcount);
9851 #endif
9852 Maxbody = max(Maxbody, sizeof(State)-VECTORSZ);
9853 }
9854
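/*
 * Channel support below is generated for every model; buffer.spin apparently
 * declares no buffered channels, so the type switches in addqueue(), qsend()
 * and qrecv() contain only their default error branches, and the remaining
 * q_* helpers mostly guard against references to uninitialized channel names
 * and against misuse of xr/xs assertions.
 */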
9855 int
9856 addqueue(int n, int is_rv)
9857 { int j=0, i = now._nr_qs;
9858 #ifndef NOCOMP
9859 int k;
9860 #endif
9861 if (i >= MAXQ)
9862 Uerror("too many queues");
9863 switch (n) {
9864 default: Uerror("bad queue - addqueue");
9865 }
9866 if (vsize%WS)
9867 q_skip[i] = WS-(vsize%WS);
9868 else
9869 q_skip[i] = 0;
9870 #ifndef NOCOMP
9871 k = vsize;
9872 #ifndef BFS
9873 if (is_rv) k += j;
9874 #endif
9875 for (k += (int) q_skip[i]; k > vsize; k--)
9876 Mask[k-1] = 1;
9877 #endif
9878 vsize += (int) q_skip[i];
9879 q_offset[i] = vsize;
9880 now._nr_qs += 1;
9881 vsize += j;
9882 #ifndef NOVSZ
9883 now._vsz = vsize;
9884 #endif
9885 hmax = max(hmax, vsize);
9886 if (vsize >= VECTORSZ)
9887 Uerror("VECTORSZ is too small, edit pan.h");
9888 memset((char *)qptr(i), 0, j);
9889 ((Q0 *)qptr(i))->_t = n;
9890 return i+1;
9891 }
9892
9893 #if NQS>0
9894 void
9895 qsend(int into, int sorted, int args_given)
9896 { int j; uchar *z;
9897
9898 #ifdef HAS_SORTED
9899 int k;
9900 #endif
9901 if (!into--)
9902 uerror("ref to uninitialized chan name (sending)");
9903 if (into >= (int) now._nr_qs || into < 0)
9904 Uerror("qsend bad queue#");
9905 z = qptr(into);
9906 j = ((Q0 *)qptr(into))->Qlen;
9907 switch (((Q0 *)qptr(into))->_t) {
9908 case 0: printf("queue %d was deleted\n", into+1);
9909 default: Uerror("bad queue - qsend");
9910 }
9911 #ifdef EVENT_TRACE
9912 if (in_s_scope(into+1))
9913 require('s', into);
9914 #endif
9915 }
9916 #endif
9917
9918 #if SYNC
9919 int
9920 q_zero(int from)
9921 { if (!from--)
9922 { uerror("ref to uninitialized chan name (q_zero)");
9923 return 0;
9924 }
9925 switch(((Q0 *)qptr(from))->_t) {
9926 case 0: printf("queue %d was deleted\n", from+1);
9927 }
9928 Uerror("bad queue q-zero");
9929 return -1;
9930 }
9931 int
9932 not_RV(int from)
9933 { if (q_zero(from))
9934 { printf("==>> a test of the contents of a rv ");
9935 printf("channel always returns FALSE\n");
9936 uerror("error to poll rendezvous channel");
9937 }
9938 return 1;
9939 }
9940 #endif
9941 #ifndef XUSAFE
9942 void
9943 setq_claim(int x, int m, char *s, int y, char *p)
9944 { if (x == 0)
9945 uerror("x[rs] claim on uninitialized channel");
9946 if (x < 0 || x > MAXQ)
9947 Uerror("cannot happen setq_claim");
9948 q_claim[x] |= m;
9949 p_name[y] = p;
9950 q_name[x] = s;
9951 if (m&2) q_S_check(x, y);
9952 if (m&1) q_R_check(x, y);
9953 }
9954 short q_sender[MAXQ+1];
9955 int
9956 q_S_check(int x, int who)
9957 { if (!q_sender[x])
9958 { q_sender[x] = who+1;
9959 #if SYNC
9960 if (q_zero(x))
9961 { printf("chan %s (%d), ",
9962 q_name[x], x-1);
9963 printf("sndr proc %s (%d)\n",
9964 p_name[who], who);
9965 uerror("xs chans cannot be used for rv");
9966 }
9967 #endif
9968 } else
9969 if (q_sender[x] != who+1)
9970 { printf("pan: xs assertion violated: ");
9971 printf("access to chan <%s> (%d)\npan: by ",
9972 q_name[x], x-1);
9973 if (q_sender[x] > 0 && p_name[q_sender[x]-1])
9974 printf("%s (proc %d) and by ",
9975 p_name[q_sender[x]-1], q_sender[x]-1);
9976 printf("%s (proc %d)\n",
9977 p_name[who], who);
9978 uerror("error, partial order reduction invalid");
9979 }
9980 return 1;
9981 }
9982 short q_recver[MAXQ+1];
9983 int
9984 q_R_check(int x, int who)
9985 { if (!q_recver[x])
9986 { q_recver[x] = who+1;
9987 #if SYNC
9988 if (q_zero(x))
9989 { printf("chan %s (%d), ",
9990 q_name[x], x-1);
9991 printf("recv proc %s (%d)\n",
9992 p_name[who], who);
9993 uerror("xr chans cannot be used for rv");
9994 }
9995 #endif
9996 } else
9997 if (q_recver[x] != who+1)
9998 { printf("pan: xr assertion violated: ");
9999 printf("access to chan %s (%d)\npan: ",
10000 q_name[x], x-1);
10001 if (q_recver[x] > 0 && p_name[q_recver[x]-1])
10002 printf("by %s (proc %d) and ",
10003 p_name[q_recver[x]-1], q_recver[x]-1);
10004 printf("by %s (proc %d)\n",
10005 p_name[who], who);
10006 uerror("error, partial order reduction invalid");
10007 }
10008 return 1;
10009 }
10010 #endif
10011 int
10012 q_len(int x)
10013 { if (!x--)
10014 uerror("ref to uninitialized chan name (len)");
10015 return ((Q0 *)qptr(x))->Qlen;
10016 }
10017
10018 int
10019 q_full(int from)
10020 { if (!from--)
10021 uerror("ref to uninitialized chan name (qfull)");
10022 switch(((Q0 *)qptr(from))->_t) {
10023 case 0: printf("queue %d was deleted\n", from+1);
10024 }
10025 Uerror("bad queue - q_full");
10026 return 0;
10027 }
10028
10029 #ifdef HAS_UNLESS
10030 int
10031 q_e_f(int from)
10032 { /* empty or full */
10033 return !q_len(from) || q_full(from);
10034 }
10035 #endif
10036 #if NQS>0
10037 int
10038 qrecv(int from, int slot, int fld, int done)
10039 { uchar *z;
10040 int j, k, r=0;
10041
10042 if (!from--)
10043 uerror("ref to uninitialized chan name (receiving)");
10044 if (from >= (int) now._nr_qs || from < 0)
10045 Uerror("qrecv bad queue#");
10046 z = qptr(from);
10047 #ifdef EVENT_TRACE
10048 if (done && (in_r_scope(from+1)))
10049 require('r', from);
10050 #endif
10051 switch (((Q0 *)qptr(from))->_t) {
10052 case 0: printf("queue %d was deleted\n", from+1);
10053 default: Uerror("bad queue - qrecv");
10054 }
10055 return r;
10056 }
10057 #endif
10058
10059 #ifndef BITSTATE
10060 #ifdef COLLAPSE
10061 long
10062 col_q(int i, char *z)
10063 { int j=0, k;
10064 char *x, *y;
10065 Q0 *ptr = (Q0 *) qptr(i);
10066 switch (ptr->_t) {
10067 default: Uerror("bad qtype - collapse");
10068 }
10069 if (z) x = z; else x = scratch;
10070 y = (char *) ptr; k = q_offset[i];
10071 /* no need to store the empty slots at the end */
10072 j -= (q_max[ptr->_t] - ptr->Qlen) * ((j - 2)/q_max[ptr->_t]);
10073 for ( ; j > 0; j--, y++)
10074 if (!Mask[k++]) *x++ = *y;
10075 for (j = 0; j < WS-1; j++)
10076 *x++ = 0;
10077 x -= j;
10078 if (z) return (long) (x - z);
10079 return ordinal(scratch, x-scratch, 1); /* chan */
10080 }
10081 #endif
10082 #endif
10083 int
10084 unsend(int into)
10085 { int _m=0, j; uchar *z;
10086
10087 #ifdef HAS_SORTED
10088 int k;
10089 #endif
10090 if (!into--)
10091 uerror("ref to uninitialized chan (unsend)");
10092 z = qptr(into);
10093 j = ((Q0 *)z)->Qlen;
10094 ((Q0 *)z)->Qlen = --j;
10095 switch (((Q0 *)qptr(into))->_t) {
10096 default: Uerror("bad queue - unsend");
10097 }
10098 return _m;
10099 }
10100
10101 void
10102 unrecv(int from, int slot, int fld, int fldvar, int strt)
10103 { int j; uchar *z;
10104
10105 if (!from--)
10106 uerror("ref to uninitialized chan (unrecv)");
10107 z = qptr(from);
10108 j = ((Q0 *)z)->Qlen;
10109 if (strt) ((Q0 *)z)->Qlen = j+1;
10110 switch (((Q0 *)qptr(from))->_t) {
10111 default: Uerror("bad queue - qrecv");
10112 }
10113 }
10114 int
10115 q_cond(short II, Trans *t)
10116 { int i = 0;
10117 for (i = 0; i < 6; i++)
10118 { if (t->ty[i] == TIMEOUT_F) return 1;
10119 if (t->ty[i] == ALPHA_F)
10120 #ifdef GLOB_ALPHA
10121 return 0;
10122 #else
10123 return (II+1 == (short) now._nr_pr && II+1 < MAXPROC);
10124 #endif
10125 switch (t->qu[i]) {
10126 case 0: break;
10127 default: Uerror("unknown qid - q_cond");
10128 return 0;
10129 }
10130 }
10131 return 1;
10132 }
10133 void
10134 to_compile(void)
10135 { char ctd[1024], carg[64];
10136 #ifdef BITSTATE
10137 strcpy(ctd, "-DBITSTATE ");
10138 #else
10139 strcpy(ctd, "");
10140 #endif
10141 #ifdef NOVSZ
10142 strcat(ctd, "-DNOVSZ ");
10143 #endif
10144 #ifdef REVERSE
10145 strcat(ctd, "-DREVERSE ");
10146 #endif
10147 #ifdef T_REVERSE
10148 strcat(ctd, "-DT_REVERSE ");
10149 #endif
10150 #ifdef RANDOMIZE
10151 #if RANDOMIZE>0
10152 sprintf(carg, "-DRANDOMIZE=%d ", RANDOMIZE);
10153 strcat(ctd, carg);
10154 #else
10155 strcat(ctd, "-DRANDOMIZE ");
10156 #endif
10157 #endif
10158 #ifdef SCHED
10159 sprintf(carg, "-DSCHED=%d ", SCHED);
10160 strcat(ctd, carg);
10161 #endif
10162 #ifdef BFS
10163 strcat(ctd, "-DBFS ");
10164 #endif
10165 #ifdef MEMLIM
10166 sprintf(carg, "-DMEMLIM=%d ", MEMLIM);
10167 strcat(ctd, carg);
10168 #else
10169 #ifdef MEMCNT
10170 sprintf(carg, "-DMEMCNT=%d ", MEMCNT);
10171 strcat(ctd, carg);
10172 #endif
10173 #endif
10174 #ifdef NOCLAIM
10175 strcat(ctd, "-DNOCLAIM ");
10176 #endif
10177 #ifdef SAFETY
10178 strcat(ctd, "-DSAFETY ");
10179 #else
10180 #ifdef NOFAIR
10181 strcat(ctd, "-DNOFAIR ");
10182 #else
10183 #ifdef NFAIR
10184 if (NFAIR != 2)
10185 { sprintf(carg, "-DNFAIR=%d ", NFAIR);
10186 strcat(ctd, carg);
10187 }
10188 #endif
10189 #endif
10190 #endif
10191 #ifdef NOREDUCE
10192 strcat(ctd, "-DNOREDUCE ");
10193 #else
10194 #ifdef XUSAFE
10195 strcat(ctd, "-DXUSAFE ");
10196 #endif
10197 #endif
10198 #ifdef NP
10199 strcat(ctd, "-DNP ");
10200 #endif
10201 #ifdef PEG
10202 strcat(ctd, "-DPEG ");
10203 #endif
10204 #ifdef VAR_RANGES
10205 strcat(ctd, "-DVAR_RANGES ");
10206 #endif
10207 #ifdef HC0
10208 strcat(ctd, "-DHC0 ");
10209 #endif
10210 #ifdef HC1
10211 strcat(ctd, "-DHC1 ");
10212 #endif
10213 #ifdef HC2
10214 strcat(ctd, "-DHC2 ");
10215 #endif
10216 #ifdef HC3
10217 strcat(ctd, "-DHC3 ");
10218 #endif
10219 #ifdef HC4
10220 strcat(ctd, "-DHC4 ");
10221 #endif
10222 #ifdef CHECK
10223 strcat(ctd, "-DCHECK ");
10224 #endif
10225 #ifdef CTL
10226 strcat(ctd, "-DCTL ");
10227 #endif
10228 #ifdef NIBIS
10229 strcat(ctd, "-DNIBIS ");
10230 #endif
10231 #ifdef NOBOUNDCHECK
10232 strcat(ctd, "-DNOBOUNDCHECK ");
10233 #endif
10234 #ifdef NOSTUTTER
10235 strcat(ctd, "-DNOSTUTTER ");
10236 #endif
10237 #ifdef REACH
10238 strcat(ctd, "-DREACH ");
10239 #endif
10240 #ifdef PRINTF
10241 strcat(ctd, "-DPRINTF ");
10242 #endif
10243 #ifdef OTIM
10244 strcat(ctd, "-DOTIM ");
10245 #endif
10246 #ifdef COLLAPSE
10247 strcat(ctd, "-DCOLLAPSE ");
10248 #endif
10249 #ifdef MA
10250 sprintf(carg, "-DMA=%d ", MA);
10251 strcat(ctd, carg);
10252 #endif
10253 #ifdef SVDUMP
10254 strcat(ctd, "-DSVDUMP ");
10255 #endif
10256 #ifdef VECTORSZ
10257 if (VECTORSZ != 1024)
10258 { sprintf(carg, "-DVECTORSZ=%d ", VECTORSZ);
10259 strcat(ctd, carg);
10260 }
10261 #endif
10262 #ifdef VERBOSE
10263 strcat(ctd, "-DVERBOSE ");
10264 #endif
10265 #ifdef CHECK
10266 strcat(ctd, "-DCHECK ");
10267 #endif
10268 #ifdef SDUMP
10269 strcat(ctd, "-DSDUMP ");
10270 #endif
10271 #if NCORE>1
10272 sprintf(carg, "-DNCORE=%d ", NCORE);
10273 strcat(ctd, carg);
10274 #endif
10275 #ifdef SFH
10276 sprintf(carg, "-DSFH ");
10277 strcat(ctd, carg);
10278 #endif
10279 #ifdef VMAX
10280 if (VMAX != 256)
10281 { sprintf(carg, "-DVMAX=%d ", VMAX);
10282 strcat(ctd, carg);
10283 }
10284 #endif
10285 #ifdef PMAX
10286 if (PMAX != 16)
10287 { sprintf(carg, "-DPMAX=%d ", PMAX);
10288 strcat(ctd, carg);
10289 }
10290 #endif
10291 #ifdef QMAX
10292 if (QMAX != 16)
10293 { sprintf(carg, "-DQMAX=%d ", QMAX);
10294 strcat(ctd, carg);
10295 }
10296 #endif
10297 #ifdef SET_WQ_SIZE
10298 sprintf(carg, "-DSET_WQ_SIZE=%d ", SET_WQ_SIZE);
10299 strcat(ctd, carg);
10300 #endif
10301 printf("Compiled as: cc -o pan %span.c\n", ctd);
10302 }
10303 void
10304 active_procs(void)
10305 {
10306 if (!permuted) {
10307 Addproc(4);
10308 } else {
10309 Addproc(4);
10310 }
10311 }
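/*
 * What follows (under -DMA) is the minimized-automaton state store: each
 * state is treated as a word of dfa_depth bytes and stored as a path through
 * a layered DFA, with one splay tree of Vertex nodes per layer.  Edges
 * compress contiguous byte ranges (From..To, plus an optional singleton S);
 * dfa_store() inserts the word held in word[] and dfa_member() tests
 * membership of a variant, while the splitting and merging code further down
 * keeps the automaton minimal as states are added.
 */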
10312 #ifdef MA
10313 /*
10314 #include <stdio.h>
10315 #define uchar unsigned char
10316 */
10317 #define ulong unsigned long
10318 #define ushort unsigned short
10319
10320 #define TWIDTH 256
10321 #define HASH(y,n) (n)*(((long)y))
10322 #define INRANGE(e,h) ((h>=e->From && h<=e->To)||(e->s==1 && e->S==h))
10323
10324 extern char *emalloc(unsigned long); /* imported routine */
10325 extern void dfa_init(ushort); /* 4 exported routines */
10326 extern int dfa_member(ulong);
10327 extern int dfa_store(uchar *);
10328 extern void dfa_stats(void);
10329
10330 typedef struct Edge {
10331 uchar From, To; /* max range 0..255 */
10332 uchar s, S; /* if s=1, S is singleton */
10333 struct Vertex *Dst;
10334 struct Edge *Nxt;
10335 } Edge;
10336
10337 typedef struct Vertex {
10338 ulong key, num; /* key for splay tree, nr incoming edges */
10339 uchar from[2], to[2]; /* in-node predefined edge info */
10340 struct Vertex *dst[2];/* most nodes have 2 or more edges */
10341 struct Edge *Succ; /* in case there are more edges */
10342 struct Vertex *lnk, *left, *right; /* splay tree plumbing */
10343 } Vertex;
10344
10345 static Edge *free_edges;
10346 static Vertex *free_vertices;
10347 static Vertex **layers; /* one splay tree of nodes per layer */
10348 static Vertex **path; /* run of word in the DFA */
10349 static Vertex *R, *F, *NF; /* Root, Final, Not-Final */
10350 static uchar *word, *lastword;/* string, and last string inserted */
10351 static int dfa_depth, iv=0, nv=0, pfrst=0, Tally;
10352
10353 static void insert_it(Vertex *, int); /* splay-tree code */
10354 static void delete_it(Vertex *, int);
10355 static Vertex *find_it(Vertex *, Vertex *, uchar, int);
10356
10357 static void
10358 recyc_edges(Edge *e)
10359 {
10360 if (!e) return;
10361 recyc_edges(e->Nxt);
10362 e->Nxt = free_edges;
10363 free_edges = e;
10364 }
10365
10366 static Edge *
10367 new_edge(Vertex *dst)
10368 { Edge *e;
10369
10370 if (free_edges)
10371 { e = free_edges;
10372 free_edges = e->Nxt;
10373 e->From = e->To = e->s = e->S = 0;
10374 e->Nxt = (Edge *) 0;
10375 } else
10376 e = (Edge *) emalloc(sizeof(Edge));
10377 e->Dst = dst;
10378
10379 return e;
10380 }
10381
10382 static void
10383 recyc_vertex(Vertex *v)
10384 {
10385 recyc_edges(v->Succ);
10386 v->Succ = (Edge *) free_vertices;
10387 free_vertices = v;
10388 nr_states--;
10389 }
10390
10391 static Vertex *
10392 new_vertex(void)
10393 { Vertex *v;
10394
10395 if (free_vertices)
10396 { v = free_vertices;
10397 free_vertices = (Vertex *) v->Succ;
10398 v->Succ = (Edge *) 0;
10399 v->num = 0;
10400 } else
10401 v = (Vertex *) emalloc(sizeof(Vertex));
10402
10403 nr_states++;
10404 return v;
10405 }
10406
10407 static Vertex *
10408 allDelta(Vertex *v, int n)
10409 { Vertex *dst = new_vertex();
10410
10411 v->from[0] = 0;
10412 v->to[0] = 255;
10413 v->dst[0] = dst;
10414 dst->num = 256;
10415 insert_it(v, n);
10416 return dst;
10417 }
10418
10419 static void
10420 insert_edge(Vertex *v, Edge *e)
10421 { /* put new edge first */
10422 if (!v->dst[0])
10423 { v->dst[0] = e->Dst;
10424 v->from[0] = e->From;
10425 v->to[0] = e->To;
10426 recyc_edges(e);
10427 return;
10428 }
10429 if (!v->dst[1])
10430 { v->from[1] = v->from[0]; v->from[0] = e->From;
10431 v->to[1] = v->to[0]; v->to[0] = e->To;
10432 v->dst[1] = v->dst[0]; v->dst[0] = e->Dst;
10433 recyc_edges(e);
10434 return;
10435 } /* shift */
10436 { int f = v->from[1];
10437 int t = v->to[1];
10438 Vertex *d = v->dst[1];
10439 v->from[1] = v->from[0]; v->from[0] = e->From;
10440 v->to[1] = v->to[0]; v->to[0] = e->To;
10441 v->dst[1] = v->dst[0]; v->dst[0] = e->Dst;
10442 e->From = f;
10443 e->To = t;
10444 e->Dst = d;
10445 }
10446 e->Nxt = v->Succ;
10447 v->Succ = e;
10448 }
10449
10450 static void
10451 copyRecursive(Vertex *v, Edge *e)
10452 { Edge *f;
10453 if (e->Nxt) copyRecursive(v, e->Nxt);
10454 f = new_edge(e->Dst);
10455 f->From = e->From;
10456 f->To = e->To;
10457 f->s = e->s;
10458 f->S = e->S;
10459 f->Nxt = v->Succ;
10460 v->Succ = f;
10461 }
10462
10463 static void
10464 copyEdges(Vertex *to, Vertex *from)
10465 { int i;
10466 for (i = 0; i < 2; i++)
10467 { to->from[i] = from->from[i];
10468 to->to[i] = from->to[i];
10469 to->dst[i] = from->dst[i];
10470 }
10471 if (from->Succ) copyRecursive(to, from->Succ);
10472 }
10473
10474 static Edge *
10475 cacheDelta(Vertex *v, int h, int first)
10476 { static Edge *ov, tmp; int i;
10477
10478 if (!first && INRANGE(ov,h))
10479 return ov; /* intercepts about 10% */
10480 for (i = 0; i < 2; i++)
10481 if (v->dst[i] && h >= v->from[i] && h <= v->to[i])
10482 { tmp.From = v->from[i];
10483 tmp.To = v->to[i];
10484 tmp.Dst = v->dst[i];
10485 tmp.s = tmp.S = 0;
10486 ov = &tmp;
10487 return ov;
10488 }
10489 for (ov = v->Succ; ov; ov = ov->Nxt)
10490 if (INRANGE(ov,h)) return ov;
10491
10492 Uerror("cannot get here, cacheDelta");
10493 return (Edge *) 0;
10494 }
10495
10496 static Vertex *
10497 Delta(Vertex *v, int h) /* v->delta[h] */
10498 { Edge *e;
10499
10500 if (v->dst[0] && h >= v->from[0] && h <= v->to[0])
10501 return v->dst[0]; /* oldest edge */
10502 if (v->dst[1] && h >= v->from[1] && h <= v->to[1])
10503 return v->dst[1];
10504 for (e = v->Succ; e; e = e->Nxt)
10505 if (INRANGE(e,h))
10506 return e->Dst;
10507 Uerror("cannot happen Delta");
10508 return (Vertex *) 0;
10509 }
10510
10511 static void
10512 numDelta(Vertex *v, int d)
10513 { Edge *e;
10514 ulong cnt;
10515 int i;
10516
10517 for (i = 0; i < 2; i++)
10518 if (v->dst[i])
10519 { cnt = v->dst[i]->num + d*(1 + v->to[i] - v->from[i]);
10520 if (d == 1 && cnt < v->dst[i]->num) goto bad;
10521 v->dst[i]->num = cnt;
10522 }
10523 for (e = v->Succ; e; e = e->Nxt)
10524 { cnt = e->Dst->num + d*(1 + e->To - e->From + e->s);
10525 if (d == 1 && cnt < e->Dst->num)
10526 bad: Uerror("too many incoming edges");
10527 e->Dst->num = cnt;
10528 }
10529 }
10530
10531 static void
10532 setDelta(Vertex *v, int h, Vertex *newdst) /* v->delta[h] = newdst; */
10533 { Edge *e, *f = (Edge *) 0, *g;
10534 int i;
10535
10536 /* remove the old entry, if there */
10537 for (i = 0; i < 2; i++)
10538 if (v->dst[i] && h >= v->from[i] && h <= v->to[i])
10539 { if (h == v->from[i])
10540 { if (h == v->to[i])
10541 { v->dst[i] = (Vertex *) 0;
10542 v->from[i] = v->to[i] = 0;
10543 } else
10544 v->from[i]++;
10545 } else if (h == v->to[i])
10546 { v->to[i]--;
10547 } else
10548 { g = new_edge(v->dst[i]);/* same dst */
10549 g->From = v->from[i];
10550 g->To = h-1; /* left half */
10551 v->from[i] = h+1; /* right half */
10552 insert_edge(v, g);
10553 }
10554 goto part2;
10555 }
10556 for (e = v->Succ; e; f = e, e = e->Nxt)
10557 { if (e->s == 1 && e->S == h)
10558 { e->s = e->S = 0;
10559 goto rem_tst;
10560 }
10561 if (h >= e->From && h <= e->To)
10562 { if (h == e->From)
10563 { if (h == e->To)
10564 { if (e->s)
10565 { e->From = e->To = e->S;
10566 e->s = 0;
10567 break;
10568 } else
10569 goto rem_do;
10570 } else
10571 e->From++;
10572 } else if (h == e->To)
10573 { e->To--;
10574 } else /* split */
10575 { g = new_edge(e->Dst); /* same dst */
10576 g->From = e->From;
10577 g->To = h-1; /* g=left half */
10578 e->From = h+1; /* e=right half */
10579 g->Nxt = e->Nxt; /* insert g */
10580 e->Nxt = g; /* behind e */
10581 break; /* done */
10582 }
10583
10584 rem_tst: if (e->From > e->To)
10585 { if (e->s == 0) {
10586 rem_do: if (f)
10587 f->Nxt = e->Nxt;
10588 else
10589 v->Succ = e->Nxt;
10590 e->Nxt = (Edge *) 0;
10591 recyc_edges(e);
10592 } else
10593 { e->From = e->To = e->S;
10594 e->s = 0;
10595 } }
10596 break;
10597 } }
10598 part2:
10599 /* check if newdst is already there */
10600 for (i = 0; i < 2; i++)
10601 if (v->dst[i] == newdst)
10602 { if (h+1 == (int) v->from[i])
10603 { v->from[i] = h;
10604 return;
10605 }
10606 if (h == (int) v->to[i]+1)
10607 { v->to[i] = h;
10608 return;
10609 } }
10610 for (e = v->Succ; e; e = e->Nxt)
10611 { if (e->Dst == newdst)
10612 { if (h+1 == (int) e->From)
10613 { e->From = h;
10614 if (e->s == 1 && e->S+1 == e->From)
10615 { e->From = e->S;
10616 e->s = e->S = 0;
10617 }
10618 return;
10619 }
10620 if (h == (int) e->To+1)
10621 { e->To = h;
10622 if (e->s == 1 && e->S == e->To+1)
10623 { e->To = e->S;
10624 e->s = e->S = 0;
10625 }
10626 return;
10627 }
10628 if (e->s == 0)
10629 { e->s = 1;
10630 e->S = h;
10631 return;
10632 } } }
10633 /* add as a new edge */
10634 e = new_edge(newdst);
10635 e->From = e->To = h;
10636 insert_edge(v, e);
10637 }
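/*
 * Editorial note: setDelta() realizes v->delta[h] = newdst on the
 * range-compressed edges: it first carves h out of whatever range now
 * covers it (possibly splitting that range in two), then tries to extend
 * an existing range or single-byte slot (e->s/e->S) that already points
 * at newdst, and only as a last resort allocates a fresh one-byte edge
 * [h,h] -> newdst via insert_edge().
 */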
10638
10639 static ulong
10640 cheap_key(Vertex *v)
10641 { ulong vk2 = 0;
10642
10643 if (v->dst[0])
10644 { vk2 = (ulong) v->dst[0];
10645 if ((ulong) v->dst[1] > vk2)
10646 vk2 = (ulong) v->dst[1];
10647 } else if (v->dst[1])
10648 vk2 = (ulong) v->dst[1];
10649 if (v->Succ)
10650 { Edge *e;
10651 for (e = v->Succ; e; e = e->Nxt)
10652 if ((ulong) e->Dst > vk2)
10653 vk2 = (ulong) e->Dst;
10654 }
10655 Tally = (vk2>>2)&(TWIDTH-1);
10656 return v->key;
10657 }
10658
10659 static ulong
10660 mk_key(Vertex *v) /* not sensitive to order */
10661 { ulong m = 0, vk2 = 0;
10662 Edge *e;
10663
10664 if (v->dst[0])
10665 { m += HASH(v->dst[0], v->to[0] - v->from[0] + 1);
10666 vk2 = (ulong) v->dst[0];
10667 }
10668 if (v->dst[1])
10669 { m += HASH(v->dst[1], v->to[1] - v->from[1] + 1);
10670 if ((ulong) v->dst[1] > vk2) vk2 = (ulong) v->dst[1];
10671 }
10672 for (e = v->Succ; e; e = e->Nxt)
10673 { m += HASH(e->Dst, e->To - e->From + 1 + e->s);
10674 if ((ulong) e->Dst > vk2) vk2 = (ulong) e->Dst;
10675 }
10676 Tally = (vk2>>2)&(TWIDTH-1);
10677 return m;
10678 }
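/*
 * Editorial note: mk_key() folds all outgoing ranges into a hash that is
 * insensitive to edge order, so structurally equal vertices get equal
 * keys regardless of construction history.  Like cheap_key() above it
 * leaves a bucket index in the global Tally, which insert_it(), find_it()
 * and delete_it() below combine with the layer number to select a splay
 * tree in layers[].
 */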
10679
10680 static ulong
10681 mk_special(int sigma, Vertex *n, Vertex *v)
10682 { ulong m = 0, vk2 = 0;
10683 Edge *f;
10684 int i;
10685
10686 for (i = 0; i < 2; i++)
10687 if (v->dst[i])
10688 { if (sigma >= v->from[i] && sigma <= v->to[i])
10689 { m += HASH(v->dst[i], v->to[i]-v->from[i]);
10690 if ((ulong) v->dst[i] > vk2
10691 && v->to[i] > v->from[i])
10692 vk2 = (ulong) v->dst[i];
10693 } else
10694 { m += HASH(v->dst[i], v->to[i]-v->from[i]+1);
10695 if ((ulong) v->dst[i] > vk2)
10696 vk2 = (ulong) v->dst[i];
10697 } }
10698 for (f = v->Succ; f; f = f->Nxt)
10699 { if (sigma >= f->From && sigma <= f->To)
10700 { m += HASH(f->Dst, f->To - f->From + f->s);
10701 if ((ulong) f->Dst > vk2
10702 && f->To - f->From + f->s > 0)
10703 vk2 = (ulong) f->Dst;
10704 } else if (f->s == 1 && sigma == f->S)
10705 { m += HASH(f->Dst, f->To - f->From + 1);
10706 if ((ulong) f->Dst > vk2) vk2 = (ulong) f->Dst;
10707 } else
10708 { m += HASH(f->Dst, f->To - f->From + 1 + f->s);
10709 if ((ulong) f->Dst > vk2) vk2 = (ulong) f->Dst;
10710 } }
10711
10712 if ((ulong) n > vk2) vk2 = (ulong) n;
10713 Tally = (vk2>>2)&(TWIDTH-1);
10714 m += HASH(n, 1);
10715 return m;
10716 }
10717
10718 void
10719 dfa_init(ushort nr_layers)
10720 { int i; Vertex *r, *t;
10721
10722 dfa_depth = nr_layers; /* one byte per layer */
10723 path = (Vertex **) emalloc((dfa_depth+1)*sizeof(Vertex *));
10724 layers = (Vertex **) emalloc(TWIDTH*(dfa_depth+1)*sizeof(Vertex *));
10725 lastword = (uchar *) emalloc((dfa_depth+1)*sizeof(uchar));
10726 lastword[dfa_depth] = lastword[0] = 255;
10727 path[0] = R = new_vertex(); F = new_vertex();
10728
10729 for (i = 1, r = R; i < dfa_depth; i++, r = t)
10730 t = allDelta(r, i-1);
10731 NF = allDelta(r, i-1);
10732 }
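/*
 * Editorial note: dfa_init() seeds the automaton as one chain of
 * dfa_depth layers, one state-vector byte per layer, in which every byte
 * value 0..255 leads to the next layer (allDelta) and finally to NF.
 * F is allocated but left unreachable, so initially no vector is
 * accepted; dfa_store() below redirects paths towards F as state vectors
 * are added.
 */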
10733
10734 #if 0
10735 static void complement_dfa(void) { Vertex *tmp = F; F = NF; NF = tmp; }
10736 #endif
10737
10738 double
10739 tree_stats(Vertex *t)
10740 { Edge *e; double cnt=0.0;
10741 if (!t) return 0;
10742 if (!t->key) return 0;
10743 t->key = 0; /* precaution */
10744 if (t->dst[0]) cnt++;
10745 if (t->dst[1]) cnt++;
10746 for (e = t->Succ; e; e = e->Nxt)
10747 cnt++;
10748 cnt += tree_stats(t->lnk);
10749 cnt += tree_stats(t->left);
10750 cnt += tree_stats(t->right);
10751 return cnt;
10752 }
10753
10754 void
10755 dfa_stats(void)
10756 { int i, j; double cnt = 0.0;
10757 for (j = 0; j < TWIDTH; j++)
10758 for (i = 0; i < dfa_depth+1; i++)
10759 cnt += tree_stats(layers[i*TWIDTH+j]);
10760 printf("Minimized Automaton: %6d nodes and %6g edges\n",
10761 nr_states, cnt);
10762 }
10763
10764 int
10765 dfa_member(ulong n)
10766 { Vertex **p, **q;
10767 uchar *w = &word[n];
10768 int i;
10769
10770 p = &path[n]; q = (p+1);
10771 for (i = n; i < dfa_depth; i++)
10772 *q++ = Delta(*p++, *w++);
10773 return (*p == F);
10774 }
10775
10776 int
10777 dfa_store(uchar *sv)
10778 { Vertex **p, **q, *s, *y, *old, *new = F;
10779 uchar *w, *u = lastword;
10780 int i, j, k;
10781
10782 w = word = sv;
10783 while (*w++ == *u++) /* find first byte that differs */
10784 ;
10785 pfrst = (int) (u - lastword) - 1;
10786 memcpy(&lastword[pfrst], &sv[pfrst], dfa_depth-pfrst);
10787 if (pfrst > iv) pfrst = iv;
10788 if (pfrst > nv) pfrst = nv;
10789 /* phase1: */
10790 p = &path[pfrst]; q = (p+1); w = &word[pfrst];
10791 for (i = pfrst; i < dfa_depth; i++)
10792 *q++ = Delta(*p++, *w++); /* (*p)->delta[*w++]; */
10793
10794 if (*p == F) return 1; /* it's already there */
10795 /* phase2: */
10796 iv = dfa_depth;
10797 do { iv--;
10798 old = new;
10799 new = find_it(path[iv], old, word[iv], iv);
10800 } while (new && iv > 0);
10801
10802 /* phase3: */
10803 nv = k = 0; s = path[0];
10804 for (j = 1; j <= iv; ++j)
10805 if (path[j]->num > 1)
10806 { y = new_vertex();
10807 copyEdges(y, path[j]);
10808 insert_it(y, j);
10809 numDelta(y, 1);
10810 delete_it(s, j-1);
10811 setDelta(s, word[j-1], y);
10812 insert_it(s, j-1);
10813 y->num = 1; /* initial value 1 */
10814 s = y;
10815 path[j]->num--; /* only 1 moved from j to y */
10816 k = 1;
10817 } else
10818 { s = path[j];
10819 if (!k) nv = j;
10820 }
10821 y = Delta(s, word[iv]);
10822 y->num--;
10823 delete_it(s, iv);
10824 setDelta(s, word[iv], old);
10825 insert_it(s, iv);
10826 old->num++;
10827
10828 for (j = iv+1; j < dfa_depth; j++)
10829 if (path[j]->num == 0)
10830 { numDelta(path[j], -1);
10831 delete_it(path[j], j);
10832 recyc_vertex(path[j]);
10833 } else
10834 break;
10835 return 0;
10836 }
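/*
 * Editorial note: dfa_store() adds one byte vector to the minimized
 * automaton in three phases: phase1 walks the current path and returns 1
 * if the vector is already accepted; phase2 searches bottom-up (find_it)
 * for existing states that can take over the new suffix; phase3 clones
 * shared prefix states (copyEdges), redirects a single transition so the
 * new word is accepted, and recycles states whose in-degree (num) drops
 * to zero.  The sketch below is illustrative only and not part of the
 * generated verifier; demo_ma_store() and its fixed width of 16 are
 * hypothetical -- the real caller uses MA+a_cycles and the compressed
 * state vector.
 */
#if 0
static void
demo_ma_store(void)
{	uchar sv[16];

	dfa_init(16);			/* one layer per state-vector byte */
	memset(sv, 0, sizeof(sv));
	if (dfa_store(sv) == 0)		/* 0: vector newly added */
		printf("new state\n");
	if (dfa_store(sv) == 1)		/* 1: vector was already present */
		printf("revisit\n");
}
#endif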
10837
10838 static Vertex *
10839 splay(ulong i, Vertex *t)
10840 { Vertex N, *l, *r, *y;
10841
10842 if (!t) return t;
10843 N.left = N.right = (Vertex *) 0;
10844 l = r = &N;
10845 for (;;)
10846 { if (i < t->key)
10847 { if (!t->left) break;
10848 if (i < t->left->key)
10849 { y = t->left;
10850 t->left = y->right;
10851 y->right = t;
10852 t = y;
10853 if (!t->left) break;
10854 }
10855 r->left = t;
10856 r = t;
10857 t = t->left;
10858 } else if (i > t->key)
10859 { if (!t->right) break;
10860 if (i > t->right->key)
10861 { y = t->right;
10862 t->right = y->left;
10863 y->left = t;
10864 t = y;
10865 if (!t->right) break;
10866 }
10867 l->right = t;
10868 l = t;
10869 t = t->right;
10870 } else
10871 break;
10872 }
10873 l->right = t->left;
10874 r->left = t->right;
10875 t->left = N.right;
10876 t->right = N.left;
10877 return t;
10878 }
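/*
 * Editorial note: splay() is a standard top-down splay of the tree rooted
 * at t around key i; the caller reinstalls the returned node as the new
 * root.  A minimal lookup, mirroring how insert_it() and delete_it() use
 * it (sketch only; root and k are hypothetical names):
 *
 *	root = splay(k, root);
 *	if (root && root->key == k)
 *		...	found -- duplicates hang off root->lnk
 */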
10879
10880 static void
10881 insert_it(Vertex *v, int L)
10882 { Vertex *new, *t;
10883 ulong i; int nr;
10884
10885 i = mk_key(v);
10886 nr = ((L*TWIDTH)+Tally);
10887 t = layers[nr];
10888
10889 v->key = i;
10890 if (!t)
10891 { layers[nr] = v;
10892 return;
10893 }
10894 t = splay(i, t);
10895 if (i < t->key)
10896 { new = v;
10897 new->left = t->left;
10898 new->right = t;
10899 t->left = (Vertex *) 0;
10900 } else if (i > t->key)
10901 { new = v;
10902 new->right = t->right;
10903 new->left = t;
10904 t->right = (Vertex *) 0;
10905 } else /* it's already there */
10906 { v->lnk = t->lnk; /* put in linked list off v */
10907 t->lnk = v;
10908 new = t;
10909 }
10910 layers[nr] = new;
10911 }
10912
10913 static int
10914 checkit(Vertex *h, Vertex *v, Vertex *n, uchar sigma)
10915 { Edge *g, *f;
10916 int i, k, j = 1;
10917
10918 for (k = 0; k < 2; k++)
10919 if (h->dst[k])
10920 { if (sigma >= h->from[k] && sigma <= h->to[k])
10921 { if (h->dst[k] != n) goto no_match;
10922 }
10923 for (i = h->from[k]; i <= h->to[k]; i++)
10924 { if (i == sigma) continue;
10925 g = cacheDelta(v, i, j); j = 0;
10926 if (h->dst[k] != g->Dst)
10927 goto no_match;
10928 if (g->s == 0 || g->S != i)
10929 i = g->To;
10930 } }
10931 for (f = h->Succ; f; f = f->Nxt)
10932 { if (INRANGE(f,sigma))
10933 { if (f->Dst != n) goto no_match;
10934 }
10935 for (i = f->From; i <= f->To; i++)
10936 { if (i == sigma) continue;
10937 g = cacheDelta(v, i, j); j = 0;
10938 if (f->Dst != g->Dst)
10939 goto no_match;
10940 if (g->s == 1 && i == g->S)
10941 continue;
10942 i = g->To;
10943 }
10944 if (f->s && f->S != sigma)
10945 { g = cacheDelta(v, f->S, 1);
10946 if (f->Dst != g->Dst)
10947 goto no_match;
10948 }
10949 }
10950 if (h->Succ || h->dst[0] || h->dst[1]) return 1;
10951 no_match:
10952 return 0;
10953 }
10954
10955 static Vertex *
10956 find_it(Vertex *v, Vertex *n, uchar sigma, int L)
10957 { Vertex *z, *t;
10958 ulong i; int nr;
10959
10960 i = mk_special(sigma,n,v);
10961 nr = ((L*TWIDTH)+Tally);
10962 t = layers[nr];
10963
10964 if (!t) return (Vertex *) 0;
10965 layers[nr] = t = splay(i, t);
10966 if (i == t->key)
10967 for (z = t; z; z = z->lnk)
10968 if (checkit(z, v, n, sigma))
10969 return z;
10970
10971 return (Vertex *) 0;
10972 }
10973
10974 static void
10975 delete_it(Vertex *v, int L)
10976 { Vertex *x, *t;
10977 ulong i; int nr;
10978
10979 i = cheap_key(v);
10980 nr = ((L*TWIDTH)+Tally);
10981 t = layers[nr];
10982 if (!t) return;
10983
10984 t = splay(i, t);
10985 if (i == t->key)
10986 { Vertex *z, *y = (Vertex *) 0;
10987 for (z = t; z && z != v; y = z, z = z->lnk)
10988 ;
10989 if (z != v) goto bad;
10990 if (y)
10991 { y->lnk = z->lnk;
10992 z->lnk = (Vertex *) 0;
10993 layers[nr] = t;
10994 return;
10995 } else if (z->lnk) /* z == t == v */
10996 { y = z->lnk;
10997 y->left = t->left;
10998 y->right = t->right;
10999 t->left = t->right = t->lnk = (Vertex *) 0;
11000 layers[nr] = y;
11001 return;
11002 }
11003 /* delete the node itself */
11004 if (!t->left)
11005 { x = t->right;
11006 } else
11007 { x = splay(i, t->left);
11008 x->right = t->right;
11009 }
11010 t->left = t->right = t->lnk = (Vertex *) 0;
11011 layers[nr] = x;
11012 return;
11013 }
11014 bad: Uerror("cannot happen delete");
11015 }
11016 #endif
11017 #if defined(MA) && (defined(W_XPT) || defined(R_XPT))
11018 static Vertex **temptree;
11019 static char wbuf[4096];
11020 static int WCNT = 4096, wcnt=0;
11021 static uchar stacker[MA+1];
11022 static ulong stackcnt = 0;
11023 extern double nstates, nlinks, truncs, truncs2;
11024
11025 static void
11026 xwrite(int fd, char *b, int n)
11027 {
11028 if (wcnt+n >= 4096)
11029 { write(fd, wbuf, wcnt);
11030 wcnt = 0;
11031 }
11032 memcpy(&wbuf[wcnt], b, n);
11033 wcnt += n;
11034 }
11035
11036 static void
11037 wclose(int fd)
11038 {
11039 if (wcnt > 0)
11040 write(fd, wbuf, wcnt);
11041 wcnt = 0;
11042 close(fd);
11043 }
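/*
 * Editorial note: xwrite() and wclose() implement a simple 4096-byte
 * write buffer (wbuf/wcnt) so the checkpoint goes out in large blocks;
 * xread() further below is the matching buffered reader.
 */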
11044
11045 static void
11046 w_vertex(int fd, Vertex *v)
11047 { char t[3]; int i; Edge *e;
11048
11049 xwrite(fd, (char *) &v, sizeof(Vertex *));
11050 t[0] = 0;
11051 for (i = 0; i < 2; i++)
11052 if (v->dst[i])
11053 { t[1] = v->from[i], t[2] = v->to[i];
11054 xwrite(fd, t, 3);
11055 xwrite(fd, (char *) &(v->dst[i]), sizeof(Vertex *));
11056 }
11057 for (e = v->Succ; e; e = e->Nxt)
11058 { t[1] = e->From, t[2] = e->To;
11059 xwrite(fd, t, 3);
11060 xwrite(fd, (char *) &(e->Dst), sizeof(Vertex *));
11061
11062 if (e->s)
11063 { t[1] = t[2] = e->S;
11064 xwrite(fd, t, 3);
11065 xwrite(fd, (char *) &(e->Dst), sizeof(Vertex *));
11066 } }
11067 }
11068
11069 static void
11070 w_layer(int fd, Vertex *v)
11071 { uchar c=1;
11072
11073 if (!v) return;
11074 xwrite(fd, (char *) &c, 1);
11075 w_vertex(fd, v);
11076 w_layer(fd, v->lnk);
11077 w_layer(fd, v->left);
11078 w_layer(fd, v->right);
11079 }
11080
11081 void
11082 w_xpoint(void)
11083 { int fd; char nm[64];
11084 int i, j; uchar c;
11085 static uchar xwarned = 0;
11086
11087 sprintf(nm, "%s.xpt", PanSource);
11088 if ((fd = creat(nm, 0666)) <= 0)
11089 { if (!xwarned)
11090 { xwarned = 1;
11091 printf("cannot creat checkpoint file\n"); }
11092 return; /* never write into a bad fd */
11093 }
11094 xwrite(fd, (char *) &nstates, sizeof(double));
11095 xwrite(fd, (char *) &truncs, sizeof(double));
11096 xwrite(fd, (char *) &truncs2, sizeof(double));
11097 xwrite(fd, (char *) &nlinks, sizeof(double));
11098 xwrite(fd, (char *) &dfa_depth, sizeof(int));
11099 xwrite(fd, (char *) &R, sizeof(Vertex *));
11100 xwrite(fd, (char *) &F, sizeof(Vertex *));
11101 xwrite(fd, (char *) &NF, sizeof(Vertex *));
11102
11103 for (j = 0; j < TWIDTH; j++)
11104 for (i = 0; i < dfa_depth+1; i++)
11105 { w_layer(fd, layers[i*TWIDTH+j]);
11106 c = 2; xwrite(fd, (char *) &c, 1);
11107 }
11108 wclose(fd);
11109 }
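/*
 * Editorial note: the checkpoint written by w_xpoint() is a flat byte
 * stream: four doubles (nstates, truncs, truncs2, nlinks), dfa_depth,
 * the old pointer values of R, F and NF (used as keys on reload), and
 * then every splay tree in layers[].  Inside a tree each vertex record
 * is preceded by a 1-byte tag, each edge record by a 0-byte tag (t[0] in
 * w_vertex), and a 2-byte tag ends the tree; r_xpoint() and r_layer()
 * below parse exactly this layout.
 */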
11110
11111 static void
11112 xread(int fd, char *b, int n)
11113 { int m = wcnt; int delta = 0;
11114 if (m < n)
11115 { if (m > 0) memcpy(b, &wbuf[WCNT-m], m);
11116 delta = m;
11117 WCNT = wcnt = read(fd, wbuf, 4096);
11118 if (wcnt < n-m)
11119 Uerror("xread failed -- insufficient data");
11120 n -= m;
11121 }
11122 memcpy(&b[delta], &wbuf[WCNT-wcnt], n);
11123 wcnt -= n;
11124 }
11125
11126 static void
11127 x_cleanup(Vertex *c)
11128 { Edge *e; /* remove the tree and edges from c */
11129 if (!c) return;
11130 for (e = c->Succ; e; e = e->Nxt)
11131 x_cleanup(e->Dst);
11132 recyc_vertex(c);
11133 }
11134
11135 static void
11136 x_remove(void)
11137 { Vertex *tmp; int i, s;
11138 int r, j;
11139 /* double-check: */
11140 stacker[dfa_depth-1] = 0; r = dfa_store(stacker);
11141 stacker[dfa_depth-1] = 4; j = dfa_member(dfa_depth-1);
11142 if (r != 1 || j != 0)
11143 { printf("%d: ", stackcnt);
11144 for (i = 0; i < dfa_depth; i++)
11145 printf("%d,", stacker[i]);
11146 printf(" -- not a stackstate <o:%d,4:%d>\n", r, j);
11147 return;
11148 }
11149 stacker[dfa_depth-1] = 1;
11150 s = dfa_member(dfa_depth-1);
11151
11152 { tmp = F; F = NF; NF = tmp; } /* complement */
11153 if (s) dfa_store(stacker);
11154 stacker[dfa_depth-1] = 0;
11155 dfa_store(stacker);
11156 stackcnt++;
11157 { tmp = F; F = NF; NF = tmp; }
11158 }
11159
11160 static void
11161 x_rm_stack(Vertex *t, int k)
11162 { int j; Edge *e;
11163
11164 if (k == 0)
11165 { x_remove();
11166 return;
11167 }
11168 if (t)
11169 for (e = t->Succ; e; e = e->Nxt)
11170 { for (j = e->From; j <= (int) e->To; j++)
11171 { stacker[k] = (uchar) j;
11172 x_rm_stack(e->Dst, k-1);
11173 }
11174 if (e->s)
11175 { stacker[k] = e->S;
11176 x_rm_stack(e->Dst, k-1);
11177 } }
11178 }
11179
11180 static Vertex *
11181 insert_withkey(Vertex *v, int L)
11182 { Vertex *new, *t = temptree[L];
11183
11184 if (!t) { temptree[L] = v; return v; }
11185 t = splay(v->key, t);
11186 if (v->key < t->key)
11187 { new = v;
11188 new->left = t->left;
11189 new->right = t;
11190 t->left = (Vertex *) 0;
11191 } else if (v->key > t->key)
11192 { new = v;
11193 new->right = t->right;
11194 new->left = t;
11195 t->right = (Vertex *) 0;
11196 } else
11197 { if (t != R && t != F && t != NF)
11198 Uerror("double insert, bad checkpoint data");
11199 else
11200 { recyc_vertex(v);
11201 new = t;
11202 } }
11203 temptree[L] = new;
11204
11205 return new;
11206 }
11207
11208 static Vertex *
11209 find_withkey(Vertex *v, int L)
11210 { Vertex *t = temptree[L];
11211 if (t)
11212 { temptree[L] = t = splay((ulong) v, t);
11213 if (t->key == (ulong) v)
11214 return t;
11215 }
11216 Uerror("not found error, bad checkpoint data");
11217 return (Vertex *) 0;
11218 }
11219
11220 void
11221 r_layer(int fd, int n)
11222 { Vertex *v;
11223 Edge *e;
11224 char c, t[2];
11225
11226 for (;;)
11227 { xread(fd, &c, 1);
11228 if (c == 2) break;
11229 if (c == 1)
11230 { v = new_vertex();
11231 xread(fd, (char *) &(v->key), sizeof(Vertex *));
11232 v = insert_withkey(v, n);
11233 } else /* c == 0 */
11234 { e = new_edge((Vertex *) 0);
11235 xread(fd, t, 2);
11236 e->From = t[0];
11237 e->To = t[1];
11238 xread(fd, (char *) &(e->Dst), sizeof(Vertex *));
11239 insert_edge(v, e);
11240 } }
11241 }
11242
11243 static void
11244 v_fix(Vertex *t, int nr)
11245 { int i; Edge *e;
11246
11247 if (!t) return;
11248
11249 for (i = 0; i < 2; i++)
11250 if (t->dst[i])
11251 t->dst[i] = find_withkey(t->dst[i], nr);
11252
11253 for (e = t->Succ; e; e = e->Nxt)
11254 e->Dst = find_withkey(e->Dst, nr);
11255
11256 v_fix(t->left, nr);
11257 v_fix(t->right, nr);
11258 }
11259
11260 static void
11261 v_insert(Vertex *t, int nr)
11262 { Edge *e; int i;
11263
11264 if (!t) return;
11265 v_insert(t->left, nr);
11266 v_insert(t->right, nr);
11267
11268 /* remove only leafs from temptree */
11269 t->left = t->right = t->lnk = (Vertex *) 0;
11270 insert_it(t, nr); /* into layers */
11271 for (i = 0; i < 2; i++)
11272 if (t->dst[i])
11273 t->dst[i]->num += (t->to[i] - t->from[i] + 1);
11274 for (e = t->Succ; e; e = e->Nxt)
11275 e->Dst->num += (e->To - e->From + 1 + e->s);
11276 }
11277
11278 static void
11279 x_fixup(void)
11280 { int i;
11281
11282 for (i = 0; i < dfa_depth; i++)
11283 v_fix(temptree[i], (i+1));
11284
11285 for (i = dfa_depth; i >= 0; i--)
11286 v_insert(temptree[i], i);
11287 }
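/*
 * Editorial note: after a reload the rebuilt vertices are keyed by their
 * old pointer values.  v_fix() replaces every stored old pointer with
 * the corresponding new vertex (find_withkey), and v_insert() then moves
 * the vertices out of the temporary trees into layers[] while restoring
 * the incoming-edge counts (num).
 */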
11288
11289 static Vertex *
11290 x_tail(Vertex *t, ulong want)
11291 { int i, yes, no; Edge *e; Vertex *v = (Vertex *) 0;
11292
11293 if (!t) return v;
11294
11295 yes = no = 0;
11296 for (i = 0; i < 2; i++)
11297 if ((ulong) t->dst[i] == want)
11298 { /* was t->from[i] <= 0 && t->to[i] >= 0 */
11299 /* but from and to are uchar */
11300 if (t->from[i] == 0)
11301 yes = 1;
11302 else
11303 if (t->from[i] <= 4 && t->to[i] >= 4)
11304 no = 1;
11305 }
11306
11307 for (e = t->Succ; e; e = e->Nxt)
11308 if ((ulong) e->Dst == want)
11309 { /* was INRANGE(e,0) but From and To are uchar */
11310 if ((e->From == 0) || (e->s==1 && e->S==0))
11311 yes = 1;
11312 else if (INRANGE(e, 4))
11313 no = 1;
11314 }
11315 if (yes && !no) return t;
11316 v = x_tail(t->left, want); if (v) return v;
11317 v = x_tail(t->right, want); if (v) return v;
11318 return (Vertex *) 0;
11319 }
11320
11321 static void
11322 x_anytail(Vertex *t, Vertex *c, int nr)
11323 { int i; Edge *e, *f; Vertex *v;
11324
11325 if (!t) return;
11326
11327 for (i = 0; i < 2; i++)
11328 if ((ulong) t->dst[i] == c->key)
11329 { v = new_vertex(); v->key = t->key;
11330 f = new_edge(v);
11331 f->From = t->from[i];
11332 f->To = t->to[i];
11333 f->Nxt = c->Succ;
11334 c->Succ = f;
11335 if (nr > 0)
11336 x_anytail(temptree[nr-1], v, nr-1);
11337 }
11338
11339 for (e = t->Succ; e; e = e->Nxt)
11340 if ((ulong) e->Dst == c->key)
11341 { v = new_vertex(); v->key = t->key;
11342 f = new_edge(v);
11343 f->From = e->From;
11344 f->To = e->To;
11345 f->s = e->s;
11346 f->S = e->S;
11347 f->Nxt = c->Succ;
11348 c->Succ = f;
11349 x_anytail(temptree[nr-1], v, nr-1);
11350 }
11351
11352 x_anytail(t->left, c, nr);
11353 x_anytail(t->right, c, nr);
11354 }
11355
11356 static Vertex *
11357 x_cpy_rev(void)
11358 { Vertex *c, *v; /* find 0 and !4 predecessor of F */
11359
11360 v = x_tail(temptree[dfa_depth-1], F->key);
11361 if (!v) return (Vertex *) 0;
11362
11363 c = new_vertex(); c->key = v->key;
11364
11365 /* every node on dfa_depth-2 that has v->key as succ */
11366 /* make copy and let c point to these (reversing ptrs) */
11367
11368 x_anytail(temptree[dfa_depth-2], c, dfa_depth-2);
11369
11370 return c;
11371 }
11372
11373 void
11374 r_xpoint(void)
11375 { int fd; char nm[64]; Vertex *d;
11376 int i, j;
11377
11378 wcnt = 0;
11379 sprintf(nm, "%s.xpt", PanSource);
11380 if ((fd = open(nm, 0)) < 0) /* O_RDONLY */
11381 Uerror("cannot open checkpoint file");
11382
11383 xread(fd, (char *) &nstates, sizeof(double));
11384 xread(fd, (char *) &truncs, sizeof(double));
11385 xread(fd, (char *) &truncs2, sizeof(double));
11386 xread(fd, (char *) &nlinks, sizeof(double));
11387 xread(fd, (char *) &dfa_depth, sizeof(int));
11388
11389 if (dfa_depth != MA+a_cycles)
11390 Uerror("bad dfa_depth in checkpoint file");
11391
11392 path = (Vertex **) emalloc((dfa_depth+1)*sizeof(Vertex *));
11393 layers = (Vertex **) emalloc(TWIDTH*(dfa_depth+1)*sizeof(Vertex *));
11394 temptree = (Vertex **) emalloc((dfa_depth+2)*sizeof(Vertex *));
11395 lastword = (uchar *) emalloc((dfa_depth+1)*sizeof(uchar));
11396 lastword[dfa_depth] = lastword[0] = 255;
11397
11398 path[0] = R = new_vertex();
11399 xread(fd, (char *) &R->key, sizeof(Vertex *));
11400 R = insert_withkey(R, 0);
11401
11402 F = new_vertex();
11403 xread(fd, (char *) &F->key, sizeof(Vertex *));
11404 F = insert_withkey(F, dfa_depth);
11405
11406 NF = new_vertex();
11407 xread(fd, (char *) &NF->key, sizeof(Vertex *));
11408 NF = insert_withkey(NF, dfa_depth);
11409
11410 for (j = 0; j < TWIDTH; j++)
11411 for (i = 0; i < dfa_depth+1; i++)
11412 r_layer(fd, i);
11413
11414 if (wcnt != 0) Uerror("bad count in checkpoint file");
11415
11416 d = x_cpy_rev();
11417 x_fixup();
11418 stacker[dfa_depth-1] = 0;
11419 x_rm_stack(d, dfa_depth-2);
11420 x_cleanup(d);
11421 close(fd);
11422
11423 printf("pan: removed %d stackstates\n", stackcnt);
11424 nstates -= (double) stackcnt;
11425 }
11426 #endif
11427 #ifdef VERI
11428 void
11429 check_claim(int st)
11430 {
11431 if (st == endclaim)
11432 uerror("claim violated!");
11433 if (stopstate[VERI][st])
11434 uerror("end state in claim reached");
11435 }
11436 #endif
11437 void
11438 c_globals(void)
11439 { /* int i; */
11440 printf("global vars:\n");
11441 printf(" byte write_off: %d\n", now.write_off);
11442 { int l_in;
11443 for (l_in = 0; l_in < 2; l_in++)
11444 {
11445 printf(" byte commit_count[%d]: %d\n", l_in, now.commit_count[l_in]);
11446 }
11447 }
11448 printf(" byte read_off: %d\n", now.read_off);
11449 printf(" byte events_lost: %d\n", now.events_lost);
11450 printf(" byte refcount: %d\n", now.refcount);
11451 { int l_in;
11452 for (l_in = 0; l_in < 4; l_in++)
11453 {
11454 printf(" bit buffer_use[%d]: %d\n", l_in, now.buffer_use[l_in]);
11455 }
11456 }
11457 }
11458 void
11459 c_locals(int pid, int tp)
11460 { /* int i; */
11461 switch(tp) {
11462 case 4:
11463 printf("local vars proc %d (:init:):\n", pid);
11464 printf(" byte i: %d\n", ((P4 *)pptr(pid))->i);
11465 printf(" byte j: %d\n", ((P4 *)pptr(pid))->j);
11466 printf(" byte sum: %d\n", ((P4 *)pptr(pid))->sum);
11467 printf(" byte commit_sum: %d\n", ((P4 *)pptr(pid))->commit_sum);
11468 break;
11469 case 3:
11470 /* none */
11471 break;
11472 case 2:
11473 printf("local vars proc %d (reader):\n", pid);
11474 printf(" byte i: %d\n", ((P2 *)pptr(pid))->i);
11475 printf(" byte j: %d\n", ((P2 *)pptr(pid))->j);
11476 break;
11477 case 1:
11478 printf("local vars proc %d (tracer):\n", pid);
11479 printf(" byte size: %d\n", ((P1 *)pptr(pid))->size);
11480 printf(" byte prev_off: %d\n", ((P1 *)pptr(pid))->prev_off);
11481 printf(" byte new_off: %d\n", ((P1 *)pptr(pid))->new_off);
11482 printf(" byte tmp_commit: %d\n", ((P1 *)pptr(pid))->tmp_commit);
11483 printf(" byte i: %d\n", ((P1 *)pptr(pid))->i);
11484 printf(" byte j: %d\n", ((P1 *)pptr(pid))->j);
11485 break;
11486 case 0:
11487 printf("local vars proc %d (switcher):\n", pid);
11488 printf(" byte prev_off: %d\n", ((P0 *)pptr(pid))->prev_off);
11489 printf(" byte new_off: %d\n", ((P0 *)pptr(pid))->new_off);
11490 printf(" byte tmp_commit: %d\n", ((P0 *)pptr(pid))->tmp_commit);
11491 printf(" byte size: %d\n", ((P0 *)pptr(pid))->size);
11492 break;
11493 }
11494 }
11495 void
11496 printm(int x)
11497 {
11498 switch (x) {
11499 default: Printf("%d", x);
11500 }
11501 }
11502 void
11503 c_chandump(int unused) { unused++; /* avoid complaints */ }