add formal verification
[lttv.git] / trunk / verif / examples / pan.c
1 /*** Generated by Spin Version 5.1.6 -- 9 May 2008 ***/
2 /*** From source: buffer.spin ***/
3
4 #ifdef SC
5 #define _FILE_OFFSET_BITS 64
6 #endif
7 #include <stdio.h>
8 #include <signal.h>
9 #include <stdlib.h>
10 #include <stdarg.h>
11 #include <string.h>
12 #include <ctype.h>
13 #include <errno.h>
14 #if defined(WIN32) || defined(WIN64)
15 #include <time.h>
16 #else
17 #include <unistd.h>
18 #include <sys/times.h>
19 #endif
20 #include <sys/types.h>
21 #include <sys/stat.h>
22 #include <fcntl.h>
23 #define Offsetof(X, Y) ((unsigned long)(&(((X *)0)->Y)))
24 #ifndef max
25 #define max(a,b) (((a)<(b)) ? (b) : (a))
26 #endif
27 #ifndef PRINTF
28 int Printf(const char *fmt, ...); /* prototype only */
29 #endif
30 #include "pan.h"
31 #ifdef LOOPSTATE
32 double cnt_loops;
33 #endif
34 State A_Root; /* seed-state for cycles */
35 State now; /* the full state-vector */
36 #undef C_States
37 #if defined(C_States) && defined(HAS_TRACK)
38 void
39 c_update(uchar *p_t_r)
40 {
41 #ifdef VERBOSE
42 printf("c_update %u\n", p_t_r);
43 #endif
44 }
45 void
46 c_revert(uchar *p_t_r)
47 {
48 #ifdef VERBOSE
49 printf("c_revert %u\n", p_t_r);
50 #endif
51 }
52 #endif
/* Hook for global embedded c_code initialization.  This model declares
 * no global c_code, so the generated body is intentionally empty. */
void
globinit(void)
{
	/* nothing to initialize */
}
/* Per-process c_code initializer for proctype 4 (:init:).
 * h is the process slot; unused because the model embeds no c_code. */
void
locinit4(int h)
{
	(void) h;	/* no local c_code to set up */
}
/* Per-process c_code initializer for proctype 3 (cleaner).
 * h is the process slot; unused because the model embeds no c_code. */
void
locinit3(int h)
{
	(void) h;	/* no local c_code to set up */
}
/* Per-process c_code initializer for proctype 2 (reader).
 * h is the process slot; unused because the model embeds no c_code. */
void
locinit2(int h)
{
	(void) h;	/* no local c_code to set up */
}
/* Per-process c_code initializer for proctype 1 (tracer).
 * h is the process slot; unused because the model embeds no c_code. */
void
locinit1(int h)
{
	(void) h;	/* no local c_code to set up */
}
/* Per-process c_code initializer for proctype 0 (switcher).
 * h is the process slot; unused because the model embeds no c_code. */
void
locinit0(int h)
{
	(void) h;	/* no local c_code to set up */
}
77 #ifdef CNTRSTACK
78 #define onstack_now() (LL[trpt->j6] && LL[trpt->j7])
79 #define onstack_put() LL[trpt->j6]++; LL[trpt->j7]++
80 #define onstack_zap() LL[trpt->j6]--; LL[trpt->j7]--
81 #endif
82 #if !defined(SAFETY) && !defined(NOCOMP)
83 #define V_A (((now._a_t&1)?2:1) << (now._a_t&2))
84 #define A_V (((now._a_t&1)?1:2) << (now._a_t&2))
85 int S_A = 0;
86 #else
87 #define V_A 0
88 #define A_V 0
89 #define S_A 0
90 #endif
91 #ifdef MA
92 #undef onstack_now
93 #undef onstack_put
94 #undef onstack_zap
95 #define onstack_put() ;
96 #define onstack_zap() gstore((char *) &now, vsize, 4)
97 #else
98 #if defined(FULLSTACK) && !defined(BITSTATE)
99 #define onstack_put() trpt->ostate = Lstate
100 #define onstack_zap() { \
101 if (trpt->ostate) \
102 trpt->ostate->tagged = \
103 (S_A)? (trpt->ostate->tagged&~V_A) : 0; \
104 }
105 #endif
106 #endif
107 #ifndef NO_V_PROVISO
108 #define V_PROVISO
109 #endif
110 #if !defined(NO_RESIZE) && !defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(SPACE) && NCORE==1
111 #define AUTO_RESIZE
112 #endif
113
/* One element of the state hash-table: a stored (possibly compressed)
 * state vector plus bookkeeping whose layout depends on the search
 * options compiled in (FULLSTACK, COLLAPSE, NCORE, ...). */
114 struct H_el {
115 struct H_el *nxt;
116 #ifdef FULLSTACK
117 unsigned int tagged;
118 #if defined(BITSTATE) && !defined(NOREDUCE) && !defined(SAFETY)
119 unsigned int proviso;
120 #endif
121 #endif
122 #if defined(CHECK) || (defined(COLLAPSE) && !defined(FULLSTACK))
123 unsigned long st_id;
124 #endif
125 #if !defined(SAFETY) || defined(REACH)
126 unsigned int D;
127 #endif
128 #if NCORE>1
129 /* could cost 1 extra word: 4 bytes if 32-bit and 8 bytes if 64-bit */
130 #ifdef V_PROVISO
131 uchar cpu_id; /* id of cpu that created the state */
132 #endif
133 #endif
134 #ifdef COLLAPSE
135 #if VECTORSZ<65536
136 unsigned short ln;
137 #else
138 unsigned long ln;
139 #endif
140 #endif
141 #if defined(AUTO_RESIZE) && !defined(BITSTATE)
142 unsigned long m_K1;
143 #endif
144 unsigned long state;
145 } **H_tab, **S_Tab;
146
/* One frame of the depth-first-search stack ("trail"): records enough
 * about a single transition to replay or undo it.  The #if 0 block
 * below documents the tau/o_pm bit-flag meanings. */
147 typedef struct Trail {
148 int st; /* current state */
149 uchar pr; /* process id */
150 uchar tau; /* 8 bit-flags */
151 uchar o_pm; /* 8 more bit-flags */
152 #if 0
153 Meaning of bit-flags:
154 tau&1 -> timeout enabled
155 tau&2 -> request to enable timeout 1 level up (in claim)
156 tau&4 -> current transition is a claim move
157 tau&8 -> current transition is an atomic move
158 tau&16 -> last move was truncated on stack
159 tau&32 -> current transition is a preselected move
160 tau&64 -> at least one next state is not on the stack
161 tau&128 -> current transition is a stutter move
162 o_pm&1 -> the current pid moved -- implements else
163 o_pm&2 -> this is an acceptance state
164 o_pm&4 -> this is a progress state
165 o_pm&8 -> fairness alg rule 1 undo mark
166 o_pm&16 -> fairness alg rule 3 undo mark
167 o_pm&32 -> fairness alg rule 2 undo mark
168 o_pm&64 -> the current proc applied rule2
169 o_pm&128 -> a fairness, dummy move - all procs blocked
170 #endif
171 #ifdef NSUCC
172 uchar n_succ; /* nr of successor states */
173 #endif
174 #if defined(FULLSTACK) && defined(MA) && !defined(BFS)
175 uchar proviso;
176 #endif
177 #ifndef BFS
178 uchar o_n, o_ot; /* to save locals */
179 #endif
180 uchar o_m;
181 #ifdef EVENT_TRACE
182 #if nstates_event<256
183 uchar o_event;
184 #else
185 unsigned short o_event;
186 #endif
187 #endif
188 int o_tt;
189 #ifndef BFS
190 short o_To;
191 #ifdef RANDOMIZE
192 short oo_i;
193 #endif
194 #endif
195 #if defined(HAS_UNLESS) && !defined(BFS)
196 int e_state; /* if escape trans - state of origin */
197 #endif
198 #if (defined(FULLSTACK) && !defined(MA)) || defined(BFS) || (NCORE>1)
199 struct H_el *ostate; /* pointer to stored state */
200 #endif
201 #if defined(CNTRSTACK) && !defined(BFS)
202 long j6, j7;
203 #endif
204 Trans *o_t;
205 #ifdef SCHED
206 /* based on Qadeer&Rehof, Tacas 2005, LNCS 3440, pp. 93-107 */
207 #if NCORE>1
208 #error "-DSCHED cannot be combined with -DNCORE (yet)"
209 #endif
210 int sched_limit;
211 #endif
212 #ifdef HAS_SORTED
213 short ipt;
214 #endif
215 union {
216 int oval;
217 int *ovals;
218 } bup;
219 } Trail;
220 Trail *trail, *trpt;
221 FILE *efd;
222 uchar *this;
223 long maxdepth=10000;
224 long omaxdepth=10000;
225 #ifdef SCHED
226 int sched_max = 10;
227 #endif
228 #ifdef PERMUTED
229 uchar permuted = 1;
230 #else
231 uchar permuted = 0;
232 #endif
233 double quota; /* time limit */
234 #if NCORE>1
235 long z_handoff = -1;
236 #endif
237 #ifdef SC
238 char *stackfile;
239 #endif
240 uchar *SS, *LL;
241 uchar HASH_NR = 0;
242
243 double memcnt = (double) 0;
244 double memlim = (double) (1<<30); /* 1 GB */
245 #if NCORE>1
246 double mem_reserved = (double) 0;
247 #endif
248
249 /* for emalloc: */
250 static char *have;
251 static long left = 0L;
252 static double fragment = (double) 0;
253 static unsigned long grow;
254
255 unsigned int HASH_CONST[] = {
256 /* asuming 4 bytes per int */
257 0x88888EEF, 0x00400007,
258 0x04c11db7, 0x100d4e63,
259 0x0fc22f87, 0x3ff0c3ff,
260 0x38e84cd7, 0x02b148e9,
261 0x98b2e49d, 0xb616d379,
262 0xa5247fd9, 0xbae92a15,
263 0xb91c8bc5, 0x8e5880f3,
264 0xacd7c069, 0xb4c44bb3,
265 0x2ead1fb7, 0x8e428171,
266 0xdbebd459, 0x828ae611,
267 0x6cb25933, 0x86cdd651,
268 0x9e8f5f21, 0xd5f8d8e7,
269 0x9c4e956f, 0xb5cf2c71,
270 0x2e805a6d, 0x33fc3a55,
271 0xaf203ed1, 0xe31f5909,
272 0x5276db35, 0x0c565ef7,
273 0x273d1aa5, 0x8923b1dd,
274 0
275 };
276 #if NCORE>1
277 extern int core_id;
278 #endif
279 long mreached=0;
280 int done=0, errors=0, Nrun=1;
281 int c_init_done=0;
282 char *c_stack_start = (char *) 0;
283 double nstates=0, nlinks=0, truncs=0, truncs2=0;
284 double nlost=0, nShadow=0, hcmp=0, ngrabs=0;
285 #if defined(ZAPH) && defined(BITSTATE)
286 double zstates = 0;
287 #endif
288 int c_init_run;
289 #ifdef BFS
290 double midrv=0, failedrv=0, revrv=0;
291 #endif
292 unsigned long nr_states=0; /* nodes in DFA */
293 long Fa=0, Fh=0, Zh=0, Zn=0;
294 long PUT=0, PROBE=0, ZAPS=0;
295 long Ccheck=0, Cholds=0;
296 int a_cycles=0, upto=1, strict=0, verbose = 0, signoff = 0;
297 #ifdef HAS_CODE
298 int gui = 0, coltrace = 0, readtrail = 0;
299 int whichtrail = 0, onlyproc = -1, silent = 0;
300 #endif
301 int state_tables=0, fairness=0, no_rck=0, Nr_Trails=0;
302 char simvals[128];
303 #ifndef INLINE
304 int TstOnly=0;
305 #endif
306 unsigned long mask, nmask;
307 #ifdef BITSTATE
308 int ssize=23; /* 1 Mb */
309 #else
310 int ssize=19; /* 512K slots */
311 #endif
312 int hmax=0, svmax=0, smax=0;
313 int Maxbody=0, XX;
314 uchar *noptr; /* used by macro Pptr(x) */
315 #ifdef VAR_RANGES
316 void logval(char *, int);
317 void dumpranges(void);
318 #endif
319 #ifdef MA
320 #define INLINE_REV
321 extern void dfa_init(unsigned short);
322 extern int dfa_member(unsigned long);
323 extern int dfa_store(uchar *);
324 unsigned int maxgs = 0;
325 #endif
326
327 #ifdef ALIGNED
328 State comp_now __attribute__ ((aligned (8)));
329 /* gcc 64-bit aligned for Itanium2 systems */
330 /* MAJOR runtime penalty if not used on those systems */
331 #else
332 State comp_now; /* compressed state vector */
333 #endif
334
335 State comp_msk;
336 uchar *Mask = (uchar *) &comp_msk;
337 #ifdef COLLAPSE
338 State comp_tmp;
339 static char *scratch = (char *) &comp_tmp;
340 #endif
341 Stack *stack; /* for queues, processes */
342 Svtack *svtack; /* for old state vectors */
343 #ifdef BITSTATE
344 static unsigned int hfns = 3; /* new default */
345 #endif
346 static unsigned long j1;
347 static unsigned long K1, K2;
348 static unsigned long j2, j3, j4;
349 #ifdef BITSTATE
350 static long udmem;
351 #endif
352 static long A_depth = 0;
353 long depth = 0;
354 #if NCORE>1
355 long nr_handoffs = 0;
356 #endif
357 static uchar warned = 0, iterative = 0, exclusive = 0, like_java = 0, every_error = 0;
358 static uchar noasserts = 0, noends = 0, bounded = 0;
359 #if SYNC>0 && ASYNC==0
360 void set_recvs(void);
361 int no_recvs(int);
362 #endif
363 #if SYNC
364 #define IfNotBlocked if (boq != -1) continue;
365 #define UnBlock boq = -1
366 #else
367 #define IfNotBlocked /* cannot block */
368 #define UnBlock /* don't bother */
369 #endif
370
371 #ifdef BITSTATE
372 int (*bstore)(char *, int);
373 int bstore_reg(char *, int);
374 int bstore_mod(char *, int);
375 #endif
376 void active_procs(void);
377 void cleanup(void);
378 void do_the_search(void);
379 void find_shorter(int);
380 void iniglobals(void);
381 void stopped(int);
382 void wrapup(void);
383 int *grab_ints(int);
384 void ungrab_ints(int *, int);
385 #ifndef NOBOUNDCHECK
386 #define Index(x, y) Boundcheck(x, y, II, tt, t)
387 #else
388 #define Index(x, y) x
389 #endif
390 short Air[] = { (short) Air0, (short) Air1, (short) Air2, (short) Air3, (short) Air4, (short) Air5 };
/* Create a new process instance of proctype n in the global state
 * vector: aligns the vector to the word size, reserves sizeof(Pn)
 * bytes, zeroes them, sets the _pid/_t/_p fields and the initial
 * values of all locals, and returns the new pid (relative to BASE).
 * Calls Uerror/pan_exit on overflow of MAXPROC, NFAIR or VECTORSZ. */
391 int
392 addproc(int n)
393 { int j, h = now._nr_pr;
394 #ifndef NOCOMP
395 int k;
396 #endif
397 uchar *o_this = this;
398 
399 #ifndef INLINE
400 if (TstOnly) return (h < MAXPROC);
401 #endif
402 #ifndef NOBOUNDCHECK
403 /* redefine Index only within this procedure */
404 #undef Index
405 #define Index(x, y) Boundcheck(x, y, 0, 0, 0)
406 #endif
407 if (h >= MAXPROC)
408 Uerror("too many processes");
/* j = size of the per-process state block for proctype n */
409 switch (n) {
410 case 0: j = sizeof(P0); break;
411 case 1: j = sizeof(P1); break;
412 case 2: j = sizeof(P2); break;
413 case 3: j = sizeof(P3); break;
414 case 4: j = sizeof(P4); break;
415 case 5: j = sizeof(P5); break;
416 default: Uerror("bad proc - addproc");
417 }
/* pad the state vector so the new process starts word-aligned */
418 if (vsize%WS)
419 proc_skip[h] = WS-(vsize%WS);
420 else
421 proc_skip[h] = 0;
422 #ifndef NOCOMP
423 for (k = vsize + (int) proc_skip[h]; k > vsize; k--)
424 Mask[k-1] = 1; /* align */
425 #endif
426 vsize += (int) proc_skip[h];
427 proc_offset[h] = vsize;
428 #ifdef SVDUMP
429 if (vprefix > 0)
430 { int dummy = 0;
431 write(svfd, (uchar *) &dummy, sizeof(int)); /* mark */
432 write(svfd, (uchar *) &h, sizeof(int));
433 write(svfd, (uchar *) &n, sizeof(int));
434 #if VECTORSZ>32000
435 write(svfd, (uchar *) &proc_offset[h], sizeof(int));
436 #else
437 write(svfd, (uchar *) &proc_offset[h], sizeof(short));
438 #endif
439 write(svfd, (uchar *) &now, vprefix-4*sizeof(int)); /* padd */
440 }
441 #endif
442 now._nr_pr += 1;
443 if (fairness && ((int) now._nr_pr + 1 >= (8*NFAIR)/2))
444 { printf("pan: error: too many processes -- current");
445 printf(" max is %d procs (-DNFAIR=%d)\n",
446 (8*NFAIR)/2 - 2, NFAIR);
447 printf("\trecompile with -DNFAIR=%d\n",
448 NFAIR+1);
449 pan_exit(1);
450 }
451 vsize += j;
452 #ifndef NOVSZ
453 now._vsz = vsize;
454 #endif
455 #ifndef NOCOMP
456 for (k = 1; k <= Air[n]; k++)
457 Mask[vsize - k] = 1; /* pad */
458 Mask[vsize-j] = 1; /* _pid */
459 #endif
460 hmax = max(hmax, vsize);
461 if (vsize >= VECTORSZ)
462 { printf("pan: error, VECTORSZ too small, recompile pan.c");
463 printf(" with -DVECTORSZ=N with N>%d\n", (int) vsize);
464 Uerror("aborting");
465 }
466 memset((char *)pptr(h), 0, j);
467 this = pptr(h);
468 if (BASE > 0 && h > 0)
469 ((P0 *)this)->_pid = h-BASE;
470 else
471 ((P0 *)this)->_pid = h;
/* per-proctype initialization: set type tag _t, initial control
 * state _p (marked reached), and initial values of all locals */
472 switch (n) {
473 case 5: /* np_ */
474 ((P5 *)pptr(h))->_t = 5;
475 ((P5 *)pptr(h))->_p = 0;
476 reached5[0] = 1;
477 accpstate[5][1] = 1;
478 break;
479 case 4: /* :init: */
480 ((P4 *)pptr(h))->_t = 4;
481 ((P4 *)pptr(h))->_p = 42; reached4[42]=1;
482 /* params: */
483 /* locals: */
484 ((P4 *)pptr(h))->i = 0;
485 ((P4 *)pptr(h))->j = 0;
486 ((P4 *)pptr(h))->sum = 0;
487 ((P4 *)pptr(h))->commit_sum = 0;
488 #ifdef VAR_RANGES
489 logval(":init::i", ((P4 *)pptr(h))->i);
490 logval(":init::j", ((P4 *)pptr(h))->j);
491 logval(":init::sum", ((P4 *)pptr(h))->sum);
492 logval(":init::commit_sum", ((P4 *)pptr(h))->commit_sum);
493 #endif
494 #ifdef HAS_CODE
495 locinit4(h);
496 #endif
497 break;
498 case 3: /* cleaner */
499 ((P3 *)pptr(h))->_t = 3;
500 ((P3 *)pptr(h))->_p = 8; reached3[8]=1;
501 /* params: */
502 /* locals: */
503 #ifdef VAR_RANGES
504 #endif
505 #ifdef HAS_CODE
506 locinit3(h);
507 #endif
508 break;
509 case 2: /* reader */
510 ((P2 *)pptr(h))->_t = 2;
511 ((P2 *)pptr(h))->_p = 28; reached2[28]=1;
512 /* params: */
513 /* locals: */
514 ((P2 *)pptr(h))->i = 0;
515 ((P2 *)pptr(h))->j = 0;
516 ((P2 *)pptr(h))->tmp_retrieve = 0;
517 ((P2 *)pptr(h))->lwrite_off = 0;
518 ((P2 *)pptr(h))->lcommit_count = 0;
519 #ifdef VAR_RANGES
520 logval("reader:i", ((P2 *)pptr(h))->i);
521 logval("reader:j", ((P2 *)pptr(h))->j);
522 logval("reader:tmp_retrieve", ((P2 *)pptr(h))->tmp_retrieve);
523 logval("reader:lwrite_off", ((P2 *)pptr(h))->lwrite_off);
524 logval("reader:lcommit_count", ((P2 *)pptr(h))->lcommit_count);
525 #endif
526 #ifdef HAS_CODE
527 locinit2(h);
528 #endif
529 break;
530 case 1: /* tracer */
531 ((P1 *)pptr(h))->_t = 1;
532 ((P1 *)pptr(h))->_p = 3; reached1[3]=1;
533 /* params: */
534 /* locals: */
535 ((P1 *)pptr(h))->size = 1;
536 ((P1 *)pptr(h))->prev_off = 0;
537 ((P1 *)pptr(h))->new_off = 0;
538 ((P1 *)pptr(h))->tmp_commit = 0;
539 ((P1 *)pptr(h))->i = 0;
540 ((P1 *)pptr(h))->j = 0;
541 #ifdef VAR_RANGES
542 logval("tracer:size", ((P1 *)pptr(h))->size);
543 logval("tracer:prev_off", ((P1 *)pptr(h))->prev_off);
544 logval("tracer:new_off", ((P1 *)pptr(h))->new_off);
545 logval("tracer:tmp_commit", ((P1 *)pptr(h))->tmp_commit);
546 logval("tracer:i", ((P1 *)pptr(h))->i);
547 logval("tracer:j", ((P1 *)pptr(h))->j);
548 #endif
549 #ifdef HAS_CODE
550 locinit1(h);
551 #endif
552 break;
553 case 0: /* switcher */
554 ((P0 *)pptr(h))->_t = 0;
555 ((P0 *)pptr(h))->_p = 11; reached0[11]=1;
556 /* params: */
557 /* locals: */
558 ((P0 *)pptr(h))->prev_off = 0;
559 ((P0 *)pptr(h))->new_off = 0;
560 ((P0 *)pptr(h))->tmp_commit = 0;
561 ((P0 *)pptr(h))->size = 0;
562 #ifdef VAR_RANGES
563 logval("switcher:prev_off", ((P0 *)pptr(h))->prev_off);
564 logval("switcher:new_off", ((P0 *)pptr(h))->new_off);
565 logval("switcher:tmp_commit", ((P0 *)pptr(h))->tmp_commit);
566 logval("switcher:size", ((P0 *)pptr(h))->size);
567 #endif
568 #ifdef HAS_CODE
569 locinit0(h);
570 #endif
571 break;
572 }
573 this = o_this;
574 return h-BASE;
575 #ifndef NOBOUNDCHECK
576 #undef Index
577 #define Index(x, y) Boundcheck(x, y, II, tt, t)
578 #endif
579 }
580
581 #if defined(BITSTATE) && defined(COLLAPSE)
582 /* just to allow compilation, to generate the error */
/* Stub: BITSTATE and COLLAPSE cannot be combined; this exists only so
 * the file compiles and the configuration error can be reported. */
long
col_p(int i, char *z)
{
	(void) i;
	(void) z;	/* unused in stub */
	return 0L;
}
/* Stub: BITSTATE and COLLAPSE cannot be combined; this exists only so
 * the file compiles and the configuration error can be reported. */
long
col_q(int i, char *z)
{
	(void) i;
	(void) z;	/* unused in stub */
	return 0L;
}
585 #endif
586 #ifndef BITSTATE
587 #ifdef COLLAPSE
/* COLLAPSE-mode compression of process i's local state: copy the bytes
 * of the process block that are not masked out, zero-pad to the word
 * size, and either write the result into caller buffer z (returning
 * its length) or map it to a small integer via ordinal(). */
588 long
589 col_p(int i, char *z)
590 { int j, k; unsigned long ordinal(char *, long, short);
591 char *x, *y;
592 P0 *ptr = (P0 *) pptr(i);
593 switch (ptr->_t) {
594 case 0: j = sizeof(P0); break;
595 case 1: j = sizeof(P1); break;
596 case 2: j = sizeof(P2); break;
597 case 3: j = sizeof(P3); break;
598 case 4: j = sizeof(P4); break;
599 case 5: j = sizeof(P5); break;
600 default: Uerror("bad proctype - collapse");
601 }
602 if (z) x = z; else x = scratch;
603 y = (char *) ptr; k = proc_offset[i];
604 for ( ; j > 0; j--, y++)
605 if (!Mask[k++]) *x++ = *y;
606 for (j = 0; j < WS-1; j++)
607 *x++ = 0;
608 x -= j;
609 if (z) return (long) (x - z);
610 return ordinal(scratch, x-scratch, (short) (2+ptr->_t));
612 #endif
613 #endif
/* Top-level driver: zero the state vector, build the per-proctype
 * state tables (reached/accept/progress/stop/vis/map), start the
 * never claim and active processes, allocate the hash/stack storage
 * for the selected search mode, and run do_the_search().  Under
 * BITSTATE it repeats the search with successive hash constants. */
614 void
615 run(void)
616 { /* int i; */
617 memset((char *)&now, 0, sizeof(State));
618 vsize = (unsigned long) (sizeof(State) - VECTORSZ);
619 #ifndef NOVSZ
620 now._vsz = vsize;
621 #endif
622 /* optional provisioning statements, e.g. to */
623 /* set hidden variables, used as constants */
624 #ifdef PROV
625 #include PROV
626 #endif
627 settable();
628 Maxbody = max(Maxbody, ((int) sizeof(P0)));
629 Maxbody = max(Maxbody, ((int) sizeof(P1)));
630 Maxbody = max(Maxbody, ((int) sizeof(P2)));
631 Maxbody = max(Maxbody, ((int) sizeof(P3)));
632 Maxbody = max(Maxbody, ((int) sizeof(P4)));
633 Maxbody = max(Maxbody, ((int) sizeof(P5)));
634 reached[0] = reached0;
635 reached[1] = reached1;
636 reached[2] = reached2;
637 reached[3] = reached3;
638 reached[4] = reached4;
639 reached[5] = reached5;
640 accpstate[0] = (uchar *) emalloc(nstates0);
641 accpstate[1] = (uchar *) emalloc(nstates1);
642 accpstate[2] = (uchar *) emalloc(nstates2);
643 accpstate[3] = (uchar *) emalloc(nstates3);
644 accpstate[4] = (uchar *) emalloc(nstates4);
645 accpstate[5] = (uchar *) emalloc(nstates5);
646 progstate[0] = (uchar *) emalloc(nstates0);
647 progstate[1] = (uchar *) emalloc(nstates1);
648 progstate[2] = (uchar *) emalloc(nstates2);
649 progstate[3] = (uchar *) emalloc(nstates3);
650 progstate[4] = (uchar *) emalloc(nstates4);
651 progstate[5] = (uchar *) emalloc(nstates5);
652 loopstate0 = loopstate[0] = (uchar *) emalloc(nstates0);
653 loopstate1 = loopstate[1] = (uchar *) emalloc(nstates1);
654 loopstate2 = loopstate[2] = (uchar *) emalloc(nstates2);
655 loopstate3 = loopstate[3] = (uchar *) emalloc(nstates3);
656 loopstate4 = loopstate[4] = (uchar *) emalloc(nstates4);
657 loopstate5 = loopstate[5] = (uchar *) emalloc(nstates5);
658 stopstate[0] = (uchar *) emalloc(nstates0);
659 stopstate[1] = (uchar *) emalloc(nstates1);
660 stopstate[2] = (uchar *) emalloc(nstates2);
661 stopstate[3] = (uchar *) emalloc(nstates3);
662 stopstate[4] = (uchar *) emalloc(nstates4);
663 stopstate[5] = (uchar *) emalloc(nstates5);
664 visstate[0] = (uchar *) emalloc(nstates0);
665 visstate[1] = (uchar *) emalloc(nstates1);
666 visstate[2] = (uchar *) emalloc(nstates2);
667 visstate[3] = (uchar *) emalloc(nstates3);
668 visstate[4] = (uchar *) emalloc(nstates4);
669 visstate[5] = (uchar *) emalloc(nstates5);
670 mapstate[0] = (short *) emalloc(nstates0 * sizeof(short));
671 mapstate[1] = (short *) emalloc(nstates1 * sizeof(short));
672 mapstate[2] = (short *) emalloc(nstates2 * sizeof(short));
673 mapstate[3] = (short *) emalloc(nstates3 * sizeof(short));
674 mapstate[4] = (short *) emalloc(nstates4 * sizeof(short));
675 mapstate[5] = (short *) emalloc(nstates5 * sizeof(short));
676 #ifdef HAS_CODE
677 #ifdef HAS_CODE
678 #ifdef HAS_CODE
679 #ifdef HAS_CODE
680 #ifdef HAS_CODE
681 #ifdef HAS_CODE
682 NrStates[0] = nstates0;
683 NrStates[1] = nstates1;
684 NrStates[2] = nstates2;
685 NrStates[3] = nstates3;
686 NrStates[4] = nstates4;
687 NrStates[5] = nstates5;
688 #endif
689 #endif
690 #endif
691 #endif
692 #endif
693 #endif
694 stopstate[0][endstate0] = 1;
695 stopstate[1][endstate1] = 1;
696 stopstate[2][endstate2] = 1;
697 stopstate[3][endstate3] = 1;
698 stopstate[4][endstate4] = 1;
699 stopstate[5][endstate5] = 1;
700 stopstate[1][48] = 1;
/* note: proctype 5 (np_) is deliberately not passed to retrans */
701 retrans(0, nstates0, start0, src_ln0, reached0, loopstate0);
702 retrans(1, nstates1, start1, src_ln1, reached1, loopstate1);
703 retrans(2, nstates2, start2, src_ln2, reached2, loopstate2);
704 retrans(3, nstates3, start3, src_ln3, reached3, loopstate3);
705 retrans(4, nstates4, start4, src_ln4, reached4, loopstate4);
706 if (state_tables)
707 { printf("\nTransition Type: ");
708 printf("A=atomic; D=d_step; L=local; G=global\n");
709 printf("Source-State Labels: ");
710 printf("p=progress; e=end; a=accept;\n");
711 #ifdef MERGED
712 printf("Note: statement merging was used. Only the first\n");
713 printf(" stmnt executed in each merge sequence is shown\n");
714 printf(" (use spin -a -o3 to disable statement merging)\n");
715 #endif
716 pan_exit(0);
717 }
718 iniglobals();
719 #if defined(VERI) && !defined(NOREDUCE) && !defined(NP)
720 if (!state_tables
721 #ifdef HAS_CODE
722 && !readtrail
723 #endif
724 #if NCORE>1
725 && core_id == 0
726 #endif
727 )
728 { printf("warning: for p.o. reduction to be valid ");
729 printf("the never claim must be stutter-invariant\n");
730 printf("(never claims generated from LTL ");
731 printf("formulae are stutter-invariant)\n");
732 }
733 #endif
734 UnBlock; /* disable rendez-vous */
735 #ifdef BITSTATE
736 if (udmem)
737 { udmem *= 1024L*1024L;
738 #if NCORE>1
739 if (!readtrail)
740 { void init_SS(unsigned long);
741 init_SS((unsigned long) udmem);
742 } else
743 #endif
744 SS = (uchar *) emalloc(udmem);
745 bstore = bstore_mod;
746 } else
747 #if NCORE>1
748 { void init_SS(unsigned long);
749 init_SS(ONE_L<<(ssize-3));
750 }
751 #else
752 SS = (uchar *) emalloc(ONE_L<<(ssize-3));
753 #endif
754 #else
755 hinit();
756 #endif
757 #if defined(FULLSTACK) && defined(BITSTATE)
758 onstack_init();
759 #endif
760 #if defined(CNTRSTACK) && !defined(BFS)
761 LL = (uchar *) emalloc(ONE_L<<(ssize-3));
762 #endif
763 stack = ( Stack *) emalloc(sizeof(Stack));
764 svtack = (Svtack *) emalloc(sizeof(Svtack));
765 /* a place to point for Pptr of non-running procs: */
766 noptr = (uchar *) emalloc(Maxbody * sizeof(char));
767 #ifdef SVDUMP
768 if (vprefix > 0)
769 write(svfd, (uchar *) &vprefix, sizeof(int));
770 #endif
771 #ifdef VERI
772 Addproc(VERI); /* never - pid = 0 */
773 #endif
774 active_procs(); /* started after never */
775 #ifdef EVENT_TRACE
776 now._event = start_event;
777 reached[EVENT_TRACE][start_event] = 1;
778 #endif
779 #ifdef HAS_CODE
780 globinit();
781 #endif
782 #ifdef BITSTATE
783 go_again:
784 #endif
785 do_the_search();
786 #ifdef BITSTATE
/* bitstate: rerun the search with the next hash constant, after
 * clearing the bit arrays and resetting the statistics counters */
787 if (--Nrun > 0 && HASH_CONST[++HASH_NR])
788 { printf("Run %d:\n", HASH_NR);
789 wrap_stats();
790 printf("\n");
791 memset(SS, 0, ONE_L<<(ssize-3));
792 #ifdef CNTRSTACK
793 memset(LL, 0, ONE_L<<(ssize-3));
794 #endif
795 #ifdef FULLSTACK
796 memset((uchar *) S_Tab, 0,
797 maxdepth*sizeof(struct H_el *));
798 #endif
799 nstates=nlinks=truncs=truncs2=ngrabs = 0;
800 nlost=nShadow=hcmp = 0;
801 Fa=Fh=Zh=Zn = 0;
802 PUT=PROBE=ZAPS=Ccheck=Cholds = 0;
803 goto go_again;
804 }
805 #endif
806 }
807 #ifdef HAS_PROVIDED
808 int provided(int, uchar, int, Trans *);
809 #endif
810 #if NCORE>1
811 #define GLOBAL_LOCK (0)
812 #ifndef CS_N
813 #define CS_N (256*NCORE)
814 #endif
815 #ifdef NGQ
816 #define NR_QS (NCORE)
817 #define CS_NR (CS_N+1) /* 2^N + 1, nr critical sections */
818 #define GQ_RD GLOBAL_LOCK
819 #define GQ_WR GLOBAL_LOCK
820 #define CS_ID (1 + (int) (j1 & (CS_N-1))) /* mask: 2^N - 1, zero reserved */
821 #define QLOCK(n) (1+n)
822 #else
823 #define NR_QS (NCORE+1)
824 #define CS_NR (CS_N+3)
825 #define GQ_RD (1)
826 #define GQ_WR (2)
827 #define CS_ID (3 + (int) (j1 & (CS_N-1)))
828 #define QLOCK(n) (3+n)
829 #endif
830
831 void e_critical(int);
832 void x_critical(int);
833
834 #ifndef SEP_STATE
835 #define enter_critical(w) e_critical(w)
836 #define leave_critical(w) x_critical(w)
837 #else
838 #ifdef NGQ
839 #define enter_critical(w) { if (w < 1+NCORE) e_critical(w); }
840 #define leave_critical(w) { if (w < 1+NCORE) x_critical(w); }
841 #else
842 #define enter_critical(w) { if (w < 3+NCORE) e_critical(w); }
843 #define leave_critical(w) { if (w < 3+NCORE) x_critical(w); }
844 #endif
845 #endif
846
/* Multi-core build: printf prefixed with the core id, serialized by
 * the global lock so output from different cores does not interleave.
 * Always returns 1. */
847 int
848 cpu_printf(const char *fmt, ...)
849 { va_list args;
850 enter_critical(GLOBAL_LOCK); /* printing */
851 printf("cpu%d: ", core_id);
852 fflush(stdout);
853 va_start(args, fmt);
854 vprintf(fmt, args);
855 va_end(args);
856 fflush(stdout);
857 leave_critical(GLOBAL_LOCK);
858 return 1;
859 }
860 #else
/* Single-core build: plain printf-style wrapper; always returns 1. */
int
cpu_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vprintf(fmt, ap);	/* forward directly to stdout */
	va_end(ap);
	return 1;
}
869 #endif
/* Model-level Printf: evaluates its arguments (they may contain run
 * statements) but suppresses output during verification unless the
 * trail is being replayed (HAS_CODE/readtrail) or the pan binary was
 * compiled with -DPRINTF.  Always returns 1. */
870 int
871 Printf(const char *fmt, ...)
872 { /* Make sure the args to Printf
873 * are always evaluated (e.g., they
874 * could contain a run stmnt)
875 * but do not generate the output
876 * during verification runs
877 * unless explicitly wanted
878 * If this fails on your system
879 * compile SPIN itself -DPRINTF
880 * and this code is not generated
881 */
882 #ifdef HAS_CODE
883 if (readtrail)
884 { va_list args;
885 va_start(args, fmt);
886 vprintf(fmt, args);
887 va_end(args);
888 return 1;
889 }
890 #endif
891 #ifdef PRINTF
892 va_list args;
893 va_start(args, fmt);
894 vprintf(fmt, args);
895 va_end(args);
896 #endif
897 return 1;
898 }
899 extern void printm(int);
900 #ifndef SC
901 #define getframe(i) &trail[i];
902 #else
903 static long HHH, DDD, hiwater;
904 static long CNT1, CNT2;
905 static int stackwrite;
906 static int stackread;
907 static Trail frameptr;
/* SC (stack-cycling) build: return the trail frame at depth d.  Recent
 * frames live in the in-memory trail[]; older frames were swapped out
 * to stackfile and are read back one at a time into the static
 * frameptr (so the returned pointer is only valid until the next
 * swapped-frame lookup).  Exits via wrapup() on I/O errors. */
908 Trail *
909 getframe(int d)
910 {
911 if (CNT1 == CNT2)
912 return &trail[d];
913 
/* frames below (CNT1-CNT2)*DDD were written out; higher depths are
 * still resident, offset by the number of swapped-out frames */
914 if (d >= (CNT1-CNT2)*DDD)
915 return &trail[d - (CNT1-CNT2)*DDD];
916 
917 if (!stackread
918 && (stackread = open(stackfile, 0)) < 0)
919 { printf("getframe: cannot open %s\n", stackfile);
920 wrapup();
921 }
922 if (lseek(stackread, d* (off_t) sizeof(Trail), SEEK_SET) == -1
923 || read(stackread, &frameptr, sizeof(Trail)) != sizeof(Trail))
924 { printf("getframe: frame read error\n");
925 wrapup();
926 }
927 return &frameptr;
928 }
929 #endif
930 #if !defined(SAFETY) && !defined(BITSTATE)
931 #if !defined(FULLSTACK) || defined(MA)
932 #define depth_of(x) A_depth /* an estimate */
933 #else
/* Return the search depth at which stored state s sits on the current
 * DFS stack, by scanning the trail frames; falls back to depthfound
 * (and reports an internal error) if s is not found. */
934 int
935 depth_of(struct H_el *s)
936 { Trail *t; int d;
937 for (d = 0; d <= A_depth; d++)
938 { t = getframe(d);
939 if (s == t->ostate)
940 return d;
941 }
942 printf("pan: cannot happen, depth_of\n");
943 return depthfound;
944 }
945 #endif
946 #endif
947 #if NCORE>1
948 extern void cleanup_shm(int);
949 volatile unsigned int *search_terminated; /* to signal early termination */
950 #endif
/* Orderly termination of the verifier: signal other cores to stop,
 * dump disk statistics and release shared memory (NCORE builds), stop
 * the timer, and exit.  val==2 means "exit 0 without stopping the
 * timer" (used when the timer was never started). */
951 void
952 pan_exit(int val)
953 { void stop_timer(void);
954 if (signoff)
955 { printf("--end of output--\n");
956 }
957 #if NCORE>1
958 if (search_terminated != NULL)
959 { *search_terminated |= 1; /* pan_exit */
960 }
961 #ifdef USE_DISK
962 { void dsk_stats(void);
963 dsk_stats();
964 }
965 #endif
966 if (!state_tables && !readtrail)
967 { cleanup_shm(1);
968 }
969 #endif
970 if (val == 2)
971 { val = 0;
972 } else
973 { stop_timer();
974 }
975 exit(val);
976 }
977 #ifdef HAS_CODE
/* Replace every "{c_code...}" fragment in statement text s with its
 * source text from the code_lookup table, ping-ponging between two
 * 2048-byte static buffers.  Returns s unchanged if it is too long,
 * a fragment is unterminated, or a substitution would overflow; the
 * returned pointer otherwise refers to static storage (overwritten
 * by the next call). */
978 char *
979 transmognify(char *s)
980 { char *v, *w;
981 static char buf[2][2048];
982 int i, toggle = 0;
983 if (!s || strlen(s) > 2047) return s;
984 memset(buf[0], 0, 2048);
985 memset(buf[1], 0, 2048);
986 strcpy(buf[toggle], s);
987 while ((v = strstr(buf[toggle], "{c_code")))
/* split current buffer at the fragment: prefix stays, v points at
 * the fragment body, w at the text after the closing brace */
988 { *v = '\0'; v++;
989 strcpy(buf[1-toggle], buf[toggle]);
990 for (w = v; *w != '}' && *w != '\0'; w++) /* skip */;
991 if (*w != '}') return s;
992 *w = '\0'; w++;
993 for (i = 0; code_lookup[i].c; i++)
994 if (strcmp(v, code_lookup[i].c) == 0
995 && strlen(v) == strlen(code_lookup[i].c))
996 { if (strlen(buf[1-toggle])
997 + strlen(code_lookup[i].t)
998 + strlen(w) > 2047)
999 return s;
1000 strcat(buf[1-toggle], code_lookup[i].t);
1001 break;
1002 }
1003 strcat(buf[1-toggle], w);
1004 toggle = 1 - toggle;
1005 }
1006 buf[toggle][2047] = '\0';
1007 return buf[toggle];
1008 }
1009 #else
/* Without HAS_CODE there are no embedded c_code fragments to expand:
 * the identity mapping suffices. */
char *
transmognify(char *s)
{
	return s;
}
1011 #endif
1012 #ifdef HAS_CODE
/* Print the (transmognified) source text of every transition out of
 * state tt of proctype ot, one per indented line, with embedded
 * newlines rendered as "\n". */
1013 void
1014 add_src_txt(int ot, int tt)
1015 { Trans *t;
1016 char *q;
1017 
1018 for (t = trans[ot][tt]; t; t = t->nxt)
1019 { printf("\t\t");
1020 q = transmognify(t->tp);
1021 for ( ; q && *q; q++)
1022 if (*q == '\n')
1023 printf("\\n");
1024 else
1025 putchar(*q);
1026 printf("\n");
1027 }
1028 }
/* Final report at the end of trail replay: for each running process
 * (or only `onlyproc` if selected) print its current source line,
 * control state and whether it is a valid end state, then dump the
 * C-tracked globals/locals and exit.  Reentrant calls are ignored
 * via wrap_in_progress. */
1029 void
1030 wrap_trail(void)
1031 { static int wrap_in_progress = 0;
1032 int i; short II;
1033 P0 *z;
1034 
1035 if (wrap_in_progress++) return;
1036 
1037 printf("spin: trail ends after %ld steps\n", depth);
1038 if (onlyproc >= 0)
1039 { if (onlyproc >= now._nr_pr) { pan_exit(0); }
1040 II = onlyproc;
1041 z = (P0 *)pptr(II);
1042 printf("%3ld: proc %d (%s) ",
1043 depth, II, procname[z->_t]);
1044 for (i = 0; src_all[i].src; i++)
1045 if (src_all[i].tp == (int) z->_t)
1046 { printf(" line %3d",
1047 src_all[i].src[z->_p]);
1048 break;
1049 }
1050 printf(" (state %2d)", z->_p);
1051 if (!stopstate[z->_t][z->_p])
1052 printf(" (invalid end state)");
1053 printf("\n");
1054 add_src_txt(z->_t, z->_p);
1055 pan_exit(0);
1056 }
1057 printf("#processes %d:\n", now._nr_pr);
1058 if (depth < 0) depth = 0;
1059 for (II = 0; II < now._nr_pr; II++)
1060 { z = (P0 *)pptr(II);
1061 printf("%3ld: proc %d (%s) ",
1062 depth, II, procname[z->_t]);
1063 for (i = 0; src_all[i].src; i++)
1064 if (src_all[i].tp == (int) z->_t)
1065 { printf(" line %3d",
1066 src_all[i].src[z->_p]);
1067 break;
1068 }
1069 printf(" (state %2d)", z->_p);
1070 if (!stopstate[z->_t][z->_p])
1071 printf(" (invalid end state)");
1072 printf("\n");
1073 add_src_txt(z->_t, z->_p);
1074 }
1075 c_globals();
1076 for (II = 0; II < now._nr_pr; II++)
1077 { z = (P0 *)pptr(II);
1078 c_locals(II, z->_t);
1079 }
1080 #ifdef ON_EXIT
1081 ON_EXIT;
1082 #endif
1083 pan_exit(0);
1084 }
/* Locate and open the error-trail file to replay.  If -t<file> gave an
 * explicit name it is used directly.  Otherwise two passes are made
 * over the candidate names (with and without the executable's suffix,
 * and per-core "cpuN_trail" variants for NCORE builds): the first
 * pass counts candidates and aborts if there is not exactly one, the
 * second opens it.  Exits on failure; returns the open FILE* on
 * success. */
1085 FILE *
1086 findtrail(void)
1087 { FILE *fd;
1088 char fnm[512], *q;
1089 char MyFile[512];
1090 char MySuffix[16];
1091 int try_core;
1092 int candidate_files;
1093 
1094 if (trailfilename != NULL)
1095 { fd = fopen(trailfilename, "r");
1096 if (fd == NULL)
1097 { printf("pan: cannot find %s\n", trailfilename);
1098 pan_exit(1);
1099 } /* else */
1100 goto success;
1101 }
1102 talk:
1103 try_core = 1;
1104 candidate_files = 0;
1105 tprefix = "trail";
1106 strcpy(MyFile, TrailFile);
1107 do { /* see if there's more than one possible trailfile */
1108 if (whichtrail)
1109 { sprintf(fnm, "%s%d.%s",
1110 MyFile, whichtrail, tprefix);
1111 fd = fopen(fnm, "r");
1112 if (fd != NULL)
1113 { candidate_files++;
1114 if (verbose==100)
1115 printf("trail%d: %s\n",
1116 candidate_files, fnm);
1117 fclose(fd);
1118 }
1119 if ((q = strchr(MyFile, '.')) != NULL)
1120 { *q = '\0';
1121 sprintf(fnm, "%s%d.%s",
1122 MyFile, whichtrail, tprefix);
1123 *q = '.';
1124 fd = fopen(fnm, "r");
1125 if (fd != NULL)
1126 { candidate_files++;
1127 if (verbose==100)
1128 printf("trail%d: %s\n",
1129 candidate_files, fnm);
1130 fclose(fd);
1131 } }
1132 } else
1133 { sprintf(fnm, "%s.%s", MyFile, tprefix);
1134 fd = fopen(fnm, "r");
1135 if (fd != NULL)
1136 { candidate_files++;
1137 if (verbose==100)
1138 printf("trail%d: %s\n",
1139 candidate_files, fnm);
1140 fclose(fd);
1141 }
1142 if ((q = strchr(MyFile, '.')) != NULL)
1143 { *q = '\0';
1144 sprintf(fnm, "%s.%s", MyFile, tprefix);
1145 *q = '.';
1146 fd = fopen(fnm, "r");
1147 if (fd != NULL)
1148 { candidate_files++;
1149 if (verbose==100)
1150 printf("trail%d: %s\n",
1151 candidate_files, fnm);
1152 fclose(fd);
1153 } } }
1154 tprefix = MySuffix;
1155 sprintf(tprefix, "cpu%d_trail", try_core++);
1156 } while (try_core <= NCORE);
1157 
/* more or fewer than one candidate: rerun the scan verbosely once
 * to list them, then ask the user to disambiguate */
1158 if (candidate_files != 1)
1159 { if (verbose != 100)
1160 { printf("error: there are %d trail files:\n",
1161 candidate_files);
1162 verbose = 100;
1163 goto talk;
1164 } else
1165 { printf("pan: rm or mv all except one\n");
1166 exit(1);
1167 } }
1168 try_core = 1;
1169 strcpy(MyFile, TrailFile); /* restore */
1170 tprefix = "trail";
1171 try_again:
1172 if (whichtrail)
1173 { sprintf(fnm, "%s%d.%s", MyFile, whichtrail, tprefix);
1174 fd = fopen(fnm, "r");
1175 if (fd == NULL && (q = strchr(MyFile, '.')))
1176 { *q = '\0';
1177 sprintf(fnm, "%s%d.%s",
1178 MyFile, whichtrail, tprefix);
1179 *q = '.';
1180 fd = fopen(fnm, "r");
1181 }
1182 } else
1183 { sprintf(fnm, "%s.%s", MyFile, tprefix);
1184 fd = fopen(fnm, "r");
1185 if (fd == NULL && (q = strchr(MyFile, '.')))
1186 { *q = '\0';
1187 sprintf(fnm, "%s.%s", MyFile, tprefix);
1188 *q = '.';
1189 fd = fopen(fnm, "r");
1190 } }
1191 if (fd == NULL)
1192 { if (try_core < NCORE)
1193 { tprefix = MySuffix;
1194 sprintf(tprefix, "cpu%d_trail", try_core++);
1195 goto try_again;
1196 }
1197 printf("pan: cannot find trailfile %s\n", fnm);
1198 pan_exit(1);
1199 }
1200 success:
1201 #if NCORE>1 && defined(SEP_STATE)
1202 { void set_root(void); /* for partial traces from local root */
1203 set_root();
1204 }
1205 #endif
1206 return fd;
1207 }
1208
1209 uchar do_transit(Trans *, short);
1210
/*
 * Replay a previously recorded error trail (counter-example).
 * Reads "depth:pid:transition-id" triples from the trail file located by
 * findtrail(), re-executes each transition via do_transit(), and prints
 * every step according to the verbose / coltrace / silent settings.
 */
void
getrail(void)
{	FILE *fd;
	char *q;
	int i, t_id, lastnever=-1; short II;
	Trans *t;
	P0 *z;

	fd = findtrail();	/* exits if unsuccessful */
	while (fscanf(fd, "%ld:%d:%d\n", &depth, &i, &t_id) == 3)
	{	if (depth == -1)
			printf("<<<<<START OF CYCLE>>>>>\n");
		if (depth < 0)	/* negative depths are markers, not real steps */
			continue;
		if (i > now._nr_pr)
		{	printf("pan: Error, proc %d invalid pid ", i);
			printf("transition %d\n", t_id);
			break;
		}
		II = i;
		z = (P0 *)pptr(II);
		/* locate the transition with this id in the proc's current state */
		for (t = trans[z->_t][z->_p]; t; t = t->nxt)
			if (t->t_id == (T_ID) t_id)
				break;
		if (!t)
		{	/* not found here: scan all states of this proctype to resync */
			for (i = 0; i < NrStates[z->_t]; i++)
			{	t = trans[z->_t][i];
				if (t && t->t_id == (T_ID) t_id)
				{	printf("\tRecovered at state %d\n", i);
					z->_p = i;
					goto recovered;
			}	}
			printf("pan: Error, proc %d type %d state %d: ",
				II, z->_t, z->_p);
			printf("transition %d not found\n", t_id);
			printf("pan: list of possible transitions in this process:\n");
			if (z->_t >= 0 && z->_t <= _NP_)
			for (t = trans[z->_t][z->_p]; t; t = t->nxt)
				printf("	t_id %d -- case %d, [%s]\n",
					t->t_id, t->forw, t->tp);
			break; /* pan_exit(1); */
		}
recovered:
		q = transmognify(t->tp);
		if (gui) simvals[0] = '\0';
		this = pptr(II);
		trpt->tau |= 1;	/* mark timeout tried, as in normal replay */
		if (!do_transit(t, II))
		{	if (onlyproc >= 0 && II != onlyproc)
				goto moveon;
			printf("pan: error, next transition UNEXECUTABLE on replay\n");
			printf("	most likely causes: missing c_track statements\n");
			printf("	or illegal side-effects in c_expr statements\n");
		}
		if (onlyproc >= 0 && II != onlyproc)
			goto moveon;
		if (verbose)
		{	printf("%3ld: proc %2d (%s) ", depth, II, procname[z->_t]);
			for (i = 0; src_all[i].src; i++)
				if (src_all[i].tp == (int) z->_t)
				{	printf(" line %3d \"%s\" ",
						src_all[i].src[z->_p], PanSource);
					break;
				}
			printf("(state %d) trans {%d,%d} [%s]\n",
				z->_p, t_id, t->forw, q?q:"");
			c_globals();
			for (i = 0; i < now._nr_pr; i++)
			{	c_locals(i, ((P0 *)pptr(i))->_t);
			}
		} else
		if (strcmp(procname[z->_t], ":never:") == 0)
		{	/* never claim moves are shown in MSC form only */
			if (lastnever != (int) z->_p)
			{	for (i = 0; src_all[i].src; i++)
					if (src_all[i].tp == (int) z->_t)
					{	printf("MSC: ~G %d\n",
							src_all[i].src[z->_p]);
						break;
					}
				if (!src_all[i].src)
					printf("MSC: ~R %d\n", z->_p);
			}
			lastnever = z->_p;
			goto sameas;
		} else
		if (strcmp(procname[z->_t], ":np_:") != 0)
		{
sameas:			if (no_rck) goto moveon;
			if (coltrace)
			{	printf("%ld: ", depth);
				for (i = 0; i < II; i++)
					printf("\t\t");
				printf("%s(%d):", procname[z->_t], II);
				printf("[%s]\n", q?q:"");
			} else if (!silent)
			{	if (strlen(simvals) > 0) {
				printf("%3ld: proc %2d (%s)",
					depth, II, procname[z->_t]);
				for (i = 0; src_all[i].src; i++)
					if (src_all[i].tp == (int) z->_t)
					{	printf(" line %3d \"%s\" ",
							src_all[i].src[z->_p], PanSource);
						break;
					}
				printf("(state %d) [values: %s]\n", z->_p, simvals);
				}
				printf("%3ld: proc %2d (%s)",
					depth, II, procname[z->_t]);
				for (i = 0; src_all[i].src; i++)
					if (src_all[i].tp == (int) z->_t)
					{	printf(" line %3d \"%s\" ",
							src_all[i].src[z->_p], PanSource);
						break;
					}
				printf("(state %d) [%s]\n", z->_p, q?q:"");
				/* printf("\n"); */
		}	}
moveon:		z->_p = t->st;	/* advance the process to the target state */
	}
	wrap_trail();
}
1332 #endif
1333 int
1334 f_pid(int pt)
1335 { int i;
1336 P0 *z;
1337 for (i = 0; i < now._nr_pr; i++)
1338 { z = (P0 *)pptr(i);
1339 if (z->_t == (unsigned) pt)
1340 return BASE+z->_pid;
1341 }
1342 return -1;
1343 }
1344 #ifdef VERI
1345 void check_claim(int);
1346 #endif
1347
1348 #if !defined(HASH64) && !defined(HASH32)
1349 #define HASH32
1350 #endif
1351 #if defined(HASH32) && defined(SAFETY) && !defined(SFH) && !defined(SPACE)
1352 #define SFH
1353 #endif
1354 #if defined(SFH) && (defined(BITSTATE) || defined(COLLAPSE) || defined(HC) || defined(HASH64))
1355 #undef SFH
1356 #endif
1357 #if defined(SFH) && !defined(NOCOMP)
1358 #define NOCOMP /* go for speed */
1359 #endif
1360 #if NCORE>1 && !defined(GLOB_HEAP)
1361 #define SEP_HEAP /* version 5.1.2 */
1362 #endif
1363
1364 #ifdef BITSTATE
1365 int
1366 bstore_mod(char *v, int n) /* hasharray size not a power of two */
1367 { unsigned long x, y;
1368 unsigned int i = 1;
1369
1370 d_hash((uchar *) v, n); /* sets j3, j4, K1, K2 */
1371 x = K1; y = j3;
1372 for (;;)
1373 { if (!(SS[x%udmem]&(1<<y))) break;
1374 if (i == hfns) {
1375 #ifdef DEBUG
1376 printf("Old bitstate\n");
1377 #endif
1378 return 1;
1379 }
1380 x = (x + K2 + i);
1381 y = (y + j4) & 7;
1382 i++;
1383 }
1384 #ifdef RANDSTOR
1385 if (rand()%100 > RANDSTOR) return 0;
1386 #endif
1387 for (;;)
1388 { SS[x%udmem] |= (1<<y);
1389 if (i == hfns) break;
1390 x = (x + K2 + i);
1391 y = (y + j4) & 7;
1392 i++;
1393 }
1394 #ifdef DEBUG
1395 printf("New bitstate\n");
1396 #endif
1397 if (now._a_t&1)
1398 { nShadow++;
1399 }
1400 return 0;
1401 }
1402 int
1403 bstore_reg(char *v, int n) /* extended hashing, Peter Dillinger, 2004 */
1404 { unsigned long x, y;
1405 unsigned int i = 1;
1406
1407 d_hash((uchar *) v, n); /* sets j1-j4 */
1408 x = j2; y = j3;
1409 for (;;)
1410 { if (!(SS[x]&(1<<y))) break;
1411 if (i == hfns) {
1412 #ifdef DEBUG
1413 printf("Old bitstate\n");
1414 #endif
1415 return 1;
1416 }
1417 x = (x + j1 + i) & nmask;
1418 y = (y + j4) & 7;
1419 i++;
1420 }
1421 #ifdef RANDSTOR
1422 if (rand()%100 > RANDSTOR) return 0;
1423 #endif
1424 for (;;)
1425 { SS[x] |= (1<<y);
1426 if (i == hfns) break;
1427 x = (x + j1 + i) & nmask;
1428 y = (y + j4) & 7;
1429 i++;
1430 }
1431 #ifdef DEBUG
1432 printf("New bitstate\n");
1433 #endif
1434 if (now._a_t&1)
1435 { nShadow++;
1436 }
1437 return 0;
1438 }
1439 #endif
unsigned long TMODE = 0666;	/* file permission bits for trail files */

int trcnt=1;		/* step counter used when writing trail lines */
char snap[64], fnm[512];	/* trail-line scratch buffer and trail file name */
1444
1445 int
1446 make_trail(void)
1447 { int fd;
1448 char *q;
1449 char MyFile[512];
1450 int w_flags = O_CREAT|O_WRONLY|O_TRUNC;
1451
1452 if (exclusive == 1 && iterative == 0)
1453 { w_flags |= O_EXCL;
1454 }
1455
1456 q = strrchr(TrailFile, '/');
1457 if (q == NULL) q = TrailFile; else q++;
1458 strcpy(MyFile, q); /* TrailFile is not a writable string */
1459
1460 if (iterative == 0 && Nr_Trails++ > 0)
1461 { sprintf(fnm, "%s%d.%s",
1462 MyFile, Nr_Trails-1, tprefix);
1463 } else
1464 {
1465 #ifdef PUTPID
1466 sprintf(fnm, "%s%d.%s", MyFile, getpid(), tprefix);
1467 #else
1468 sprintf(fnm, "%s.%s", MyFile, tprefix);
1469 #endif
1470 }
1471 if ((fd = open(fnm, w_flags, TMODE)) < 0)
1472 { if ((q = strchr(MyFile, '.')))
1473 { *q = '\0';
1474 if (iterative == 0 && Nr_Trails-1 > 0)
1475 sprintf(fnm, "%s%d.%s",
1476 MyFile, Nr_Trails-1, tprefix);
1477 else
1478 sprintf(fnm, "%s.%s", MyFile, tprefix);
1479 *q = '.';
1480 fd = open(fnm, w_flags, TMODE);
1481 } }
1482 if (fd < 0)
1483 { printf("pan: cannot create %s\n", fnm);
1484 perror("cause");
1485 } else
1486 {
1487 #if NCORE>1 && (defined(SEP_STATE) || !defined(FULL_TRAIL))
1488 void write_root(void);
1489 write_root();
1490 #else
1491 printf("pan: wrote %s\n", fnm);
1492 #endif
1493 }
1494 return fd;
1495 }
1496
1497 #ifndef FREQ
1498 #define FREQ (1000000)
1499 #endif
1500 #ifdef BFS
1501 #define Q_PROVISO
1502 #ifndef INLINE_REV
1503 #define INLINE_REV
1504 #endif
1505
typedef struct SV_Hold {	/* saved copy of one state vector */
	State *sv;		/* the copy; NULL when swapped to disk (BFS_DISK) */
	int  sz;		/* number of bytes held */
	struct SV_Hold *nxt;	/* link in free list */
} SV_Hold;

typedef struct EV_Hold {	/* saved vector layout (shared, never freed) */
	char *sv;		/* copy of the Mask array */
	int  sz;		/* vector size this layout belongs to */
	int  nrpr;		/* number of processes */
	int  nrqs;		/* number of queues */
	char *po;		/* copy of proc_offset table */
	char *qo;		/* copy of q_offset table */
	char *ps, *qs;		/* copies of proc_skip and q_skip tables */
	struct EV_Hold *nxt;	/* list of all layouts seen so far */
} EV_Hold;

typedef struct BFS_Trail {	/* one element of the BFS work queue */
	Trail *frame;		/* search frame for this state */
	SV_Hold *onow;		/* saved state vector */
	EV_Hold *omask;		/* saved vector layout */
#ifdef Q_PROVISO
	struct H_el *lstate;	/* hashtable entry, for the queue proviso */
#endif
	short boq;		/* saved rendezvous queue status */
	struct BFS_Trail *nxt;	/* next element in queue / free list */
} BFS_Trail;
1533
1534 BFS_Trail *bfs_trail, *bfs_bot, *bfs_free;
1535
1536 SV_Hold *svhold, *svfree;
1537
1538 #ifdef BFS_DISK
1539 #ifndef BFS_LIMIT
1540 #define BFS_LIMIT 100000
1541 #endif
1542 #ifndef BFS_DSK_LIMIT
1543 #define BFS_DSK_LIMIT 1000000
1544 #endif
1545 #if defined(WIN32) || defined(WIN64)
1546 #define RFLAGS (O_RDONLY|O_BINARY)
1547 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)
1548 #else
1549 #define RFLAGS (O_RDONLY)
1550 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC)
1551 #endif
1552 long bfs_size_limit;
1553 int bfs_dsk_write = -1;
1554 int bfs_dsk_read = -1;
1555 long bfs_dsk_writes, bfs_dsk_reads;
1556 int bfs_dsk_seqno_w, bfs_dsk_seqno_r;
1557 #endif
1558
1559 uchar do_reverse(Trans *, short, uchar);
1560 void snapshot(void);
1561
/*
 * Return a holder containing a copy of the current state vector 'now'
 * (n bytes).  Holders of exactly the right size are recycled from the
 * size-sorted free list; with BFS_DISK, once bfs_size_limit is reached,
 * the vector is appended to a temp disk file instead and sv stays NULL.
 */
SV_Hold *
getsv(int n)
{	SV_Hold *h = (SV_Hold *) 0, *oh;

	oh = (SV_Hold *) 0;
	for (h = svfree; h; oh = h, h = h->nxt)
	{	if (n == h->sz)
		{	if (!oh)	/* unlink h from the free list */
				svfree = h->nxt;
			else
				oh->nxt = h->nxt;
			h->nxt = (SV_Hold *) 0;
			break;
		}
		if (n < h->sz)	/* list sorted by sz: no match further on */
		{	h = (SV_Hold *) 0;
			break;
		}
		/* else continue */
	}

	if (!h)
	{	h = (SV_Hold *) emalloc(sizeof(SV_Hold));
		h->sz = n;
#ifdef BFS_DISK
		if (bfs_size_limit >= BFS_LIMIT)
		{	h->sv = (State *) 0;	/* means: read disk */
			bfs_dsk_writes++;	/* count */
			if (bfs_dsk_write < 0	/* file descriptor */
			||  bfs_dsk_writes%BFS_DSK_LIMIT == 0)
			{	char dsk_nm[32];
				if (bfs_dsk_write >= 0)
				{	(void) close(bfs_dsk_write);
				}
				/* start a fresh numbered spill file */
				sprintf(dsk_nm, "pan_bfs_%d.tmp", bfs_dsk_seqno_w++);
				bfs_dsk_write = open(dsk_nm, WFLAGS, 0644);
				if (bfs_dsk_write < 0)
				{	Uerror("could not create tmp disk file");
				}
				printf("pan: created disk file %s\n", dsk_nm);
			}
			if (write(bfs_dsk_write, (char *) &now, n) != n)
			{	Uerror("aborting -- disk write failed (disk full?)");
			}
			return h;	/* no memcpy */
		}
		bfs_size_limit++;
#endif
		h->sv = (State *) emalloc(sizeof(State) - VECTORSZ + n);
	}

	memcpy((char *)h->sv, (char *)&now, n);
	return h;
}
1616
/*
 * Return a (shared) holder describing the current vector layout: the
 * Mask array plus the process/queue offset and skip tables.  Layouts
 * recur, so previously seen ones are kept on a static list and reused
 * when all components match; holders on the list are never freed.
 */
EV_Hold *
getsv_mask(int n)
{	EV_Hold *h;
	static EV_Hold *kept = (EV_Hold *) 0;

	for (h = kept; h; h = h->nxt)
		if (n == h->sz
		&& (memcmp((char *) Mask, (char *) h->sv, n) == 0)
		&& (now._nr_pr == h->nrpr)
		&& (now._nr_qs == h->nrqs)
#if VECTORSZ>32000
		&& (memcmp((char *) proc_offset, (char *) h->po, now._nr_pr * sizeof(int)) == 0)
		&& (memcmp((char *) q_offset, (char *) h->qo, now._nr_qs * sizeof(int)) == 0)
#else
		&& (memcmp((char *) proc_offset, (char *) h->po, now._nr_pr * sizeof(short)) == 0)
		&& (memcmp((char *) q_offset, (char *) h->qo, now._nr_qs * sizeof(short)) == 0)
#endif
		&& (memcmp((char *) proc_skip, (char *) h->ps, now._nr_pr * sizeof(uchar)) == 0)
		&& (memcmp((char *) q_skip, (char *) h->qs, now._nr_qs * sizeof(uchar)) == 0))
			break;
	if (!h)
	{	h = (EV_Hold *) emalloc(sizeof(EV_Hold));
		h->sz = n;
		h->nrpr = now._nr_pr;
		h->nrqs = now._nr_qs;

		h->sv = (char *) emalloc(n * sizeof(char));
		memcpy((char *) h->sv, (char *) Mask, n);

		if (now._nr_pr > 0)
		{	/* NOTE(review): ps is allocated with sizeof(int) per entry
			 * but only sizeof(uchar) per entry is copied and compared;
			 * over-allocation -- harmless but inconsistent */
			h->ps = (char *) emalloc(now._nr_pr * sizeof(int));
			memcpy((char *) h->ps, (char *) proc_skip, now._nr_pr * sizeof(uchar));
#if VECTORSZ>32000
			h->po = (char *) emalloc(now._nr_pr * sizeof(int));
			memcpy((char *) h->po, (char *) proc_offset, now._nr_pr * sizeof(int));
#else
			h->po = (char *) emalloc(now._nr_pr * sizeof(short));
			memcpy((char *) h->po, (char *) proc_offset, now._nr_pr * sizeof(short));
#endif
		}
		if (now._nr_qs > 0)
		{	h->qs = (char *) emalloc(now._nr_qs * sizeof(int));
			memcpy((char *) h->qs, (char *) q_skip, now._nr_qs * sizeof(uchar));
#if VECTORSZ>32000
			h->qo = (char *) emalloc(now._nr_qs * sizeof(int));
			memcpy((char *) h->qo, (char *) q_offset, now._nr_qs * sizeof(int));
#else
			h->qo = (char *) emalloc(now._nr_qs * sizeof(short));
			memcpy((char *) h->qo, (char *) q_offset, now._nr_qs * sizeof(short));
#endif
		}

		h->nxt = kept;
		kept = h;
	}
	return h;
}
1674
1675 void
1676 freesv(SV_Hold *p)
1677 { SV_Hold *h, *oh;
1678
1679 oh = (SV_Hold *) 0;
1680 for (h = svfree; h; oh = h, h = h->nxt)
1681 if (h->sz >= p->sz)
1682 break;
1683
1684 if (!oh)
1685 { p->nxt = svfree;
1686 svfree = p;
1687 } else
1688 { p->nxt = h;
1689 oh->nxt = p;
1690 }
1691 }
1692
1693 BFS_Trail *
1694 get_bfs_frame(void)
1695 { BFS_Trail *t;
1696
1697 if (bfs_free)
1698 { t = bfs_free;
1699 bfs_free = bfs_free->nxt;
1700 t->nxt = (BFS_Trail *) 0;
1701 } else
1702 { t = (BFS_Trail *) emalloc(sizeof(BFS_Trail));
1703 }
1704 t->frame = (Trail *) emalloc(sizeof(Trail));
1705 return t;
1706 }
1707
1708 void
1709 push_bfs(Trail *f, int d)
1710 { BFS_Trail *t;
1711
1712 t = get_bfs_frame();
1713 memcpy((char *)t->frame, (char *)f, sizeof(Trail));
1714 t->frame->o_tt = d; /* depth */
1715
1716 t->boq = boq;
1717 t->onow = getsv(vsize);
1718 t->omask = getsv_mask(vsize);
1719 #if defined(FULLSTACK) && defined(Q_PROVISO)
1720 t->lstate = Lstate;
1721 #endif
1722 if (!bfs_bot)
1723 { bfs_bot = bfs_trail = t;
1724 } else
1725 { bfs_bot->nxt = t;
1726 bfs_bot = t;
1727 }
1728 #ifdef CHECK
1729 printf("PUSH %u (%d)\n", t->frame, d);
1730 #endif
1731 }
1732
/*
 * Remove the head element from the BFS work queue, restore the global
 * state vector 'now' plus the mask/offset/skip tables from it, and
 * return its Trail frame.  Returns NULL when the queue is empty.
 * With BFS_DISK a swapped-out vector is read back from a temp file.
 */
Trail *
pop_bfs(void)
{	BFS_Trail *t;

	if (!bfs_trail)
		return (Trail *) 0;

	t = bfs_trail;
	bfs_trail = t->nxt;
	if (!bfs_trail)
		bfs_bot = (BFS_Trail *) 0;
#if defined(Q_PROVISO) && !defined(BITSTATE) && !defined(NOREDUCE)
	if (t->lstate) t->lstate->tagged = 0;	/* state leaves the queue */
#endif

	t->nxt = bfs_free;	/* recycle the queue element itself */
	bfs_free = t;

	vsize = t->onow->sz;
	boq = t->boq;
#ifdef BFS_DISK
	if (t->onow->sv == (State *) 0)	/* vector was swapped to disk */
	{	char dsk_nm[32];
		bfs_dsk_reads++;	/* count */
		if (bfs_dsk_read >= 0	/* file descriptor */
		&&  bfs_dsk_reads%BFS_DSK_LIMIT == 0)
		{	(void) close(bfs_dsk_read);
			sprintf(dsk_nm, "pan_bfs_%d.tmp", bfs_dsk_seqno_r-1);
			(void) unlink(dsk_nm);	/* spill file fully consumed */
			bfs_dsk_read = -1;
		}
		if (bfs_dsk_read < 0)
		{	sprintf(dsk_nm, "pan_bfs_%d.tmp", bfs_dsk_seqno_r++);
			bfs_dsk_read = open(dsk_nm, RFLAGS);
			if (bfs_dsk_read < 0)
			{	Uerror("could not open temp disk file");
		}	}
		if (read(bfs_dsk_read, (char *) &now, vsize) != vsize)
		{	Uerror("bad bfs disk file read");
		}
#ifndef NOVSZ
		if (now._vsz != vsize)
		{	Uerror("disk read vsz mismatch");
		}
#endif
	} else
#endif
	memcpy((uchar *) &now, (uchar *) t->onow->sv, vsize);
	memcpy((uchar *) Mask, (uchar *) t->omask->sv, vsize);
	if (now._nr_pr > 0)
#if VECTORSZ>32000
	{	memcpy((char *)proc_offset, (char *)t->omask->po, now._nr_pr * sizeof(int));
#else
	{	memcpy((char *)proc_offset, (char *)t->omask->po, now._nr_pr * sizeof(short));
#endif
		memcpy((char *)proc_skip, (char *)t->omask->ps, now._nr_pr * sizeof(uchar));
	}
	if (now._nr_qs > 0)
#if VECTORSZ>32000
	{	memcpy((uchar *)q_offset, (uchar *)t->omask->qo, now._nr_qs * sizeof(int));
#else
	{	memcpy((uchar *)q_offset, (uchar *)t->omask->qo, now._nr_qs * sizeof(short));
#endif
		memcpy((uchar *)q_skip, (uchar *)t->omask->qs, now._nr_qs * sizeof(uchar));
	}
#ifdef BFS_DISK
	if (t->onow->sv != (State *) 0)	/* disk-backed holders are not recycled */
#endif
	freesv(t->onow);	/* omask not freed */
#ifdef CHECK
	printf("POP %u (%d)\n", t->frame, t->frame->o_tt);
#endif
	return t->frame;
}
1807
/*
 * Store the current state 'now' in the state space and, when new, push
 * it onto the BFS work queue via push_bfs().  With VERI defined, first
 * executes every executable move of the never claim (proc 0) and stores
 * one successor per claim move; 'shortcut' skips the claim move (used
 * for rendezvous/atomic continuations).  oboq is the rendezvous queue
 * status before the last move, used for rv success bookkeeping.
 * NOTE: with VERI, the braces of the claim loop open inside #ifdef VERI
 * and close at the bottom -- the #ifdef structure must stay intact.
 */
void
store_state(Trail *ntrpt, int shortcut, short oboq)
{
#ifdef VERI
	Trans *t2 = (Trans *) 0;
	uchar ot; int tt, E_state;
	uchar o_opm = trpt->o_pm, *othis = this;

	if (shortcut)
	{
#ifdef VERBOSE
		printf("claim: shortcut\n");
#endif
		goto store_it;	/* no claim move */
	}

	this = (((uchar *)&now)+proc_offset[0]);	/* 0 = never claim */
	trpt->o_pm = 0;

	tt = (int) ((P0 *)this)->_p;
	ot = (uchar) ((P0 *)this)->_t;

#ifdef HAS_UNLESS
	E_state = 0;
#endif
	for (t2 = trans[ot][tt]; t2; t2 = t2?t2->nxt:(Trans *)0)
	{
#ifdef HAS_UNLESS
		if (E_state > 0
		&&  E_state != t2->e_trans)
			break;
#endif
		if (do_transit(t2, 0))
		{
#ifdef VERBOSE
			if (!reached[ot][t2->st])
			printf("depth: %d -- claim move from %d -> %d\n",
				trpt->o_tt, ((P0 *)this)->_p, t2->st);
#endif
#ifdef HAS_UNLESS
			E_state = t2->e_trans;
#endif
			if (t2->st > 0)
			{	((P0 *)this)->_p = t2->st;
				reached[ot][t2->st] = 1;
#ifndef NOCLAIM
				check_claim(t2->st);
#endif
			}
			if (now._nr_pr == 0)	/* claim terminated */
				uerror("end state in claim reached");

#ifdef PEG
			peg[t2->forw]++;
#endif
			trpt->o_pm |= 1;	/* remember the claim moved */
			if (t2->atom&2)
				Uerror("atomic in claim not supported in BFS mode");
store_it:

#endif

#ifdef BITSTATE
	if (!bstore((char *)&now, vsize))
#else
#ifdef MA
	if (!gstore((char *)&now, vsize, 0))
#else
	if (!hstore((char *)&now, vsize))
#endif
#endif
	{	/* state is new */
		static long sdone = (long) 0; long ndone;
		nstates++;
#ifndef NOREDUCE
		trpt->tau |= 64;
#endif
		ndone = (unsigned long) (nstates/((double) FREQ));
		if (ndone != sdone && mreached%10 != 0)
		{	snapshot();	/* periodic progress report */
			sdone = ndone;
#if defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(MA)
			if (nstates > ((double)(1<<(ssize+1))))
			{	void resize_hashtable(void);
				resize_hashtable();
			}
#endif
		}
#if SYNC
		if (boq != -1)
			midrv++;
		else if (oboq != -1)
		{	Trail *x;
			x = (Trail *) trpt->ostate; /* pre-rv state */
			if (x) x->o_pm |= 4; /* mark success */
		}
#endif
		push_bfs(ntrpt, trpt->o_tt+1);
	} else
	{	truncs++;	/* state seen before */
#if !defined(NOREDUCE) && defined(FULLSTACK) && defined(Q_PROVISO)
#if !defined(BITSTATE)
		if (Lstate && Lstate->tagged) trpt->tau |= 64;
#else
		if (trpt->tau&32)
		{	BFS_Trail *tprov;
			for (tprov = bfs_trail; tprov; tprov = tprov->nxt)
				if (tprov->onow->sv != (State *) 0
				&&  memcmp((uchar *)&now, (uchar *)tprov->onow->sv, vsize) == 0)
				{	trpt->tau |= 64;
					break; /* state is in queue */
			}	}
#endif
#endif
	}
#ifdef VERI
	((P0 *)this)->_p = tt;	/* reset claim */
	if (t2)
		do_reverse(t2, 0, 0);	/* undo the claim move */
	else
		break;
	}	}
	this = othis;
	trpt->o_pm = o_opm;
#endif
}
1933
1934 Trail *ntrpt;
1935
/*
 * Breadth-first search main loop.  Stores the initial state, then pops
 * states from the BFS queue and, for each, tries all transitions of all
 * processes (or only a preselected process under partial-order
 * reduction), storing each successor via store_state().  Also handles
 * atomic sequences (tau&8), timeouts (tau&1), failed rendezvous offers,
 * and end-state checking when no move is possible.
 */
void
bfs(void)
{	Trans *t; Trail *otrpt, *x;
	uchar _n, _m, ot, nps = 0;
	int tt, E_state;
	short II, From = (short) (now._nr_pr-1), To = BASE;
	short oboq = boq;

	ntrpt = (Trail *) emalloc(sizeof(Trail));
	trpt->ostate = (struct H_el *) 0;
	trpt->tau = 0;

	trpt->o_tt = -1;
	store_state(ntrpt, 0, oboq);	/* initial state */

	while ((otrpt = pop_bfs()))	/* also restores now */
	{	memcpy((char *) trpt, (char *) otrpt, sizeof(Trail));
#if defined(C_States) && (HAS_TRACK==1)
		c_revert((uchar *) &(now.c_state[0]));
#endif
		if (trpt->o_pm & 4)	/* rv successor already handled */
		{
#ifdef VERBOSE
			printf("Revisit of atomic not needed (%d)\n",
				trpt->o_pm);
#endif
			continue;
		}
#ifndef NOREDUCE
		nps = 0;
#endif
		if (trpt->o_pm == 8)	/* repushed after a failed rv */
		{	revrv++;
			if (trpt->tau&8)
			{
#ifdef VERBOSE
				printf("Break atomic (pm:%d,tau:%d)\n",
					trpt->o_pm, trpt->tau);
#endif
				trpt->tau &= ~8;
			}
#ifndef NOREDUCE
			else if (trpt->tau&32)
			{
#ifdef VERBOSE
				printf("Void preselection (pm:%d,tau:%d)\n",
					trpt->o_pm, trpt->tau);
#endif
				trpt->tau &= ~32;
				nps = 1; /* no preselection in repeat */
			}
#endif
		}
		trpt->o_pm &= ~(4|8);
		if (trpt->o_tt > mreached)
		{	mreached = trpt->o_tt;	/* track deepest level reached */
			if (mreached%10 == 0)
			{	snapshot();
		}	}
		depth = trpt->o_tt;
		if (depth >= maxdepth)	/* search horizon reached */
		{
#if SYNC
			Trail *x;
			if (boq != -1)
			{	x = (Trail *) trpt->ostate;
				if (x) x->o_pm |= 4; /* not failing */
			}
#endif
			truncs++;
			if (!warned)
			{	warned = 1;
				printf("error: max search depth too small\n");
			}
			if (bounded)
				uerror("depth limit reached");
			continue;
		}
#ifndef NOREDUCE
		/* try to preselect a single safe process (p.o. reduction) */
		if (boq == -1 && !(trpt->tau&8) && nps == 0)
		for (II = now._nr_pr-1; II >= BASE; II -= 1)
		{
Pickup:			this = pptr(II);
			tt = (int) ((P0 *)this)->_p;
			ot = (uchar) ((P0 *)this)->_t;
			if (trans[ot][tt]->atom & 8)
			{	t = trans[ot][tt];
				if (t->qu[0] != 0)
				{	Ccheck++;
					if (!q_cond(II, t))
						continue;
					Cholds++;
				}
				From = To = II;
				trpt->tau |= 32; /* preselect marker */
#ifdef DEBUG
				printf("%3d: proc %d PreSelected (tau=%d)\n",
					depth, II, trpt->tau);
#endif
				goto MainLoop;
		}	}
		trpt->tau &= ~32;
#endif
Repeat:
		if (trpt->tau&8)	/* atomic: only the owner may move */
		{	From = To = (short ) trpt->pr;
			nlinks++;
		} else
		{	From = now._nr_pr-1;
			To = BASE;
		}
MainLoop:
		_n = _m = 0;
		for (II = From; II >= To; II -= 1)
		{
			this = (((uchar *)&now)+proc_offset[II]);
			tt = (int) ((P0 *)this)->_p;
			ot = (uchar) ((P0 *)this)->_t;
#if SYNC
			/* no rendezvous with same proc */
			if (boq != -1 && trpt->pr == II) continue;
#endif
			ntrpt->pr = (uchar) II;
			ntrpt->st = tt;
			trpt->o_pm &= ~1; /* no move yet */
#ifdef EVENT_TRACE
			trpt->o_event = now._event;
#endif
#ifdef HAS_PROVIDED
			if (!provided(II, ot, tt, t)) continue;
#endif
#ifdef HAS_UNLESS
			E_state = 0;
#endif
			for (t = trans[ot][tt]; t; t = t->nxt)
			{
#ifdef HAS_UNLESS
				if (E_state > 0
				&&  E_state != t->e_trans)
					break;
#endif
				ntrpt->o_t = t;

				oboq = boq;

				if (!(_m = do_transit(t, II)))
					continue;

				trpt->o_pm |= 1; /* we moved */
				(trpt+1)->o_m = _m; /* for unsend */
#ifdef PEG
				peg[t->forw]++;
#endif
#ifdef CHECK
				printf("%3d: proc %d exec %d, ",
					depth, II, t->forw);
				printf("%d to %d, %s %s %s",
					tt, t->st, t->tp,
					(t->atom&2)?"atomic":"",
					(boq != -1)?"rendez-vous":"");
#ifdef HAS_UNLESS
				if (t->e_trans)
					printf(" (escapes to state %d)", t->st);
#endif
				printf(" %saccepting [tau=%d]\n",
					(trpt->o_pm&2)?"":"non-", trpt->tau);
#endif
#ifdef HAS_UNLESS
				E_state = t->e_trans;
#if SYNC>0
				if (t->e_trans > 0 && (boq != -1 /* || oboq != -1 */))
				{	fprintf(efd, "error: the use of rendezvous stmnt in the escape clause\n");
					fprintf(efd, "	of an unless stmnt is not compatible with -DBFS\n");
					pan_exit(1);
				}
#endif
#endif
				if (t->st > 0) ((P0 *)this)->_p = t->st;

		/* ptr to pred: */	ntrpt->ostate = (struct H_el *) otrpt;
				ntrpt->st = tt;
				if (boq == -1 && (t->atom&2)) /* atomic */
					ntrpt->tau = 8; /* record for next move */
				else
					ntrpt->tau = 0;

				store_state(ntrpt, (boq != -1 || (t->atom&2)), oboq);
#ifdef EVENT_TRACE
				now._event = trpt->o_event;
#endif

				/* undo move and continue */
				trpt++;	/* this is where ovals and ipt are set */
				do_reverse(t, II, _m);	/* restore now. */
				trpt--;
#ifdef CHECK
#if NCORE>1
				enter_critical(GLOBAL_LOCK);	/* in verbose mode only */
				printf("cpu%d: ", core_id);
#endif
				printf("%3d: proc %d ", depth, II);
				printf("reverses %d, %d to %d,",
					t->forw, tt, t->st);
				printf(" %s [abit=%d,adepth=%d,",
					t->tp, now._a_t, A_depth);
				printf("tau=%d,%d]\n",
					trpt->tau, (trpt-1)->tau);
#if NCORE>1
				leave_critical(GLOBAL_LOCK);
#endif
#endif
				reached[ot][t->st] = 1;
				reached[ot][tt] = 1;

				((P0 *)this)->_p = tt;	/* restore source state */
				_n |= _m;
		}	}
#ifndef NOREDUCE
		/* preselected - no succ definitely outside stack */
		if ((trpt->tau&32) && !(trpt->tau&64))
		{	From = now._nr_pr-1; To = BASE;
#ifdef DEBUG
			cpu_printf("%3d: proc %d UnSelected (_n=%d, tau=%d)\n",
				depth, II+1, (int) _n, trpt->tau);
#endif
			_n = 0; trpt->tau &= ~32;
			if (II >= BASE)
				goto Pickup;	/* try preselecting another proc */
			goto MainLoop;		/* fall back to full expansion */
		}
		trpt->tau &= ~(32|64);
#endif
		if (_n != 0)	/* at least one successor generated */
			continue;
#ifdef DEBUG
		printf("%3d: no move [II=%d, tau=%d, boq=%d, _nr_pr=%d]\n",
			depth, II, trpt->tau, boq, now._nr_pr);
#endif
		if (boq != -1)	/* pending rendezvous offer found no taker */
		{	failedrv++;
			x = (Trail *) trpt->ostate; /* pre-rv state */
			if (!x) continue; /* root state */
			if ((x->tau&8) || (x->tau&32)) /* break atomic or preselect at parent */
			{	x->o_pm |= 8; /* mark failure */
				this = (((uchar *)&now)+proc_offset[otrpt->pr]);
#ifdef VERBOSE
				printf("\treset state of %d from %d to %d\n",
					otrpt->pr, ((P0 *)this)->_p, otrpt->st);
#endif
				((P0 *)this)->_p = otrpt->st;
				unsend(boq); /* retract rv offer */
				boq = -1;
				push_bfs(x, x->o_tt);	/* revisit the parent */
#ifdef VERBOSE
				printf("failed rv, repush with %d\n", x->o_pm);
#endif
			}
#ifdef VERBOSE
			else printf("failed rv, tau at parent: %d\n", x->tau);
#endif
		} else if (now._nr_pr > 0)
		{
			if ((trpt->tau&8)) /* atomic */
			{	trpt->tau &= ~(1|8); /* 1=timeout, 8=atomic */
#ifdef DEBUG
				printf("%3d: atomic step proc %d blocks\n",
					depth, II+1);
#endif
				goto Repeat;	/* retry with all procs enabled */
			}

			if (!(trpt->tau&1))	/* didn't try timeout yet */
			{	trpt->tau |= 1;
#ifdef DEBUG
				printf("%d: timeout\n", depth);
#endif
				goto MainLoop;
			}
#ifndef VERI
			if (!noends && !a_cycles && !endstate())
				uerror("invalid end state");
#endif
	}	}
}
2220
2221 void
2222 putter(Trail *trpt, int fd)
2223 { long j;
2224
2225 if (!trpt) return;
2226
2227 if (trpt != (Trail *) trpt->ostate)
2228 putter((Trail *) trpt->ostate, fd);
2229
2230 if (trpt->o_t)
2231 { sprintf(snap, "%d:%d:%d\n",
2232 trcnt++, trpt->pr, trpt->o_t->t_id);
2233 j = strlen(snap);
2234 if (write(fd, snap, j) != j)
2235 { printf("pan: error writing %s\n", fnm);
2236 pan_exit(1);
2237 } }
2238 }
2239
/*
 * Report an error found during BFS: create a new trail file, write the
 * steps leading to the current state (via putter) plus the pending
 * transition held in ntrpt, then stop via wrapup() when the error
 * limit 'upto' has been reached.  The 'str' argument is unused here.
 */
void
nuerror(char *str)
{	int fd = make_trail();
	int j;

	if (fd < 0) return;
#ifdef VERI
	sprintf(snap, "-2:%d:-2\n", VERI);	/* never-claim marker line */
	write(fd, snap, strlen(snap));
#endif
#ifdef MERGED
	sprintf(snap, "-4:-4:-4\n");	/* merged-transitions marker line */
	write(fd, snap, strlen(snap));
#endif
	trcnt = 1;
	putter(trpt, fd);
	if (ntrpt->o_t)	/* append the pending (error) transition */
	{	sprintf(snap, "%d:%d:%d\n",
			trcnt++, ntrpt->pr, ntrpt->o_t->t_id);
		j = strlen(snap);
		if (write(fd, snap, j) != j)
		{	printf("pan: error writing %s\n", fnm);
			pan_exit(1);
	}	}
	close(fd);
	if (errors >= upto && upto != 0)
	{	wrapup();
	}
}
2269 #endif
2270 #if NCORE>1
2271 #if defined(WIN32) || defined(WIN64)
2272 #ifndef _CONSOLE
2273 #define _CONSOLE
2274 #endif
2275 #ifdef WIN64
2276 #undef long
2277 #endif
2278 #include <windows.h>
2279
2280 #ifdef WIN64
2281 #define long long long
2282 #endif
2283 #else
2284 #include <sys/ipc.h>
2285 #include <sys/sem.h>
2286 #include <sys/shm.h>
2287 #endif
2288
2289 /* code common to cygwin/linux and win32/win64: */
2290
2291 #ifdef VERBOSE
2292 #define VVERBOSE (1)
2293 #else
2294 #define VVERBOSE (0)
2295 #endif
2296
2297 /* the following values must be larger than 256 and must fit in an int */
2298 #define QUIT 1024 /* terminate now command */
2299 #define QUERY 512 /* termination status query message */
2300 #define QUERY_F 513 /* query failed, cannot quit */
2301
2302 #define GN_FRAMES (int) (GWQ_SIZE / (double) sizeof(SM_frame))
2303 #define LN_FRAMES (int) (LWQ_SIZE / (double) sizeof(SM_frame))
2304
2305 #ifndef VMAX
2306 #define VMAX VECTORSZ
2307 #endif
2308 #ifndef PMAX
2309 #define PMAX 64
2310 #endif
2311 #ifndef QMAX
2312 #define QMAX 64
2313 #endif
2314
2315 #if VECTORSZ>32000
2316 #define OFFT int
2317 #else
2318 #define OFFT short
2319 #endif
2320
2321 #ifdef SET_SEG_SIZE
2322 /* no longer usefule -- being recomputed for local heap size anyway */
2323 double SEG_SIZE = (((double) SET_SEG_SIZE) * 1048576.);
2324 #else
2325 double SEG_SIZE = (1048576.*1024.); /* 1GB default shared memory pool segments */
2326 #endif
2327
2328 double LWQ_SIZE = 0.; /* initialized in main */
2329
2330 #ifdef SET_WQ_SIZE
2331 #ifdef NGQ
2332 #warning SET_WQ_SIZE applies to global queue -- ignored
2333 double GWQ_SIZE = 0.;
2334 #else
2335 double GWQ_SIZE = (((double) SET_WQ_SIZE) * 1048576.);
2336 /* must match the value in pan_proxy.c, if used */
2337 #endif
2338 #else
2339 #ifdef NGQ
2340 double GWQ_SIZE = 0.;
2341 #else
2342 double GWQ_SIZE = (128.*1048576.); /* 128 MB default queue sizes */
2343 #endif
2344 #endif
2345
2346 /* Crash Detection Parameters */
2347 #ifndef ONESECOND
2348 #define ONESECOND (1<<25)
2349 #endif
2350 #ifndef SHORT_T
2351 #define SHORT_T (0.1)
2352 #endif
2353 #ifndef LONG_T
2354 #define LONG_T (600)
2355 #endif
2356
2357 double OneSecond = (double) (ONESECOND); /* waiting for a free slot -- checks crash */
2358 double TenSeconds = 10. * (ONESECOND); /* waiting for a lock -- check for a crash */
2359
2360 /* Termination Detection Params -- waiting for new state input in Get_Full_Frame */
2361 double Delay = ((double) SHORT_T) * (ONESECOND); /* termination detection trigger */
2362 double OneHour = ((double) LONG_T) * (ONESECOND); /* timeout termination detection */
2363
typedef struct SM_frame      SM_frame;		/* one shared-memory work-queue slot */
typedef struct SM_results    SM_results;	/* final per-core statistics record */
typedef struct sh_Allocater  sh_Allocater;	/* shared-memory pool descriptor */

struct SM_frame {			/* about 6K per slot */
	volatile int	m_vsize;	/* 0 means free slot */
	volatile int	m_boq;		/* >500 is a control message */
#ifdef FULL_TRAIL
	volatile struct Stack_Tree *m_stack;	/* ptr to previous state */
#endif
	volatile uchar	m_tau;		/* presumably saved tau flags -- confirm in fill/record code */
	volatile uchar	m_o_pm;		/* presumably saved o_pm flags -- confirm in fill/record code */
	volatile int	nr_handoffs;	/* to compute real_depth */
	volatile char	m_now  [VMAX];		/* transferred state vector */
	volatile char	m_Mask [(VMAX + 7)/8];	/* transferred Mask bits */
	volatile OFFT	m_p_offset[PMAX];	/* per-process offsets */
	volatile OFFT	m_q_offset[QMAX];	/* per-queue offsets */
	volatile uchar	m_p_skip  [PMAX];	/* per-process skip table */
	volatile uchar	m_q_skip  [QMAX];	/* per-queue skip table */
#if defined(C_States) && (HAS_TRACK==1) && (HAS_STACK==1)
	volatile uchar	m_c_stack [StackSize];	/* tracked c_code stack data */
#endif
};
2387
2388 int proxy_pid; /* id of proxy if nonzero -- receive half */
2389 int store_proxy_pid;
2390 short remote_party;
2391 int proxy_pid_snd; /* id of proxy if nonzero -- send half */
2392 char o_cmdline[512]; /* to pass options to children */
2393
2394 int iamin[CS_NR+NCORE]; /* non-shared */
2395
#if defined(WIN32) || defined(WIN64)
/* Windows variant: shared segments are file mappings, ids are HANDLEs/void* */
int tas(volatile LONG *);

HANDLE proxy_handle_snd; /* for Windows Create and Terminate */

struct sh_Allocater { /* shared memory for states */
	volatile char *dc_arena; /* to allocate states from */
	volatile long pattern; /* to detect overruns */
	volatile long dc_size; /* nr of bytes left */
	volatile void *dc_start; /* where memory segment starts */
	volatile void *dc_id; /* to attach, detach, remove shared memory segments */
	volatile sh_Allocater *nxt; /* linked list of pools */
};
DWORD worker_pids[NCORE]; /* root mem of pids of all workers created */
HANDLE worker_handles[NCORE]; /* for windows Create and Terminate */
void * shmid [NR_QS]; /* return value from CreateFileMapping */
void * shmid_M; /* shared mem for state allocation in hashtable */

#ifdef SEP_STATE
void *shmid_X;	/* single small coordination segment (separate statespaces) */
#else
void *shmid_S; /* shared bitstate arena or hashtable */
#endif
#else
/* POSIX variant: System V shared memory, ids are ints from shmget() */
int tas(volatile int *);

struct sh_Allocater { /* shared memory for states */
	volatile char *dc_arena; /* to allocate states from */
	volatile long pattern; /* to detect overruns */
	volatile long dc_size; /* nr of bytes left */
	volatile char *dc_start; /* where memory segment starts */
	volatile int dc_id; /* to attach, detach, remove shared memory segments */
	volatile sh_Allocater *nxt; /* linked list of pools */
};

int worker_pids[NCORE]; /* root mem of pids of all workers created */
int shmid [NR_QS]; /* return value from shmget */
int nibis = 0; /* set after shared mem has been released */
int shmid_M; /* shared mem for state allocation in hashtable */
#ifdef SEP_STATE
long shmid_X;	/* holds the attach address in SEP_STATE mode (see init_HT) */
#else
int shmid_S; /* shared bitstate arena or hashtable */
volatile sh_Allocater *first_pool; /* of shared state memory */
volatile sh_Allocater *last_pool;
#endif
#endif
2443
/* Final per-core statistics frame: filled by record_info() on each
 * worker, merged into the globals by retrieve_info() on core 0.
 */
struct SM_results { /* for shuttling back final stats */
	volatile int m_vsize; /* avoid conflicts with frames */
	volatile int m_boq; /* these 2 fields are not written in record_info */
	/* probably not all fields really need to be volatile */
	volatile double m_memcnt;
	volatile double m_nstates;
	volatile double m_truncs;
	volatile double m_truncs2;
	volatile double m_nShadow;
	volatile double m_nlinks;
	volatile double m_ngrabs;
	volatile double m_nlost;
	volatile double m_hcmp;
	volatile double m_frame_wait;
	volatile int m_hmax;
	volatile int m_svmax;
	volatile int m_smax;
	volatile int m_mreached;
	volatile int m_errors;
	volatile int m_VMAX;	/* largest state-vector size seen */
	volatile short m_PMAX;	/* largest nr of processes seen */
	volatile short m_QMAX;	/* largest nr of channels seen */
	volatile uchar m_R; /* reached info for all proctypes */
};
2468
int core_id = 0; /* internal process nr, to know which q to use */
unsigned long nstates_put = 0; /* statistics */
unsigned long nstates_get = 0;
int query_in_progress = 0; /* termination detection */

/* cumulative wait-time counters, reported by sleep_report() */
double free_wait = 0.; /* waiting for a free frame */
double frame_wait = 0.; /* waiting for a full frame */
double lock_wait = 0.; /* waiting for access to cs */
double glock_wait[3]; /* waiting for access to global lock */

char *sprefix = "rst";
uchar was_interrupted, issued_kill, writing_trail;

static SM_frame cur_Root; /* current root, to be safe with error trails */

SM_frame *m_workq [NR_QS]; /* per cpu work queues + global q */
char *shared_mem[NR_QS]; /* return value from shmat */
#ifdef SEP_HEAP
char *my_heap;	/* this core's private slice of the shared state memory */
long my_size;	/* bytes left in my_heap */
#endif
volatile sh_Allocater *dc_shared; /* assigned at initialization */

static int vmax_seen, pmax_seen, qmax_seen;	/* high-water marks for handoffs */
static double gq_tries, gq_hasroom, gq_hasnoroom;	/* global-queue statistics */

/* per-core work-queue indices, one array entry per core, in shared memory */
volatile int *prfree;
volatile int *prfull;
volatile int *prcnt;
volatile int *prmax;	/* queue high-water marks, printed as "qmax" */

volatile int *sh_lock; /* mutual exclusion locks - in shared memory */
volatile double *is_alive; /* to detect when processes crash */
volatile int *grfree, *grfull, *grcnt, *grmax; /* access to shared global q */
volatile double *gr_readmiss, *gr_writemiss;
static int lrfree; /* used for temporary recording of slot */
static int dfs_phase2;

void mem_put(int); /* handoff state to other cpu */
void mem_put_acc(void); /* liveness mode */
void mem_get(void); /* get state from work queue */
void sudden_stop(char *);
#if 0
void enter_critical(int);
void leave_critical(int);
#endif
2515
2516 void
2517 record_info(SM_results *r)
2518 { int i;
2519 uchar *ptr;
2520
2521 #ifdef SEP_STATE
2522 if (0)
2523 { cpu_printf("nstates %g nshadow %g -- memory %-6.3f Mb\n",
2524 nstates, nShadow, memcnt/(1048576.));
2525 }
2526 r->m_memcnt = 0;
2527 #else
2528 #ifdef BITSTATE
2529 r->m_memcnt = 0; /* it's shared */
2530 #endif
2531 r->m_memcnt = memcnt;
2532 #endif
2533 if (a_cycles && core_id == 1)
2534 { r->m_nstates = nstates;
2535 r->m_nShadow = nstates;
2536 } else
2537 { r->m_nstates = nstates;
2538 r->m_nShadow = nShadow;
2539 }
2540 r->m_truncs = truncs;
2541 r->m_truncs2 = truncs2;
2542 r->m_nlinks = nlinks;
2543 r->m_ngrabs = ngrabs;
2544 r->m_nlost = nlost;
2545 r->m_hcmp = hcmp;
2546 r->m_frame_wait = frame_wait;
2547 r->m_hmax = hmax;
2548 r->m_svmax = svmax;
2549 r->m_smax = smax;
2550 r->m_mreached = mreached;
2551 r->m_errors = errors;
2552 r->m_VMAX = vmax_seen;
2553 r->m_PMAX = (short) pmax_seen;
2554 r->m_QMAX = (short) qmax_seen;
2555 ptr = (uchar *) &(r->m_R);
2556 for (i = 0; i <= _NP_; i++) /* all proctypes */
2557 { memcpy(ptr, reached[i], NrStates[i]*sizeof(uchar));
2558 ptr += NrStates[i]*sizeof(uchar);
2559 }
2560 if (verbose>1)
2561 { cpu_printf("Put Results nstates %g (sz %d)\n", nstates, ptr - &(r->m_R));
2562 }
2563 }
2564
2565 void snapshot(void);
2566
2567 void
2568 retrieve_info(SM_results *r)
2569 { int i, j;
2570 volatile uchar *ptr;
2571
2572 snapshot(); /* for a final report */
2573
2574 enter_critical(GLOBAL_LOCK);
2575 #ifdef SEP_HEAP
2576 if (verbose)
2577 { printf("cpu%d: local heap-left %ld KB (%d MB)\n",
2578 core_id, (int) (my_size/1024), (int) (my_size/1048576));
2579 }
2580 #endif
2581 if (verbose && core_id == 0)
2582 { printf("qmax: ");
2583 for (i = 0; i < NCORE; i++)
2584 { printf("%d ", prmax[i]);
2585 }
2586 #ifndef NGQ
2587 printf("G: %d", *grmax);
2588 #endif
2589 printf("\n");
2590 }
2591 leave_critical(GLOBAL_LOCK);
2592
2593 memcnt += r->m_memcnt;
2594 nstates += r->m_nstates;
2595 nShadow += r->m_nShadow;
2596 truncs += r->m_truncs;
2597 truncs2 += r->m_truncs2;
2598 nlinks += r->m_nlinks;
2599 ngrabs += r->m_ngrabs;
2600 nlost += r->m_nlost;
2601 hcmp += r->m_hcmp;
2602 /* frame_wait += r->m_frame_wait; */
2603 errors += r->m_errors;
2604
2605 if (hmax < r->m_hmax) hmax = r->m_hmax;
2606 if (svmax < r->m_svmax) svmax = r->m_svmax;
2607 if (smax < r->m_smax) smax = r->m_smax;
2608 if (mreached < r->m_mreached) mreached = r->m_mreached;
2609
2610 if (vmax_seen < r->m_VMAX) vmax_seen = r->m_VMAX;
2611 if (pmax_seen < (int) r->m_PMAX) pmax_seen = (int) r->m_PMAX;
2612 if (qmax_seen < (int) r->m_QMAX) qmax_seen = (int) r->m_QMAX;
2613
2614 ptr = &(r->m_R);
2615 for (i = 0; i <= _NP_; i++) /* all proctypes */
2616 { for (j = 0; j < NrStates[i]; j++)
2617 { if (*(ptr + j) != 0)
2618 { reached[i][j] = 1;
2619 } }
2620 ptr += NrStates[i]*sizeof(uchar);
2621 }
2622 if (verbose>1)
2623 { cpu_printf("Got Results (%d)\n", ptr - &(r->m_R));
2624 snapshot();
2625 }
2626 }
2627
2628 #if !defined(WIN32) && !defined(WIN64)
/*
 * rm_shared_segments: mark every System V shared-memory segment used
 * by this run for removal -- the work queues, the shared hashtable or
 * bitstate arena, and every chained state pool.  Linux/cygwin only.
 */
static void
rm_shared_segments(void)
{	int m;
	volatile sh_Allocater *nxt_pool;
	/*
	 * mark all shared memory segments for removal
	 * the actual removes wont happen intil last process dies or detaches
	 * the shmctl calls can return -1 if not all procs have detached yet
	 */
	for (m = 0; m < NR_QS; m++) /* +1 for global q */
	{	if (shmid[m] != -1)
		{	(void) shmctl(shmid[m], IPC_RMID, NULL);
	} }
#ifdef SEP_STATE
	if (shmid_M != -1)
	{	(void) shmctl(shmid_M, IPC_RMID, NULL);
	}
#else
	if (shmid_S != -1)
	{	(void) shmctl(shmid_S, IPC_RMID, NULL);
	}
	/* each state pool records its own shm id in dc_id */
	for (last_pool = first_pool; last_pool != NULL; last_pool = nxt_pool)
	{	shmid_M = (int) (last_pool->dc_id);
		nxt_pool = last_pool->nxt; /* as a pre-caution only */
		if (shmid_M != -1)
		{	(void) shmctl(shmid_M, IPC_RMID, NULL);
	} }
#endif
}
2658 #endif
2659
/*
 * sudden_stop: emergency shutdown path, called on a fatal condition
 * (lock timeout, crash detection, interrupt).  If the shared
 * search_terminated flag is still reachable it is used to coordinate
 * an orderly stop (core 0 also runs wrapup()); otherwise the workers
 * are killed outright (core 0 kills all others, a worker signals root).
 * s describes the reason and is only used in the log line.
 */
void
sudden_stop(char *s)
{	char b[64];
	int i;

	printf("cpu%d: stop - %s\n", core_id, s);
#if !defined(WIN32) && !defined(WIN64)
	if (proxy_pid != 0)
	{	rm_shared_segments();
	}
#endif
	if (search_terminated != NULL)
	{	if (*search_terminated != 0)
		{	if (verbose)
			{	printf("cpu%d: termination initiated (%d)\n",
					core_id, *search_terminated);
			}
		} else
		{	if (verbose)
			{	printf("cpu%d: initiated termination\n", core_id);
			}
			*search_terminated |= 8; /* sudden_stop */
		}
		if (core_id == 0)
		{	if (((*search_terminated) & 4) /* uerror in one of the cpus */
			&& !((*search_terminated) & (8|32|128|256))) /* abnormal stop */
			{	if (errors == 0) errors++; /* we know there is at least 1 */
			}
			wrapup(); /* incomplete stats, but at least something */
		}
		return;
	} /* else: should rarely happen, take more drastic measures */

	if (core_id == 0) /* local root process */
	{	for (i = 1; i < NCORE; i++) /* not for 0 of course */
		{
#if defined(WIN32) || defined(WIN64)
			DWORD dwExitCode = 0;
			GetExitCodeProcess(worker_handles[i], &dwExitCode);
			if (dwExitCode == STILL_ACTIVE)
			{	TerminateProcess(worker_handles[i], 0);
			}
			printf("cpu0: terminate %d %d\n",
				worker_pids[i], (dwExitCode == STILL_ACTIVE));
#else
			sprintf(b, "kill -%d %d", SIGKILL, worker_pids[i]);
			system(b); /* if this is a proxy: receive half */
			printf("cpu0: %s\n", b);
#endif
		}
		issued_kill++;
	} else
	{	/* on WIN32/WIN64 -- these merely kills the root process... */
		if (was_interrupted == 0)
		{	sprintf(b, "kill -%d %d", SIGINT, worker_pids[0]);
			system(b); /* warn the root process */
			printf("cpu%d: %s\n", core_id, b);
			issued_kill++;
	} }
}
2720
2721 #define iam_alive() is_alive[core_id]++
2722
2723 extern int crash_test(double);
2724 extern void crash_reset(void);
2725
2726 int
2727 someone_crashed(int wait_type)
2728 { static double last_value = 0.0;
2729 static int count = 0;
2730
2731 if (search_terminated == NULL
2732 || *search_terminated != 0)
2733 {
2734 if (!(*search_terminated & (8|32|128|256)))
2735 { if (count++ < 100*NCORE)
2736 { return 0;
2737 } }
2738 return 1;
2739 }
2740 /* check left neighbor only */
2741 if (last_value == is_alive[(core_id + NCORE - 1) % NCORE])
2742 { if (count++ >= 100) /* to avoid unnecessary checks */
2743 { return 1;
2744 }
2745 return 0;
2746 }
2747 last_value = is_alive[(core_id + NCORE - 1) % NCORE];
2748 count = 0;
2749 crash_reset();
2750 return 0;
2751 }
2752
2753 void
2754 sleep_report(void)
2755 {
2756 enter_critical(GLOBAL_LOCK);
2757 if (verbose)
2758 {
2759 #ifdef NGQ
2760 printf("cpu%d: locks: global %g\tother %g\t",
2761 core_id, glock_wait[0], lock_wait - glock_wait[0]);
2762 #else
2763 printf("cpu%d: locks: GL %g, RQ %g, WQ %g, HT %g\t",
2764 core_id, glock_wait[0], glock_wait[1], glock_wait[2],
2765 lock_wait - glock_wait[0] - glock_wait[1] - glock_wait[2]);
2766 #endif
2767 printf("waits: states %g slots %g\n", frame_wait, free_wait);
2768 #ifndef NGQ
2769 printf("cpu%d: gq [tries %g, room %g, noroom %g]\n", core_id, gq_tries, gq_hasroom, gq_hasnoroom);
2770 if (core_id == 0 && (*gr_readmiss >= 1.0 || *gr_readmiss >= 1.0 || *grcnt != 0))
2771 printf("cpu0: gq [readmiss: %g, writemiss: %g cnt %d]\n", *gr_readmiss, *gr_writemiss, *grcnt);
2772 #endif
2773 }
2774 if (free_wait > 1000000.)
2775 #ifndef NGQ
2776 if (!a_cycles)
2777 { printf("hint: this search may be faster with a larger work-queue\n");
2778 printf(" (-DSET_WQ_SIZE=N with N>%g), and/or with -DUSE_DISK\n",
2779 GWQ_SIZE/sizeof(SM_frame));
2780 printf(" or with a larger value for -zN (N>%d)\n", z_handoff);
2781 #else
2782 { printf("hint: this search may be faster if compiled without -DNGQ, with -DUSE_DISK, ");
2783 printf("or with a larger -zN (N>%d)\n", z_handoff);
2784 #endif
2785 }
2786 leave_critical(GLOBAL_LOCK);
2787 }
2788
2789 #ifndef MAX_DSK_FILE
2790 #define MAX_DSK_FILE 1000000 /* default is max 1M states per file */
2791 #endif
2792
/*
 * multi_usage: print the help text for the multi-core compile-time
 * options to fd.  Guarded so repeated calls print it only once.
 */
void
multi_usage(FILE *fd)
{	static int warned = 0;
	if (warned > 0) { return; } else { warned++; }
	fprintf(fd, "\n");
	fprintf(fd, "Defining multi-core mode:\n\n");
	fprintf(fd, "        -DDUAL_CORE --> same as -DNCORE=2\n");
	fprintf(fd, "        -DQUAD_CORE --> same as -DNCORE=4\n");
	fprintf(fd, "        -DNCORE=N   --> enables multi_core verification if N>1\n");
	fprintf(fd, "\n");
	fprintf(fd, "Additional directives supported in multi-core mode:\n\n");
	fprintf(fd, "        -DSEP_STATE --> forces separate statespaces instead of a single shared state space\n");
	/* NOTE(review): "-DNUSE_DISK" below looks like a typo for "-DUSE_DISK" -- confirm against spinroot docs */
	fprintf(fd, "        -DNUSE_DISK --> use disk for storing states when a work queue overflows\n");
	fprintf(fd, "        -DMAX_DSK_FILE --> max nr of states per diskfile (%d)\n", MAX_DSK_FILE);
	fprintf(fd, "        -DFULL_TRAIL --> support full error trails (increases memory use)\n");
	fprintf(fd, "\n");
	fprintf(fd, "More advanced use (should rarely need changing):\n\n");
	fprintf(fd, "   To change the nr of states that can be stored in the global queue\n");
	fprintf(fd, "   (lower numbers allow for more states to be stored, prefer multiples of 8):\n");
	fprintf(fd, "        -DVMAX=N    --> upperbound on statevector for handoffs (N=%d)\n", VMAX);
	fprintf(fd, "        -DPMAX=N    --> upperbound on nr of procs (default: N=%d)\n", PMAX);
	fprintf(fd, "        -DQMAX=N    --> upperbound on nr of channels (default: N=%d)\n", QMAX);
	fprintf(fd, "\n");
	fprintf(fd, "   To set the total amount of memory reserved for the global workqueue:\n");
	fprintf(fd, "        -DSET_WQ_SIZE=N --> default: N=128 (defined in MBytes)\n\n");
	fprintf(fd, "   To force the use of a single global heap, instead of separate heaps:\n");
	fprintf(fd, "        -DGLOB_HEAP\n");
	fprintf(fd, "\n");
	fprintf(fd, "   To define a fct to initialize data before spawning processes (use quotes):\n");
	fprintf(fd, "        \"-DC_INIT=fct()\"\n");
	fprintf(fd, "\n");
	fprintf(fd, "   Timer settings for termination and crash detection:\n");
	fprintf(fd, "        -DSHORT_T=N --> timeout for termination detection trigger (N=%g)\n", (double) SHORT_T);
	fprintf(fd, "        -DLONG_T=N  --> timeout for giving up on termination detection (N=%g)\n", (double) LONG_T);
	fprintf(fd, "        -DONESECOND --> (1<<29) --> timeout waiting for a free slot -- to check for crash\n");
	fprintf(fd, "        -DT_ALERT   --> collect stats on crash alert timeouts\n\n");
	fprintf(fd, "Help with Linux/Windows/Cygwin configuration for multi-core:\n");
	fprintf(fd, "        http://spinroot.com/spin/multicore/V5_Readme.html\n");
	fprintf(fd, "\n");
}
#if NCORE>1 && defined(FULL_TRAIL)
/* One node of the shared "stack tree": enough information to replay a
 * single transition.  Walking prv links back to the root reconstructs
 * a full error trail across state handoffs between cores.
 */
typedef struct Stack_Tree {
	uchar pr; /* process that made transition */
	T_ID t_id; /* id of transition */
	volatile struct Stack_Tree *prv; /* backward link towards root */
} Stack_Tree;

struct H_el *grab_shared(int);
volatile Stack_Tree **stack_last; /* in shared memory */
char *stack_cache = NULL; /* local */
int nr_cached = 0; /* local */

#ifndef CACHE_NR
#define CACHE_NR 1024	/* nr of Stack_Tree nodes grabbed from shared memory per refill */
#endif
2848
2849 volatile Stack_Tree *
2850 stack_prefetch(void)
2851 { volatile Stack_Tree *st;
2852
2853 if (nr_cached == 0)
2854 { stack_cache = (char *) grab_shared(CACHE_NR * sizeof(Stack_Tree));
2855 nr_cached = CACHE_NR;
2856 }
2857 st = (volatile Stack_Tree *) stack_cache;
2858 stack_cache += sizeof(Stack_Tree);
2859 nr_cached--;
2860 return st;
2861 }
2862
2863 void
2864 Push_Stack_Tree(short II, T_ID t_id)
2865 { volatile Stack_Tree *st;
2866
2867 st = (volatile Stack_Tree *) stack_prefetch();
2868 st->pr = II;
2869 st->t_id = t_id;
2870 st->prv = (Stack_Tree *) stack_last[core_id];
2871 stack_last[core_id] = st;
2872 }
2873
2874 void
2875 Pop_Stack_Tree(void)
2876 { volatile Stack_Tree *cf = stack_last[core_id];
2877
2878 if (cf)
2879 { stack_last[core_id] = cf->prv;
2880 } else if (nr_handoffs * z_handoff + depth > 0)
2881 { printf("cpu%d: error pop_stack_tree (depth %d)\n",
2882 core_id, depth);
2883 }
2884 }
2885 #endif
2886
/*
 * e_critical: enter critical section `which'.  Nested entries (or any
 * entry while replaying a trail) only bump the thread-local iamin
 * counter; a first entry spins on the shared test-and-set lock.  While
 * spinning it updates the heartbeat and, after TenSeconds of waiting,
 * checks whether another worker crashed and aborts if so.
 */
void
e_critical(int which)
{	double cnt_start;

	if (readtrail || iamin[which] > 0)
	{	if (!readtrail && verbose)
		{	printf("cpu%d: Double Lock on %d (now %d)\n",
				core_id, which, iamin[which]+1);
			fflush(stdout);
		}
		iamin[which]++; /* local variable */
		return;
	}

	cnt_start = lock_wait;	/* lock_wait doubles as a spin counter */

	while (sh_lock != NULL) /* as long as we have shared memory */
	{	int r = tas(&sh_lock[which]);
		if (r == 0)	/* old value was 0: we now own the lock */
		{	iamin[which] = 1;
			return; /* locked */
		}

		lock_wait++;
#ifndef NGQ
		if (which < 3) { glock_wait[which]++; }
#else
		if (which == 0) { glock_wait[which]++; }
#endif
		iam_alive();	/* keep our own heartbeat moving while we spin */

		if (lock_wait - cnt_start > TenSeconds)
		{	printf("cpu%d: lock timeout on %d\n", core_id, which);
			cnt_start = lock_wait;
			if (someone_crashed(1))
			{	sudden_stop("lock timeout");
				pan_exit(1);
	} } }
}
2926
2927 void
2928 x_critical(int which)
2929 {
2930 if (iamin[which] != 1)
2931 { if (iamin[which] > 1)
2932 { iamin[which]--; /* this is thread-local - no races on this one */
2933 if (!readtrail && verbose)
2934 { printf("cpu%d: Partial Unlock on %d (%d more needed)\n",
2935 core_id, which, iamin[which]);
2936 fflush(stdout);
2937 }
2938 return;
2939 } else /* iamin[which] <= 0 */
2940 { if (!readtrail)
2941 { printf("cpu%d: Invalid Unlock iamin[%d] = %d\n",
2942 core_id, which, iamin[which]);
2943 fflush(stdout);
2944 }
2945 return;
2946 } }
2947
2948 if (sh_lock != NULL)
2949 { iamin[which] = 0;
2950 sh_lock[which] = 0; /* unlock */
2951 }
2952 }
2953
/*
 * start_proxy: replace the current process image with the pan_proxy
 * helper (send half "-s" or receive half "-r", given in s), passing it
 * our pid (-Q), the remote party's pid (-Y), the proxy id (-Z), and
 * any extra options from o_cmdline.  Never returns on success; on
 * exec failure it reports through Uerror.
 */
void
#if defined(WIN32) || defined(WIN64)
start_proxy(char *s, DWORD r_pid)
#else
start_proxy(char *s, int r_pid)
#endif
{	char Q_arg[16], Z_arg[16], Y_arg[16];
	char *args[32], *ptr;
	int argcnt = 0;

	sprintf(Q_arg, "-Q%d", getpid());
	sprintf(Y_arg, "-Y%d", r_pid);
	sprintf(Z_arg, "-Z%d", proxy_pid /* core_id */);

	args[argcnt++] = "proxy";
	args[argcnt++] = s; /* -r or -s */
	args[argcnt++] = Q_arg;
	args[argcnt++] = Z_arg;
	args[argcnt++] = Y_arg;

	/* split o_cmdline on runs of spaces, in place, NUL-terminating each arg */
	if (strlen(o_cmdline) > 0)
	{	ptr = o_cmdline; /* assume args separated by spaces */
		do { args[argcnt++] = ptr++;
		     if ((ptr = strchr(ptr, ' ')) != NULL)
			{	while (*ptr == ' ')
				{	*ptr++ = '\0';
				}
			} else
			{	break;
			}
		} while (argcnt < 31);	/* leave room for the NULL terminator */
	}
	args[argcnt] = NULL;
#if defined(WIN32) || defined(WIN64)
	execvp("pan_proxy", args); /* no return */
#else
	execvp("./pan_proxy", args); /* no return */
#endif
	Uerror("pan_proxy exec failed");
}
2994 /*** end of common code fragment ***/
2995
2996 #if !defined(WIN32) && !defined(WIN64)
/*
 * init_shm: create (core 0) or attach (workers) the NR_QS shared work
 * queues -- one local queue per core plus one global queue.  Core 0
 * also zeroes every frame so all slots start out free (m_vsize == 0).
 * On any failure the already-created segments are removed and the
 * verifier exits.  Linux/cygwin version.
 */
void
init_shm(void) /* initialize shared work-queues - linux/cygwin */
{	key_t key[NR_QS];
	int n, m;
	int must_exit = 0;

	if (core_id == 0 && verbose)
	{	printf("cpu0: step 3: allocate shared workqueues %g MB\n",
			((double) NCORE * LWQ_SIZE + GWQ_SIZE) / (1048576.) );
	}
	for (m = 0; m < NR_QS; m++) /* last q is the global q */
	{	double qsize = (m == NCORE) ? GWQ_SIZE : LWQ_SIZE;
		key[m] = ftok(PanSource, m+1);
		if (key[m] == -1)
		{	perror("ftok shared queues"); must_exit = 1; break;
		}

		if (core_id == 0) /* root creates */
		{	/* check for stale copy */
			shmid[m] = shmget(key[m], (size_t) qsize, 0600);
			if (shmid[m] != -1) /* yes there is one; remove it */
			{	printf("cpu0: removing stale q%d, status: %d\n",
					m, shmctl(shmid[m], IPC_RMID, NULL));
			}
			shmid[m] = shmget(key[m], (size_t) qsize, 0600|IPC_CREAT|IPC_EXCL);
			memcnt += qsize;
		} else /* workers attach */
		{	shmid[m] = shmget(key[m], (size_t) qsize, 0600);
			/* never called, since we create shm *before* we fork */
		}
		if (shmid[m] == -1)
		{	perror("shmget shared queues"); must_exit = 1; break;
		}

		shared_mem[m] = (char *) shmat(shmid[m], (void *) 0, 0); /* attach */
		if (shared_mem[m] == (char *) -1)
		{	fprintf(stderr, "error: cannot attach shared wq %d (%d Mb)\n",
				m+1, (int) (qsize/(1048576.)));
			perror("shmat shared queues"); must_exit = 1; break;
		}

		m_workq[m] = (SM_frame *) shared_mem[m];
		if (core_id == 0)
		{	int nframes = (m == NCORE) ? GN_FRAMES : LN_FRAMES;
			for (n = 0; n < nframes; n++)
			{	m_workq[m][n].m_vsize = 0;	/* mark the slot free */
				m_workq[m][n].m_boq = 0;
	} } }

	if (must_exit)
	{	rm_shared_segments();
		fprintf(stderr, "pan: check './pan --' for usage details\n");
		pan_exit(1); /* calls cleanup_shm */
	}
}
3052
3053 static uchar *
3054 prep_shmid_S(size_t n) /* either sets SS or H_tab, linux/cygwin */
3055 { char *rval;
3056 #ifndef SEP_STATE
3057 key_t key;
3058
3059 if (verbose && core_id == 0)
3060 {
3061 #ifdef BITSTATE
3062 printf("cpu0: step 1: allocate shared bitstate %g Mb\n",
3063 (double) n / (1048576.));
3064 #else
3065 printf("cpu0: step 1: allocate shared hastable %g Mb\n",
3066 (double) n / (1048576.));
3067 #endif
3068 }
3069 #ifdef MEMLIM
3070 if (memcnt + (double) n > memlim)
3071 { printf("cpu0: S %8g + %d Kb exceeds memory limit of %8g Mb\n",
3072 memcnt/1024., n/1024, memlim/(1048576.));
3073 printf("cpu0: insufficient memory -- aborting\n");
3074 exit(1);
3075 }
3076 #endif
3077
3078 key = ftok(PanSource, NCORE+2); /* different from queues */
3079 if (key == -1)
3080 { perror("ftok shared bitstate or hashtable");
3081 fprintf(stderr, "pan: check './pan --' for usage details\n");
3082 pan_exit(1);
3083 }
3084
3085 if (core_id == 0) /* root */
3086 { shmid_S = shmget(key, n, 0600);
3087 if (shmid_S != -1)
3088 { printf("cpu0: removing stale segment, status: %d\n",
3089 shmctl(shmid_S, IPC_RMID, NULL));
3090 }
3091 shmid_S = shmget(key, n, 0600 | IPC_CREAT | IPC_EXCL);
3092 memcnt += (double) n;
3093 } else /* worker */
3094 { shmid_S = shmget(key, n, 0600);
3095 }
3096 if (shmid_S == -1)
3097 { perror("shmget shared bitstate or hashtable too large?");
3098 fprintf(stderr, "pan: check './pan --' for usage details\n");
3099 pan_exit(1);
3100 }
3101
3102 rval = (char *) shmat(shmid_S, (void *) 0, 0); /* attach */
3103 if ((char *) rval == (char *) -1)
3104 { perror("shmat shared bitstate or hashtable");
3105 fprintf(stderr, "pan: check './pan --' for usage details\n");
3106 pan_exit(1);
3107 }
3108 #else
3109 rval = (char *) emalloc(n);
3110 #endif
3111 return (uchar *) rval;
3112 }
3113
#define TRY_AGAIN 1	/* allocation failed, but a smaller request may succeed */
#define NOT_AGAIN 0	/* hard failure (e.g. over the configured memory limit) */

static char shm_prep_result;	/* outcome flag set by prep_state_mem() */

/*
 * prep_state_mem: create (core 0) or attach (workers) one pool of
 * shared memory for state storage; linux/cygwin version.  Each call
 * uses a fresh ftok key (cnt starts above the keys used for queues).
 * Returns NULL on failure, setting shm_prep_result to TRY_AGAIN when
 * retrying with a smaller pool might work.
 */
static uchar *
prep_state_mem(size_t n) /* sets memory arena for states linux/cygwin */
{	char *rval;
	key_t key;
	static int cnt = 3; /* start larger than earlier ftok calls */

	shm_prep_result = NOT_AGAIN; /* default */
	if (verbose && core_id == 0)
	{	printf("cpu0: step 2+: pre-allocate memory arena %d of %6.2g Mb\n",
			cnt-3, (double) n / (1048576.));
	}
#ifdef MEMLIM
	if (memcnt + (double) n > memlim)
	{	printf("cpu0: error: M %.0f + %.0f Kb exceeds memory limit of %.0f Mb\n",
			memcnt/1024.0, (double) n/1024.0, memlim/(1048576.));
		return NULL;
	}
#endif

	key = ftok(PanSource, NCORE+cnt); cnt++;
	if (key == -1)
	{	perror("ftok T");
		printf("pan: check './pan --' for usage details\n");
		pan_exit(1);
	}

	if (core_id == 0)
	{	shmid_M = shmget(key, n, 0600);
		if (shmid_M != -1)	/* stale segment from an earlier run */
		{	printf("cpu0: removing stale memory segment %d, status: %d\n",
				cnt-3, shmctl(shmid_M, IPC_RMID, NULL));
		}
		shmid_M = shmget(key, n, 0600 | IPC_CREAT | IPC_EXCL);
		/* memcnt += (double) n; -- only amount actually used is counted */
	} else
	{	shmid_M = shmget(key, n, 0600);

	}
	if (shmid_M == -1)
	{	if (verbose)
		{	printf("error: failed to get pool of shared memory %d of %.0f Mb\n",
				cnt-3, ((double)n)/(1048576.));
			perror("state mem");
			printf("pan: check './pan --' for usage details\n");
		}
		shm_prep_result = TRY_AGAIN;	/* caller may retry with a smaller pool */
		return NULL;
	}
	rval = (char *) shmat(shmid_M, (void *) 0, 0); /* attach */

	if ((char *) rval == (char *) -1)
	{	printf("cpu%d error: failed to attach pool of shared memory %d of %.0f Mb\n",
			core_id, cnt-3, ((double)n)/(1048576.));
		perror("state mem");
		return NULL;
	}
	return (uchar *) rval;
}
3177
3178 void
3179 init_HT(unsigned long n) /* cygwin/linux version */
3180 { volatile char *x;
3181 double get_mem;
3182 #ifndef SEP_STATE
3183 volatile char *dc_mem_start;
3184 double need_mem, got_mem = 0.;
3185 #endif
3186
3187 #ifdef SEP_STATE
3188 #ifndef MEMLIM
3189 if (verbose)
3190 { printf("cpu0: steps 0,1: no -DMEMLIM set\n");
3191 }
3192 #else
3193 if (verbose)
3194 { printf("cpu0: steps 0,1: -DMEMLIM=%d Mb - (hashtable %g Mb + workqueues %g Mb)\n",
3195 MEMLIM, ((double)n/(1048576.)), (((double) NCORE * LWQ_SIZE) + GWQ_SIZE) /(1048576.) );
3196 }
3197 #endif
3198 get_mem = NCORE * sizeof(double) + (1 + CS_NR) * sizeof(void *) + 4*sizeof(void *) + 2*sizeof(double);
3199 /* NCORE * is_alive + search_terminated + CS_NR * sh_lock + 6 gr vars */
3200 get_mem += 4 * NCORE * sizeof(void *); /* prfree, prfull, prcnt, prmax */
3201 #ifdef FULL_TRAIL
3202 get_mem += (NCORE) * sizeof(Stack_Tree *); /* NCORE * stack_last */
3203 #endif
3204 x = (volatile char *) prep_state_mem((size_t) get_mem); /* work queues and basic structs */
3205 shmid_X = (long) x;
3206 if (x == NULL)
3207 { printf("cpu0: could not allocate shared memory, see ./pan --\n");
3208 exit(1);
3209 }
3210 search_terminated = (volatile unsigned int *) x; /* comes first */
3211 x += sizeof(void *); /* maintain alignment */
3212
3213 is_alive = (volatile double *) x;
3214 x += NCORE * sizeof(double);
3215
3216 sh_lock = (volatile int *) x;
3217 x += CS_NR * sizeof(void *);
3218
3219 grfree = (volatile int *) x;
3220 x += sizeof(void *);
3221 grfull = (volatile int *) x;
3222 x += sizeof(void *);
3223 grcnt = (volatile int *) x;
3224 x += sizeof(void *);
3225 grmax = (volatile int *) x;
3226 x += sizeof(void *);
3227 prfree = (volatile int *) x;
3228 x += NCORE * sizeof(void *);
3229 prfull = (volatile int *) x;
3230 x += NCORE * sizeof(void *);
3231 prcnt = (volatile int *) x;
3232 x += NCORE * sizeof(void *);
3233 prmax = (volatile int *) x;
3234 x += NCORE * sizeof(void *);
3235 gr_readmiss = (volatile double *) x;
3236 x += sizeof(double);
3237 gr_writemiss = (volatile double *) x;
3238 x += sizeof(double);
3239
3240 #ifdef FULL_TRAIL
3241 stack_last = (volatile Stack_Tree **) x;
3242 x += NCORE * sizeof(Stack_Tree *);
3243 #endif
3244
3245 #ifndef BITSTATE
3246 H_tab = (struct H_el **) emalloc(n);
3247 #endif
3248 #else
3249 #ifndef MEMLIM
3250 #warning MEMLIM not set
3251 #define MEMLIM (2048)
3252 #endif
3253
3254 if (core_id == 0 && verbose)
3255 { printf("cpu0: step 0: -DMEMLIM=%d Mb minus hashtable+workqs (%g + %g Mb) leaves %g Mb\n",
3256 MEMLIM, ((double)n/(1048576.)), (NCORE * LWQ_SIZE + GWQ_SIZE)/(1048576.),
3257 (memlim - memcnt - (double) n - (NCORE * LWQ_SIZE + GWQ_SIZE))/(1048576.));
3258 }
3259 #ifndef BITSTATE
3260 H_tab = (struct H_el **) prep_shmid_S((size_t) n); /* hash_table */
3261 #endif
3262 need_mem = memlim - memcnt - ((double) NCORE * LWQ_SIZE) - GWQ_SIZE;
3263 if (need_mem <= 0.)
3264 { Uerror("internal error -- shared state memory");
3265 }
3266
3267 if (core_id == 0 && verbose)
3268 { printf("cpu0: step 2: pre-allocate shared state memory %g Mb\n",
3269 need_mem/(1048576.));
3270 }
3271 #ifdef SEP_HEAP
3272 SEG_SIZE = need_mem / NCORE;
3273 if (verbose && core_id == 0)
3274 { printf("cpu0: setting segsize to %6g MB\n",
3275 SEG_SIZE/(1048576.));
3276 }
3277 #if defined(CYGWIN) || defined(__CYGWIN__)
3278 if (SEG_SIZE > 512.*1024.*1024.)
3279 { printf("warning: reducing SEG_SIZE of %g MB to 512MB (exceeds max for Cygwin)\n",
3280 SEG_SIZE/(1024.*1024.));
3281 SEG_SIZE = 512.*1024.*1024.;
3282 }
3283 #endif
3284 #endif
3285 mem_reserved = need_mem;
3286 while (need_mem > 1024.)
3287 { get_mem = need_mem;
3288 shm_more:
3289 if (get_mem > (double) SEG_SIZE)
3290 { get_mem = (double) SEG_SIZE;
3291 }
3292 if (get_mem <= 0.0) break;
3293
3294 /* for allocating states: */
3295 x = dc_mem_start = (volatile char *) prep_state_mem((size_t) get_mem);
3296 if (x == NULL)
3297 { if (shm_prep_result == NOT_AGAIN
3298 || first_pool != NULL
3299 || SEG_SIZE < (16. * 1048576.))
3300 { break;
3301 }
3302 SEG_SIZE /= 2.;
3303 if (verbose)
3304 { printf("pan: lowered segsize to 0.000000\n", SEG_SIZE);
3305 }
3306 if (SEG_SIZE >= 1024.)
3307 { goto shm_more;
3308 }
3309 break;
3310 }
3311
3312 need_mem -= get_mem;
3313 got_mem += get_mem;
3314 if (first_pool == NULL)
3315 { search_terminated = (volatile unsigned int *) x; /* comes first */
3316 x += sizeof(void *); /* maintain alignment */
3317
3318 is_alive = (volatile double *) x;
3319 x += NCORE * sizeof(double);
3320
3321 sh_lock = (volatile int *) x;
3322 x += CS_NR * sizeof(void *);
3323
3324 grfree = (volatile int *) x;
3325 x += sizeof(void *);
3326 grfull = (volatile int *) x;
3327 x += sizeof(void *);
3328 grcnt = (volatile int *) x;
3329 x += sizeof(void *);
3330 grmax = (volatile int *) x;
3331 x += sizeof(void *);
3332 prfree = (volatile int *) x;
3333 x += NCORE * sizeof(void *);
3334 prfull = (volatile int *) x;
3335 x += NCORE * sizeof(void *);
3336 prcnt = (volatile int *) x;
3337 x += NCORE * sizeof(void *);
3338 prmax = (volatile int *) x;
3339 x += NCORE * sizeof(void *);
3340 gr_readmiss = (volatile double *) x;
3341 x += sizeof(double);
3342 gr_writemiss = (volatile double *) x;
3343 x += sizeof(double);
3344 #ifdef FULL_TRAIL
3345 stack_last = (volatile Stack_Tree **) x;
3346 x += NCORE * sizeof(Stack_Tree *);
3347 #endif
3348 if (((long)x)&(sizeof(void *)-1)) /* 64-bit word alignment */
3349 { x += sizeof(void *)-(((long)x)&(sizeof(void *)-1));
3350 }
3351
3352 #ifdef COLLAPSE
3353 ncomps = (unsigned long *) x;
3354 x += (256+2) * sizeof(unsigned long);
3355 #endif
3356 }
3357
3358 dc_shared = (sh_Allocater *) x; /* must be in shared memory */
3359 x += sizeof(sh_Allocater);
3360
3361 if (core_id == 0) /* root only */
3362 { dc_shared->dc_id = shmid_M;
3363 dc_shared->dc_start = dc_mem_start;
3364 dc_shared->dc_arena = x;
3365 dc_shared->pattern = 1234567; /* protection */
3366 dc_shared->dc_size = (long) get_mem - (long) (x - dc_mem_start);
3367 dc_shared->nxt = (long) 0;
3368
3369 if (last_pool == NULL)
3370 { first_pool = last_pool = dc_shared;
3371 } else
3372 { last_pool->nxt = dc_shared;
3373 last_pool = dc_shared;
3374 }
3375 } else if (first_pool == NULL)
3376 { first_pool = dc_shared;
3377 } }
3378
3379 if (need_mem > 1024.)
3380 { printf("cpu0: could allocate only %g Mb of shared memory (wanted %g more)\n",
3381 got_mem/(1048576.), need_mem/(1048576.));
3382 }
3383
3384 if (!first_pool)
3385 { printf("cpu0: insufficient memory -- aborting.\n");
3386 exit(1);
3387 }
3388 /* we are still single-threaded at this point, with core_id 0 */
3389 dc_shared = first_pool;
3390
3391 #endif
3392 }
3393
/* Test and Set assembly code
 *
 * Each variant atomically exchanges the value 1 into *s and returns
 * the previous contents: a return of 0 means the lock was free and is
 * now held by the caller (see e_critical).
 */

#if defined(i386) || defined(__i386__) || defined(__x86_64__)
int
tas(volatile int *s) /* tested */
{	int r;
	__asm__ __volatile__(
		"xchgl %0, %1 \n\t"
		: "=r"(r), "=m"(*s)
		: "0"(1), "m"(*s)
		: "memory");

	return r;
}
#elif defined(__arm__)
int
tas(volatile int *s) /* not tested */
{	int r = 1;
	__asm__ __volatile__(
		"swpb %0, %0, [%3] \n"
		: "=r"(r), "=m"(*s)
		: "0"(r), "r"(s));

	return r;
}
#elif defined(sparc) || defined(__sparc__)
int
tas(volatile int *s) /* not tested */
{	int r = 1;
	__asm__ __volatile__(
		" ldstub [%2], %0 \n"
		: "=r"(r), "=m"(*s)
		: "r"(s));

	return r;
}
#elif defined(ia64) || defined(__ia64__)
/* Intel Itanium */
int
tas(volatile int *s) /* tested */
{	long int r;
	__asm__ __volatile__(
		"	xchg4 	%0=%1,%2	\n"
	:	"=r"(r), "+m"(*s)
	:	"r"(1)
	:	"memory");
	return (int) r;
}
#else
#error missing definition of test and set operation for this platform
#endif
3445
3446 void
3447 cleanup_shm(int val)
3448 { volatile sh_Allocater *nxt_pool;
3449 unsigned long cnt = 0;
3450 int m;
3451
3452 if (nibis != 0)
3453 { printf("cpu%d: Redundant call to cleanup_shm(%d)\n", core_id, val);
3454 return;
3455 } else
3456 { nibis = 1;
3457 }
3458 if (search_terminated != NULL)
3459 { *search_terminated |= 16; /* cleanup_shm */
3460 }
3461
3462 for (m = 0; m < NR_QS; m++)
3463 { if (shmdt((void *) shared_mem[m]) > 0)
3464 { perror("shmdt detaching from shared queues");
3465 } }
3466
3467 #ifdef SEP_STATE
3468 if (shmdt((void *) shmid_X) != 0)
3469 { perror("shmdt detaching from shared state memory");
3470 }
3471 #else
3472 #ifdef BITSTATE
3473 if (SS > 0 && shmdt((void *) SS) != 0)
3474 { if (verbose)
3475 { perror("shmdt detaching from shared bitstate arena");
3476 } }
3477 #else
3478 if (core_id == 0)
3479 { /* before detaching: */
3480 for (nxt_pool = dc_shared; nxt_pool != NULL; nxt_pool = nxt_pool->nxt)
3481 { cnt += nxt_pool->dc_size;
3482 }
3483 if (verbose)
3484 { printf("cpu0: done, %ld Mb of shared state memory left\n",
3485 cnt / (long)(1048576));
3486 } }
3487
3488 if (shmdt((void *) H_tab) != 0)
3489 { perror("shmdt detaching from shared hashtable");
3490 }
3491
3492 for (last_pool = first_pool; last_pool != NULL; last_pool = nxt_pool)
3493 { nxt_pool = last_pool->nxt;
3494 if (shmdt((void *) last_pool->dc_start) != 0)
3495 { perror("shmdt detaching from shared state memory");
3496 } }
3497 first_pool = last_pool = NULL; /* precaution */
3498 #endif
3499 #endif
3500 /* detached from shared memory - so cannot use cpu_printf */
3501 if (verbose)
3502 { printf("cpu%d: done -- got %d states from queue\n",
3503 core_id, nstates_get);
3504 }
3505 }
3506
3507 extern void give_up(int);
3508 extern void Read_Queue(int);
3509
3510 void
3511 mem_get(void)
3512 { SM_frame *f;
3513 int is_parent;
3514
3515 #if defined(MA) && !defined(SEP_STATE)
3516 #error MA without SEP_STATE is not supported with multi-core
3517 #endif
3518 #ifdef BFS
3519 #error BFS is not supported with multi-core
3520 #endif
3521 #ifdef SC
3522 #error SC is not supported with multi-core
3523 #endif
3524 init_shm(); /* we are single threaded when this starts */
3525
3526 if (core_id == 0 && verbose)
3527 { printf("cpu0: step 4: calling fork()\n");
3528 }
3529 fflush(stdout);
3530
3531 /* if NCORE > 1 the child or the parent should fork N-1 more times
3532 * the parent is the only process with core_id == 0 and is_parent > 0
3533 * the workers have is_parent = 0 and core_id = 1..NCORE-1
3534 */
3535 if (core_id == 0)
3536 { worker_pids[0] = getpid(); /* for completeness */
3537 while (++core_id < NCORE) /* first worker sees core_id = 1 */
3538 { is_parent = fork();
3539 if (is_parent == -1)
3540 { Uerror("fork failed");
3541 }
3542 if (is_parent == 0) /* this is a worker process */
3543 { if (proxy_pid == core_id) /* always non-zero */
3544 { start_proxy("-r", 0); /* no return */
3545 }
3546 goto adapt; /* root process continues spawning */
3547 }
3548 worker_pids[core_id] = is_parent;
3549 }
3550 /* note that core_id is now NCORE */
3551 if (proxy_pid > 0 && proxy_pid < NCORE)
3552 { proxy_pid_snd = fork();
3553 if (proxy_pid_snd == -1)
3554 { Uerror("proxy fork failed");
3555 }
3556 if (proxy_pid_snd == 0)
3557 { start_proxy("-s", worker_pids[proxy_pid]); /* no return */
3558 } } /* else continue */
3559 if (is_parent > 0)
3560 { core_id = 0; /* reset core_id for root process */
3561 }
3562 } else /* worker */
3563 { static char db0[16]; /* good for up to 10^6 cores */
3564 static char db1[16];
3565 adapt: tprefix = db0; sprefix = db1;
3566 sprintf(tprefix, "cpu%d_trail", core_id);
3567 sprintf(sprefix, "cpu%d_rst", core_id);
3568 memcnt = 0; /* count only additionally allocated memory */
3569 }
3570 signal(SIGINT, give_up);
3571
3572 if (proxy_pid == 0) /* not in a cluster setup, pan_proxy must attach */
3573 { rm_shared_segments(); /* mark all shared segments for removal on exit */
3574 }
3575 if (verbose)
3576 { cpu_printf("starting core_id %d -- pid %d\n", core_id, getpid());
3577 }
3578 #if defined(SEP_HEAP) && !defined(SEP_STATE)
3579 { int i;
3580 volatile sh_Allocater *ptr;
3581 ptr = first_pool;
3582 for (i = 0; i < NCORE && ptr != NULL; i++)
3583 { if (i == core_id)
3584 { my_heap = (char *) ptr->dc_arena;
3585 my_size = (long) ptr->dc_size;
3586 if (verbose)
3587 cpu_printf("local heap %ld MB\n", my_size/(1048576));
3588 break;
3589 }
3590 ptr = ptr->nxt; /* local */
3591 }
3592 if (my_heap == NULL)
3593 { printf("cpu%d: no local heap\n", core_id);
3594 pan_exit(1);
3595 } /* else */
3596 #if defined(CYGWIN) || defined(__CYGWIN__)
3597 ptr = first_pool;
3598 for (i = 0; i < NCORE && ptr != NULL; i++)
3599 { ptr = ptr->nxt; /* local */
3600 }
3601 dc_shared = ptr; /* any remainder */
3602 #else
3603 dc_shared = NULL; /* used all mem for local heaps */
3604 #endif
3605 }
3606 #endif
3607 if (core_id == 0 && !remote_party)
3608 { new_state(); /* cpu0 explores root */
3609 if (verbose)
3610 cpu_printf("done with 1st dfs, nstates %g (put %d states), read q\n",
3611 nstates, nstates_put);
3612 dfs_phase2 = 1;
3613 }
3614 Read_Queue(core_id); /* all cores */
3615
3616 if (verbose)
3617 { cpu_printf("put %6d states into queue -- got %6d\n",
3618 nstates_put, nstates_get);
3619 }
3620 if (proxy_pid != 0)
3621 { rm_shared_segments();
3622 }
3623 done = 1;
3624 wrapup();
3625 exit(0);
3626 }
3627
3628 #else
3629 int unpack_state(SM_frame *, int);
3630 #endif
3631
/*
 * grab_shared: allocate n bytes (rounded up to pointer alignment) of
 * zeroed memory for a hashtable entry.  With SEP_STATE each core just
 * uses its private emalloc heap.  Otherwise memory comes first from
 * this core's private pool (SEP_HEAP path, lock-free), then from the
 * shared pool chain under GLOBAL_LOCK; exhausting all pools aborts the
 * run via sudden_stop()/wrapup().
 */
struct H_el *
grab_shared(int n)
{
#ifndef SEP_STATE
	char *rval = (char *) 0;

	if (n == 0)
	{	printf("cpu%d: grab shared zero\n", core_id); fflush(stdout);
		return (struct H_el *) rval;
	} else if (n&(sizeof(void *)-1))
	{	n += sizeof(void *)-(n&(sizeof(void *)-1));	/* alignment */
	}

#ifdef SEP_HEAP
	/* no locking */
	if (my_heap != NULL && my_size > n)
	{	rval = my_heap;
		my_heap += n;
		my_size -= n;
		goto done;
	}
#endif

	if (!dc_shared)
	{	sudden_stop("pan: out of memory");
	}

	/* another lock is always already in effect when this is called */
	/* but not always the same lock -- i.e., on different parts of the hashtable */
	enter_critical(GLOBAL_LOCK);	/* this must be independently mutex */
#if defined(SEP_HEAP) && !defined(WIN32) && !defined(WIN64)
	{	static int noted = 0;
		if (!noted)	/* report the first spill into the global heap only once */
		{	noted = 1;
			printf("cpu%d: global heap has %ld bytes left, needed %d\n",
				core_id, dc_shared?dc_shared->dc_size:0, n);
	}	}
#endif
#if 0
	if (dc_shared->pattern != 1234567)	/* guard value written by init_shm */
	{	leave_critical(GLOBAL_LOCK);
		Uerror("overrun -- memory corruption");
	}
#endif
	if (dc_shared->dc_size < n)	/* current pool exhausted: advance to next */
	{	if (verbose)
		{	printf("Next Pool %g Mb + %d\n", memcnt/(1048576.), n);
		}
		if (dc_shared->nxt == NULL
		|| dc_shared->nxt->dc_arena == NULL
		|| dc_shared->nxt->dc_size < n)
		{	printf("cpu%d: memcnt %g Mb + wanted %d bytes more\n",
				core_id, memcnt / (1048576.), n);
			leave_critical(GLOBAL_LOCK);
			sudden_stop("out of memory -- aborting");
			wrapup();	/* exits */
		} else
		{	dc_shared = (sh_Allocater *) dc_shared->nxt;
	}	}

	/* bump-pointer allocation from the current shared pool */
	rval = (char *) dc_shared->dc_arena;
	dc_shared->dc_arena += n;
	dc_shared->dc_size -= (long) n;
#if 0
	if (VVERBOSE)
	printf("cpu%d grab shared (%d bytes) -- %ld left\n",
		core_id, n, dc_shared->dc_size);
#endif
	leave_critical(GLOBAL_LOCK);
done:
	memset(rval, 0, n);	/* callers rely on zeroed memory */
	memcnt += (double) n;

	return (struct H_el *) rval;
#else
	return (struct H_el *) emalloc(n);
#endif
}
3710
3711 SM_frame *
3712 Get_Full_Frame(int n)
3713 { SM_frame *f;
3714 double cnt_start = frame_wait;
3715
3716 f = &m_workq[n][prfull[n]];
3717 while (f->m_vsize == 0) /* await full slot LOCK : full frame */
3718 { iam_alive();
3719 #ifndef NGQ
3720 #ifndef SAFETY
3721 if (!a_cycles || core_id != 0)
3722 #endif
3723 if (*grcnt > 0) /* accessed outside lock, but safe even if wrong */
3724 { enter_critical(GQ_RD); /* gq - read access */
3725 if (*grcnt > 0) /* could have changed */
3726 { f = &m_workq[NCORE][*grfull]; /* global q */
3727 if (f->m_vsize == 0)
3728 { /* writer is still filling the slot */
3729 *gr_writemiss++;
3730 f = &m_workq[n][prfull[n]]; /* reset */
3731 } else
3732 { *grfull = (*grfull+1) % (GN_FRAMES);
3733 enter_critical(GQ_WR);
3734 *grcnt = *grcnt - 1;
3735 leave_critical(GQ_WR);
3736 leave_critical(GQ_RD);
3737 return f;
3738 } }
3739 leave_critical(GQ_RD);
3740 }
3741 #endif
3742 if (frame_wait++ - cnt_start > Delay)
3743 { if (0)
3744 { cpu_printf("timeout on q%d -- %u -- query %d\n",
3745 n, f, query_in_progress);
3746 }
3747 return (SM_frame *) 0; /* timeout */
3748 } }
3749 iam_alive();
3750 if (VVERBOSE) cpu_printf("got frame from q%d\n", n);
3751 prfull[n] = (prfull[n] + 1) % (LN_FRAMES);
3752 enter_critical(QLOCK(n));
3753 prcnt[n]--; /* lock out increments */
3754 leave_critical(QLOCK(n));
3755 return f;
3756 }
3757
/*
 * Get_Free_Frame: reserve an empty frame in work queue n.  n == NCORE
 * selects the global-queue slot that GlobalQ_HasRoom() already
 * reserved via lrfree.  Spins until the slot's m_vsize is 0; after
 * OneSecond of waiting it checks whether a peer crashed.  For local
 * queues the filled-slot count prcnt[n] is bumped under QLOCK(n).
 */
SM_frame *
Get_Free_Frame(int n)
{	SM_frame *f;
	double cnt_start = free_wait;

	if (VVERBOSE) { cpu_printf("get free frame from q%d\n", n); }

	if (n == NCORE)	/* global q */
	{	f = &(m_workq[n][lrfree]);
	} else
	{	f = &(m_workq[n][prfree[n]]);
	}
	while (f->m_vsize != 0)	/* await free slot LOCK : free slot */
	{	iam_alive();
		if (free_wait++ - cnt_start > OneSecond)
		{	if (verbose)
			{	cpu_printf("timeout waiting for free slot q%d\n", n);
			}
			cnt_start = free_wait;	/* restart the one-second window */
			if (someone_crashed(1))
			{	printf("cpu%d: search terminated\n", core_id);
				sudden_stop("get free frame");
				pan_exit(1);
	}	}	}
	if (n != NCORE)
	{	prfree[n] = (prfree[n] + 1) % (LN_FRAMES);
		enter_critical(QLOCK(n));
		prcnt[n]++;	/* lock out decrements */
		if (prmax[n] < prcnt[n])
		{	prmax[n] = prcnt[n];	/* track high-water mark */
		}
		leave_critical(QLOCK(n));
	}
	return f;
}
3793 #ifndef NGQ
3794 int
3795 GlobalQ_HasRoom(void)
3796 { int rval = 0;
3797
3798 gq_tries++;
3799 if (*grcnt < GN_FRAMES) /* there seems to be room */
3800 { enter_critical(GQ_WR); /* gq write access */
3801 if (*grcnt < GN_FRAMES)
3802 { if (m_workq[NCORE][*grfree].m_vsize != 0)
3803 { /* can happen if reader is slow emptying slot */
3804 *gr_readmiss++;
3805 goto out; /* dont wait: release lock and return */
3806 }
3807 lrfree = *grfree; /* Get_Free_Frame use lrfree in this mode */
3808 *grfree = (*grfree + 1) % GN_FRAMES;
3809 *grcnt = *grcnt + 1; /* count nr of slots filled -- no additional lock needed */
3810 if (*grmax < *grcnt) *grmax = *grcnt;
3811 leave_critical(GQ_WR); /* for short lock duration */
3812 gq_hasroom++;
3813 mem_put(NCORE); /* copy state into reserved slot */
3814 rval = 1; /* successfull handoff */
3815 } else
3816 { gq_hasnoroom++;
3817 out: leave_critical(GQ_WR);
3818 } }
3819 return rval;
3820 }
3821 #endif
3822
/*
 * unpack_state: restore the full verifier state from work-queue frame
 * f: state vector 'now', the Mask array (bit-packed in the frame),
 * process/queue offset tables, and a minimal two-entry trail stub so
 * the DFS can resume.  from_q names the source queue (diagnostics
 * only; -2 when called from set_root).  Returns 1 on success, 0 when
 * the frame carries a control code instead of a state.
 */
int
unpack_state(SM_frame *f, int from_q)
{	int i, j;
	static struct H_el D_State;

	if (f->m_vsize > 0)
	{	boq = f->m_boq;
		if (boq > 256)	/* state frames carry 0..255 (or -1) here; larger values are control codes */
		{	cpu_printf("saw control %d, expected state\n", boq);
			return 0;
		}
		vsize = f->m_vsize;
correct:
		memcpy((uchar *) &now, (uchar *) f->m_now, vsize);
		for (i = j = 0; i < VMAX; i++, j = (j+1)%8)	/* unpack bit-packed Mask */
		{	Mask[i] = (f->m_Mask[i/8] & (1<<j)) ? 1 : 0;
		}
		if (now._nr_pr > 0)
		{	memcpy((uchar *) proc_offset, (uchar *) f->m_p_offset, now._nr_pr * sizeof(OFFT));
			memcpy((uchar *) proc_skip, (uchar *) f->m_p_skip, now._nr_pr * sizeof(uchar));
		}
		if (now._nr_qs > 0)
		{	memcpy((uchar *) q_offset, (uchar *) f->m_q_offset, now._nr_qs * sizeof(OFFT));
			memcpy((uchar *) q_skip, (uchar *) f->m_q_skip, now._nr_qs * sizeof(uchar));
		}
#ifndef NOVSZ
		if (vsize != now._vsz)	/* sender raced with us: re-copy with the embedded size */
		{	cpu_printf("vsize %d != now._vsz %d (type %d) %d\n",
				vsize, now._vsz, f->m_boq, f->m_vsize);
			vsize = now._vsz;
			goto correct;	/* rare event: a race */
		}
#endif
		hmax = max(hmax, vsize);

		if (f != &cur_Root)	/* remember this frame as the new trail root */
		{	memcpy((uchar *) &cur_Root, (uchar *) f, sizeof(SM_frame));
		}

		if (((now._a_t) & 1) == 1)	/* i.e., when starting nested DFS */
		{	A_depth = depthfound = 0;
			memcpy((uchar *)&A_Root, (uchar *)&now, vsize);
		}
		nr_handoffs = f->nr_handoffs;
	} else
	{	cpu_printf("pan: state empty\n");
	}

	/* rebuild a minimal trail: depth 0 with stub ostate entries */
	depth = 0;
	trpt = &trail[1];
	trpt->tau = f->m_tau;
	trpt->o_pm = f->m_o_pm;

	(trpt-1)->ostate = &D_State;	/* stub */
	trpt->ostate = &D_State;

#ifdef FULL_TRAIL
	if (upto > 0)
	{	stack_last[core_id] = (Stack_Tree *) f->m_stack;
	}
#if defined(VERBOSE)
	if (stack_last[core_id])
	{	/* NOTE(review): pointer printed with %u -- works only where
		 * pointers fit an unsigned int; %p would be portable */
		cpu_printf("%d: UNPACK -- SET m_stack %u (%d,%d)\n",
			depth, stack_last[core_id], stack_last[core_id]->pr,
			stack_last[core_id]->t_id);
	}
#endif
#endif

	if (!trpt->o_t)	/* give the stub a valid transition pointer */
	{	static Trans D_Trans;
		trpt->o_t = &D_Trans;
	}

#ifdef VERI
	if ((trpt->tau & 4) != 4)
	{	trpt->tau |= 4;	/* the claim moves first */
		cpu_printf("warning: trpt was not up to date\n");
	}
#endif

	/* re-derive accept/progress bits for the restored process states */
	for (i = 0; i < (int) now._nr_pr; i++)
	{	P0 *ptr = (P0 *) pptr(i);
#ifndef NP
		if (accpstate[ptr->_t][ptr->_p])
		{	trpt->o_pm |= 2;
		}
#else
		if (progstate[ptr->_t][ptr->_p])
		{	trpt->o_pm |= 4;
		}
#endif
	}

#ifdef EVENT_TRACE
#ifndef NP
	if (accpstate[EVENT_TRACE][now._event])
	{	trpt->o_pm |= 2;
	}
#else
	if (progstate[EVENT_TRACE][now._event])
	{	trpt->o_pm |= 4;
	}
#endif
#endif

#if defined(C_States) && (HAS_TRACK==1)
	/* restore state of tracked C objects */
	c_revert((uchar *) &(now.c_state[0]));
#if (HAS_STACK==1)
	c_unstack((uchar *) f->m_c_stack);	/* unmatched tracked data */
#endif
#endif
	return 1;
}
3938
/*
 * write_root: save cur_Root (the root frame behind the most recent
 * error trail) to a "<trailfile>.rst" file so a partial trail can be
 * replayed from this root later by set_root().  A zero-sized root
 * means the default initial state: then any stale file is removed.
 * If creating the file fails, retry once with the ".pml" suffix
 * stripped from the trail-file name.
 */
void
write_root(void)	/* for trail file */
{	int fd;

	if (iterative == 0 && Nr_Trails > 1)
	sprintf(fnm, "%s%d.%s", TrailFile, Nr_Trails-1, sprefix);
	else
	sprintf(fnm, "%s.%s", TrailFile, sprefix);

	if (cur_Root.m_vsize == 0)
	{	(void) unlink(fnm);	/* remove possible old copy */
		return;	/* its the default initial state */
	}

	if ((fd = creat(fnm, TMODE)) < 0)
	{	char *q;
		if ((q = strchr(TrailFile, '.')))
		{	*q = '\0';	/* strip .pml */
			if (iterative == 0 && Nr_Trails-1 > 0)
			sprintf(fnm, "%s%d.%s", TrailFile, Nr_Trails-1, sprefix);
			else
			sprintf(fnm, "%s.%s", TrailFile, sprefix);
			*q = '.';	/* restore the suffix in TrailFile */
			fd = creat(fnm, TMODE);
		}
		if (fd < 0)
		{	cpu_printf("pan: cannot create %s\n", fnm);
			perror("cause");
			return;
	}	}

	if (write(fd, &cur_Root, sizeof(SM_frame)) != sizeof(SM_frame))
	{	cpu_printf("pan: error writing %s\n", fnm);
	} else
	{	cpu_printf("pan: wrote %s\n", fnm);
	}
	close(fd);
}
3977
/*
 * set_root: restore a saved root frame written by write_root().  Looks
 * for "<file>N.rst" / "<file>.rst" (with and without the ".pml"
 * suffix), then falls back to the per-core "cpuN_rst" variants written
 * by worker processes.  On success the frame is unpacked into the
 * global state and a human-readable summary of the restored state is
 * printed; a missing file is not an error.
 */
void
set_root(void)
{	int fd;
	char *q;
	char MyFile[512];
	char MySuffix[16];
	char *ssuffix = "rst";
	int try_core = 1;

	strcpy(MyFile, TrailFile);
try_again:
	if (whichtrail > 0)	/* numbered trail requested */
	{	sprintf(fnm, "%s%d.%s", MyFile, whichtrail, ssuffix);
		fd = open(fnm, O_RDONLY, 0);
		if (fd < 0 && (q = strchr(MyFile, '.')))
		{	*q = '\0';	/* strip .pml */
			sprintf(fnm, "%s%d.%s", MyFile, whichtrail, ssuffix);
			*q = '.';
			fd = open(fnm, O_RDONLY, 0);
		}
	} else
	{	sprintf(fnm, "%s.%s", MyFile, ssuffix);
		fd = open(fnm, O_RDONLY, 0);
		if (fd < 0 && (q = strchr(MyFile, '.')))
		{	*q = '\0';	/* strip .pml */
			sprintf(fnm, "%s.%s", MyFile, ssuffix);
			*q = '.';
			fd = open(fnm, O_RDONLY, 0);
	}	}

	if (fd < 0)
	{	if (try_core < NCORE)	/* next: the files written by worker cores */
		{	ssuffix = MySuffix;
			sprintf(ssuffix, "cpu%d_rst", try_core++);
			goto try_again;
		}
		cpu_printf("no file '%s.rst' or '%s' (not an error)\n", MyFile, fnm);
	} else
	{	if (read(fd, &cur_Root, sizeof(SM_frame)) != sizeof(SM_frame))
		{	cpu_printf("read error %s\n", fnm);
			close(fd);
			pan_exit(1);
		}
		close(fd);
		(void) unpack_state(&cur_Root, -2);
#ifdef SEP_STATE
		cpu_printf("partial trail -- last few steps only\n");
#endif
		cpu_printf("restored root from '%s'\n", fnm);
		printf("=====State:=====\n");
		{	int i, j; P0 *z;
			/* print each process with its source line and local state */
			for (i = 0; i < now._nr_pr; i++)
			{	z = (P0 *)pptr(i);
				printf("proc %2d (%s) ", i, procname[z->_t]);
				for (j = 0; src_all[j].src; j++)
				if (src_all[j].tp == (int) z->_t)
				{	printf(" line %3d \"%s\" ",
						src_all[j].src[z->_p], PanSource);
					break;
				}
				printf("(state %d)\n", z->_p);
				c_locals(i, z->_t);
			}
			c_globals();
		}
		printf("================\n");
	}
}
4046
4047 #ifdef USE_DISK
4048 unsigned long dsk_written, dsk_drained;
4049 void mem_drain(void);
4050 #endif
4051
4052 void
4053 m_clear_frame(SM_frame *f)
4054 { int i, clr_sz = sizeof(SM_results);
4055
4056 for (i = 0; i <= _NP_; i++) /* all proctypes */
4057 { clr_sz += NrStates[i]*sizeof(uchar);
4058 }
4059 memset(f, 0, clr_sz);
4060 /* caution if sizeof(SM_results) > sizeof(SM_frame) */
4061 }
4062
/* a queue slot is free exactly when its m_vsize field is zero,
 * so these test the next write position of work queue n */
#define TargetQ_Full(n)	(m_workq[n][prfree[n]].m_vsize != 0)
#define TargetQ_NotFull(n)	(m_workq[n][prfree[n]].m_vsize == 0)
4065
4066 int
4067 AllQueuesEmpty(void)
4068 { int q;
4069 #ifndef NGQ
4070 if (*grcnt != 0)
4071 { return 0;
4072 }
4073 #endif
4074 for (q = 0; q < NCORE; q++)
4075 { if (prcnt[q] != 0)
4076 { return 0;
4077 } }
4078 return 1;
4079 }
4080
4081 void
4082 Read_Queue(int q)
4083 { SM_frame *f, *of;
4084 int remember, target_q;
4085 SM_results *r;
4086 double patience = 0.0;
4087
4088 target_q = (q + 1) % NCORE;
4089
4090 for (;;)
4091 { f = Get_Full_Frame(q);
4092 if (!f) /* 1 second timeout -- and trigger for Query */
4093 { if (someone_crashed(2))
4094 { printf("cpu%d: search terminated [code %d]\n",
4095 core_id, search_terminated?*search_terminated:-1);
4096 sudden_stop("");
4097 pan_exit(1);
4098 }
4099 #ifdef TESTING
4100 /* to profile with cc -pg and gprof pan.exe -- set handoff depth beyond maxdepth */
4101 exit(0);
4102 #endif
4103 remember = *grfree;
4104 if (core_id == 0 /* root can initiate termination */
4105 && remote_party == 0 /* and only the original root */
4106 && query_in_progress == 0 /* unless its already in progress */
4107 && AllQueuesEmpty())
4108 { f = Get_Free_Frame(target_q);
4109 query_in_progress = 1; /* only root process can do this */
4110 if (!f) { Uerror("Fatal1: no free slot"); }
4111 f->m_boq = QUERY; /* initiate Query */
4112 if (verbose)
4113 { cpu_printf("snd QUERY to q%d (%d) into slot %d\n",
4114 target_q, nstates_get + 1, prfree[target_q]-1);
4115 }
4116 f->m_vsize = remember + 1;
4117 /* number will not change unless we receive more states */
4118 } else if (patience++ > OneHour) /* one hour watchdog timer */
4119 { cpu_printf("timeout -- giving up\n");
4120 sudden_stop("queue timeout");
4121 pan_exit(1);
4122 }
4123 if (0) cpu_printf("timed out -- try again\n");
4124 continue;
4125 }
4126 patience = 0.0; /* reset watchdog */
4127
4128 if (f->m_boq == QUERY)
4129 { if (verbose)
4130 { cpu_printf("got QUERY on q%d (%d <> %d) from slot %d\n",
4131 q, f->m_vsize, nstates_put + 1, prfull[q]-1);
4132 snapshot();
4133 }
4134 remember = f->m_vsize;
4135 f->m_vsize = 0; /* release slot */
4136
4137 if (core_id == 0 && remote_party == 0) /* original root cpu0 */
4138 { if (query_in_progress == 1 /* didn't send more states in the interim */
4139 && *grfree + 1 == remember) /* no action on global queue meanwhile */
4140 { if (verbose) cpu_printf("Termination detected\n");
4141 if (TargetQ_Full(target_q))
4142 { if (verbose)
4143 cpu_printf("warning: target q is full\n");
4144 }
4145 f = Get_Free_Frame(target_q);
4146 if (!f) { Uerror("Fatal2: no free slot"); }
4147 m_clear_frame(f);
4148 f->m_boq = QUIT; /* send final Quit, collect stats */
4149 f->m_vsize = 111; /* anything non-zero will do */
4150 if (verbose)
4151 cpu_printf("put QUIT on q%d\n", target_q);
4152 } else
4153 { if (verbose) cpu_printf("Stale Query\n");
4154 #ifdef USE_DISK
4155 mem_drain();
4156 #endif
4157 }
4158 query_in_progress = 0;
4159 } else
4160 { if (TargetQ_Full(target_q))
4161 { if (verbose)
4162 cpu_printf("warning: forward query - target q full\n");
4163 }
4164 f = Get_Free_Frame(target_q);
4165 if (verbose)
4166 cpu_printf("snd QUERY response to q%d (%d <> %d) in slot %d\n",
4167 target_q, remember, *grfree + 1, prfree[target_q]-1);
4168 if (!f) { Uerror("Fatal4: no free slot"); }
4169
4170 if (*grfree + 1 == remember) /* no action on global queue */
4171 { f->m_boq = QUERY; /* forward query, to root */
4172 f->m_vsize = remember;
4173 } else
4174 { f->m_boq = QUERY_F; /* no match -- busy */
4175 f->m_vsize = 112; /* anything non-zero */
4176 #ifdef USE_DISK
4177 if (dsk_written != dsk_drained)
4178 { mem_drain();
4179 }
4180 #endif
4181 } }
4182 continue;
4183 }
4184
4185 if (f->m_boq == QUERY_F)
4186 { if (verbose)
4187 { cpu_printf("got QUERY_F on q%d from slot %d\n", q, prfull[q]-1);
4188 }
4189 f->m_vsize = 0; /* release slot */
4190
4191 if (core_id == 0 && remote_party == 0) /* original root cpu0 */
4192 { if (verbose) cpu_printf("No Match on Query\n");
4193 query_in_progress = 0;
4194 } else
4195 { if (TargetQ_Full(target_q))
4196 { if (verbose) cpu_printf("warning: forwarding query_f, target queue full\n");
4197 }
4198 f = Get_Free_Frame(target_q);
4199 if (verbose) cpu_printf("forward QUERY_F to q%d into slot %d\n",
4200 target_q, prfree[target_q]-1);
4201 if (!f) { Uerror("Fatal5: no free slot"); }
4202 f->m_boq = QUERY_F; /* cannot terminate yet */
4203 f->m_vsize = 113; /* anything non-zero */
4204 }
4205 #ifdef USE_DISK
4206 if (dsk_written != dsk_drained)
4207 { mem_drain();
4208 }
4209 #endif
4210 continue;
4211 }
4212
4213 if (f->m_boq == QUIT)
4214 { if (0) cpu_printf("done -- local memcnt %g Mb\n", memcnt/(1048576.));
4215 retrieve_info((SM_results *) f); /* collect and combine stats */
4216 if (verbose)
4217 { cpu_printf("received Quit\n");
4218 snapshot();
4219 }
4220 f->m_vsize = 0; /* release incoming slot */
4221 if (core_id != 0)
4222 { f = Get_Free_Frame(target_q); /* new outgoing slot */
4223 if (!f) { Uerror("Fatal6: no free slot"); }
4224 m_clear_frame(f); /* start with zeroed stats */
4225 record_info((SM_results *) f);
4226 f->m_boq = QUIT; /* forward combined results */
4227 f->m_vsize = 114; /* anything non-zero */
4228 if (verbose>1)
4229 cpu_printf("fwd Results to q%d\n", target_q);
4230 }
4231 break; /* successful termination */
4232 }
4233
4234 /* else: 0<= boq <= 255, means STATE transfer */
4235 if (unpack_state(f, q) != 0)
4236 { nstates_get++;
4237 f->m_vsize = 0; /* release slot */
4238 if (VVERBOSE) cpu_printf("Got state\n");
4239
4240 if (search_terminated != NULL
4241 && *search_terminated == 0)
4242 { new_state(); /* explore successors */
4243 memset((uchar *) &cur_Root, 0, sizeof(SM_frame)); /* avoid confusion */
4244 } else
4245 { pan_exit(0);
4246 }
4247 } else
4248 { pan_exit(0);
4249 } }
4250 if (verbose) cpu_printf("done got %d put %d\n", nstates_get, nstates_put);
4251 sleep_report();
4252 }
4253
4254 void
4255 give_up(int unused_x)
4256 {
4257 if (search_terminated != NULL)
4258 { *search_terminated |= 32; /* give_up */
4259 }
4260 if (!writing_trail)
4261 { was_interrupted = 1;
4262 snapshot();
4263 cpu_printf("Give Up\n");
4264 sleep_report();
4265 pan_exit(1);
4266 } else /* we are already terminating */
4267 { cpu_printf("SIGINT\n");
4268 }
4269 }
4270
4271 void
4272 check_overkill(void)
4273 {
4274 vmax_seen = (vmax_seen + 7)/ 8;
4275 vmax_seen *= 8; /* round up to a multiple of 8 */
4276
4277 if (core_id == 0
4278 && !remote_party
4279 && nstates_put > 0
4280 && VMAX - vmax_seen > 8)
4281 {
4282 #ifdef BITSTATE
4283 printf("cpu0: max VMAX value seen in this run: ");
4284 #else
4285 printf("cpu0: recommend recompiling with ");
4286 #endif
4287 printf("-DVMAX=%d\n", vmax_seen);
4288 }
4289 }
4290
/*
 * mem_put: hand the current state off to another cpu through work
 * queue q (q == NCORE targets the global-queue slot reserved by
 * GlobalQ_HasRoom).  Copies the state vector, bit-packed Mask, the
 * process/queue offset tables and minimal trail context into a free
 * frame.  m_vsize is written last: a non-zero value is what tells the
 * receiving cpu the slot is complete.  Aborts with a recompile hint
 * when VMAX/PMAX/QMAX are too small.
 */
void
mem_put(int q)	/* handoff state to other cpu, workq q */
{	SM_frame *f;
	int i, j;

	if (vsize > VMAX)
	{	vsize = (vsize + 7)/8; vsize *= 8;	/* round up */
		printf("pan: recompile with -DVMAX=N with N >= %d\n", vsize);
		Uerror("aborting");
	}
	if (now._nr_pr > PMAX)
	{	printf("pan: recompile with -DPMAX=N with N >= %d\n", now._nr_pr);
		Uerror("aborting");
	}
	if (now._nr_qs > QMAX)
	{	printf("pan: recompile with -DQMAX=N with N >= %d\n", now._nr_qs);
		Uerror("aborting");
	}
	/* track high-water marks for check_overkill() */
	if (vsize > vmax_seen) vmax_seen = vsize;
	if (now._nr_pr > pmax_seen) pmax_seen = now._nr_pr;
	if (now._nr_qs > qmax_seen) qmax_seen = now._nr_qs;

	f = Get_Free_Frame(q);	/* not called in likely deadlock states */
	if (!f) { Uerror("Fatal3: no free slot"); }

	if (VVERBOSE) cpu_printf("putting state into q%d\n", q);

	memcpy((uchar *) f->m_now, (uchar *) &now, vsize);
	memset((uchar *) f->m_Mask, 0, (VMAX+7)/8 * sizeof(char));
	for (i = j = 0; i < VMAX; i++, j = (j+1)%8)	/* bit-pack the Mask array */
	{	if (Mask[i])
		{	f->m_Mask[i/8] |= (1<<j);
	}	}

	if (now._nr_pr > 0)
	{	memcpy((uchar *) f->m_p_offset, (uchar *) proc_offset, now._nr_pr * sizeof(OFFT));
		memcpy((uchar *) f->m_p_skip, (uchar *) proc_skip, now._nr_pr * sizeof(uchar));
	}
	if (now._nr_qs > 0)
	{	memcpy((uchar *) f->m_q_offset, (uchar *) q_offset, now._nr_qs * sizeof(OFFT));
		memcpy((uchar *) f->m_q_skip, (uchar *) q_skip, now._nr_qs * sizeof(uchar));
	}
#if defined(C_States) && (HAS_TRACK==1) && (HAS_STACK==1)
	c_stack((uchar *) f->m_c_stack);	/* save unmatched tracked data */
#endif
#ifdef FULL_TRAIL
	f->m_stack = stack_last[core_id];
#endif
	f->nr_handoffs = nr_handoffs+1;
	f->m_tau = trpt->tau;
	f->m_o_pm = trpt->o_pm;
	f->m_boq = boq;
	f->m_vsize = vsize;	/* must come last - now the other cpu can see it */

	if (query_in_progress == 1)
	query_in_progress = 2;	/* make sure we know, if a query makes the rounds */
	nstates_put++;
}
4349
4350 #ifdef USE_DISK
4351 int Dsk_W_Nr, Dsk_R_Nr;
4352 int dsk_file = -1, dsk_read = -1;
4353 unsigned long dsk_written, dsk_drained;
4354 char dsk_name[512];
4355
4356 #ifndef BFS_DISK
4357 #if defined(WIN32) || defined(WIN64)
4358 #define RFLAGS (O_RDONLY|O_BINARY)
4359 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)
4360 #else
4361 #define RFLAGS (O_RDONLY)
4362 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC)
4363 #endif
4364 #endif
4365
4366 void
4367 dsk_stats(void)
4368 { int i;
4369
4370 if (dsk_written > 0)
4371 { cpu_printf("dsk_written %d states in %d files\ncpu%d: dsk_drained %6d states\n",
4372 dsk_written, Dsk_W_Nr, core_id, dsk_drained);
4373 close(dsk_read);
4374 close(dsk_file);
4375 for (i = 0; i < Dsk_W_Nr; i++)
4376 { sprintf(dsk_name, "Q%.3d_%.3d.tmp", i, core_id);
4377 unlink(dsk_name);
4378 } }
4379 }
4380
/*
 * mem_drain: feed states that were spilled to disk by mem_file() back
 * into the next cpu's work queue, for as long as that queue has free
 * slots.  Rotates to the next temp file after every MAX_DSK_FILE
 * states and deletes the one just finished.  No-op until something
 * has been written to disk.
 */
void
mem_drain(void)
{	SM_frame *f, g;
	int q = (core_id + 1) % NCORE;	/* target q */
	int sz;

	if (dsk_read < 0	/* no disk file was ever opened */
	||  dsk_written <= dsk_drained)
	{	return;
	}

	while (dsk_written > dsk_drained
	&& TargetQ_NotFull(q))
	{	f = Get_Free_Frame(q);
		if (!f) { Uerror("Fatal: unhandled condition"); }

		if ((dsk_drained+1)%MAX_DSK_FILE == 0) /* 100K states max per file */
		{	(void) close(dsk_read);		/* close current read handle */
			sprintf(dsk_name, "Q%.3d_%.3d.tmp", Dsk_R_Nr++, core_id);
			(void) unlink(dsk_name);	/* remove current file */
			sprintf(dsk_name, "Q%.3d_%.3d.tmp", Dsk_R_Nr, core_id);
			cpu_printf("reading %s\n", dsk_name);
			dsk_read = open(dsk_name, RFLAGS);	/* open next file */
			if (dsk_read < 0)
			{	Uerror("could not open dsk file");
		}	}
		if (read(dsk_read, &g, sizeof(SM_frame)) != sizeof(SM_frame))
		{	Uerror("bad dsk file read");
		}
		sz = g.m_vsize;
		g.m_vsize = 0;	/* copy with the slot still marked empty ... */
		memcpy(f, &g, sizeof(SM_frame));
		f->m_vsize = sz;	/* ... and publish the size last */

		dsk_drained++;
	}
}
4418
/*
 * mem_file: spill the current state to this core's temporary disk file
 * (Q<nnn>_<core>.tmp) when both the local and global queues are full;
 * mem_drain() later moves these states back into the queues.  Uses the
 * same frame layout as mem_put().  A new file is started every
 * MAX_DSK_FILE states.
 */
void
mem_file(void)
{	SM_frame f;
	int i, j, q = (core_id + 1) % NCORE;	/* target q */

	if (vsize > VMAX)
	{	printf("pan: recompile with -DVMAX=N with N >= %d\n", vsize);
		Uerror("aborting");
	}
	if (now._nr_pr > PMAX)
	{	printf("pan: recompile with -DPMAX=N with N >= %d\n", now._nr_pr);
		Uerror("aborting");
	}
	if (now._nr_qs > QMAX)
	{	printf("pan: recompile with -DQMAX=N with N >= %d\n", now._nr_qs);
		Uerror("aborting");
	}

	if (VVERBOSE) cpu_printf("filing state for q%d\n", q);

	memcpy((uchar *) f.m_now, (uchar *) &now, vsize);
	memset((uchar *) f.m_Mask, 0, (VMAX+7)/8 * sizeof(char));
	for (i = j = 0; i < VMAX; i++, j = (j+1)%8)	/* bit-pack the Mask array */
	{	if (Mask[i])
		{	f.m_Mask[i/8] |= (1<<j);
	}	}

	if (now._nr_pr > 0)
	{	memcpy((uchar *)f.m_p_offset, (uchar *)proc_offset, now._nr_pr*sizeof(OFFT));
		memcpy((uchar *)f.m_p_skip, (uchar *)proc_skip, now._nr_pr*sizeof(uchar));
	}
	if (now._nr_qs > 0)
	{	memcpy((uchar *) f.m_q_offset, (uchar *) q_offset, now._nr_qs*sizeof(OFFT));
		memcpy((uchar *) f.m_q_skip, (uchar *) q_skip, now._nr_qs*sizeof(uchar));
	}
#if defined(C_States) && (HAS_TRACK==1) && (HAS_STACK==1)
	c_stack((uchar *) f.m_c_stack);	/* save unmatched tracked data */
#endif
#ifdef FULL_TRAIL
	f.m_stack = stack_last[core_id];
#endif
	f.nr_handoffs = nr_handoffs+1;
	f.m_tau = trpt->tau;
	f.m_o_pm = trpt->o_pm;
	f.m_boq = boq;
	f.m_vsize = vsize;

	if (query_in_progress == 1)
	{	query_in_progress = 2;	/* a query circulated while we spilled */
	}
	if (dsk_file < 0)	/* first spill: create the initial temp file */
	{	sprintf(dsk_name, "Q%.3d_%.3d.tmp", Dsk_W_Nr, core_id);
		dsk_file = open(dsk_name, WFLAGS, 0644);
		dsk_read = open(dsk_name, RFLAGS);
		if (dsk_file < 0 || dsk_read < 0)
		{	cpu_printf("File: <%s>\n", dsk_name);
			Uerror("cannot open diskfile");
		}
		Dsk_W_Nr++;	/* nr of next file to open */
		cpu_printf("created temporary diskfile %s\n", dsk_name);
	} else if ((dsk_written+1)%MAX_DSK_FILE == 0)	/* rotate to a new file */
	{	close(dsk_file);	/* close write handle */
		sprintf(dsk_name, "Q%.3d_%.3d.tmp", Dsk_W_Nr++, core_id);
		dsk_file = open(dsk_name, WFLAGS, 0644);
		if (dsk_file < 0)
		{	cpu_printf("File: <%s>\n", dsk_name);
			Uerror("aborting: cannot open new diskfile");
		}
		cpu_printf("created temporary diskfile %s\n", dsk_name);
	}
	if (write(dsk_file, &f, sizeof(SM_frame)) != sizeof(SM_frame))
	{	Uerror("aborting -- disk write failed (disk full?)");
	}
	nstates_put++;
	dsk_written++;
}
4495 #endif
4496
/*
 * mem_hand_off: called on every downward DFS step.  Decides whether
 * the current state should be handed to the next cpu instead of being
 * explored locally.  A handoff is attempted only above the z_handoff
 * depth and never in liveness mode, mid-rendezvous, mid-atomic, or
 * (with VERI) outside claim moves / on stutter moves.  Destinations in
 * order: neighbour queue, global queue, disk (USE_DISK).  Returns 1 on
 * handoff, 0 when the caller should explore the state itself.
 */
int
mem_hand_off(void)
{
	if (search_terminated == NULL
	|| *search_terminated != 0)	/* not a full crash check */
	{	pan_exit(0);
	}
	iam_alive();	/* on every transition of Down */
#ifdef USE_DISK
	mem_drain();	/* maybe call this also on every Up */
#endif
	if (depth > z_handoff	/* above handoff limit */
#ifndef SAFETY
	&& !a_cycles		/* not in liveness mode */
#endif
#if SYNC
	&& boq == -1		/* not mid-rv */
#endif
#ifdef VERI
	&& (trpt->tau&4)	/* claim moves first */
	&& !((trpt-1)->tau&128)	/* not a stutter move */
#endif
	&& !(trpt->tau&8))	/* not an atomic move */
	{	int q = (core_id + 1) % NCORE;	/* circular handoff */
#ifdef GENEROUS
		if (prcnt[q] < LN_FRAMES)
#else
		if (TargetQ_NotFull(q)
		&& (dfs_phase2 == 0 || prcnt[core_id] > 0))
#endif
		{	mem_put(q);
			return 1;
		}
		/* neighbour queue full: fall back to global queue, then disk */
		{	int rval;
#ifndef NGQ
			rval = GlobalQ_HasRoom();
#else
			rval = 0;
#endif
#ifdef USE_DISK
			if (rval == 0)
			{	void mem_file(void);
				mem_file();
				rval = 1;
			}
#endif
			return rval;
		}
	}
	return 0;	/* i.e., no handoff */
}
4548
/*
 * mem_put_acc: hand off the current state in liveness (acceptance-cycle)
 * mode.  Preferred target is the next core's queue; when it is full the
 * state spills to the global queue and, with USE_DISK, to a temp disk file.
 * NOTE: the braces below are opened/closed under different #if arms on
 * purpose -- do not "fix" the apparent imbalance.
 */
void
mem_put_acc(void)	/* liveness mode */
{	int q = (core_id + 1) % NCORE;

	if (search_terminated == NULL
	 || *search_terminated != 0)
	{	pan_exit(0);
	}
#ifdef USE_DISK
	mem_drain();
#endif
	/* some tortured use of preprocessing: */
#if !defined(NGQ) || defined(USE_DISK)
	if (TargetQ_Full(q))
	{
#endif
#ifndef NGQ
	if (GlobalQ_HasRoom())
	{	return;
	}
#endif
#ifdef USE_DISK
	mem_file();
	} else
#else
#if !defined(NGQ) || defined(USE_DISK)
	}
#endif
#endif
	{	mem_put(q);
	}
}
4581
4582 #if defined(WIN32) || defined(WIN64)
/*
 * init_shm (WIN32/WIN64): create (root) or open (workers) the NR_QS shared
 * work-queues as named file mappings backed by the paging file, map them
 * into this process, and -- on the root only -- zero every frame so that
 * m_vsize == 0 marks "slot free".  Aborts via pan_exit on any failure.
 */
void
init_shm(void)		/* initialize shared work-queues */
{	char	key[512];
	int	n, m;
	int	must_exit = 0;

	if (core_id == 0 && verbose)
	{	printf("cpu0: step 3: allocate shared work-queues %g Mb\n",
			((double) NCORE * LWQ_SIZE + GWQ_SIZE) / (1048576.));
	}
	for (m = 0; m < NR_QS; m++)	/* last q is global 1 */
	{	double qsize = (m == NCORE) ? GWQ_SIZE : LWQ_SIZE;
		sprintf(key, "Global\\pan_%s_%.3d", PanSource, m);
		if (core_id == 0)
		{	shmid[m] = CreateFileMapping(
				INVALID_HANDLE_VALUE,	/* use paging file */
				NULL,			/* default security */
				PAGE_READWRITE,		/* access permissions */
				0,			/* high-order 4 bytes */
				qsize,			/* low-order bytes, size in bytes */
				key);			/* name */
		} else			/* worker nodes just open these segments */
		{	shmid[m] = OpenFileMapping(
				FILE_MAP_ALL_ACCESS,	/* read/write access */
				FALSE,			/* children do not inherit handle */
				key);
		}
		if (shmid[m] == NULL)
		{	fprintf(stderr, "cpu%d: could not create or open shared queues\n",
				core_id);
			must_exit = 1;
			break;
		}
		/* attach: */
		shared_mem[m] = (char *) MapViewOfFile(shmid[m], FILE_MAP_ALL_ACCESS, 0, 0, 0);
		if (shared_mem[m] == NULL)
		{	fprintf(stderr, "cpu%d: cannot attach shared q%d (%d Mb)\n",
				core_id, m+1, (int) (qsize/(1048576.)));
			must_exit = 1;
			break;
		}

		memcnt += qsize;

		m_workq[m] = (SM_frame *) shared_mem[m];
		if (core_id == 0)	/* root zeroes all frames: marks them free */
		{	int nframes = (m == NCORE) ? GN_FRAMES : LN_FRAMES;
			for (n = 0; n < nframes; n++)
			{	m_workq[m][n].m_vsize = 0;
				m_workq[m][n].m_boq = 0;
	}	}	}

	if (must_exit)
	{	fprintf(stderr, "pan: check './pan --' for usage details\n");
		pan_exit(1);	/* calls cleanup_shm */
	}
}
4640
4641 static uchar *
4642 prep_shmid_S(size_t n) /* either sets SS or H_tab, WIN32/WIN64 */
4643 { char *rval;
4644 #ifndef SEP_STATE
4645 char key[512];
4646
4647 if (verbose && core_id == 0)
4648 {
4649 #ifdef BITSTATE
4650 printf("cpu0: step 1: allocate shared bitstate %g Mb\n",
4651 (double) n / (1048576.));
4652 #else
4653 printf("cpu0: step 1: allocate shared hastable %g Mb\n",
4654 (double) n / (1048576.));
4655 #endif
4656 }
4657 #ifdef MEMLIM
4658 if (memcnt + (double) n > memlim)
4659 { printf("cpu%d: S %8g + %d Kb exceeds memory limit of %8g Mb\n",
4660 core_id, memcnt/1024., n/1024, memlim/(1048576.));
4661 printf("cpu%d: insufficient memory -- aborting\n", core_id);
4662 exit(1);
4663 }
4664 #endif
4665
4666 /* make key different from queues: */
4667 sprintf(key, "Global\\pan_%s_%.3d", PanSource, NCORE+2); /* different from qs */
4668
4669 if (core_id == 0) /* root */
4670 { shmid_S = CreateFileMapping(INVALID_HANDLE_VALUE, NULL,
4671 #ifdef WIN64
4672 PAGE_READWRITE, (n>>32), (n & 0xffffffff), key);
4673 #else
4674 PAGE_READWRITE, 0, n, key);
4675 #endif
4676 memcnt += (double) n;
4677 } else /* worker */
4678 { shmid_S = OpenFileMapping(FILE_MAP_ALL_ACCESS, FALSE, key);
4679 }
4680 if (shmid_S == NULL)
4681 {
4682 #ifdef BITSTATE
4683 fprintf(stderr, "cpu%d: cannot %s shared bitstate",
4684 core_id, core_id?"open":"create");
4685 #else
4686 fprintf(stderr, "cpu%d: cannot %s shared hashtable",
4687 core_id, core_id?"open":"create");
4688 #endif
4689 fprintf(stderr, "pan: check './pan --' for usage details\n");
4690 pan_exit(1);
4691 }
4692
4693 rval = (char *) MapViewOfFile(shmid_S, FILE_MAP_ALL_ACCESS, 0, 0, 0); /* attach */
4694 if ((char *) rval == NULL)
4695 { fprintf(stderr, "cpu%d: cannot attach shared bitstate or hashtable\n", core_id);
4696 fprintf(stderr, "pan: check './pan --' for usage details\n");
4697 pan_exit(1);
4698 }
4699 #else
4700 rval = (char *) emalloc(n);
4701 #endif
4702 return (uchar *) rval;
4703 }
4704
4705 static uchar *
4706 prep_state_mem(size_t n) /* WIN32/WIN64 sets memory arena for states */
4707 { char *rval;
4708 char key[512];
4709 static int cnt = 3; /* start larger than earlier ftok calls */
4710
4711 if (verbose && core_id == 0)
4712 { printf("cpu0: step 2+: pre-allocate memory arena %d of %g Mb\n",
4713 cnt-3, (double) n / (1048576.));
4714 }
4715 #ifdef MEMLIM
4716 if (memcnt + (double) n > memlim)
4717 { printf("cpu%d: error: M %.0f + %.0f exceeds memory limit of %.0f Kb\n",
4718 core_id, memcnt/1024.0, (double) n/1024.0, memlim/1024.0);
4719 return NULL;
4720 }
4721 #endif
4722
4723 sprintf(key, "Global\\pan_%s_%.3d", PanSource, NCORE+cnt); cnt++;
4724
4725 if (core_id == 0)
4726 { shmid_M = CreateFileMapping(INVALID_HANDLE_VALUE, NULL,
4727 #ifdef WIN64
4728 PAGE_READWRITE, (n>>32), (n & 0xffffffff), key);
4729 #else
4730 PAGE_READWRITE, 0, n, key);
4731 #endif
4732 } else
4733 { shmid_M = OpenFileMapping(FILE_MAP_ALL_ACCESS, FALSE, key);
4734 }
4735 if (shmid_M == NULL)
4736 { printf("cpu%d: failed to get pool of shared memory nr %d of size %d\n",
4737 core_id, cnt-3, n);
4738 printf("pan: check './pan --' for usage details\n");
4739 return NULL;
4740 }
4741 rval = (char *) MapViewOfFile(shmid_M, FILE_MAP_ALL_ACCESS, 0, 0, 0); /* attach */
4742
4743 if (rval == NULL)
4744 { printf("cpu%d: failed to attach pool of shared memory nr %d of size %d\n",
4745 core_id, cnt-3, n);
4746 return NULL;
4747 }
4748 return (uchar *) rval;
4749 }
4750
/*
 * init_HT (WIN32/WIN64): set up the hash table and the shared control
 * variables.  The shared arena is carved up by walking a byte pointer x;
 * the layout (search_terminated, is_alive[], sh_lock[], gr*/prfree-prmax,
 * gr_read/writemiss, stack_last[]) is fixed by this statement order, so the
 * order of the assignments below is part of the contract.
 * SEP_STATE: each core keeps its own hash table (emalloc) and only the
 * small control block is shared.  Otherwise the hash table, the control
 * block and the dc_shared allocator header all live in one shared region.
 */
void
init_HT(unsigned long n)		/* WIN32/WIN64 version */
{	volatile char	*x;
	double  get_mem;
#ifndef SEP_STATE
	char	*dc_mem_start;
#endif
	if (verbose) printf("cpu%d: initialization for Windows\n", core_id);

#ifdef SEP_STATE
#ifndef MEMLIM
	if (verbose)
	{	printf("cpu0: steps 0,1: no -DMEMLIM set\n");
	}
#else
	if (verbose)
	printf("cpu0: steps 0,1: -DMEMLIM=%d Mb - (hashtable %g Mb + workqueues %g Mb)\n",
		MEMLIM, ((double)n/(1048576.)), ((double) NCORE * LWQ_SIZE + GWQ_SIZE)/(1048576.));
#endif
	get_mem = NCORE * sizeof(double) + (1 + CS_NR) * sizeof(void *)+ 4*sizeof(void *) + 2*sizeof(double);
	/* NCORE * is_alive + search_terminated + CS_NR * sh_lock + 6 gr vars */
	get_mem += 4 * NCORE * sizeof(void *);
#ifdef FULL_TRAIL
	get_mem += (NCORE) * sizeof(Stack_Tree *);
	/* NCORE * stack_last */
#endif
	x = (volatile char *) prep_state_mem((size_t) get_mem);
	shmid_X = (void *) x;
	if (x == NULL)
	{	printf("cpu0: could not allocate shared memory, see ./pan --\n");
		exit(1);
	}
	search_terminated = (volatile unsigned int *) x; /* comes first */
	x += sizeof(void *); /* maintain alignment */

	is_alive = (volatile double *) x;
	x += NCORE * sizeof(double);

	sh_lock = (volatile int *) x;
	x += CS_NR * sizeof(void *); /* allow 1 word per entry */

	grfree = (volatile int *) x;
	x += sizeof(void *);
	grfull = (volatile int *) x;
	x += sizeof(void *);
	grcnt = (volatile int *) x;
	x += sizeof(void *);
	grmax = (volatile int *) x;
	x += sizeof(void *);
	prfree = (volatile int *) x;
	x += NCORE * sizeof(void *);
	prfull = (volatile int *) x;
	x += NCORE * sizeof(void *);
	prcnt = (volatile int *) x;
	x += NCORE * sizeof(void *);
	prmax = (volatile int *) x;
	x += NCORE * sizeof(void *);
	gr_readmiss = (volatile double *) x;
	x += sizeof(double);
	gr_writemiss = (volatile double *) x;
	x += sizeof(double);

#ifdef FULL_TRAIL
	stack_last = (volatile Stack_Tree **) x;
	x += NCORE * sizeof(Stack_Tree *);
#endif

#ifndef BITSTATE
	H_tab = (struct H_el **) emalloc(n);	/* private per-core table */
#endif
#else
#ifndef MEMLIM
#warning MEMLIM not set
#define MEMLIM	(2048)
#endif

	if (core_id == 0 && verbose)
	printf("cpu0: step 0: -DMEMLIM=%d Mb - (hashtable %g Mb + workqueues %g Mb) = %g Mb for state storage\n",
		MEMLIM, ((double)n/(1048576.)), ((double) NCORE * LWQ_SIZE + GWQ_SIZE)/(1048576.),
		(memlim - memcnt - (double) n - ((double) NCORE * LWQ_SIZE + GWQ_SIZE))/(1048576.));
#ifndef BITSTATE
	H_tab = (struct H_el **) prep_shmid_S((size_t) n);	/* hash_table */
#endif
	get_mem = memlim - memcnt - ((double) NCORE) * LWQ_SIZE - GWQ_SIZE;
	if (get_mem <= 0)
	{	Uerror("internal error -- shared state memory");
	}

	if (core_id == 0 && verbose)
	{	printf("cpu0: step 2: shared state memory %g Mb\n",
			get_mem/(1048576.));
	}
	x = dc_mem_start = (char *) prep_state_mem((size_t) get_mem);	/* for states */
	if (x == NULL)
	{	printf("cpu%d: insufficient memory -- aborting\n", core_id);
		exit(1);
	}

	search_terminated = (volatile unsigned int *) x; /* comes first */
	x += sizeof(void *); /* maintain alignment */

	is_alive = (volatile double *) x;
	x += NCORE * sizeof(double);

	sh_lock = (volatile int *) x;
	x += CS_NR * sizeof(int);

	grfree = (volatile int *) x;
	x += sizeof(void *);
	grfull = (volatile int *) x;
	x += sizeof(void *);
	grcnt = (volatile int *) x;
	x += sizeof(void *);
	grmax = (volatile int *) x;
	x += sizeof(void *);
	prfree = (volatile int *) x;
	x += NCORE * sizeof(void *);
	prfull = (volatile int *) x;
	x += NCORE * sizeof(void *);
	prcnt = (volatile int *) x;
	x += NCORE * sizeof(void *);
	prmax = (volatile int *) x;
	x += NCORE * sizeof(void *);
	gr_readmiss = (volatile double *) x;
	x += sizeof(double);
	gr_writemiss = (volatile double *) x;
	x += sizeof(double);

#ifdef FULL_TRAIL
	stack_last = (volatile Stack_Tree **) x;
	x += NCORE * sizeof(Stack_Tree *);
#endif
	if (((long)x)&(sizeof(void *)-1))	/* word alignment */
	{	x += sizeof(void *)-(((long)x)&(sizeof(void *)-1)); /* 64-bit align */
	}

#ifdef COLLAPSE
	ncomps = (unsigned long *) x;
	x += (256+2) * sizeof(unsigned long);
#endif

	dc_shared = (sh_Allocater *) x; /* in shared memory */
	x += sizeof(sh_Allocater);

	if (core_id == 0)	/* root only: publish the allocator header */
	{	dc_shared->dc_id = shmid_M;
		dc_shared->dc_start = (void *) dc_mem_start;
		dc_shared->dc_arena = x;
		dc_shared->pattern = 1234567;	/* sanity-check marker */
		dc_shared->dc_size = (long) get_mem - (long) (x - dc_mem_start);
		dc_shared->nxt = NULL;
	}
#endif
}
4905
4906 #if defined(WIN32) || defined(WIN64) || defined(__i386__) || defined(__x86_64__)
4907 extern BOOLEAN InterlockedBitTestAndSet(LONG volatile* Base, LONG Bit);
/* Test-and-set primitive used by the spinlocks in this build.
 * Returns the previous value of the tested bit (non-zero = already held).
 * NOTE(review): this sets bit index 1 of *s, not bit 0 -- the generated
 * lock code presumably tests the same bit; confirm against enter_critical().
 */
int
tas(volatile LONG *s)
{	return InterlockedBitTestAndSet(s, 1);
}
4912 #else
4913 #error missing definition of test and set operation for this platform
4914 #endif
4915
/*
 * cleanup_shm: detach and close all shared-memory mappings (work queues,
 * bitstate/hashtable, state arena).  Guarded by a static flag so a second
 * call is a no-op; sets bit 16 in *search_terminated to record the cleanup.
 * After this runs, cpu_printf must not be used (its buffer was shared).
 */
void
cleanup_shm(int val)
{	int m;
	static int nibis = 0;

	if (nibis != 0)	/* make repeated calls harmless */
	{	printf("cpu%d: Redundant call to cleanup_shm(%d)\n", core_id, val);
		return;
	} else
	{	nibis = 1;
	}
	if (search_terminated != NULL)
	{	*search_terminated |= 16; /* cleanup_shm */
	}

	for (m = 0; m < NR_QS; m++)
	{	if (shmid[m] != NULL)
		{	UnmapViewOfFile((char *) shared_mem[m]);
			CloseHandle(shmid[m]);
	}	}
#ifdef SEP_STATE
	UnmapViewOfFile((void *) shmid_X);
	CloseHandle((void *) shmid_M);
#else
#ifdef BITSTATE
	if (shmid_S != NULL)
	{	UnmapViewOfFile(SS);
		CloseHandle(shmid_S);
	}
#else
	if (core_id == 0 && verbose)
	{	printf("cpu0: done, %ld Mb of shared state memory left\n",
			dc_shared->dc_size / (long)(1048576));
	}
	if (shmid_S != NULL)
	{	UnmapViewOfFile(H_tab);
		CloseHandle(shmid_S);
	}
	shmid_M = (void *) (dc_shared->dc_id);
	UnmapViewOfFile((char *) dc_shared->dc_start);
	CloseHandle(shmid_M);
#endif
#endif
	/* detached from shared memory - so cannot use cpu_printf */
	if (verbose)
	{	printf("cpu%d: done -- got %d states from queue\n",
			core_id, nstates_get);
	}
}
4965
/*
 * mem_get (WIN32/WIN64): multi-core startup and main loop for one core.
 * The root (core_id 0) creates the shared queues, spawns NCORE-1 worker
 * processes (plus the proxy pair when distributed), runs the first DFS,
 * then joins the others in draining the work queue; workers go straight
 * to Read_Queue.  Does not return: ends with wrapup()/exit(0).
 */
void
mem_get(void)
{	SM_frame   *f;
	int is_parent;

#if defined(MA) && !defined(SEP_STATE)
	#error MA requires SEP_STATE in multi-core mode
#endif
#ifdef BFS
	#error BFS is not supported in multi-core mode
#endif
#ifdef SC
	#error SC is not supported in multi-core mode
#endif
	init_shm();	/* we are single threaded when this starts */
	signal(SIGINT, give_up);	/* windows control-c interrupt */

	if (core_id == 0 && verbose)
	{	printf("cpu0: step 4: creating additional workers (proxy %d)\n",
			proxy_pid);
	}
#if 0
	if NCORE > 1 the child or the parent should fork N-1 more times
	the parent is the only process with core_id == 0 and is_parent > 0
	the others (workers) have is_parent = 0 and core_id = 1..NCORE-1
#endif
	if (core_id == 0)	/* root starts up the workers */
	{	worker_pids[0] = (DWORD) getpid();	/* for completeness */
		while (++core_id < NCORE)	/* first worker sees core_id = 1 */
		{	char cmdline[64];
			STARTUPINFO si = { sizeof(si) };
			PROCESS_INFORMATION pi;

			if (proxy_pid == core_id)	/* always non-zero */
			{	sprintf(cmdline, "pan_proxy.exe -r %s-Q%d -Z%d",
					o_cmdline, getpid(), core_id);
			} else
			{	sprintf(cmdline, "pan.exe %s-Q%d -Z%d",
					o_cmdline, getpid(), core_id);
			}
			if (verbose) printf("cpu%d: spawn %s\n", core_id, cmdline);

			is_parent = CreateProcess(0, cmdline, 0, 0, FALSE, 0, 0, 0, &si, &pi);
			if (is_parent == 0)
			{	Uerror("fork failed");
			}
			worker_pids[core_id] = pi.dwProcessId;
			worker_handles[core_id] = pi.hProcess;
			if (verbose)
			{	cpu_printf("created core %d, pid %d\n",
					core_id, pi.dwProcessId);
			}
			if (proxy_pid == core_id)	/* we just created the receive half */
			{	/* add proxy send, store pid in proxy_pid_snd */
				sprintf(cmdline, "pan_proxy.exe -s %s-Q%d -Z%d -Y%d",
					o_cmdline, getpid(), core_id, worker_pids[proxy_pid]);
				if (verbose) printf("cpu%d: spawn %s\n", core_id, cmdline);
				is_parent = CreateProcess(0, cmdline, 0,0, FALSE, 0,0,0, &si, &pi);
				if (is_parent == 0)
				{	Uerror("fork failed");
				}
				proxy_pid_snd = pi.dwProcessId;
				proxy_handle_snd = pi.hProcess;
				if (verbose)
				{	cpu_printf("created core %d, pid %d (send proxy)\n",
						core_id, pi.dwProcessId);
		}	}	}
		core_id = 0;	/* reset core_id for root process */
	} else	/* worker */
	{	static char db0[16];	/* good for up to 10^6 cores */
		static char db1[16];
		tprefix = db0; sprefix = db1;
		sprintf(tprefix, "cpu%d_trail", core_id);	/* avoid conflicts on file access */
		sprintf(sprefix, "cpu%d_rst", core_id);
		memcnt = 0;	/* count only additionally allocated memory */
	}
	if (verbose)
	{	cpu_printf("starting core_id %d -- pid %d\n", core_id, getpid());
	}
	if (core_id == 0 && !remote_party)
	{	new_state();	/* root starts the search */
		if (verbose)
		cpu_printf("done with 1st dfs, nstates %g (put %d states), start reading q\n",
			nstates, nstates_put);
		dfs_phase2 = 1;
	}
	Read_Queue(core_id);	/* all cores */

	if (verbose)
	{	cpu_printf("put %6d states into queue -- got %6d\n",
			nstates_put, nstates_get);
	}
	done = 1;
	wrapup();
	exit(0);
}
5062 #endif
5063
5064 #ifdef BITSTATE
/* init_SS (BITSTATE build): attach the shared bitstate array SS of n bytes,
 * then run init_HT(0) for the shared control variables (no hash table).
 */
void
init_SS(unsigned long n)
{
	SS = (uchar *) prep_shmid_S((size_t) n);
	init_HT(0L);
}
5071 #endif
5072
5073 #endif
5074 clock_t start_time;
5075 #if NCORE>1
5076 clock_t crash_stamp;
5077 #endif
5078 #if !defined(WIN32) && !defined(WIN64)
5079 struct tms start_tm;
5080 #endif
5081
/* Record the wall-clock reference point for stop_timer/snap_time:
 * clock() on Windows, times() ticks elsewhere.
 */
void
start_timer(void)
{
#if defined(WIN32) || defined(WIN64)
	start_time = clock();
#else
	start_time = times(&start_tm);
#endif
}
5091
/*
 * stop_timer: print elapsed search time and state rate since start_timer.
 * Silent when replaying a trail or when the clock went backwards; in
 * multi-core mode only core 0 reports and also triggers check_overkill().
 */
void
stop_timer(void)
{	clock_t stop_time;
	double delta_time;
#if !defined(WIN32) && !defined(WIN64)
	struct tms stop_tm;
	stop_time = times(&stop_tm);
	delta_time = ((double) (stop_time - start_time)) / ((double) sysconf(_SC_CLK_TCK));
#else
	stop_time = clock();
	delta_time = ((double) (stop_time - start_time)) / ((double) CLOCKS_PER_SEC);
#endif
	if (readtrail || delta_time < 0.00) return;
#if NCORE>1
	if (core_id == 0 && nstates > (double) 0)
	{	printf("\ncpu%d: elapsed time %.3g seconds (%g states visited)\n", core_id, delta_time, nstates);
		if (delta_time > 0.01)	/* avoid division by a tiny interval */
		{	printf("cpu%d: rate %g states/second\n", core_id, nstates/delta_time);
		}
		{	void check_overkill(void);
			check_overkill();
	}	}
#else
	printf("\npan: elapsed time %.3g seconds\n", delta_time);
	if (delta_time > 0.01)
	{	printf("pan: rate %9.8g states/second\n", nstates/delta_time);
		if (verbose)
		{	printf("pan: avg transition delay %.5g usec\n",
				delta_time/(nstates+truncs));
	}	}
#endif
}
5124
5125 #if NCORE>1
5126 #ifdef T_ALERT
5127 double t_alerts[17];
5128
5129 void
5130 crash_report(void)
5131 { int i;
5132 printf("crash alert intervals:\n");
5133 for (i = 0; i < 17; i++)
5134 { printf("%d\t%g\n", i, t_alerts[i]);
5135 } }
5136 #endif
5137
/*
 * crash_reset: cancel a pending crash alarm (false alarm).  With T_ALERT
 * the elapsed alert time is binned into t_alerts[] (30-second buckets,
 * overflow in slot 16) for later crash_report().  Always clears the stamp.
 */
void
crash_reset(void)
{	/* false alarm */
	if (crash_stamp != (clock_t) 0)
	{
#ifdef T_ALERT
		double delta_time;
		int i;
#if defined(WIN32) || defined(WIN64)
		delta_time = ((double) (clock() - crash_stamp)) / ((double) CLOCKS_PER_SEC);
#else
		delta_time = ((double) (times(&start_tm) - crash_stamp)) / ((double) sysconf(_SC_CLK_TCK));
#endif
		for (i = 0; i < 16; i++)
		{	if (delta_time <= (i*30))	/* 30-second buckets */
			{	t_alerts[i] = delta_time;
				break;
		}	}
		if (i == 16) t_alerts[i] = delta_time;	/* overflow bucket */
#endif
		if (verbose)
		printf("cpu%d: crash alert off\n", core_id);
	}
	crash_stamp = (clock_t) 0;
}
5163
/*
 * crash_test: watchdog helper.  The first call after crash_reset starts
 * timing and returns 0; later calls return non-zero once maxtime seconds
 * have elapsed since the stamp was set (suspected peer crash).
 */
int
crash_test(double maxtime)
{	double delta_time;
	if (crash_stamp == (clock_t) 0)
	{	/* start timing */
#if defined(WIN32) || defined(WIN64)
		crash_stamp = clock();
#else
		crash_stamp = times(&start_tm);
#endif
		if (verbose)
		{	printf("cpu%d: crash detection\n", core_id);
		}
		return 0;
	}
#if defined(WIN32) || defined(WIN64)
	delta_time = ((double) (clock() - crash_stamp)) / ((double) CLOCKS_PER_SEC);
#else
	delta_time = ((double) (times(&start_tm) - crash_stamp)) / ((double) sysconf(_SC_CLK_TCK));
#endif
	return (delta_time >= maxtime);
}
5186 #endif
5187
/*
 * do_the_search: top-level entry of the verification run.  Initializes the
 * trail, seeds the acceptance/progress bits from the initial process
 * states, sets up the compression Mask[] and fairness counters, runs any
 * embedded-C initialization, then starts the timer and dispatches to
 * bfs(), mem_get() (multi-core) or new_state() (single-core DFS).
 */
void
do_the_search(void)
{	int i;
	depth = mreached = 0;
	trpt = &trail[0];
#ifdef VERI
	trpt->tau |= 4;	/* the claim moves first */
#endif
	/* mark whether the initial state is accepting/progress */
	for (i = 0; i < (int) now._nr_pr; i++)
	{	P0 *ptr = (P0 *) pptr(i);
#ifndef NP
		if (!(trpt->o_pm&2)
		&& accpstate[ptr->_t][ptr->_p])
		{	trpt->o_pm |= 2;
		}
#else
		if (!(trpt->o_pm&4)
		&& progstate[ptr->_t][ptr->_p])
		{	trpt->o_pm |= 4;
		}
#endif
	}
#ifdef EVENT_TRACE
#ifndef NP
	if (accpstate[EVENT_TRACE][now._event])
	{	trpt->o_pm |= 2;
	}
#else
	if (progstate[EVENT_TRACE][now._event])
	{	trpt->o_pm |= 4;
	}
#endif
#endif
#ifndef NOCOMP
	/* Mask[] flags state-vector bytes excluded from compression/compare */
	Mask[0] = Mask[1] = 1; /* _nr_pr, _nr_qs */
	if (!a_cycles)
	{	i = &(now._a_t) - (uchar *) &now;
		Mask[i] = 1; /* _a_t */
	}
#ifndef NOFAIR
	if (!fairness)
	{	int j = 0;
		i = &(now._cnt[0]) - (uchar *) &now;
		while (j++ < NFAIR)
			Mask[i++] = 1; /* _cnt[] */
	}
#endif
#endif
#ifndef NOFAIR
	if (fairness
	&& (a_cycles && (trpt->o_pm&2)))
	{	now._a_t = 2; /* set the A-bit */
		now._cnt[0] = now._nr_pr + 1;
#ifdef VERBOSE
		printf("%3d: fairness Rule 1, cnt=%d, _a_t=%d\n",
			depth, now._cnt[now._a_t&1], now._a_t);
#endif
	}
#endif
	c_stack_start = (char *) &i; /* meant to be read-only */
#if defined(HAS_CODE) && defined (C_INIT)
	C_INIT; /* initialization of data that must precede fork() */
	c_init_done++;
#endif
#if defined(C_States) && (HAS_TRACK==1)
	/* capture initial state of tracked C objects */
	c_update((uchar *) &(now.c_state[0]));
#endif
#ifdef HAS_CODE
	if (readtrail) getrail(); /* no return */
#endif
	start_timer();
#ifdef BFS
	bfs();
#else
#if defined(C_States) && defined(HAS_STACK) && (HAS_TRACK==1)
	/* initial state of tracked & unmatched objects */
	c_stack((uchar *) &(svtack->c_stack[0]));
#endif
#ifdef RANDOMIZE
#if RANDOMIZE>0
	srand(RANDOMIZE);
#else
	srand(123);
#endif
#endif
#if NCORE>1
	mem_get();
#else
	new_state();	/* start 1st DFS */
#endif
#endif
}
5281 #ifdef INLINE_REV
/* do_reverse: undo the effect of transition t for the process selected by
 * the `this` pointer; the generated per-transition code is textually
 * included from REVERSE_MOVES and jumps to R999 when done.
 */
uchar
do_reverse(Trans *t, short II, uchar M)
{	uchar _m = M;
	int tt = (int) ((P0 *)this)->_p;
#include REVERSE_MOVES
R999:	return _m;
}
5289 #endif
5290 #ifndef INLINE
5291 #ifdef EVENT_TRACE
5292 static char _tp = 'n'; static int _qid = 0;
5293 #endif
/*
 * do_transit: attempt to execute transition t for the process selected by
 * `this`; returns non-zero when the transition fired.  The generated move
 * code comes from FORWARD_MOVES and uses `continue` to reject a move, so
 * `continue` is deliberately redefined here (and restored via #undef) --
 * do not remove the macro trickery.
 */
uchar
do_transit(Trans *t, short II)
{	uchar _m = 0;
	int tt = (int) ((P0 *)this)->_p;
#ifdef M_LOSS
	uchar delta_m = 0;
#endif
#ifdef EVENT_TRACE
	short oboq = boq;
	uchar ot = (uchar) ((P0 *)this)->_t;
	if (ot == EVENT_TRACE) boq = -1;	/* event-trace moves ignore rv state */
#define continue	{ boq = oboq; return 0; }
#else
#define continue	return 0
#ifdef SEPARATE
	uchar ot = (uchar) ((P0 *)this)->_t;
#endif
#endif
#include FORWARD_MOVES
P999:
#ifdef EVENT_TRACE
	if (ot == EVENT_TRACE) boq = oboq;	/* restore rendezvous state */
#endif
	return _m;
#undef continue
}
5320 #ifdef EVENT_TRACE
/*
 * require: advance the event-trace automaton on event (tp, qid).  Tries
 * each outgoing transition of the current trace state; on a match it moves
 * the automaton, checks for non-determinism among the remaining
 * transitions, and (with NEGATED_TRACE) reports when all events matched.
 * If no transition matches, it is an event_trace error (unless negated).
 */
void
require(char tp, int qid)
{	Trans	*t;
	_tp = tp; _qid = qid;

	if (now._event != endevent)
	for (t = trans[EVENT_TRACE][now._event]; t; t = t->nxt)
	{	if (do_transit(t, EVENT_TRACE))
		{	now._event = t->st;
			reached[EVENT_TRACE][t->st] = 1;
#ifdef VERBOSE
			printf("	event_trace move to -> %d\n", t->st);
#endif
#ifndef BFS
#ifndef NP
			if (accpstate[EVENT_TRACE][now._event])
				(trpt+1)->o_pm |= 2;
#else
			if (progstate[EVENT_TRACE][now._event])
				(trpt+1)->o_pm |= 4;
#endif
#endif
#ifdef NEGATED_TRACE
			if (now._event == endevent)
			{
#ifndef BFS
				depth++; trpt++;	/* report at the right depth */
#endif
				uerror("event_trace error (all events matched)");
#ifndef BFS
				trpt--; depth--;
#endif
				break;
			}
#endif
			/* the trace automaton must be deterministic */
			for (t = t->nxt; t; t = t->nxt)
			{	if (do_transit(t, EVENT_TRACE))
				Uerror("non-determinism in event-trace");
			}
			return;
		}
#ifdef VERBOSE
		else
		printf("	event_trace miss '%c' -- %d, %d, %d\n",
			tp, qid, now._event, t->forw);
#endif
	}
#ifdef NEGATED_TRACE
	now._event = endevent;	/* only 1st try will count -- fixed 4.2.6 */
#else
#ifndef BFS
	depth++; trpt++;
#endif
	uerror("event_trace error (no matching event)");
#ifndef BFS
	trpt--; depth--;
#endif
#endif
}
5380 #endif
/*
 * enabled: support for the Promela enabled() operator.  Returns 1 if
 * process pid has at least one executable transition in the current state,
 * 0 otherwise.  Temporarily retargets the global `this` pointer and sets
 * TstOnly so do_transit only tests executability without side effects;
 * both are restored before returning.
 */
int
enabled(int iam, int pid)
{	Trans	*t; uchar *othis = this;
	int	res = 0; int tt; uchar ot;
#ifdef VERI
	/* if (pid > 0) */ pid++;	/* account for the never claim at slot 0 */
#endif
	if (pid == iam)
		Uerror("used: enabled(pid=thisproc)");
	if (pid < 0 || pid >= (int) now._nr_pr)
		return 0;
	this = pptr(pid);
	TstOnly = 1;	/* test executability only, no state change */
	tt = (int) ((P0 *)this)->_p;
	ot = (uchar) ((P0 *)this)->_t;
	for (t = trans[ot][tt]; t; t = t->nxt)
		if (do_transit(t, (short) pid))
		{	res = 1;
			break;
		}
	TstOnly = 0;
	this = othis;
	return res;
}
5405 #endif
/*
 * snap_time: append elapsed time and state rate to the current snapshot
 * line, and enforce the -Q time quota: when exceeded, stop the run
 * (sudden_stop across cores in multi-core mode, wrapup otherwise).
 */
void
snap_time(void)
{	clock_t stop_time;
	double delta_time;
#if !defined(WIN32) && !defined(WIN64)
	struct tms stop_tm;
	stop_time = times(&stop_tm);
	delta_time = ((double) (stop_time - start_time)) / ((double) sysconf(_SC_CLK_TCK));
#else
	stop_time = clock();
	delta_time = ((double) (stop_time - start_time)) / ((double) CLOCKS_PER_SEC);
#endif
	if (delta_time > 0.01)	/* avoid rate over a near-zero interval */
	{	printf("t= %6.3g ", delta_time);
		printf("R= %7.0g", nstates/delta_time);
	}
	printf("\n");
	if (quota > 0.1 && delta_time > quota)
	{	printf("Time limit of %6.3g minutes exceeded\n", quota/60.0);
#if NCORE>1
		fflush(stdout);
		leave_critical(GLOBAL_LOCK);
		sudden_stop("time-limit");
		exit(1);
#endif
		wrapup();
	}
}
/*
 * snapshot: print one progress line (depth, states, transitions, memory,
 * time).  In multi-core mode the line is serialized under GLOBAL_LOCK and
 * prefixed with the core id; depth is corrected for earlier handoffs.
 */
void
snapshot(void)
{
#if NCORE>1
	enter_critical(GLOBAL_LOCK);	/* snapshot */
	printf("cpu%d: ", core_id);
#endif
	printf("Depth= %7ld States= %8.3g ",
#if NCORE>1
		(long) (nr_handoffs * z_handoff) +	/* depth searched before handoff */
#endif
		mreached, nstates);
	printf("Transitions= %8.3g ", nstates+truncs);
#ifdef MA
	printf("Nodes= %7d ", nr_states);
#endif
	printf("Memory= %9.3f\t", memcnt/1048576.);
	snap_time();
	fflush(stdout);
#if NCORE>1
	leave_critical(GLOBAL_LOCK);
#endif
}
5457 #ifdef SC
/*
 * stack2disk (SC mode): swap the oldest DDD trail entries to the stack
 * file when the in-memory search stack overflows, then slide the remaining
 * entries down and clear the freed tail.  CNT1 counts swapped-out chunks.
 */
void
stack2disk(void)
{
	if (!stackwrite
	&& (stackwrite = creat(stackfile, TMODE)) < 0)
		Uerror("cannot create stackfile");

	if (write(stackwrite, trail, DDD*sizeof(Trail))
	!= DDD*sizeof(Trail))
		Uerror("stackfile write error -- disk is full?");

	memmove(trail, &trail[DDD], (HHH-DDD+2)*sizeof(Trail));
	memset(&trail[HHH-DDD+2], 0, (omaxdepth - HHH + DDD - 2)*sizeof(Trail));
	CNT1++;
}
/*
 * disk2stack (SC mode): inverse of stack2disk -- shift the in-memory trail
 * up by DDD entries and reload the most recent swapped-out chunk from the
 * stack file.  CNT2 counts reloaded chunks; the write offset is rewound so
 * a later stack2disk overwrites the reloaded chunk.
 */
void
disk2stack(void)
{	long have;

	CNT2++;
	memmove(&trail[DDD], trail, (HHH-DDD+2)*sizeof(Trail));

	if (!stackwrite
	|| lseek(stackwrite, -DDD* (off_t) sizeof(Trail), SEEK_CUR) == -1)
		Uerror("disk2stack lseek error");

	if (!stackread
	&& (stackread = open(stackfile, 0)) < 0)
		Uerror("cannot open stackfile");

	if (lseek(stackread, (CNT1-CNT2)*DDD* (off_t) sizeof(Trail), SEEK_SET) == -1)
		Uerror("disk2stack lseek error");

	have = read(stackread, trail, DDD*sizeof(Trail));
	if (have != DDD*sizeof(Trail))
		Uerror("stackfile read error");
}
5495 #endif
5496 uchar *
5497 Pptr(int x)
5498 { if (x < 0 || x >= MAXPROC || !proc_offset[x])
5499 return noptr;
5500 else
5501 return (uchar *) pptr(x);
5502 }
5503 int qs_empty(void);
5504 /*
5505 * new_state() is the main DFS search routine in the verifier
5506 * it has a lot of code ifdef-ed together to support
5507 * different search modes, which makes it quite unreadable.
5508 * if you are studying the code, first use the C preprocessor
5509 * to generate a specific version from the pan.c source,
5510 * e.g. by saying:
5511 * gcc -E -DNOREDUCE -DBITSTATE pan.c > ppan.c
5512 * and then study the resulting file, rather than this one
5513 */
5514 #if !defined(BFS) && (!defined(BITSTATE) || !defined(MA))
5515
5516 #ifdef NSUCC
5517 int N_succ[512];
5518 void
5519 tally_succ(int cnt)
5520 { if (cnt < 512) N_succ[cnt]++;
5521 else printf("tally_succ: cnt %d exceeds range\n", cnt);
5522 }
5523
5524 void
5525 dump_succ(void)
5526 { int i; double sum = 0.0;
5527 double w_avg = 0.0;
5528 printf("Successor counts:\n");
5529 for (i = 0; i < 512; i++)
5530 { sum += (double) N_succ[i];
5531 }
5532 for (i = 0; i < 512; i++)
5533 { if (N_succ[i] > 0)
5534 { printf("%3d %10d (%.4g %% of total)\n",
5535 i, N_succ[i], (100.0 * (double) N_succ[i])/sum);
5536 w_avg += (double) i * (double) N_succ[i];
5537 } }
5538 if (sum > N_succ[0])
5539 printf("mean %.4g (without 0: %.4g)\n", w_avg / sum, w_avg / (sum - (double) N_succ[0]));
5540 }
5541 #endif
5542
5543 void
5544 new_state(void)
5545 { Trans *t;
5546 uchar _n, _m, ot;
5547 #ifdef RANDOMIZE
5548 short ooi, eoi;
5549 #endif
5550 #ifdef M_LOSS
5551 uchar delta_m = 0;
5552 #endif
5553 short II, JJ = 0, kk;
5554 int tt;
5555 #ifdef REVERSE
5556 short From = BASE, To = now._nr_pr-1;
5557 #else
5558 short From = now._nr_pr-1, To = BASE;
5559 #endif
5560 Down:
5561 #ifdef CHECK
5562 cpu_printf("%d: Down - %s %saccepting [pids %d-%d]\n",
5563 depth, (trpt->tau&4)?"claim":"program",
5564 (trpt->o_pm&2)?"":"non-", From, To);
5565 #endif
5566 #ifdef SCHED
5567 if (depth > 0)
5568 { trpt->sched_limit = (trpt-1)->sched_limit;
5569 } else
5570 { trpt->sched_limit = 0;
5571 }
5572 #endif
5573 #ifdef SC
5574 if (depth > hiwater)
5575 { stack2disk();
5576 maxdepth += DDD;
5577 hiwater += DDD;
5578 trpt -= DDD;
5579 if(verbose)
5580 printf("zap %d: %d (maxdepth now %d)\n",
5581 CNT1, hiwater, maxdepth);
5582 }
5583 #endif
5584 trpt->tau &= ~(16|32|64); /* make sure these are off */
5585 #if defined(FULLSTACK) && defined(MA)
5586 trpt->proviso = 0;
5587 #endif
5588 #ifdef NSUCC
5589 trpt->n_succ = 0;
5590 #endif
5591 #if NCORE>1
5592 if (mem_hand_off())
5593 {
5594 #if SYNC
5595 (trpt+1)->o_n = 1; /* not a deadlock: as below */
5596 #endif
5597 #ifndef LOOPSTATE
5598 (trpt-1)->tau |= 16; /* worstcase guess: as below */
5599 #endif
5600 #if NCORE>1 && defined(FULL_TRAIL)
5601 if (upto > 0)
5602 { Pop_Stack_Tree();
5603 }
5604 #endif
5605 goto Up;
5606 }
5607 #endif
5608 if (depth >= maxdepth)
5609 { if (!warned)
5610 { warned = 1;
5611 printf("error: max search depth too small\n");
5612 }
5613 if (bounded)
5614 { uerror("depth limit reached");
5615 }
5616 truncs++;
5617 #if SYNC
5618 (trpt+1)->o_n = 1; /* not a deadlock */
5619 #endif
5620 #ifndef LOOPSTATE
5621 (trpt-1)->tau |= 16; /* worstcase guess */
5622 #endif
5623 #if NCORE>1 && defined(FULL_TRAIL)
5624 if (upto > 0)
5625 { Pop_Stack_Tree();
5626 }
5627 #endif
5628 goto Up;
5629 }
5630 AllOver:
5631 #if (defined(FULLSTACK) && !defined(MA)) || NCORE>1
5632 /* if atomic or rv move, carry forward previous state */
5633 trpt->ostate = (trpt-1)->ostate;
5634 #endif
5635 #ifdef VERI
5636 if ((trpt->tau&4) || ((trpt-1)->tau&128))
5637 #endif
5638 if (boq == -1) { /* if not mid-rv */
5639 #ifndef SAFETY
5640 /* this check should now be redundant
5641 * because the seed state also appears
5642 * on the 1st dfs stack and would be
5643 * matched in hstore below
5644 */
5645 if ((now._a_t&1) && depth > A_depth)
5646 { if (!memcmp((char *)&A_Root,
5647 (char *)&now, vsize))
5648 {
5649 depthfound = A_depth;
5650 #ifdef CHECK
5651 printf("matches seed\n");
5652 #endif
5653 #ifdef NP
5654 uerror("non-progress cycle");
5655 #else
5656 uerror("acceptance cycle");
5657 #endif
5658 #if NCORE>1 && defined(FULL_TRAIL)
5659 if (upto > 0)
5660 { Pop_Stack_Tree();
5661 }
5662 #endif
5663 goto Up;
5664 }
5665 #ifdef CHECK
5666 printf("not seed\n");
5667 #endif
5668 }
5669 #endif
5670 if (!(trpt->tau&8)) /* if no atomic move */
5671 {
5672 #ifdef BITSTATE
5673 #ifdef CNTRSTACK
5674 II = bstore((char *)&now, vsize);
5675 trpt->j6 = j1; trpt->j7 = j2;
5676 JJ = LL[j1] && LL[j2];
5677 #else
5678 #ifdef FULLSTACK
5679 JJ = onstack_now();
5680 #else
5681 #ifndef NOREDUCE
5682 JJ = II; /* worstcase guess for p.o. */
5683 #endif
5684 #endif
5685 II = bstore((char *)&now, vsize);
5686 #endif
5687 #else
5688 #ifdef MA
5689 II = gstore((char *)&now, vsize, 0);
5690 #ifndef FULLSTACK
5691 JJ = II;
5692 #else
5693 JJ = (II == 2)?1:0;
5694 #endif
5695 #else
5696 II = hstore((char *)&now, vsize);
5697 #ifdef FULLSTACK
5698 JJ = (II == 2)?1:0;
5699 #endif
5700 #endif
5701 #endif
5702 kk = (II == 1 || II == 2);
5703 #ifndef SAFETY
5704 #if NCORE==1 || defined (SEP_STATE)
5705 if (II == 2 && ((trpt->o_pm&2) || ((trpt-1)->o_pm&2)))
5706 #ifndef NOFAIR
5707 #if 0
5708 if (!fairness || ((now._a_t&1) && now._cnt[1] == 1)) /* 5.1.4 */
5709 #else
5710 if (a_cycles && !fairness) /* 5.1.6 -- example by Hirofumi Watanabe */
5711 #endif
5712 #endif
5713 {
5714 II = 3; /* Schwoon & Esparza 2005, Gastin&Moro 2004 */
5715 #ifdef VERBOSE
5716 printf("state match on dfs stack\n");
5717 #endif
5718 goto same_case;
5719 }
5720 #endif
5721 #if defined(FULLSTACK) && defined(BITSTATE)
5722 if (!JJ && (now._a_t&1) && depth > A_depth)
5723 { int oj1 = j1;
5724 uchar o_a_t = now._a_t;
5725 now._a_t &= ~(1|16|32);
5726 if (onstack_now())
5727 { II = 3;
5728 #ifdef VERBOSE
5729 printf("state match on 1st dfs stack\n");
5730 #endif
5731 }
5732 now._a_t = o_a_t;
5733 j1 = oj1;
5734 }
5735 #endif
5736 if (II == 3 && a_cycles && (now._a_t&1))
5737 {
5738 #ifndef NOFAIR
5739 if (fairness && now._cnt[1] > 1) /* was != 0 */
5740 {
5741 #ifdef VERBOSE
5742 printf(" fairness count non-zero\n");
5743 #endif
5744 II = 0;
5745 } else
5746 #endif
5747 {
5748 #ifndef BITSTATE
5749 nShadow--;
5750 #endif
5751 same_case: if (Lstate) depthfound = Lstate->D;
5752 #ifdef NP
5753 uerror("non-progress cycle");
5754 #else
5755 uerror("acceptance cycle");
5756 #endif
5757 #if NCORE>1 && defined(FULL_TRAIL)
5758 if (upto > 0)
5759 { Pop_Stack_Tree();
5760 }
5761 #endif
5762 goto Up;
5763 }
5764 }
5765 #endif
5766 #ifndef NOREDUCE
5767 #ifndef SAFETY
5768 #if NCORE>1 && !defined(SEP_STATE) && defined(V_PROVISO)
5769 if (II != 0 && (!Lstate || Lstate->cpu_id < core_id))
5770 { (trpt-1)->tau |= 16;
5771 }
5772 #endif
5773 if ((II && JJ) || (II == 3))
5774 { /* marker for liveness proviso */
5775 #ifndef LOOPSTATE
5776 (trpt-1)->tau |= 16;
5777 #endif
5778 truncs2++;
5779 }
5780 #else
5781 #if NCORE>1 && !defined(SEP_STATE) && defined(V_PROVISO)
5782 if (!(II != 0 && (!Lstate || Lstate->cpu_id < core_id)))
5783 { /* treat as stack state */
5784 (trpt-1)->tau |= 16;
5785 } else
5786 { /* treat as non-stack state */
5787 (trpt-1)->tau |= 64;
5788 }
5789 #endif
5790 if (!II || !JJ)
5791 { /* successor outside stack */
5792 (trpt-1)->tau |= 64;
5793 }
5794 #endif
5795 #endif
5796 if (II)
5797 { truncs++;
5798 #if NCORE>1 && defined(FULL_TRAIL)
5799 if (upto > 0)
5800 { Pop_Stack_Tree();
5801 if (depth == 0)
5802 { return;
5803 } }
5804 #endif
5805 goto Up;
5806 }
5807 if (!kk)
5808 { static long sdone = (long) 0; long ndone;
5809 nstates++;
5810 #if defined(ZAPH) && defined(BITSTATE)
5811 zstates += (double) hfns;
5812 #endif
5813 ndone = (unsigned long) (nstates/((double) FREQ));
5814 if (ndone != sdone)
5815 { snapshot();
5816 sdone = ndone;
5817 #if defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(MA)
5818 if (nstates > ((double)(ONE_L<<(ssize+1))))
5819 { void resize_hashtable(void);
5820 resize_hashtable();
5821 }
5822 #endif
5823 #if defined(ZAPH) && defined(BITSTATE)
5824 if (zstates > ((double)(ONE_L<<(ssize-2))))
5825 { /* more than half the bits set */
5826 void zap_hashtable(void);
5827 zap_hashtable();
5828 zstates = 0;
5829 }
5830 #endif
5831 }
5832 #ifdef SVDUMP
5833 if (vprefix > 0)
5834 if (write(svfd, (uchar *) &now, vprefix) != vprefix)
5835 { fprintf(efd, "writing %s.svd failed\n", PanSource);
5836 wrapup();
5837 }
5838 #endif
5839 #if defined(MA) && defined(W_XPT)
5840 if ((unsigned long) nstates%W_XPT == 0)
5841 { void w_xpoint(void);
5842 w_xpoint();
5843 }
5844 #endif
5845 }
5846 #if defined(FULLSTACK) || defined(CNTRSTACK)
5847 onstack_put();
5848 #ifdef DEBUG2
5849 #if defined(FULLSTACK) && !defined(MA)
5850 printf("%d: putting %u (%d)\n", depth,
5851 trpt->ostate,
5852 (trpt->ostate)?trpt->ostate->tagged:0);
5853 #else
5854 printf("%d: putting\n", depth);
5855 #endif
5856 #endif
5857 #else
5858 #if NCORE>1
5859 trpt->ostate = Lstate;
5860 #endif
5861 #endif
5862 } }
5863 if (depth > mreached)
5864 mreached = depth;
5865 #ifdef VERI
5866 if (trpt->tau&4)
5867 #endif
5868 trpt->tau &= ~(1|2); /* timeout and -request off */
5869 _n = 0;
5870 #if SYNC
5871 (trpt+1)->o_n = 0;
5872 #endif
5873 #ifdef VERI
5874 if (now._nr_pr == 0) /* claim terminated */
5875 uerror("end state in claim reached");
5876 check_claim(((P0 *)pptr(0))->_p);
5877 Stutter:
5878 if (trpt->tau&4) /* must make a claimmove */
5879 {
5880 #ifndef NOFAIR
5881 if ((now._a_t&2) /* A-bit set */
5882 && now._cnt[now._a_t&1] == 1)
5883 { now._a_t &= ~2;
5884 now._cnt[now._a_t&1] = 0;
5885 trpt->o_pm |= 16;
5886 #ifdef DEBUG
5887 printf("%3d: fairness Rule 3.: _a_t = %d\n",
5888 depth, now._a_t);
5889 #endif
5890 }
5891 #endif
5892 II = 0; /* never */
5893 goto Veri0;
5894 }
5895 #endif
5896 #ifndef NOREDUCE
5897 /* Look for a process with only safe transitions */
5898 /* (special rules apply in the 2nd dfs) */
5899 if (boq == -1 && From != To
5900
5901 #ifdef SAFETY
5902 #if NCORE>1
5903 && (depth < z_handoff)
5904 #endif
5905 )
5906 #else
5907 #if NCORE>1
5908 && ((a_cycles) || (!a_cycles && depth < z_handoff))
5909 #endif
5910 && (!(now._a_t&1)
5911 || (a_cycles &&
5912 #ifndef BITSTATE
5913 #ifdef MA
5914 #ifdef VERI
5915 !((trpt-1)->proviso))
5916 #else
5917 !(trpt->proviso))
5918 #endif
5919 #else
5920 #ifdef VERI
5921 (trpt-1)->ostate &&
5922 !(((char *)&((trpt-1)->ostate->state))[0] & 128))
5923 #else
5924 !(((char *)&(trpt->ostate->state))[0] & 128))
5925 #endif
5926 #endif
5927 #else
5928 #ifdef VERI
5929 (trpt-1)->ostate &&
5930 (trpt-1)->ostate->proviso == 0)
5931 #else
5932 trpt->ostate->proviso == 0)
5933 #endif
5934 #endif
5935 ))
5936 #endif
5937
5938 #ifdef REVERSE
5939 for (II = From; II <= To; II++)
5940 #else
5941 for (II = From; II >= To; II--)
5942 #endif
5943 {
5944 Resume: /* pick up here if preselect fails */
5945 this = pptr(II);
5946 tt = (int) ((P0 *)this)->_p;
5947 ot = (uchar) ((P0 *)this)->_t;
5948 if (trans[ot][tt]->atom & 8)
5949 { t = trans[ot][tt];
5950 if (t->qu[0] != 0)
5951 { Ccheck++;
5952 if (!q_cond(II, t))
5953 continue;
5954 Cholds++;
5955 }
5956 From = To = II; /* the process preselected */
5957 #ifdef NIBIS
5958 t->om = 0;
5959 #endif
5960 trpt->tau |= 32; /* preselect marker */
5961 #ifdef DEBUG
5962 #ifdef NIBIS
5963 printf("%3d: proc %d Pre", depth, II);
5964 printf("Selected (om=%d, tau=%d)\n",
5965 t->om, trpt->tau);
5966 #else
5967 printf("%3d: proc %d PreSelected (tau=%d)\n",
5968 depth, II, trpt->tau);
5969 #endif
5970 #endif
5971 goto Again;
5972 }
5973 }
5974 trpt->tau &= ~32;
5975 #endif
5976 #if !defined(NOREDUCE) || (defined(ETIM) && !defined(VERI))
5977 Again:
5978 #endif
5979 /* The Main Expansion Loop over Processes */
5980 trpt->o_pm &= ~(8|16|32|64); /* fairness-marks */
5981 #ifndef NOFAIR
5982 if (fairness && boq == -1
5983 #ifdef VERI
5984 && (!(trpt->tau&4) && !((trpt-1)->tau&128))
5985 #endif
5986 && !(trpt->tau&8))
5987 { /* A_bit = 1; Cnt = N in acc states with A_bit 0 */
5988 if (!(now._a_t&2))
5989 {
5990 if (a_cycles && (trpt->o_pm&2))
5991 { /* Accepting state */
5992 now._a_t |= 2;
5993 now._cnt[now._a_t&1] = now._nr_pr + 1;
5994 trpt->o_pm |= 8;
5995 #ifdef DEBUG
5996 printf("%3d: fairness Rule 1: cnt=%d, _a_t=%d\n",
5997 depth, now._cnt[now._a_t&1], now._a_t);
5998 #endif
5999 }
6000 } else
6001 { /* A_bit = 0 when Cnt 0 */
6002 if (now._cnt[now._a_t&1] == 1)
6003 { now._a_t &= ~2;
6004 now._cnt[now._a_t&1] = 0;
6005 trpt->o_pm |= 16;
6006 #ifdef DEBUG
6007 printf("%3d: fairness Rule 3: _a_t = %d\n",
6008 depth, now._a_t);
6009 #endif
6010 } } }
6011 #endif
6012
6013 #ifdef REVERSE
6014 for (II = From; II <= To; II++)
6015 #else
6016 for (II = From; II >= To; II--)
6017 #endif
6018 {
6019 #if SYNC
6020 /* no rendezvous with same proc */
6021 if (boq != -1 && trpt->pr == II) continue;
6022 #endif
6023 #ifdef SCHED
6024 /* limit max nr of interleavings */
6025 if (From != To
6026 && depth > 0
6027 #ifdef VERI
6028 && II != 0
6029 #endif
6030 && (trpt-1)->pr != II
6031 && trpt->sched_limit >= sched_max)
6032 { continue;
6033 }
6034 #endif
6035 #ifdef VERI
6036 Veri0:
6037 #endif
6038 this = pptr(II);
6039 tt = (int) ((P0 *)this)->_p;
6040 ot = (uchar) ((P0 *)this)->_t;
6041 #ifdef NIBIS
6042 /* don't repeat a previous preselected expansion */
6043 /* could hit this if reduction proviso was false */
6044 t = trans[ot][tt];
6045 if (!(trpt->tau&4)
6046 && !(trpt->tau&1)
6047 && !(trpt->tau&32)
6048 && (t->atom & 8)
6049 && boq == -1
6050 && From != To)
6051 { if (t->qu[0] == 0
6052 || q_cond(II, t))
6053 { _m = t->om;
6054 if (_m>_n||(_n>3&&_m!=0)) _n=_m;
6055 continue; /* did it before */
6056 } }
6057 #endif
6058 trpt->o_pm &= ~1; /* no move in this pid yet */
6059 #ifdef EVENT_TRACE
6060 (trpt+1)->o_event = now._event;
6061 #endif
6062 /* Fairness: Cnt++ when Cnt == II */
6063 #ifndef NOFAIR
6064 trpt->o_pm &= ~64; /* didn't apply rule 2 */
6065 if (fairness
6066 && boq == -1
6067 && !(trpt->o_pm&32)
6068 && (now._a_t&2)
6069 && now._cnt[now._a_t&1] == II+2)
6070 { now._cnt[now._a_t&1] -= 1;
6071 #ifdef VERI
6072 /* claim need not participate */
6073 if (II == 1)
6074 now._cnt[now._a_t&1] = 1;
6075 #endif
6076 #ifdef DEBUG
6077 printf("%3d: proc %d fairness ", depth, II);
6078 printf("Rule 2: --cnt to %d (%d)\n",
6079 now._cnt[now._a_t&1], now._a_t);
6080 #endif
6081 trpt->o_pm |= (32|64);
6082 }
6083 #endif
6084 #ifdef HAS_PROVIDED
6085 if (!provided(II, ot, tt, t)) continue;
6086 #endif
6087 /* check all trans of proc II - escapes first */
6088 #ifdef HAS_UNLESS
6089 trpt->e_state = 0;
6090 #endif
6091 (trpt+1)->pr = (uchar) II;
6092 (trpt+1)->st = tt;
6093 #ifdef RANDOMIZE
6094 for (ooi = eoi = 0, t = trans[ot][tt]; t; t = t->nxt, ooi++)
6095 { if (strcmp(t->tp, "else") == 0)
6096 { eoi++;
6097 break;
6098 } }
6099 if (eoi > 0)
6100 { t = trans[ot][tt];
6101 #ifdef VERBOSE
6102 printf("randomizer: suppressed, saw else\n");
6103 #endif
6104 } else
6105 { eoi = rand()%ooi;
6106 #ifdef VERBOSE
6107 printf("randomizer: skip %d in %d\n", eoi, ooi);
6108 #endif
6109 for (t = trans[ot][tt]; t; t = t->nxt)
6110 if (eoi-- <= 0) break;
6111 }
6112 domore:
6113 for ( ; t && ooi > 0; t = t->nxt, ooi--)
6114 #else
6115 for (t = trans[ot][tt]; t; t = t->nxt)
6116 #endif
6117 {
6118 #ifdef HAS_UNLESS
6119 /* exploring all transitions from
6120 * a single escape state suffices
6121 */
6122 if (trpt->e_state > 0
6123 && trpt->e_state != t->e_trans)
6124 {
6125 #ifdef DEBUG
6126 printf("skip 2nd escape %d (did %d before)\n",
6127 t->e_trans, trpt->e_state);
6128 #endif
6129 break;
6130 }
6131 #endif
6132 (trpt+1)->o_t = t;
6133 #ifdef INLINE
6134 #include FORWARD_MOVES
6135 P999: /* jumps here when move succeeds */
6136 #else
6137 if (!(_m = do_transit(t, II))) continue;
6138 #endif
6139 #ifdef SCHED
6140 if (depth > 0
6141 #ifdef VERI
6142 && II != 0
6143 #endif
6144 && (trpt-1)->pr != II)
6145 { trpt->sched_limit = 1 + (trpt-1)->sched_limit;
6146 }
6147 #endif
6148 if (boq == -1)
6149 #ifdef CTL
6150 /* for branching-time, can accept reduction only if */
6151 /* the persistent set contains just 1 transition */
6152 { if ((trpt->tau&32) && (trpt->o_pm&1))
6153 trpt->tau |= 16;
6154 trpt->o_pm |= 1; /* we moved */
6155 }
6156 #else
6157 trpt->o_pm |= 1; /* we moved */
6158 #endif
6159 #ifdef LOOPSTATE
6160 if (loopstate[ot][tt])
6161 {
6162 #ifdef VERBOSE
6163 printf("exiting from loopstate:\n");
6164 #endif
6165 trpt->tau |= 16;
6166 cnt_loops++;
6167 }
6168 #endif
6169 #ifdef PEG
6170 peg[t->forw]++;
6171 #endif
6172 #if defined(VERBOSE) || defined(CHECK)
6173 #if defined(SVDUMP)
6174 cpu_printf("%3d: proc %d exec %d \n", depth, II, t->t_id);
6175 #else
6176 cpu_printf("%3d: proc %d exec %d, %d to %d, %s %s %s %saccepting [tau=%d]\n",
6177 depth, II, t->forw, tt, t->st, t->tp,
6178 (t->atom&2)?"atomic":"",
6179 (boq != -1)?"rendez-vous":"",
6180 (trpt->o_pm&2)?"":"non-", trpt->tau);
6181 #ifdef HAS_UNLESS
6182 if (t->e_trans)
6183 cpu_printf("\t(escape to state %d)\n", t->st);
6184 #endif
6185 #endif
6186 #ifdef RANDOMIZE
6187 cpu_printf("\t(randomizer %d)\n", ooi);
6188 #endif
6189 #endif
6190 #ifdef HAS_LAST
6191 #ifdef VERI
6192 if (II != 0)
6193 #endif
6194 now._last = II - BASE;
6195 #endif
6196 #ifdef HAS_UNLESS
6197 trpt->e_state = t->e_trans;
6198 #endif
6199 depth++; trpt++;
6200 trpt->pr = (uchar) II;
6201 trpt->st = tt;
6202 trpt->o_pm &= ~(2|4);
6203 if (t->st > 0)
6204 { ((P0 *)this)->_p = t->st;
6205 /* moved down reached[ot][t->st] = 1; */
6206 }
6207 #ifndef SAFETY
6208 if (a_cycles)
6209 {
6210 #if (ACCEPT_LAB>0 && !defined(NP)) || (PROG_LAB>0 && defined(HAS_NP))
6211 int ii;
6212 #endif
6213 #define P__Q ((P0 *)pptr(ii))
6214 #if ACCEPT_LAB>0
6215 #ifdef NP
6216 /* state 1 of np_ claim is accepting */
6217 if (((P0 *)pptr(0))->_p == 1)
6218 trpt->o_pm |= 2;
6219 #else
6220 for (ii = 0; ii < (int) now._nr_pr; ii++)
6221 { if (accpstate[P__Q->_t][P__Q->_p])
6222 { trpt->o_pm |= 2;
6223 break;
6224 } }
6225 #endif
6226 #endif
6227 #if defined(HAS_NP) && PROG_LAB>0
6228 for (ii = 0; ii < (int) now._nr_pr; ii++)
6229 { if (progstate[P__Q->_t][P__Q->_p])
6230 { trpt->o_pm |= 4;
6231 break;
6232 } }
6233 #endif
6234 #undef P__Q
6235 }
6236 #endif
6237 trpt->o_t = t; trpt->o_n = _n;
6238 trpt->o_ot = ot; trpt->o_tt = tt;
6239 trpt->o_To = To; trpt->o_m = _m;
6240 trpt->tau = 0;
6241 #ifdef RANDOMIZE
6242 trpt->oo_i = ooi;
6243 #endif
6244 if (boq != -1 || (t->atom&2))
6245 { trpt->tau |= 8;
6246 #ifdef VERI
6247 /* atomic sequence in claim */
6248 if((trpt-1)->tau&4)
6249 trpt->tau |= 4;
6250 else
6251 trpt->tau &= ~4;
6252 } else
6253 { if ((trpt-1)->tau&4)
6254 trpt->tau &= ~4;
6255 else
6256 trpt->tau |= 4;
6257 }
6258 /* if claim allowed timeout, so */
6259 /* does the next program-step: */
6260 if (((trpt-1)->tau&1) && !(trpt->tau&4))
6261 trpt->tau |= 1;
6262 #else
6263 } else
6264 trpt->tau &= ~8;
6265 #endif
6266 if (boq == -1 && (t->atom&2))
6267 { From = To = II; nlinks++;
6268 } else
6269 #ifdef REVERSE
6270 { From = BASE; To = now._nr_pr-1;
6271 #else
6272 { From = now._nr_pr-1; To = BASE;
6273 #endif
6274 }
6275 #if NCORE>1 && defined(FULL_TRAIL)
6276 if (upto > 0)
6277 { Push_Stack_Tree(II, t->t_id);
6278 }
6279 #endif
6280 goto Down; /* pseudo-recursion */
6281 Up:
6282 #ifdef CHECK
6283 cpu_printf("%d: Up - %s\n", depth,
6284 (trpt->tau&4)?"claim":"program");
6285 #endif
6286 #if NCORE>1
6287 iam_alive();
6288 #ifdef USE_DISK
6289 mem_drain();
6290 #endif
6291 #endif
6292 #if defined(MA) || NCORE>1
6293 if (depth <= 0) return;
6294 /* e.g., if first state is old, after a restart */
6295 #endif
6296 #ifdef SC
6297 if (CNT1 > CNT2
6298 && depth < hiwater - (HHH-DDD) + 2)
6299 {
6300 trpt += DDD;
6301 disk2stack();
6302 maxdepth -= DDD;
6303 hiwater -= DDD;
6304 if(verbose)
6305 printf("unzap %d: %d\n", CNT2, hiwater);
6306 }
6307 #endif
6308 #ifndef NOFAIR
6309 if (trpt->o_pm&128) /* fairness alg */
6310 { now._cnt[now._a_t&1] = trpt->bup.oval;
6311 _n = 1; trpt->o_pm &= ~128;
6312 depth--; trpt--;
6313 #if defined(VERBOSE) || defined(CHECK)
6314 printf("%3d: reversed fairness default move\n", depth);
6315 #endif
6316 goto Q999;
6317 }
6318 #endif
6319 #ifdef HAS_LAST
6320 #ifdef VERI
6321 { int d; Trail *trl;
6322 now._last = 0;
6323 for (d = 1; d < depth; d++)
6324 { trl = getframe(depth-d); /* was (trpt-d) */
6325 if (trl->pr != 0)
6326 { now._last = trl->pr - BASE;
6327 break;
6328 } } }
6329 #else
6330 now._last = (depth<1)?0:(trpt-1)->pr;
6331 #endif
6332 #endif
6333 #ifdef EVENT_TRACE
6334 now._event = trpt->o_event;
6335 #endif
6336 #ifndef SAFETY
6337 if ((now._a_t&1) && depth <= A_depth)
6338 return; /* to checkcycles() */
6339 #endif
6340 t = trpt->o_t; _n = trpt->o_n;
6341 ot = trpt->o_ot; II = trpt->pr;
6342 tt = trpt->o_tt; this = pptr(II);
6343 To = trpt->o_To; _m = trpt->o_m;
6344 #ifdef RANDOMIZE
6345 ooi = trpt->oo_i;
6346 #endif
6347 #ifdef INLINE_REV
6348 _m = do_reverse(t, II, _m);
6349 #else
6350 #include REVERSE_MOVES
6351 R999: /* jumps here when done */
6352 #endif
6353 #ifdef VERBOSE
6354 cpu_printf("%3d: proc %d reverses %d, %d to %d\n",
6355 depth, II, t->forw, tt, t->st);
6356 cpu_printf("\t%s [abit=%d,adepth=%d,tau=%d,%d]\n",
6357 t->tp, now._a_t, A_depth, trpt->tau, (trpt-1)->tau);
6358 #endif
6359 #ifndef NOREDUCE
6360 /* pass the proviso tags */
6361 if ((trpt->tau&8) /* rv or atomic */
6362 && (trpt->tau&16))
6363 (trpt-1)->tau |= 16;
6364 #ifdef SAFETY
6365 if ((trpt->tau&8) /* rv or atomic */
6366 && (trpt->tau&64))
6367 (trpt-1)->tau |= 64;
6368 #endif
6369 #endif
6370 depth--; trpt--;
6371
6372 #ifdef NSUCC
6373 trpt->n_succ++;
6374 #endif
6375 #ifdef NIBIS
6376 (trans[ot][tt])->om = _m; /* head of list */
6377 #endif
6378 /* i.e., not set if rv fails */
6379 if (_m)
6380 {
6381 #if defined(VERI) && !defined(NP)
6382 if (II == 0 && verbose && !reached[ot][t->st])
6383 {
6384 printf("depth %d: Claim reached state %d (line %d)\n",
6385 depth, t->st, src_claim [t->st]);
6386 fflush(stdout);
6387 }
6388 #endif
6389 reached[ot][t->st] = 1;
6390 reached[ot][tt] = 1;
6391 }
6392 #ifdef HAS_UNLESS
6393 else trpt->e_state = 0; /* undo */
6394 #endif
6395 if (_m>_n||(_n>3&&_m!=0)) _n=_m;
6396 ((P0 *)this)->_p = tt;
6397 } /* all options */
6398 #ifdef RANDOMIZE
6399 if (!t && ooi > 0)
6400 { t = trans[ot][tt];
6401 #ifdef VERBOSE
6402 printf("randomizer: continue for %d more\n", ooi);
6403 #endif
6404 goto domore;
6405 }
6406 #ifdef VERBOSE
6407 else
6408 printf("randomizer: done\n");
6409 #endif
6410 #endif
6411 #ifndef NOFAIR
6412 /* Fairness: undo Rule 2 */
6413 if ((trpt->o_pm&32)
6414 && (trpt->o_pm&64))
6415 { if (trpt->o_pm&1)
6416 {
6417 #ifdef VERI
6418 if (now._cnt[now._a_t&1] == 1)
6419 now._cnt[now._a_t&1] = 2;
6420 #endif
6421 now._cnt[now._a_t&1] += 1;
6422 #ifdef VERBOSE
6423 printf("%3d: proc %d fairness ", depth, II);
6424 printf("undo Rule 2, cnt=%d, _a_t=%d\n",
6425 now._cnt[now._a_t&1], now._a_t);
6426 #endif
6427 trpt->o_pm &= ~(32|64);
6428 } else
6429 { if (_n > 0)
6430 {
6431 trpt->o_pm &= ~64;
6432 #ifdef REVERSE
6433 II = From-1;
6434 #else
6435 II = From+1;
6436 #endif
6437 } } }
6438 #endif
6439 #ifdef VERI
6440 if (II == 0) break; /* never claim */
6441 #endif
6442 } /* all processes */
6443 #ifdef NSUCC
6444 tally_succ(trpt->n_succ);
6445 #endif
6446 #ifdef SCHED
6447 if (_n == 0 /* no process could move */
6448 #ifdef VERI
6449 && II != 0
6450 #endif
6451 && depth > 0
6452 && trpt->sched_limit >= sched_max)
6453 { _n = 1; /* not a deadlock */
6454 }
6455 #endif
6456 #ifndef NOFAIR
6457 /* Fairness: undo Rule 2 */
6458 if (trpt->o_pm&32) /* remains if proc blocked */
6459 {
6460 #ifdef VERI
6461 if (now._cnt[now._a_t&1] == 1)
6462 now._cnt[now._a_t&1] = 2;
6463 #endif
6464 now._cnt[now._a_t&1] += 1;
6465 #ifdef VERBOSE
6466 printf("%3d: proc -- fairness ", depth);
6467 printf("undo Rule 2, cnt=%d, _a_t=%d\n",
6468 now._cnt[now._a_t&1], now._a_t);
6469 #endif
6470 trpt->o_pm &= ~32;
6471 }
6472 #ifndef NP
6473 if (fairness
6474 && _n == 0 /* nobody moved */
6475 #ifdef VERI
6476 && !(trpt->tau&4) /* in program move */
6477 #endif
6478 && !(trpt->tau&8) /* not an atomic one */
6479 #ifdef OTIM
6480 && ((trpt->tau&1) || endstate())
6481 #else
6482 #ifdef ETIM
6483 && (trpt->tau&1) /* already tried timeout */
6484 #endif
6485 #endif
6486 #ifndef NOREDUCE
6487 /* see below */
6488 && !((trpt->tau&32) && (_n == 0 || (trpt->tau&16)))
6489 #endif
6490 && now._cnt[now._a_t&1] > 0) /* needed more procs */
6491 { depth++; trpt++;
6492 trpt->o_pm |= 128 | ((trpt-1)->o_pm&(2|4));
6493 trpt->bup.oval = now._cnt[now._a_t&1];
6494 now._cnt[now._a_t&1] = 1;
6495 #ifdef VERI
6496 trpt->tau = 4;
6497 #else
6498 trpt->tau = 0;
6499 #endif
6500 #ifdef REVERSE
6501 From = BASE; To = now._nr_pr-1;
6502 #else
6503 From = now._nr_pr-1; To = BASE;
6504 #endif
6505 #if defined(VERBOSE) || defined(CHECK)
6506 printf("%3d: fairness default move ", depth);
6507 printf("(all procs block)\n");
6508 #endif
6509 goto Down;
6510 }
6511 #endif
6512 Q999: /* returns here with _n>0 when done */;
6513 if (trpt->o_pm&8)
6514 { now._a_t &= ~2;
6515 now._cnt[now._a_t&1] = 0;
6516 trpt->o_pm &= ~8;
6517 #ifdef VERBOSE
6518 printf("%3d: fairness undo Rule 1, _a_t=%d\n",
6519 depth, now._a_t);
6520 #endif
6521 }
6522 if (trpt->o_pm&16)
6523 { now._a_t |= 2;
6524 now._cnt[now._a_t&1] = 1;
6525 trpt->o_pm &= ~16;
6526 #ifdef VERBOSE
6527 printf("%3d: fairness undo Rule 3, _a_t=%d\n",
6528 depth, now._a_t);
6529 #endif
6530 }
6531 #endif
6532 #ifndef NOREDUCE
6533 #ifdef SAFETY
6534 #ifdef LOOPSTATE
6535 /* at least one move that was preselected at this */
6536 /* level, blocked or was a loop control flow point */
6537 if ((trpt->tau&32) && (_n == 0 || (trpt->tau&16)))
6538 #else
6539 /* preselected move - no successors outside stack */
6540 if ((trpt->tau&32) && !(trpt->tau&64))
6541 #endif
6542 #ifdef REVERSE
6543 { From = BASE; To = now._nr_pr-1;
6544 #else
6545 { From = now._nr_pr-1; To = BASE;
6546 #endif
6547 #ifdef DEBUG
6548 printf("%3d: proc %d UnSelected (_n=%d, tau=%d)\n",
6549 depth, II+1, _n, trpt->tau);
6550 #endif
6551 _n = 0; trpt->tau &= ~(16|32|64);
6552 #ifdef REVERSE
6553 if (II <= To) /* II already decremented */
6554 #else
6555 if (II >= BASE) /* II already decremented */
6556 #endif
6557 goto Resume;
6558 else
6559 goto Again;
6560 }
6561 #else
6562 /* at least one move that was preselected at this */
6563 /* level, blocked or truncated at the next level */
6564 /* implied: #ifdef FULLSTACK */
6565 if ((trpt->tau&32) && (_n == 0 || (trpt->tau&16)))
6566 {
6567 #ifdef DEBUG
6568 printf("%3d: proc %d UnSelected (_n=%d, tau=%d)\n",
6569 depth, II+1, (int) _n, trpt->tau);
6570 #endif
6571 if (a_cycles && (trpt->tau&16))
6572 { if (!(now._a_t&1))
6573 {
6574 #ifdef DEBUG
6575 printf("%3d: setting proviso bit\n", depth);
6576 #endif
6577 #ifndef BITSTATE
6578 #ifdef MA
6579 #ifdef VERI
6580 (trpt-1)->proviso = 1;
6581 #else
6582 trpt->proviso = 1;
6583 #endif
6584 #else
6585 #ifdef VERI
6586 if ((trpt-1)->ostate)
6587 ((char *)&((trpt-1)->ostate->state))[0] |= 128;
6588 #else
6589 ((char *)&(trpt->ostate->state))[0] |= 128;
6590 #endif
6591 #endif
6592 #else
6593 #ifdef VERI
6594 if ((trpt-1)->ostate)
6595 (trpt-1)->ostate->proviso = 1;
6596 #else
6597 trpt->ostate->proviso = 1;
6598 #endif
6599 #endif
6600 #ifdef REVERSE
6601 From = BASE; To = now._nr_pr-1;
6602 #else
6603 From = now._nr_pr-1; To = BASE;
6604 #endif
6605 _n = 0; trpt->tau &= ~(16|32|64);
6606 goto Again; /* do full search */
6607 } /* else accept reduction */
6608 } else
6609 #ifdef REVERSE
6610 { From = BASE; To = now._nr_pr-1;
6611 #else
6612 { From = now._nr_pr-1; To = BASE;
6613 #endif
6614 _n = 0; trpt->tau &= ~(16|32|64);
6615 #ifdef REVERSE
6616 if (II <= To) /* already decremented */
6617 #else
6618 if (II >= BASE) /* already decremented */
6619 #endif
6620 goto Resume;
6621 else
6622 goto Again;
6623 } }
6624 /* #endif */
6625 #endif
6626 #endif
6627 if (_n == 0 || ((trpt->tau&4) && (trpt->tau&2)))
6628 {
6629 #ifdef DEBUG
6630 cpu_printf("%3d: no move [II=%d, tau=%d, boq=%d]\n",
6631 depth, II, trpt->tau, boq);
6632 #endif
6633 #if SYNC
6634 /* ok if a rendez-vous fails: */
6635 if (boq != -1) goto Done;
6636 #endif
6637 /* ok if no procs or we're at maxdepth */
6638 if ((now._nr_pr == 0 && (!strict || qs_empty()))
6639 #ifdef OTIM
6640 || endstate()
6641 #endif
6642 || depth >= maxdepth-1) goto Done;
6643 if ((trpt->tau&8) && !(trpt->tau&4))
6644 { trpt->tau &= ~(1|8);
6645 /* 1=timeout, 8=atomic */
6646 #ifdef REVERSE
6647 From = BASE; To = now._nr_pr-1;
6648 #else
6649 From = now._nr_pr-1; To = BASE;
6650 #endif
6651 #ifdef DEBUG
6652 cpu_printf("%3d: atomic step proc %d unexecutable\n", depth, II+1);
6653 #endif
6654 #ifdef VERI
6655 trpt->tau |= 4; /* switch to claim */
6656 #endif
6657 goto AllOver;
6658 }
6659 #ifdef ETIM
6660 if (!(trpt->tau&1)) /* didn't try timeout yet */
6661 {
6662 #ifdef VERI
6663 if (trpt->tau&4)
6664 {
6665 #ifndef NTIM
6666 if (trpt->tau&2) /* requested */
6667 #endif
6668 { trpt->tau |= 1;
6669 trpt->tau &= ~2;
6670 #ifdef DEBUG
6671 cpu_printf("%d: timeout\n", depth);
6672 #endif
6673 goto Stutter;
6674 } }
6675 else
6676 { /* only claim can enable timeout */
6677 if ((trpt->tau&8)
6678 && !((trpt-1)->tau&4))
6679 /* blocks inside an atomic */ goto BreakOut;
6680 #ifdef DEBUG
6681 cpu_printf("%d: req timeout\n",
6682 depth);
6683 #endif
6684 (trpt-1)->tau |= 2; /* request */
6685 #if NCORE>1 && defined(FULL_TRAIL)
6686 if (upto > 0)
6687 { Pop_Stack_Tree();
6688 }
6689 #endif
6690 goto Up;
6691 }
6692 #else
6693 #ifdef DEBUG
6694 cpu_printf("%d: timeout\n", depth);
6695 #endif
6696 trpt->tau |= 1;
6697 goto Again;
6698 #endif
6699 }
6700 #endif
6701 #ifdef VERI
6702 BreakOut:
6703 #ifndef NOSTUTTER
6704 if (!(trpt->tau&4))
6705 { trpt->tau |= 4; /* claim stuttering */
6706 trpt->tau |= 128; /* stutter mark */
6707 #ifdef DEBUG
6708 cpu_printf("%d: claim stutter\n", depth);
6709 #endif
6710 goto Stutter;
6711 }
6712 #else
6713 ;
6714 #endif
6715 #else
6716 if (!noends && !a_cycles && !endstate())
6717 { depth--; trpt--; /* new 4.2.3 */
6718 uerror("invalid end state");
6719 depth++; trpt++;
6720 }
6721 #ifndef NOSTUTTER
6722 else if (a_cycles && (trpt->o_pm&2)) /* new 4.2.4 */
6723 { depth--; trpt--;
6724 uerror("accept stutter");
6725 depth++; trpt++;
6726 }
6727 #endif
6728 #endif
6729 }
6730 Done:
6731 if (!(trpt->tau&8)) /* not in atomic seqs */
6732 {
6733 #ifndef SAFETY
6734 if (_n != 0
6735 #ifdef VERI
6736 /* --after-- a program-step, i.e., */
6737 /* after backtracking a claim-step */
6738 && (trpt->tau&4)
6739 /* with at least one running process */
6740 /* unless in a stuttered accept state */
6741 && ((now._nr_pr > 1) || (trpt->o_pm&2))
6742 #endif
6743 && !(now._a_t&1))
6744 {
6745 #ifndef NOFAIR
6746 if (fairness)
6747 {
6748 #ifdef VERBOSE
6749 cpu_printf("Consider check %d %d...\n",
6750 now._a_t, now._cnt[0]);
6751 #endif
6752 if ((now._a_t&2) /* A-bit */
6753 && (now._cnt[0] == 1))
6754 checkcycles();
6755 } else
6756 #endif
6757 if (a_cycles && (trpt->o_pm&2))
6758 checkcycles();
6759 }
6760 #endif
6761 #ifndef MA
6762 #if defined(FULLSTACK) || defined(CNTRSTACK)
6763 #ifdef VERI
6764 if (boq == -1
6765 && (((trpt->tau&4) && !(trpt->tau&128))
6766 || ( (trpt-1)->tau&128)))
6767 #else
6768 if (boq == -1)
6769 #endif
6770 {
6771 #ifdef DEBUG2
6772 #if defined(FULLSTACK)
6773 printf("%d: zapping %u (%d)\n",
6774 depth, trpt->ostate,
6775 (trpt->ostate)?trpt->ostate->tagged:0);
6776 #endif
6777 #endif
6778 onstack_zap();
6779 }
6780 #endif
6781 #else
6782 #ifdef VERI
6783 if (boq == -1
6784 && (((trpt->tau&4) && !(trpt->tau&128))
6785 || ( (trpt-1)->tau&128)))
6786 #else
6787 if (boq == -1)
6788 #endif
6789 {
6790 #ifdef DEBUG
6791 printf("%d: zapping\n", depth);
6792 #endif
6793 onstack_zap();
6794 #ifndef NOREDUCE
6795 if (trpt->proviso)
6796 gstore((char *) &now, vsize, 1);
6797 #endif
6798 }
6799 #endif
6800 }
6801 if (depth > 0)
6802 {
6803 #if NCORE>1 && defined(FULL_TRAIL)
6804 if (upto > 0)
6805 { Pop_Stack_Tree();
6806 }
6807 #endif
6808 goto Up;
6809 }
6810 }
6811
6812 #else
6813 void new_state(void) { /* placeholder -- the real search routine above is compiled out in this build configuration */ }
6814 #endif
6815
6816 void
6817 assert(int a, char *s, int ii, int tt, Trans *t)
6818 {
6819 if (!a && !noasserts)
6820 { char bad[1024];
6821 strcpy(bad, "assertion violated ");
6822 if (strlen(s) > 1000)
6823 { strncpy(&bad[19], (const char *) s, 1000);
6824 bad[1019] = '\0';
6825 } else
6826 strcpy(&bad[19], s);
6827 uerror(bad);
6828 }
6829 }
6830 #ifndef NOBOUNDCHECK
6831 int
6832 Boundcheck(int x, int y, int a1, int a2, Trans *a3)
6833 {
6834 assert((x >= 0 && x < y), "- invalid array index",
6835 a1, a2, a3);
6836 return x;
6837 }
6838 #endif
/*
 * Print the end-of-run search statistics: states stored/matched,
 * transition counts, atomic steps, and storage-mode specific data
 * (hash-table conflicts for full state storage, hash factor and
 * bits-per-state for bitstate mode, disk I/O counts for BFS_DISK).
 * The exact set of lines printed depends on the compile-time
 * configuration flags (BFS, SYNC, BITSTATE, CHECK, ...).
 */
6839 void
6840 wrap_stats(void)
6841 {
/* nShadow > 0 means some visited states were not stored (e.g. 2nd dfs shadows) */
6842 	if (nShadow>0)
6843 	printf("%9.8g states, stored (%g visited)\n",
6844 	nstates - nShadow, nstates);
6845 	else
6846 	printf("%9.8g states, stored\n", nstates);
6847 #ifdef BFS
6848 #if SYNC
6849 	printf("	%8g nominal states (- rv and atomic)\n", nstates-midrv-nlinks+revrv);
6850 	printf("	%8g rvs succeeded\n", midrv-failedrv);
6851 #else
6852 	printf("	%8g nominal states (stored-atomic)\n", nstates-nlinks);
6853 #endif
6854 #ifdef DEBUG
6855 	printf("	%8g midrv\n", midrv);
6856 	printf("	%8g failedrv\n", failedrv);
6857 	printf("	%8g revrv\n", revrv);
6858 #endif
6859 #endif
6860 	printf("%9.8g states, matched\n", truncs);
6861 #ifdef CHECK
6862 	printf("%9.8g matches within stack\n",truncs2);
6863 #endif
6864 	if (nShadow>0)
6865 	printf("%9.8g transitions (= visited+matched)\n",
6866 	nstates+truncs);
6867 	else
6868 	printf("%9.8g transitions (= stored+matched)\n",
6869 	nstates+truncs);
6870 	printf("%9.8g atomic steps\n", nlinks);
6871 	if (nlost) printf("%g lost messages\n", (double) nlost);
6872 
/* full state storage: report hash-table behavior */
6873 #ifndef BITSTATE
6874 	printf("hash conflicts: %9.8g (resolved)\n", hcmp)
6875 #ifndef AUTO_RESIZE
6876 	if (hcmp > (double) (1<<ssize))
6877 	{	printf("hint: increase hashtable-size (-w) to reduce runtime\n");
6878 	}	/* in multi-core: also reduces lock delays on access to hashtable */
6879 #endif
6880 #else
/* bitstate mode: report hash factor (free bits per state stored) */
6881 #ifdef CHECK
6882 	printf("%8g states allocated for dfs stack\n", ngrabs);
6883 #endif
6884 	if (udmem)
6885 	printf("\nhash factor: %4g (best if > 100.)\n\n",
6886 	(double)(((double) udmem) * 8.0) / (double) nstates);
6887 	else
6888 	printf("\nhash factor: %4g (best if > 100.)\n\n",
6889 	(double)(1<<(ssize-8)) / (double) nstates * 256.0);
6890 	printf("bits set per state: %u (-k%u)\n", hfns, hfns);
6891 #if 0
6892 	if (udmem)
6893 	{	printf("total bits available: %8g (-M%ld)\n",
6894 	((double) udmem) * 8.0, udmem/(1024L*1024L));
6895 	} else
6896 	printf("total bits available: %8g (-w%d)\n",
6897 	((double) (ONE_L << (ssize-4)) * 16.0), ssize);
6898 #endif
6899 #endif
6900 #ifdef BFS_DISK
/* disk-backed BFS queue: report I/O balance and remove the temp file */
6901 	printf("bfs disk reads: %ld writes %ld -- diff %ld\n",
6902 	bfs_dsk_reads, bfs_dsk_writes, bfs_dsk_writes-bfs_dsk_reads);
6903 	if (bfs_dsk_read >= 0) (void) close(bfs_dsk_read);
6904 	if (bfs_dsk_write >= 0) (void) close(bfs_dsk_write);
6905 	(void) unlink("pan_bfs_dsk.tmp");
6906 #endif
6907 }
6908
/*
 * wrapup: print the final verification report and terminate.
 * Reports the search configuration (compile-time options), what was
 * checked (assertions, cycles, end states), the state-vector size and
 * search depth, error count, hash/stack statistics, and a memory-usage
 * breakdown; then runs the unreached-state listing via do_reach().
 * In multi-core mode only core 0 prints the report; other cores just
 * flag termination in shared memory and exit.
 * Globals read include: done, verbose, errors, nstates, nShadow,
 * memcnt, fragment, maxdepth, ssize, hmax, mreached, noasserts,
 * noends, a_cycles, fairness, no_rck.
 */
void
wrapup(void)
{
#if defined(BITSTATE) || !defined(NOCOMP)
	/* nr1..nr5: memory attributed to states, DFS stack, hash array,
	 * proc/chan stacks, and bit/full stack respectively (bytes) */
	double nr1, nr2, nr3 = 0.0, nr4, nr5 = 0.0;
#if !defined(MA) && (defined(MEMCNT) || defined(MEMLIM))
	int mverbose = 1;
#else
	int mverbose = verbose;
#endif
#endif
#if NCORE>1
	if (verbose) cpu_printf("wrapup -- %d error(s)\n", errors);
	if (core_id != 0)	/* only core 0 prints the report below */
	{
#ifdef USE_DISK
		void dsk_stats(void);
		dsk_stats();
#endif
		if (search_terminated != NULL)
		{	*search_terminated |= 2; /* wrapup */
		}
		exit(0); /* normal termination, not an error */
	}
#endif
#if !defined(WIN32) && !defined(WIN64)
	signal(SIGINT, SIG_DFL);	/* report is underway; stop catching ^C */
#endif
	printf("\n(%s)\n", SpinVersion);
	if (!done) printf("Warning: Search not completed\n");
#ifdef SC
	(void) unlink((const char *)stackfile);	/* remove disk-stack temp file */
#endif
	/* banner: which search options were compiled in */
#if NCORE>1
	if (a_cycles)
	{	printf(" + Multi-Core (NCORE=%d)\n", NCORE);
	} else
	{	printf(" + Multi-Core (NCORE=%d -z%d)\n", NCORE, z_handoff);
	}
#endif
#ifdef BFS
	printf(" + Using Breadth-First Search\n");
#endif
#ifndef NOREDUCE
	printf(" + Partial Order Reduction\n");
#endif
#ifdef REVERSE
	printf(" + Reverse Depth-First Search Order\n");
#endif
#ifdef T_REVERSE
	printf(" + Reverse Transition Ordering\n");
#endif
#ifdef RANDOMIZE
	printf(" + Randomized Transition Ordering\n");
#endif
#ifdef SCHED
	printf(" + Scheduling Restriction (-DSCHED=%d)\n", sched_max);
#endif
#ifdef COLLAPSE
	printf(" + Compression\n");
#endif
#ifdef MA
	printf(" + Graph Encoding (-DMA=%d)\n", MA);
#ifdef R_XPT
	printf(" Restarted from checkpoint %s.xpt\n", PanSource);
#endif
#endif
#ifdef CHECK
#ifdef FULLSTACK
	printf(" + FullStack Matching\n");
#endif
#ifdef CNTRSTACK
	printf(" + CntrStack Matching\n");
#endif
#endif
	/* what kind of statespace search was performed */
#ifdef BITSTATE
	printf("\nBit statespace search for:\n");
#else
#ifdef HC
	printf("\nHash-Compact %d search for:\n", HC);
#else
	printf("\nFull statespace search for:\n");
#endif
#endif
#ifdef EVENT_TRACE
#ifdef NEGATED_TRACE
	printf(" notrace assertion +\n");
#else
	printf(" trace assertion +\n");
#endif
#endif
#ifdef VERI
	printf(" never claim +\n");
	printf(" assertion violations ");
	if (noasserts)
		printf("- (disabled by -A flag)\n");
	else
		printf("+ (if within scope of claim)\n");
#else
#ifdef NOCLAIM
	printf(" never claim - (not selected)\n");
#else
	printf(" never claim - (none specified)\n");
#endif
	printf(" assertion violations ");
	if (noasserts)
		printf("- (disabled by -A flag)\n");
	else
		printf("+\n");
#endif
#ifndef SAFETY
#ifdef NP
	printf(" non-progress cycles ");
#else
	printf(" acceptance cycles ");
#endif
	if (a_cycles)
		printf("+ (fairness %sabled)\n",
			fairness?"en":"dis");
	else printf("- (not selected)\n");
#else
	printf(" cycle checks - (disabled by -DSAFETY)\n");
#endif
#ifdef VERI
	printf(" invalid end states - ");
	printf("(disabled by ");
	if (noends)
		printf("-E flag)\n\n");
	else
		printf("never claim)\n\n");
#else
	printf(" invalid end states ");
	if (noends)
		printf("- (disabled by -E flag)\n\n");
	else
		printf("+\n\n");
#endif
	/* headline numbers: vector size, depth, error count */
	printf("State-vector %d byte, depth reached %ld", hmax,
#if NCORE>1
		(nr_handoffs * z_handoff) +
#endif
		mreached);
	printf(", errors: %d\n", errors);
	fflush(stdout);
#ifdef MA
	if (done)
	{	extern void dfa_stats(void);
		if (maxgs+a_cycles+2 < MA)
			printf("MA stats: -DMA=%d is sufficient\n",
				maxgs+a_cycles+2);
		dfa_stats();
	}
#endif
	wrap_stats();
#ifdef CHECK
	printf("stackframes: %d/%d\n\n", smax, svmax);
	printf("stats: fa %d, fh %d, zh %d, zn %d - ",
		Fa, Fh, Zh, Zn);
	printf("check %d holds %d\n", Ccheck, Cholds);
	printf("stack stats: puts %d, probes %d, zaps %d\n",
		PUT, PROBE, ZAPS);
#else
	printf("\n");
#endif

	/* memory-usage breakdown; printed when verbose or when the
	 * accounted categories do not add up to the measured total */
#if defined(BITSTATE) || !defined(NOCOMP)
	nr1 = (nstates-nShadow)*
		(double)(hmax+sizeof(struct H_el)-sizeof(unsigned));
#ifdef BFS
	nr2 = 0.0;
#else
	nr2 = (double) ((maxdepth+3)*sizeof(Trail));
#endif
#ifndef BITSTATE
#if !defined(MA) || defined(COLLAPSE)
	nr3 = (double) (ONE_L<<ssize)*sizeof(struct H_el *);
#endif
#else
	if (udmem)
		nr3 = (double) (udmem);
	else
		nr3 = (double) (ONE_L<<(ssize-3));
#ifdef CNTRSTACK
	nr5 = (double) (ONE_L<<(ssize-3));
#endif
#ifdef FULLSTACK
	nr5 = (double) (maxdepth*sizeof(struct H_el *));
#endif
#endif
	nr4 = (double) (svmax * (sizeof(Svtack) + hmax))
	    + (double) (smax * (sizeof(Stack) + Maxbody));
#ifndef MA
	if (mverbose || memcnt < nr1+nr2+nr3+nr4+nr5)
#endif
	{	double remainder = memcnt;
		double tmp_nr = memcnt-nr3-nr4-(nr2-fragment)-nr5;
#if NCORE>1 && !defined(SEP_STATE)
		tmp_nr -= ((double) NCORE * LWQ_SIZE) + GWQ_SIZE;
#endif
		if (tmp_nr < 0.0) tmp_nr = 0.;
		printf("Stats on memory usage (in Megabytes):\n");
		printf("%9.3f equivalent memory usage for states",
			nr1/1048576.); /* 1024*1024=1048576 */
		printf(" (stored*(State-vector + overhead))\n");
#if NCORE>1 && !defined(WIN32) && !defined(WIN64)
		printf("%9.3f shared memory reserved for state storage\n",
			mem_reserved/1048576.);
#ifdef SEP_HEAP
		printf(" in %d local heaps of %7.3f MB each\n",
			NCORE, mem_reserved/(NCORE*1048576.));
#endif
		printf("\n");
#endif
#ifdef BITSTATE
		if (udmem)
			printf("%9.3f memory used for hash array (-M%ld)\n",
				nr3/1048576., udmem/(1024L*1024L));
		else
			printf("%9.3f memory used for hash array (-w%d)\n",
				nr3/1048576., ssize);
		if (nr5 > 0.0)
			printf("%9.3f memory used for bit stack\n",
				nr5/1048576.);
		remainder = remainder - nr3 - nr5;
#else
		printf("%9.3f actual memory usage for states",
			tmp_nr/1048576.);
		remainder -= tmp_nr;
		printf(" (");
		if (tmp_nr > 0.)
		{	if (tmp_nr > nr1) printf("unsuccessful ");
			printf("compression: %.2f%%)\n",
				(100.0*tmp_nr)/nr1);
		} else
			printf("less than 1k)\n");
#ifndef MA
		if (tmp_nr > 0.)
		{	printf(" state-vector as stored = %.0f byte",
				(tmp_nr)/(nstates-nShadow) -
				(double) (sizeof(struct H_el) - sizeof(unsigned)));
			printf(" + %ld byte overhead\n",
				(long int) sizeof(struct H_el)-sizeof(unsigned));
		}
#endif
#if !defined(MA) || defined(COLLAPSE)
		printf("%9.3f memory used for hash table (-w%d)\n",
			nr3/1048576., ssize);
		remainder -= nr3;
#endif
#endif
#ifndef BFS
		printf("%9.3f memory used for DFS stack (-m%ld)\n",
			nr2/1048576., maxdepth);
		remainder -= nr2;
#endif
#if NCORE>1
		remainder -= ((double) NCORE * LWQ_SIZE) + GWQ_SIZE;
		printf("%9.3f shared memory used for work-queues\n",
			(GWQ_SIZE + (double) NCORE * LWQ_SIZE) /1048576.);
		printf(" in %d queues of %7.3f MB each",
			NCORE, (double) LWQ_SIZE /1048576.);
#ifndef NGQ
		printf(" + a global q of %7.3f MB\n",
			(double) GWQ_SIZE / 1048576.);
#else
		printf("\n");
#endif
#endif
		if (remainder - fragment > 1048576.)
			printf("%9.3f other (proc and chan stacks)\n",
				(remainder-fragment)/1048576.);
		if (fragment > 1048576.)
			printf("%9.3f memory lost to fragmentation\n",
				fragment/1048576.);
		printf("%9.3f total actual memory usage\n\n",
			memcnt/1048576.);
	}
#ifndef MA
	else
#endif
#endif
#ifndef MA
	printf("%9.3f memory usage (Mbyte)\n\n",
		memcnt/1048576.);
#endif
#ifdef COLLAPSE
	printf("nr of templates: [ globals chans procs ]\n");
	printf("collapse counts: [ ");
	{	int i; for (i = 0; i < 256+2; i++)
			if (ncomps[i] != 0)
				printf("%d ", ncomps[i]);
		printf("]\n");
	}
#endif
	/* unreached-state listing, unless suppressed with -n */
	if ((done || verbose) && !no_rck) do_reach();
#ifdef PEG
	{	int i;
		printf("\nPeg Counts (transitions executed):\n");
		for (i = 1; i < NTRANS; i++)
		{	if (peg[i]) putpeg(i, peg[i]);
	}	}
#endif
#ifdef VAR_RANGES
	dumpranges();
#endif
#ifdef SVDUMP
	if (vprefix > 0) close(svfd);
#endif
#ifdef LOOPSTATE
	printf("%g loopstates hit\n", cnt_loops);
#endif
#ifdef NSUCC
	dump_succ();
#endif
#if NCORE>1 && defined(T_ALERT)
	crash_report();
#endif
	pan_exit(0);
}
7228
/*
 * stopped: SIGINT handler. Announce the interrupt, emit the final
 * statistics report through wrapup(), and terminate via pan_exit().
 * wrapup() does not return (it ends in pan_exit(0)), so the trailing
 * pan_exit(0) here is a defensive fallback.
 */
void
stopped(int arg)
{
	(void) arg;	/* signal number, unused */
	printf("Interrupted\n");
#if NCORE>1
	was_interrupted = 1;	/* tell the other cores why we stop */
#endif
	wrapup();
	pan_exit(0);
}
7238
#ifdef SFH
/*
 * super fast hash, based on Paul Hsieh's function
 * http://www.azillionmonkeys.com/qed/hash.html
 */
#include <stdint.h>
#undef get16bits
/* fast path: read two bytes with a single (possibly unaligned) 16-bit
 * load on compilers/targets known to allow it */
#if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
	|| defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
#define get16bits(d) (*((const uint16_t *) (d)))
#endif

/* portable fallback: assemble the 16-bit value little-endian, byte by byte */
#ifndef get16bits
#define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8)\
			+(uint32_t)(((const uint8_t *)(d))[0]) )
#endif

/*
 * d_sfh: hash the len bytes at s and store the 32-bit result in the
 * global K1 (read afterwards by s_hash()).  Processes the input four
 * bytes per iteration, then folds in the 0-3 remaining bytes, then
 * applies a final avalanche sequence.
 */
void
d_sfh(const char *s, int len)
{	uint32_t h = len, tmp;	/* seed with the length */
	int rem;

	rem = len & 3;	/* leftover bytes after the 4-byte loop */
	len >>= 2;	/* nr of 4-byte groups */

	for ( ; len > 0; len--)
	{	h += get16bits(s);
		tmp = (get16bits(s+2) << 11) ^ h;
		h = (h << 16) ^ tmp;
		s += 2*sizeof(uint16_t);
		h += h >> 11;
	}
	/* fold in the trailing 1-3 bytes */
	switch (rem) {
	case 3: h += get16bits(s);
		h ^= h << 16;
		/* NOTE(review): s is plain char, so this byte may be
		 * sign-extended on signed-char platforms -- matches the
		 * upstream Hsieh code, but confirm if porting */
		h ^= s[sizeof(uint16_t)] << 18;
		h += h >> 11;
		break;
	case 2: h += get16bits(s);
		h ^= h << 11;
		h += h >> 17;
		break;
	case 1: h += *s;
		h ^= h << 10;
		h += h >> 1;
		break;
	}
	/* final avalanche: force remaining bits to affect the result */
	h ^= h << 3;
	h += h >> 5;
	h ^= h << 4;
	h += h >> 17;
	h ^= h << 25;
	h += h >> 6;

	K1 = h;	/* result is delivered via this global */
}
#endif
7296
#include <stdint.h>
#if defined(HASH64) || defined(WIN64)
/* 64-bit Jenkins hash, 1997
 * http://burtleburtle.net/bob/c/lookup8.c
 */
/* mix: reversible mixing of three 64-bit accumulators (lookup8 style) */
#define mix(a,b,c) \
{	a -= b; a -= c; a ^= (c>>43); \
	b -= c; b -= a; b ^= (a<<9); \
	c -= a; c -= b; c ^= (b>>8); \
	a -= b; a -= c; a ^= (c>>38); \
	b -= c; b -= a; b ^= (a<<23); \
	c -= a; c -= b; c ^= (b>>5); \
	a -= b; a -= c; a ^= (c>>35); \
	b -= c; b -= a; b ^= (a<<49); \
	c -= a; c -= b; c ^= (b>>11); \
	a -= b; a -= c; a ^= (c>>12); \
	b -= c; b -= a; b ^= (a<<18); \
	c -= a; c -= b; c ^= (b>>22); \
}
#else
/* 32-bit Jenkins hash, 2006
 * http://burtleburtle.net/bob/c/lookup3.c
 */
/* rot: rotate the 32-bit value x left by k bits */
#define rot(x,k) (((x)<<(k))|((x)>>(32-(k))))

/* mix: lookup3 internal mixing round for three 32-bit accumulators */
#define mix(a,b,c) \
{	a -= c; a ^= rot(c, 4); c += b; \
	b -= a; b ^= rot(a, 6); a += c; \
	c -= b; c ^= rot(b, 8); b += a; \
	a -= c; a ^= rot(c,16); c += b; \
	b -= a; b ^= rot(a,19); a += c; \
	c -= b; c ^= rot(b, 4); b += a; \
}

/* final: lookup3 final mixing, applied once after the last block */
#define final(a,b,c) \
{	c ^= b; c -= rot(b,14); \
	a ^= c; a -= rot(c,11); \
	b ^= a; b -= rot(a,25); \
	c ^= b; c -= rot(b,16); \
	a ^= c; a -= rot(c,4); \
	b ^= a; b -= rot(a,14); \
	c ^= b; c -= rot(b,24); \
}
#endif
7341
/*
 * d_hash: Jenkins hash over the nbytes-byte buffer at kb.
 * Delivers its results through globals: K1/K2 (full hash words) and
 * j1..j4 (table index and bit positions derived from the hash), which
 * are consumed by s_hash() and the bitstate store routines.
 * Selects the 64-bit lookup8 variant under -DHASH64/WIN64, the 32-bit
 * lookup3 variant otherwise; WS is the word size in bytes.
 */
void
d_hash(uchar *kb, int nbytes)
{	uint8_t *bp;
#if defined(HASH64) || defined(WIN64)
	uint64_t a = 0, b, c, n;
	uint64_t *k = (uint64_t *) kb;	/* word-wise view of the buffer */
#else
	uint32_t a, b, c, n;
	uint32_t *k = (uint32_t *) kb;
#endif
	/* extend to multiple of words, if needed */
	n = nbytes/WS; /* nr of words */
	a = nbytes - (n*WS);	/* leftover bytes in the last partial word */
	if (a > 0)
	{	n++;
		bp = kb + nbytes;
		/* NOTE(review): zero-pads up to WS-1 bytes PAST kb+nbytes;
		 * assumes the caller's buffer has that slack space
		 * (state vectors appear to be allocated padded) -- confirm */
		switch (a) {
		case 3: *bp++ = 0; /* fall thru */
		case 2: *bp++ = 0; /* fall thru */
		case 1: *bp = 0;
		case 0: break;
	}	}
#if defined(HASH64) || defined(WIN64)
	b = HASH_CONST[HASH_NR];	/* per-run seed (-h / -R selects) */
	c = 0x9e3779b97f4a7c13LL; /* arbitrary value */
	while (n >= 3)	/* consume three words per mixing round */
	{	a += k[0];
		b += k[1];
		c += k[2];
		mix(a,b,c);
		n -= 3;
		k += 3;
	}
	c += (((uint64_t) nbytes)<<3);	/* fold in the length in bits */
	switch (n) {	/* remaining 0-2 words, fall-through intended */
	case 2: b += k[1];
	case 1: a += k[0];
	case 0: break;
	}
	mix(a,b,c);
#else
	a = c = 0xdeadbeef + (n<<2);	/* lookup3 initialization */
	b = HASH_CONST[HASH_NR];
	while (n > 3)
	{	a += k[0];
		b += k[1];
		c += k[2];
		mix(a,b,c);
		n -= 3;
		k += 3;
	}
	switch (n) {	/* final 0-3 words, fall-through intended */
	case 3: c += k[2];
	case 2: b += k[1];
	case 1: a += k[0];
	case 0: break;
	}
	final(a,b,c);
#endif
	j1 = c&nmask; j3 = a&7; /* 1st bit */
	j2 = b&nmask; j4 = (a>>3)&7; /* 2nd bit */
	K1 = c; K2 = b;	/* full hash words for match/compare */
}
7405
/*
 * s_hash: hash the om-byte state vector at cp and derive the primary
 * hashtable index j1 from the resulting hash word K1.
 * Uses the SuperFastHash (d_sfh) when compiled -DSFH, the Jenkins
 * hash (d_hash) otherwise; both deliver their result in global K1.
 */
void
s_hash(uchar *cp, int om)
{
#if defined(SFH)
	d_sfh((const char *) cp, om); /* sets K1 */
#else
	d_hash(cp, om); /* sets K1 etc */
#endif
#ifdef BITSTATE
	/* NOTE(review): when hashing into the stack table (S_Tab) the
	 * index is bounded by omaxdepth instead of the table mask -- this
	 * relies on S_Tab aliasing H_tab only in that mode; confirm */
	if (S_Tab == H_tab)
		j1 = K1 % omaxdepth;
	else
#endif
	if (ssize < 8*WS)	/* hash wider than table: mask down to size */
		j1 = K1&mask;
	else
		j1 = K1;
}
7424 #ifndef RANDSTOR
7425 int *prerand;
7426 void
7427 inirand(void)
7428 { int i;
7429 srand(123); /* fixed startpoint */
7430 prerand = (int *) emalloc((omaxdepth+3)*sizeof(int));
7431 for (i = 0; i < omaxdepth+3; i++)
7432 prerand[i] = rand();
7433 }
7434 int
7435 pan_rand(void)
7436 { if (!prerand) inirand();
7437 return prerand[depth];
7438 }
7439 #endif
7440
/*
 * set_masks: derive the hash masks from the word size WS (bytes) and
 * the hashtable exponent ssize (-w flag):
 *   mask  - masks a hash value down to the table's index range
 *   nmask - same, reduced by 3 bits under BITSTATE (bit- vs byte-index)
 * Exits with an error for unsupported word sizes.
 */
void
set_masks(void) /* 4.2.5 */
{
	if (WS == 4 && ssize >= 32)
	{	/* full 32-bit mask; shifting ONE_L by >=32 would overflow */
		mask = 0xffffffff;
#ifdef BITSTATE
		switch (ssize) {
		case 34: nmask = (mask>>1); break;
		case 33: nmask = (mask>>2); break;
		default: nmask = (mask>>3); break;
		}
#else
		nmask = mask;
#endif
	} else if (WS == 8)
	{	mask = ((ONE_L<<ssize)-1); /* hash init */
#ifdef BITSTATE
		nmask = mask>>3;	/* byte index within the bit array */
#else
		nmask = mask;
#endif
	} else if (WS != 4)
	{	fprintf(stderr, "pan: wordsize %ld not supported\n", (long int) WS);
		exit(1);
	} else /* WS == 4 and ssize < 32 */
	{	mask = ((ONE_L<<ssize)-1); /* hash init */
		nmask = (mask>>3);
	}
}
7470
/* memory of the old hashtable, kept for later reclamation
 * NOTE(review): reclaim_size is set to the old table's slot count,
 * not its byte size -- verify against the code that consumes it */
static long reclaim_size;
static char *reclaim_mem;
#if defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(MA)
#if NCORE>1
#error cannot combine AUTO_RESIZE with NCORE>1 yet
#endif
static struct H_el **N_tab;	/* replacement table being populated */
/*
 * reverse_capture: re-insert every element of a hash chain into the
 * new table N_tab.  Recurses to the end of the chain first so that
 * elements are re-inserted back-to-front, preserving chain order
 * under the push-front insertion below.
 */
void
reverse_capture(struct H_el *p)
{	if (!p) return;
	reverse_capture(p->nxt);
	/* last element of list moves first */
	/* to preserve list-order */
	j2 = p->m_K1;	/* stored hash word selects the new slot */
	if (ssize < 8*WS) /* probably always true */
	{	j2 &= mask;
	}
	p->nxt = N_tab[j2];
	N_tab[j2] = p;
}
/*
 * resize_hashtable: grow the hashtable 4x (ssize += 2), rehash every
 * stored state into the new table, and remember the old table for
 * memory reclamation.  No-op once the 32-bit slot limit is reached.
 */
void
resize_hashtable(void)
{
	if (WS == 4 && ssize >= 27 - 1)
	{	return; /* cannot increase further */
	}

	ssize += 2; /* 4x size */

	printf("pan: resizing hashtable to -w%d.. ", ssize);

	N_tab = (struct H_el **)
		emalloc((ONE_L<<ssize)*sizeof(struct H_el *));

	set_masks(); /* they changed */

	/* old table had 2^(ssize-2) slots (ssize was already bumped) */
	for (j1 = 0; j1 < (ONE_L << (ssize - 2)); j1++)
	{	reverse_capture(H_tab[j1]);
	}
	reclaim_mem = (char *) H_tab;
	reclaim_size = (ONE_L << (ssize - 2));
	H_tab = N_tab;

	printf(" done\n");
}
#endif
#if defined(ZAPH) && defined(BITSTATE)
/*
 * zap_hashtable: clear the bitstate hash array SS so the search can
 * restart with an empty statespace.  The array is udmem bytes when
 * sized explicitly with -M/-G, and 2^(ssize-3) bytes otherwise.
 */
void
zap_hashtable(void)
{	cpu_printf("pan: resetting hashtable\n");
	if (!udmem)
	{	memset(SS, 0, ONE_L<<(ssize-3));
	} else
	{	memset(SS, 0, udmem);
	}
}
#endif
7528
7529 int
7530 main(int argc, char *argv[])
7531 { void to_compile(void);
7532
7533 efd = stderr; /* default */
7534 #ifdef BITSTATE
7535 bstore = bstore_reg; /* default */
7536 #endif
7537 #if NCORE>1
7538 { int i, j;
7539 strcpy(o_cmdline, "");
7540 for (j = 1; j < argc; j++)
7541 { strcat(o_cmdline, argv[j]);
7542 strcat(o_cmdline, " ");
7543 }
7544 /* printf("Command Line: %s\n", o_cmdline); */
7545 if (strlen(o_cmdline) >= sizeof(o_cmdline))
7546 { Uerror("option list too long");
7547 } }
7548 #endif
7549 while (argc > 1 && argv[1][0] == '-')
7550 { switch (argv[1][1]) {
7551 #ifndef SAFETY
7552 #ifdef NP
7553 case 'a': fprintf(efd, "error: -a disabled");
7554 usage(efd); break;
7555 #else
7556 case 'a': a_cycles = 1; break;
7557 #endif
7558 #endif
7559 case 'A': noasserts = 1; break;
7560 case 'b': bounded = 1; break;
7561 #ifdef HAS_CODE
7562 case 'C': coltrace = 1; goto samething;
7563 #endif
7564 case 'c': upto = atoi(&argv[1][2]); break;
7565 case 'd': state_tables++; break;
7566 case 'e': every_error = 1; Nr_Trails = 1; break;
7567 case 'E': noends = 1; break;
7568 #ifdef SC
7569 case 'F': if (strlen(argv[1]) > 2)
7570 stackfile = &argv[1][2];
7571 break;
7572 #endif
7573 #if !defined(SAFETY) && !defined(NOFAIR)
7574 case 'f': fairness = 1; break;
7575 #endif
7576 #ifdef HAS_CODE
7577 case 'g': gui = 1; goto samething;
7578 #endif
7579 case 'h': if (!argv[1][2]) usage(efd); else
7580 HASH_NR = atoi(&argv[1][2])%33; break;
7581 case 'I': iterative = 2; every_error = 1; break;
7582 case 'i': iterative = 1; every_error = 1; break;
7583 case 'J': like_java = 1; break; /* Klaus Havelund */
7584 #ifdef BITSTATE
7585 case 'k': hfns = atoi(&argv[1][2]); break;
7586 #endif
7587 #ifdef SCHED
7588 case 'L': sched_max = atoi(&argv[1][2]); break;
7589 #endif
7590 #ifndef SAFETY
7591 #ifdef NP
7592 case 'l': a_cycles = 1; break;
7593 #else
7594 case 'l': fprintf(efd, "error: -l disabled");
7595 usage(efd); break;
7596 #endif
7597 #endif
7598 #ifdef BITSTATE
7599 case 'M': udmem = atoi(&argv[1][2]); break;
7600 case 'G': udmem = atoi(&argv[1][2]); udmem *= 1024; break;
7601 #else
7602 case 'M': case 'G':
7603 fprintf(stderr, "-M and -G affect only -DBITSTATE\n");
7604 break;
7605 #endif
7606 case 'm': maxdepth = atoi(&argv[1][2]); break;
7607 case 'n': no_rck = 1; break;
7608 case 'P': readtrail = 1; onlyproc = atoi(&argv[1][2]);
7609 if (argv[2][0] != '-') /* check next arg */
7610 { trailfilename = argv[2];
7611 argc--; argv++; /* skip next arg */
7612 }
7613 break;
7614 #ifdef SVDUMP
7615 case 'p': vprefix = atoi(&argv[1][2]); break;
7616 #endif
7617 #if NCORE==1
7618 case 'Q': quota = (double) 60.0 * (double) atoi(&argv[1][2]); break;
7619 #endif
7620 case 'q': strict = 1; break;
7621 case 'R': Nrun = atoi(&argv[1][2]); break;
7622 #ifdef HAS_CODE
7623 case 'r':
7624 samething: readtrail = 1;
7625 if (isdigit(argv[1][2]))
7626 whichtrail = atoi(&argv[1][2]);
7627 else if (argc > 2 && argv[2][0] != '-') /* check next arg */
7628 { trailfilename = argv[2];
7629 argc--; argv++; /* skip next arg */
7630 }
7631 break;
7632 case 'S': silent = 1; goto samething;
7633 #endif
7634 #ifdef BITSTATE
7635 case 's': hfns = 1; break;
7636 #endif
7637 case 'T': TMODE = 0444; break;
7638 case 't': if (argv[1][2]) tprefix = &argv[1][2]; break;
7639 case 'V': start_timer(); printf("Generated by %s\n", SpinVersion);
7640 to_compile(); pan_exit(2); break;
7641 case 'v': verbose++; break;
7642 case 'w': ssize = atoi(&argv[1][2]); break;
7643 case 'Y': signoff = 1; break;
7644 case 'X': efd = stdout; break;
7645 case 'x': exclusive = 1; break;
7646 #if NCORE>1
7647 /* -B ip is passthru to proxy of remote ip address: */
7648 case 'B': argc--; argv++; break;
7649 case 'Q': worker_pids[0] = atoi(&argv[1][2]); break;
7650 /* -Un means that the nth worker should be instantiated as a proxy */
7651 case 'U': proxy_pid = atoi(&argv[1][2]); break;
7652 /* -W means that this copy is started by a cluster-server as a remote */
7653 /* this flag is passed to ./pan_proxy, which interprets it */
7654 case 'W': remote_party++; break;
7655 case 'Z': core_id = atoi(&argv[1][2]);
7656 if (verbose)
7657 { printf("cpu%d: pid %d parent %d\n",
7658 core_id, getpid(), worker_pids[0]);
7659 }
7660 break;
7661 case 'z': z_handoff = atoi(&argv[1][2]); break;
7662 #else
7663 case 'z': break; /* ignored for single-core */
7664 #endif
7665 default : fprintf(efd, "saw option -%c\n", argv[1][1]); usage(efd); break;
7666 }
7667 argc--; argv++;
7668 }
7669 if (iterative && TMODE != 0666)
7670 { TMODE = 0666;
7671 fprintf(efd, "warning: -T ignored when -i or -I is used\n");
7672 }
7673 #if defined(HASH32) && !defined(SFH)
7674 if (WS > 4)
7675 { fprintf(efd, "strong warning: compiling -DHASH32 on a 64-bit machine\n");
7676 fprintf(efd, " without -DSFH can slow down performance a lot\n");
7677 }
7678 #endif
7679 #if defined(WIN32) || defined(WIN64)
7680 if (TMODE == 0666)
7681 TMODE = _S_IWRITE | _S_IREAD;
7682 else
7683 TMODE = _S_IREAD;
7684 #endif
7685 #if NCORE>1
7686 store_proxy_pid = proxy_pid; /* for checks in mem_file() and someone_crashed() */
7687 if (core_id != 0) { proxy_pid = 0; }
7688 #ifndef SEP_STATE
7689 if (core_id == 0 && a_cycles)
7690 { fprintf(efd, "hint: this search may be more efficient ");
7691 fprintf(efd, "if pan.c is compiled -DSEP_STATE\n");
7692 }
7693 #endif
7694 if (z_handoff < 0)
7695 { z_handoff = 20; /* conservative default - for non-liveness checks */
7696 }
7697 #if defined(NGQ) || defined(LWQ_FIXED)
7698 LWQ_SIZE = (double) (128.*1048576.);
7699 #else
7700 LWQ_SIZE = (double) ( z_handoff + 2.) * (double) sizeof(SM_frame);
7701 #endif
7702 #if NCORE>2
7703 if (a_cycles)
7704 { fprintf(efd, "warning: the intended nr of cores to be used in liveness mode is 2\n");
7705 #ifndef SEP_STATE
7706 fprintf(efd, "warning: without -DSEP_STATE there is no guarantee that all liveness violations are found\n");
7707 #endif
7708 }
7709 #endif
7710 #ifdef HAS_HIDDEN
7711 #error cannot use hidden variables when compiling multi-core
7712 #endif
7713 #endif
7714 #ifdef BITSTATE
7715 if (hfns <= 0)
7716 { hfns = 1;
7717 fprintf(efd, "warning: using -k%d as minimal usable value\n", hfns);
7718 }
7719 #endif
7720 omaxdepth = maxdepth;
7721 #ifdef BITSTATE
7722 if (WS == 4 && ssize > 34)
7723 { ssize = 34;
7724 fprintf(efd, "warning: using -w%d as max usable value\n", ssize);
7725 /*
7726 * -w35 would not work: 35-3 = 32 but 1^31 is the largest
7727 * power of 2 that can be represented in an unsigned long
7728 */
7729 }
7730 #else
7731 if (WS == 4 && ssize > 27)
7732 { ssize = 27;
7733 fprintf(efd, "warning: using -w%d as max usable value\n", ssize);
7734 /*
7735 * for emalloc, the lookup table size multiplies by 4 for the pointers
7736 * the largest power of 2 that can be represented in a ulong is 1^31
7737 * hence the largest number of lookup table slots is 31-4 = 27
7738 */
7739 }
7740 #endif
7741 #ifdef SC
7742 hiwater = HHH = maxdepth-10;
7743 DDD = HHH/2;
7744 if (!stackfile)
7745 { stackfile = (char *) emalloc(strlen(PanSource)+4+1);
7746 sprintf(stackfile, "%s._s_", PanSource);
7747 }
7748 if (iterative)
7749 { fprintf(efd, "error: cannot use -i or -I with -DSC\n");
7750 pan_exit(1);
7751 }
7752 #endif
7753 #if (defined(R_XPT) || defined(W_XPT)) && !defined(MA)
7754 #warning -DR_XPT and -DW_XPT assume -DMA (ignored)
7755 #endif
7756 if (iterative && a_cycles)
7757 fprintf(efd, "warning: -i or -I work for safety properties only\n");
7758 #ifdef BFS
7759 #ifdef SC
7760 #error -DBFS not compatible with -DSC
7761 #endif
7762 #ifdef HAS_LAST
7763 #error -DBFS not compatible with _last
7764 #endif
7765 #ifdef HAS_STACK
7766 #error cannot use c_track UnMatched with BFS
7767 #endif
7768 #ifdef REACH
7769 #warning -DREACH is redundant when -DBFS is used
7770 #endif
7771 #endif
7772 #if defined(MERGED) && defined(PEG)
7773 #error to use -DPEG use: spin -o3 -a
7774 #endif
7775 #ifdef HC
7776 #ifdef SFH
7777 #error cannot combine -DHC and -DSFH
7778 /* use of NOCOMP is the real reason */
7779 #else
7780 #ifdef NOCOMP
7781 #error cannot combine -DHC and -DNOCOMP
7782 #endif
7783 #endif
7784 #ifdef BITSTATE
7785 #error cannot combine -DHC and -DBITSTATE
7786 #endif
7787 #endif
7788 #if defined(SAFETY) && defined(NP)
7789 #error cannot combine -DNP and -DBFS or -DSAFETY
7790 #endif
7791 #ifdef MA
7792 #ifdef BITSTATE
7793 #error cannot combine -DMA and -DBITSTATE
7794 #endif
7795 #if MA <= 0
7796 #error usage: -DMA=N with N > 0 and N < VECTORSZ
7797 #endif
7798 #endif
7799 #ifdef COLLAPSE
7800 #ifdef BITSTATE
7801 #error cannot combine -DBITSTATE and -DCOLLAPSE
7802 #endif
7803 #ifdef SFH
7804 #error cannot combine -DCOLLAPSE and -DSFH
7805 /* use of NOCOMP is the real reason */
7806 #else
7807 #ifdef NOCOMP
7808 #error cannot combine -DCOLLAPSE and -DNOCOMP
7809 #endif
7810 #endif
7811 #endif
7812 if (maxdepth <= 0 || ssize <= 1) usage(efd);
7813 #if SYNC>0 && !defined(NOREDUCE)
7814 if (a_cycles && fairness)
7815 { fprintf(efd, "error: p.o. reduction not compatible with ");
7816 fprintf(efd, "fairness (-f) in models\n");
7817 fprintf(efd, " with rendezvous operations: ");
7818 fprintf(efd, "recompile with -DNOREDUCE\n");
7819 pan_exit(1);
7820 }
7821 #endif
7822 #if defined(REM_VARS) && !defined(NOREDUCE)
7823 #warning p.o. reduction not compatible with remote varrefs (use -DNOREDUCE)
7824 #endif
7825 #if defined(NOCOMP) && !defined(BITSTATE)
7826 if (a_cycles)
7827 { fprintf(efd, "error: use of -DNOCOMP voids -l and -a\n");
7828 pan_exit(1);
7829 }
7830 #endif
7831 #ifdef MEMLIM
7832 memlim = ((double) MEMLIM) * (double) (1<<20); /* size in Mbyte */
7833 #endif
7834 #ifndef BITSTATE
7835 if (Nrun > 1) HASH_NR = Nrun - 1;
7836 #endif
7837 if (Nrun < 1 || Nrun > 32)
7838 { fprintf(efd, "error: invalid arg for -R\n");
7839 usage(efd);
7840 }
7841 #ifndef SAFETY
7842 if (fairness && !a_cycles)
7843 { fprintf(efd, "error: -f requires -a or -l\n");
7844 usage(efd);
7845 }
7846 #if ACCEPT_LAB==0
7847 if (a_cycles)
7848 { fprintf(efd, "error: no accept labels defined ");
7849 fprintf(efd, "in model (for option -a)\n");
7850 usage(efd);
7851 }
7852 #endif
7853 #endif
7854 #ifndef NOREDUCE
7855 #ifdef HAS_ENABLED
7856 #error use of enabled() requires -DNOREDUCE
7857 #endif
7858 #ifdef HAS_PCVALUE
7859 #error use of pcvalue() requires -DNOREDUCE
7860 #endif
7861 #ifdef HAS_BADELSE
7862 #error use of 'else' combined with i/o stmnts requires -DNOREDUCE
7863 #endif
7864 #ifdef HAS_LAST
7865 #error use of _last requires -DNOREDUCE
7866 #endif
7867 #endif
7868 #if SYNC>0 && !defined(NOREDUCE)
7869 #ifdef HAS_UNLESS
7870 fprintf(efd, "warning: use of a rendezvous stmnts in the escape\n");
7871 fprintf(efd, " of an unless clause, if present, could make p.o. reduction\n");
7872 fprintf(efd, " invalid (use -DNOREDUCE to avoid this)\n");
7873 #ifdef BFS
7874 fprintf(efd, " (this type of rv is also not compatible with -DBFS)\n");
7875 #endif
7876 #endif
7877 #endif
7878 #if SYNC>0 && defined(BFS)
7879 #warning use of rendezvous with BFS does not preserve all invalid endstates
7880 #endif
7881 #if !defined(REACH) && !defined(BITSTATE)
7882 if (iterative != 0 && a_cycles == 0)
7883 { fprintf(efd, "warning: -i and -I need -DREACH to work accurately\n");
7884 }
7885 #endif
7886 #if defined(BITSTATE) && defined(REACH)
7887 #warning -DREACH is voided by -DBITSTATE
7888 #endif
7889 #if defined(MA) && defined(REACH)
7890 #warning -DREACH is voided by -DMA
7891 #endif
7892 #if defined(FULLSTACK) && defined(CNTRSTACK)
7893 #error cannot combine -DFULLSTACK and -DCNTRSTACK
7894 #endif
7895 #if defined(VERI)
7896 #if ACCEPT_LAB>0
7897 #ifndef BFS
7898 if (!a_cycles
7899 #ifdef HAS_CODE
7900 && !readtrail
7901 #endif
7902 #if NCORE>1
7903 && core_id == 0
7904 #endif
7905 && !state_tables)
7906 { fprintf(efd, "warning: never claim + accept labels ");
7907 fprintf(efd, "requires -a flag to fully verify\n");
7908 }
7909 #else
7910 if (!state_tables
7911 #ifdef HAS_CODE
7912 && !readtrail
7913 #endif
7914 )
7915 { fprintf(efd, "warning: verification in BFS mode ");
7916 fprintf(efd, "is restricted to safety properties\n");
7917 }
7918 #endif
7919 #endif
7920 #endif
7921 #ifndef SAFETY
7922 if (!a_cycles
7923 #ifdef HAS_CODE
7924 && !readtrail
7925 #endif
7926 #if NCORE>1
7927 && core_id == 0
7928 #endif
7929 && !state_tables)
7930 { fprintf(efd, "hint: this search is more efficient ");
7931 fprintf(efd, "if pan.c is compiled -DSAFETY\n");
7932 }
7933 #ifndef NOCOMP
7934 if (!a_cycles)
7935 { S_A = 0;
7936 } else
7937 { if (!fairness)
7938 { S_A = 1; /* _a_t */
7939 #ifndef NOFAIR
7940 } else /* _a_t and _cnt[NFAIR] */
7941 { S_A = (&(now._cnt[0]) - (uchar *) &now) + NFAIR - 2;
7942 /* -2 because first two uchars in now are masked */
7943 #endif
7944 } }
7945 #endif
7946 #endif
7947 signal(SIGINT, stopped);
7948 set_masks();
7949 #ifdef BFS
7950 trail = (Trail *) emalloc(6*sizeof(Trail));
7951 trail += 3;
7952 #else
7953 trail = (Trail *) emalloc((maxdepth+3)*sizeof(Trail));
7954 trail++; /* protect trpt-1 refs at depth 0 */
7955 #endif
7956 #ifdef SVDUMP
7957 if (vprefix > 0)
7958 { char nm[64];
7959 sprintf(nm, "%s.svd", PanSource);
7960 if ((svfd = creat(nm, TMODE)) < 0)
7961 { fprintf(efd, "couldn't create %s\n", nm);
7962 vprefix = 0;
7963 } }
7964 #endif
7965 #ifdef RANDSTOR
7966 srand(123);
7967 #endif
7968 #if SYNC>0 && ASYNC==0
7969 set_recvs();
7970 #endif
7971 run();
7972 done = 1;
7973 wrapup();
7974 return 0;
7975 }
7976
/*
 * usage: print the list of valid run-time options for this verifier
 * (the set varies with the compile-time configuration) to fd, then
 * exit with status 1.  Called for -h, unknown flags, and invalid
 * option combinations.
 */
void
usage(FILE *fd)
{
	fprintf(fd, "%s\n", SpinVersion);
	fprintf(fd, "Valid Options are:\n");
#ifndef SAFETY
#ifdef NP
	fprintf(fd, " -a -> is disabled by -DNP ");
	fprintf(fd, "(-DNP compiles for -l only)\n");
#else
	fprintf(fd, " -a find acceptance cycles\n");
#endif
#else
	fprintf(fd, " -a,-l,-f -> are disabled by -DSAFETY\n");
#endif
	fprintf(fd, " -A ignore assert() violations\n");
	fprintf(fd, " -b consider it an error to exceed the depth-limit\n");
	fprintf(fd, " -cN stop at Nth error ");
	fprintf(fd, "(defaults to -c1)\n");
	fprintf(fd, " -d print state tables and stop\n");
	fprintf(fd, " -e create trails for all errors\n");
	fprintf(fd, " -E ignore invalid end states\n");
#ifdef SC
	fprintf(fd, " -Ffile use 'file' to store disk-stack\n");
#endif
#ifndef NOFAIR
	fprintf(fd, " -f add weak fairness (to -a or -l)\n");
#endif
	fprintf(fd, " -hN use different hash-seed N:1..32\n");
	fprintf(fd, " -i search for shortest path to error\n");
	fprintf(fd, " -I like -i, but approximate and faster\n");
	fprintf(fd, " -J reverse eval order of nested unlesses\n");
#ifdef BITSTATE
	fprintf(fd, " -kN set N bits per state (defaults to 3)\n");
#endif
#ifdef SCHED
	fprintf(fd, " -LN set scheduling restriction to N (default 10)\n");
#endif
#ifndef SAFETY
#ifdef NP
	fprintf(fd, " -l find non-progress cycles\n");
#else
	fprintf(fd, " -l find non-progress cycles -> ");
	fprintf(fd, "disabled, requires ");
	fprintf(fd, "compilation with -DNP\n");
#endif
#endif
#ifdef BITSTATE
	fprintf(fd, " -MN use N Megabytes for bitstate hash array\n");
	fprintf(fd, " -GN use N Gigabytes for bitstate hash array\n");
#endif
	fprintf(fd, " -mN max depth N steps (default=10k)\n");
	fprintf(fd, " -n no listing of unreached states\n");
#ifdef SVDUMP
	fprintf(fd, " -pN create svfile (save N bytes per state)\n");
#endif
	fprintf(fd, " -QN set time-limit on execution of N minutes\n");
	fprintf(fd, " -q require empty chans in valid end states\n");
#ifdef HAS_CODE
	fprintf(fd, " -r read and execute trail - can add -v,-n,-PN,-g,-C\n");
	fprintf(fd, " -rN read and execute N-th error trail\n");
	fprintf(fd, " -C read and execute trail - columnated output (can add -v,-n)\n");
	fprintf(fd, " -PN read and execute trail - restrict trail output to proc N\n");
	fprintf(fd, " -g read and execute trail + msc gui support\n");
	fprintf(fd, " -S silent replay: only user defined printfs show\n");
#endif
#ifdef BITSTATE
	fprintf(fd, " -RN repeat run Nx with N ");
	fprintf(fd, "[1..32] independent hash functions\n");
	fprintf(fd, " -s same as -k1 (single bit per state)\n");
#endif
	fprintf(fd, " -T create trail files in read-only mode\n");
	fprintf(fd, " -tsuf replace .trail with .suf on trailfiles\n");
	fprintf(fd, " -V print SPIN version number\n");
	fprintf(fd, " -v verbose -- filenames in unreached state listing\n");
	fprintf(fd, " -wN hashtable of 2^N entries ");
	fprintf(fd, "(defaults to -w%d)\n", ssize);
	fprintf(fd, " -x do not overwrite an existing trail file\n");
#if NCORE>1
	fprintf(fd, " -zN handoff states below depth N to 2nd cpu (multi_core)\n");
#endif
#ifdef HAS_CODE
	fprintf(fd, "\n options -r, -C, -PN, -g, and -S can optionally be followed by\n");
	fprintf(fd, " a filename argument, as in '-r filename', naming the trailfile\n");
#endif
#if NCORE>1
	multi_usage(fd);
#endif
	exit(1);	/* usage is always a terminal error path */
}
8067
/*
 * Allocate n raw bytes with malloc(), tracking the running total in
 * the global 'memcnt'.  On exhaustion (or when the -DMEMLIM budget
 * would be exceeded) print diagnostics plus compile-time hints for
 * reducing memory use, then abort the whole run via wrapup().
 * wrapup() does not return, so the trailing statements are reached
 * only on success.
 */
char *
Malloc(unsigned long n)
{	char *tmp;
#ifdef MEMLIM
	if (memcnt+ (double) n > memlim) goto err;
#endif
#if 1
	tmp = (char *) malloc(n);
	if (!tmp)
#else
	/* disabled alternative: allocate by extending the break */
	tmp = (char *) sbrk(n);
	if (tmp == (char *) -ONE_L)
#endif
	{
#ifdef MEMLIM
err:
#endif
		printf("pan: out of memory\n");
#ifdef MEMLIM
		printf(" %g bytes used\n", memcnt);
		printf(" %g bytes more needed\n", (double) n);
		printf(" %g bytes limit\n",
			memlim);
#endif
#ifdef COLLAPSE
		printf("hint: to reduce memory, recompile with\n");
#ifndef MA
		printf(" -DMA=%d # better/slower compression, or\n", hmax);
#endif
		printf(" -DBITSTATE # supertrace, approximation\n");
#else
#ifndef BITSTATE
		printf("hint: to reduce memory, recompile with\n");
#ifndef HC
		printf(" -DCOLLAPSE # good, fast compression, or\n");
#ifndef MA
		printf(" -DMA=%d # better/slower compression, or\n", hmax);
#endif
		printf(" -DHC # hash-compaction, approximation\n");
#endif
		printf(" -DBITSTATE # supertrace, approximation\n");
#endif
#endif
#if NCORE>1
#ifdef FULL_TRAIL
		printf(" omit -DFULL_TRAIL or use pan -c0 to reduce memory\n");
#endif
#ifdef SEP_STATE
		printf("hint: to reduce memory, recompile without\n");
		printf(" -DSEP_STATE # may be faster, but uses more memory\n");
#endif
#endif
		wrapup();	/* exits; no return to caller */
	}
	memcnt += (double) n;
	return tmp;
}
8125
8126 #define CHUNK (100*VECTORSZ)
8127
8128 char *
8129 emalloc(unsigned long n) /* never released or reallocated */
8130 { char *tmp;
8131 if (n == 0)
8132 return (char *) NULL;
8133 if (n&(sizeof(void *)-1)) /* for proper alignment */
8134 n += sizeof(void *)-(n&(sizeof(void *)-1));
8135 if ((unsigned long) left < n)
8136 { grow = (n < CHUNK) ? CHUNK : n;
8137 have = Malloc(grow);
8138 fragment += (double) left;
8139 left = grow;
8140 }
8141 tmp = have;
8142 have += (long) n;
8143 left -= (long) n;
8144 memset(tmp, 0, n);
8145 return tmp;
8146 }
/*
 * Fatal-error wrapper: report the message through uerror() and then
 * terminate the search with wrapup().  Never returns to the caller.
 */
void
Uerror(char *str)
{	/* always fatal */
	uerror(str);
#if NCORE>1
	sudden_stop("Uerror");	/* notify the other cores first */
#endif
	wrapup();
}
8156
8157 #if defined(MA) && !defined(SAFETY)
/*
 * MA + liveness only: undo transitions from the current depth back
 * toward the seed state of the nested (acceptance-cycle) search,
 * restoring the state vector one step at a time until it again equals
 * the snapshot taken in comp_now.  Returns the depth at which the
 * match is found, or 0 when the bottom of the stack is reached.
 */
int
Unwind(void)
{	Trans *t; uchar ot, _m; int tt; short II;
#ifdef VERBOSE
	int i;
#endif
	uchar oat = now._a_t;	/* save, so the snapshot is taken w/o 2nd-dfs bits */
	now._a_t &= ~(1|16|32);
	memcpy((char *) &comp_now, (char *) &now, vsize);
	now._a_t = oat;
Up:
#ifdef SC
	trpt = getframe(depth);	/* stack frames may live on disk */
#endif
#ifdef VERBOSE
	printf("%d State: ", depth);
	for (i = 0; i < vsize; i++) printf("%d%s,",
		((char *)&now)[i], Mask[i]?"*":"");
	printf("\n");
#endif
#ifndef NOFAIR
	if (trpt->o_pm&128) /* fairness alg */
	{	now._cnt[now._a_t&1] = trpt->bup.oval;
		depth--;
#ifdef SC
		trpt = getframe(depth);
#else
		trpt--;
#endif
		goto Q999;
	}
#endif
#ifdef HAS_LAST
#ifdef VERI
	{ int d; Trail *trl;
	  now._last = 0;
	  for (d = 1; d < depth; d++)
	  {	trl = getframe(depth-d); /* was trl = (trpt-d); */
		if (trl->pr != 0)
		{	now._last = trl->pr - BASE;
			break;
	  }	} }
#else
	now._last = (depth<1)?0:(trpt-1)->pr;
#endif
#endif
#ifdef EVENT_TRACE
	now._event = trpt->o_event;
#endif
	/* back at the root of the nested search: clear 2nd-dfs bits */
	if ((now._a_t&1) && depth <= A_depth)
	{	now._a_t &= ~(1|16|32);
		if (fairness) now._a_t |= 2; /* ? */
		A_depth = 0;
		goto CameFromHere; /* checkcycles() */
	}
	t  = trpt->o_t;
	ot = trpt->o_ot; II = trpt->pr;
	tt = trpt->o_tt; this = pptr(II);
	_m = do_reverse(t, II, trpt->o_m);	/* undo the move itself */
#ifdef VERBOSE
	printf("%3d: proc %d ", depth, II);
	printf("reverses %d, %d to %d,",
		t->forw, tt, t->st);
	printf(" %s [abit=%d,adepth=%d,",
		t->tp, now._a_t, A_depth);
	printf("tau=%d,%d] <unwind>\n",
		trpt->tau, (trpt-1)->tau);
#endif
	depth--;
#ifdef SC
	trpt = getframe(depth);
#else
	trpt--;
#endif
	/* reached[ot][t->st] = 1; 3.4.13 */
	((P0 *)this)->_p = tt;	/* restore the process' control state */
#ifndef NOFAIR
	if ((trpt->o_pm&32))
	{
#ifdef VERI
		if (now._cnt[now._a_t&1] == 0)
			now._cnt[now._a_t&1] = 1;
#endif
		now._cnt[now._a_t&1] += 1;
	}
Q999:
	if (trpt->o_pm&8)
	{	now._a_t &= ~2;
		now._cnt[now._a_t&1] = 0;
	}
	if (trpt->o_pm&16)
		now._a_t |= 2;
#endif
CameFromHere:
	if (memcmp((char *) &now, (char *) &comp_now, vsize) == 0)
		return depth;
	if (depth > 0) goto Up;
	return 0;
}
8257 #endif
static char unwinding;	/* nonzero while Unwind() runs: suppresses recursion */
/*
 * Report a (non-fatal unless limits say otherwise) verification error.
 * Prints the message once per distinct string, counts it, writes a
 * trail file when -e (every_error) or the -cN error target applies,
 * and stops the run via wrapup() when enough errors were seen.
 */
void
uerror(char *str)
{	static char laststr[256];	/* zero-initialized: strncpy below stays terminated */
	int is_cycle;

	if (unwinding) return; /* 1.4.2 */
	if (strncmp(str, laststr, 254))	/* only print when message changed */
#if NCORE>1
	cpu_printf("pan: %s (at depth %ld)\n", str,
#else
	printf("pan: %s (at depth %ld)\n", str,
#endif
#if NCORE>1
		(nr_handoffs * z_handoff) +
#endif
		((depthfound==-1)?depth:depthfound));
	strncpy(laststr, str, 254);
	errors++;
#ifdef HAS_CODE
	if (readtrail) { wrap_trail(); return; }
#endif
	is_cycle = (strstr(str, " cycle") != (char *) 0);
	if (!is_cycle)	/* include the erroneous state itself in the trail */
	{	depth++; trpt++;
	}
	if ((every_error != 0)
	||  errors == upto)
	{
#if defined(MA) && !defined(SAFETY)
		if (is_cycle)	/* must unwind to recover cycle start depth */
		{	int od = depth;
			unwinding = 1;
			depthfound = Unwind();
			unwinding = 0;
			depth = od;
		}
#endif
#if NCORE>1
		writing_trail = 1;
#endif
#ifdef BFS
		if (depth > 1) trpt--;
		nuerror(str);
		if (depth > 1) trpt++;
#else
		putrail();
#endif
#if defined(MA) && !defined(SAFETY)
		if (strstr(str, " cycle"))
		{	if (every_error)
				printf("sorry: MA writes 1 trail max\n");
			wrapup(); /* no recovery from unwind */
		}
#endif
#if NCORE>1
		if (search_terminated != NULL)
		{	*search_terminated |= 4; /* uerror */
		}
		writing_trail = 0;
#endif
	}
	if (!is_cycle)
	{	depth--; trpt--; /* undo */
	}
#ifndef BFS
	if (iterative != 0 && maxdepth > 0)	/* iterative depth shortening */
	{	maxdepth = (iterative == 1)?(depth-1):(depth/2);
		warned = 1;
		printf("pan: reducing search depth to %ld\n",
			maxdepth);
	} else
#endif
	if (errors >= upto && upto != 0)
	{
#if NCORE>1
		sudden_stop("uerror");
#endif
		wrapup();
	}
	depthfound = -1;
}
8340
/*
 * Print one line of the unreached-states report for state i of
 * proctype M at source line lno.  Returns 1 when the state was NOT
 * reported (pure goto transitions are suppressed), 0 when it was.
 */
int
xrefsrc(int lno, S_F_MAP *mp, int M, int i)
{	Trans *T; int j, retval=1;
	for (T = trans[M][i]; T; T = T->nxt)
		if (T && T->tp)
		{	if (strcmp(T->tp, ".(goto)") == 0
			||  strncmp(T->tp, "goto :", 6) == 0)
				return 1; /* not reported */

			printf("	line %d", lno);
			if (verbose)
				/* NOTE(review): sizeof(mp) is the size of the
				 * pointer (4 or 8), not the entry count of the
				 * S_F_MAP table -- looks like it only scans the
				 * first few entries; confirm against how the
				 * generated tables are sized.
				 */
				for (j = 0; j < sizeof(mp); j++)
					if (i >= mp[j].from && i <= mp[j].upto)
					{	printf(", \"%s\"", mp[j].fnm);
						break;
					}
			printf(", state %d", i);
			if (strcmp(T->tp, "") != 0)
			{	char *q;
				q = transmognify(T->tp);
				printf(", \"%s\"", q?q:"");
			} else if (stopstate[M][i])
				printf(", -end state-");
			printf("\n");
			retval = 0; /* reported */
		}
	return retval;
}
8369
8370 void
8371 r_ck(uchar *which, int N, int M, short *src, S_F_MAP *mp)
8372 { int i, m=0;
8373
8374 #ifdef VERI
8375 if (M == VERI && !verbose) return;
8376 #endif
8377 printf("unreached in proctype %s\n", procname[M]);
8378 for (i = 1; i < N; i++)
8379 if (which[i] == 0
8380 && (mapstate[M][i] == 0
8381 || which[mapstate[M][i]] == 0))
8382 m += xrefsrc((int) src[i], mp, M, i);
8383 else
8384 m++;
8385 printf(" (%d of %d states)\n", N-1-m, N-1);
8386 }
8387 #if NCORE>1 && !defined(SEP_STATE)
8388 static long rev_trail_cnt;
8389
8390 #ifdef FULL_TRAIL
8391 void
8392 rev_trail(int fd, volatile Stack_Tree *st_tr)
8393 { long j; char snap[64];
8394
8395 if (!st_tr)
8396 { return;
8397 }
8398 rev_trail(fd, st_tr->prv);
8399 #ifdef VERBOSE
8400 printf("%d (%d) LRT [%d,%d] -- %9u (root %9u)\n",
8401 depth, rev_trail_cnt, st_tr->pr, st_tr->t_id, st_tr, stack_last[core_id]);
8402 #endif
8403 if (st_tr->pr != 255)
8404 { sprintf(snap, "%ld:%d:%d\n",
8405 rev_trail_cnt++, st_tr->pr, st_tr->t_id);
8406 j = strlen(snap);
8407 if (write(fd, snap, j) != j)
8408 { printf("pan: error writing trailfile\n");
8409 close(fd);
8410 wrapup();
8411 return;
8412 }
8413 } else /* handoff point */
8414 { if (a_cycles)
8415 { write(fd, "-1:-1:-1\n", 9);
8416 } }
8417 }
8418 #endif
8419 #endif
8420
/*
 * Write the current error trail to a .trail file: one "depth:proc:
 * transition-id" line per step, preceded by marker records for
 * never-claim (-2) and transition-merging (-4) runs.  In multi-core
 * FULL_TRAIL mode the trail is reconstructed from the shared stack
 * tree instead of the local Trail frames.
 */
void
putrail(void)
{	int fd;
#if defined VERI || defined(MERGED)
	char snap[64];
#endif
#if NCORE==1 || defined(SEP_STATE) || !defined(FULL_TRAIL)
	long i, j;
	Trail *trl;
#endif
	fd = make_trail();
	if (fd < 0) return;
#ifdef VERI
	sprintf(snap, "-2:%d:-2\n", VERI);
	write(fd, snap, strlen(snap));
#endif
#ifdef MERGED
	sprintf(snap, "-4:-4:-4\n");
	write(fd, snap, strlen(snap));
#endif
#if NCORE>1 && !defined(SEP_STATE) && defined(FULL_TRAIL)
	rev_trail_cnt = 1;
	enter_critical(GLOBAL_LOCK);	/* stack tree is shared state */
	rev_trail(fd, stack_last[core_id]);
	leave_critical(GLOBAL_LOCK);
#else
	i = 1; /* trail starts at position 1 */
#if NCORE>1 && defined(SEP_STATE)
	if (cur_Root.m_vsize > 0) { i++; depth++; }
#endif
	for ( ; i <= depth; i++)
	{	if (i == depthfound+1)	/* mark start of the cycle */
			write(fd, "-1:-1:-1\n", 9);
		trl = getframe(i);
		if (!trl->o_t) continue;
		if (trl->o_pm&128) continue;	/* fairness bookkeeping frame */
		sprintf(snap, "%ld:%d:%d\n",
			i, trl->pr, trl->o_t->t_id);
		j = strlen(snap);
		if (write(fd, snap, j) != j)
		{	printf("pan: error writing trailfile\n");
			close(fd);
			wrapup();
	}	}
#endif
	close(fd);
#if NCORE>1
	cpu_printf("pan: wrote trailfile\n");
#endif
}
8471
/*
 * Push a full, uncompressed copy of the current state vector onto the
 * save stack (svtack).  Stack records are allocated lazily and grown
 * (never shrunk) when the vector no longer fits an existing record.
 */
void
sv_save(void) /* push state vector onto save stack */
{	if (!svtack->nxt)	/* first use of this stack slot */
	{	 svtack->nxt = (Svtack *) emalloc(sizeof(Svtack));
		svtack->nxt->body = emalloc(vsize*sizeof(char));
		svtack->nxt->lst = svtack;
		svtack->nxt->m_delta = vsize;
		svmax++;
	} else if (vsize > svtack->nxt->m_delta)	/* record too small: regrow */
	{	svtack->nxt->body = emalloc(vsize*sizeof(char));
		svtack->nxt->lst = svtack;
		svtack->nxt->m_delta = vsize;
		svmax++;
	}
	svtack = svtack->nxt;
#if SYNC
	svtack->o_boq = boq;	/* remember rendezvous status */
#endif
	svtack->o_delta = vsize; /* don't compress */
	memcpy((char *)(svtack->body), (char *) &now, vsize);
#if defined(C_States) && defined(HAS_STACK) && (HAS_TRACK==1)
	c_stack((uchar *) &(svtack->c_stack[0]));
#endif
#ifdef DEBUG
	cpu_printf("%d: sv_save\n", depth);
#endif
}
8499
/*
 * Pop the most recently saved state vector from the save stack and
 * restore it into 'now'.  A size mismatch or popping past the bottom
 * of the stack is a fatal internal error.
 */
void
sv_restor(void) /* pop state vector from save stack */
{
	memcpy((char *)&now, svtack->body, svtack->o_delta);
#if SYNC
	boq = svtack->o_boq;
#endif
#if defined(C_States) && (HAS_TRACK==1)
#ifdef HAS_STACK
	c_unstack((uchar *) &(svtack->c_stack[0]));
#endif
	c_revert((uchar *) &(now.c_state[0]));	/* sync tracked C state */
#endif
	if (vsize != svtack->o_delta)
		Uerror("sv_restor");
	if (!svtack->lst)
		Uerror("error: v_restor");
	svtack = svtack->lst;
#ifdef DEBUG
	cpu_printf("	sv_restor\n");
#endif
}
8522
/*
 * Undo a process deletion: re-append process h (and the o_delqs
 * channels that were deleted with it) to the state vector from the
 * top record of the backup stack, restoring offsets, compression
 * masks and the _pid field.  Counterpart of delproc().
 */
void
p_restor(int h)
{	int i; char *z = (char *) &now;

	proc_offset[h] = stack->o_offset;
	proc_skip[h]   = (uchar) stack->o_skip;
#ifndef XUSAFE
	p_name[h] = stack->o_name;
#endif
#ifndef NOCOMP
	for (i = vsize + stack->o_skip; i > vsize; i--)
		Mask[i-1] = 1; /* align */
#endif
	vsize += stack->o_skip;
	memcpy(z+vsize, stack->body, stack->o_delta);
	vsize += stack->o_delta;
#ifndef NOVSZ
	now._vsz = vsize;
#endif
#ifndef NOCOMP
	for (i = 1; i <= Air[((P0 *)pptr(h))->_t]; i++)
		Mask[vsize - i] = 1; /* pad */
	Mask[proc_offset[h]] = 1; /* _pid */
#endif
	if (BASE > 0 && h > 0)
		((P0 *)pptr(h))->_pid = h-BASE;
	else
		((P0 *)pptr(h))->_pid = h;
	i = stack->o_delqs;	/* channels deleted along with this proc */
	now._nr_pr += 1;
	if (!stack->lst) /* debugging */
		Uerror("error: p_restor");
	stack = stack->lst;
	this = pptr(h);
	while (i-- > 0)
		q_restor();
}
8560
/*
 * Undo a channel deletion: re-append the most recently deleted queue
 * to the state vector from the top record of the backup stack,
 * restoring its offset, skip bytes and compression mask.
 * Counterpart of delq().
 */
void
q_restor(void)
{	char *z = (char *) &now;
#ifndef NOCOMP
	int k, k_end;
#endif
	q_offset[now._nr_qs] = stack->o_offset;
	q_skip[now._nr_qs]   = (uchar) stack->o_skip;
#ifndef XUSAFE
	q_name[now._nr_qs] = stack->o_name;
#endif
	vsize += stack->o_skip;
	memcpy(z+vsize, stack->body, stack->o_delta);
	vsize += stack->o_delta;
#ifndef NOVSZ
	now._vsz = vsize;
#endif
	now._nr_qs += 1;
#ifndef NOCOMP
	k_end = stack->o_offset;
	k = k_end - stack->o_skip;
#if SYNC
#ifndef BFS
	if (q_zero(now._nr_qs)) k_end += stack->o_delta;	/* mask whole rv queue */
#endif
#endif
	for ( ; k < k_end; k++)
		Mask[k] = 1;
#endif
	if (!stack->lst) /* debugging */
		Uerror("error: q_restor");
	stack = stack->lst;
}
/* Recycling pool for small int arrays, bucketed by element count 0..511.
 * filled_chunks[nr] holds records whose ptr is available for reuse;
 * empty_chunks[nr] holds record shells whose ptr is currently handed out.
 */
typedef struct IntChunks {
	int *ptr;		/* the int[nr] payload */
	struct IntChunks *nxt;	/* next record in the same bucket */
} IntChunks;
IntChunks *filled_chunks[512];
IntChunks *empty_chunks[512];
/*
 * Hand out an int array of nr entries: reuse a recycled chunk from
 * filled_chunks[nr] when available, else allocate a fresh record.
 * The record shell is parked on empty_chunks[nr] so ungrab_ints()
 * can later re-file the array for reuse.
 */
int *
grab_ints(int nr)
{	IntChunks *z;
	if (nr >= 512) Uerror("cannot happen grab_int");	/* bucket bound */
	if (filled_chunks[nr])
	{	z = filled_chunks[nr];
		filled_chunks[nr] = filled_chunks[nr]->nxt;
	} else
	{	z = (IntChunks *) emalloc(sizeof(IntChunks));
		z->ptr = (int *) emalloc(nr * sizeof(int));
	}
	z->nxt = empty_chunks[nr];
	empty_chunks[nr] = z;
	return z->ptr;
}
8615 void
8616 ungrab_ints(int *p, int nr)
8617 { IntChunks *z;
8618 if (!empty_chunks[nr]) Uerror("cannot happen ungrab_int");
8619 z = empty_chunks[nr];
8620 empty_chunks[nr] = empty_chunks[nr]->nxt;
8621 z->ptr = p;
8622 z->nxt = filled_chunks[nr];
8623 filled_chunks[nr] = z;
8624 }
/*
 * Delete process h from the state vector -- only legal for the last
 * created process (h+1 == _nr_pr), else returns 0.  First deletes any
 * channels created after the process, optionally saving everything on
 * the backup stack (sav != 0) so p_restor() can undo it.  Returns 1
 * on success.
 */
int
delproc(int sav, int h)
{	int d, i=0;
#ifndef NOCOMP
	int o_vsize = vsize;
#endif
	if (h+1 != (int) now._nr_pr) return 0;	/* only last proc can die */

	while (now._nr_qs
	&&     q_offset[now._nr_qs-1] > proc_offset[h])
	{	delq(sav);	/* drop channels owned by this proc */
		i++;
	}
	d = vsize - proc_offset[h];	/* bytes occupied by the proc */
	if (sav)
	{	if (!stack->nxt)
		{	stack->nxt = (Stack *)
				emalloc(sizeof(Stack));
			stack->nxt->body =
				emalloc(Maxbody*sizeof(char));
			stack->nxt->lst = stack;
			smax++;
		}
		stack = stack->nxt;
		stack->o_offset = proc_offset[h];
#if VECTORSZ>32000
		stack->o_skip = (int) proc_skip[h];
#else
		stack->o_skip = (short) proc_skip[h];
#endif
#ifndef XUSAFE
		stack->o_name = p_name[h];
#endif
		stack->o_delta = d;
		stack->o_delqs = i;	/* how many queues went with it */
		memcpy(stack->body, (char *)pptr(h), d);
	}
	vsize = proc_offset[h];
	now._nr_pr = now._nr_pr - 1;
	memset((char *)pptr(h), 0, d);
	vsize -= (int) proc_skip[h];
#ifndef NOVSZ
	now._vsz = vsize;
#endif
#ifndef NOCOMP
	for (i = vsize; i < o_vsize; i++)
		Mask[i] = 0; /* reset */
#endif
	return 1;
}
8675
/*
 * Delete the most recently created channel from the state vector,
 * optionally saving it on the backup stack (sav != 0) so q_restor()
 * can undo the deletion.
 */
void
delq(int sav)
{	int h = now._nr_qs - 1;	/* only the last queue can be deleted */
	int d = vsize - q_offset[now._nr_qs - 1];
#ifndef NOCOMP
	int k, o_vsize = vsize;
#endif
	if (sav)
	{	if (!stack->nxt)
		{	stack->nxt = (Stack *)
				emalloc(sizeof(Stack));
			stack->nxt->body =
				emalloc(Maxbody*sizeof(char));
			stack->nxt->lst = stack;
			smax++;
		}
		stack = stack->nxt;
		stack->o_offset = q_offset[h];
#if VECTORSZ>32000
		stack->o_skip = (int) q_skip[h];
#else
		stack->o_skip = (short) q_skip[h];
#endif
#ifndef XUSAFE
		stack->o_name = q_name[h];
#endif
		stack->o_delta = d;
		memcpy(stack->body, (char *)qptr(h), d);
	}
	vsize = q_offset[h];
	now._nr_qs = now._nr_qs - 1;
	memset((char *)qptr(h), 0, d);
	vsize -= (int) q_skip[h];
#ifndef NOVSZ
	now._vsz = vsize;
#endif
#ifndef NOCOMP
	for (k = vsize; k < o_vsize; k++)
		Mask[k] = 0; /* reset */
#endif
}
8717
8718 int
8719 qs_empty(void)
8720 { int i;
8721 for (i = 0; i < (int) now._nr_qs; i++)
8722 { if (q_sz(i) > 0)
8723 return 0;
8724 }
8725 return 1;
8726 }
8727
/*
 * Return 1 iff the current state is a valid end state: every process
 * sits in an end-labeled (stop) state, channels are empty when -q
 * (strict) was given, and -- when an event trace is monitored and no
 * cycle check is running -- the trace automaton has completed too.
 */
int
endstate(void)
{	int i; P0 *ptr;
	for (i = BASE; i < (int) now._nr_pr; i++)
	{	ptr = (P0 *) pptr(i);
		if (!stopstate[ptr->_t][ptr->_p])
			return 0;
	}
	if (strict) return qs_empty();	/* -q: channels must be empty too */
#if defined(EVENT_TRACE) && !defined(OTIM)
	if (!stopstate[EVENT_TRACE][now._event] && !a_cycles)
	{	printf("pan: event_trace not completed\n");
		return 0;
	}
#endif
	return 1;
}
8745
8746 #ifndef SAFETY
/*
 * Start the nested (second) depth-first search from the current state
 * to look for an acceptance cycle through it.  Saves the bits of state
 * the nested search clobbers (_a_t, fairness counter, stack provisos,
 * scheduling limit), marks the seed state in A_Root/A_depth, runs
 * new_state() with the 2nd-dfs bit set, then restores everything.
 */
void
checkcycles(void)
{	uchar o_a_t = now._a_t;
#ifdef SCHED
	int o_limit;
#endif
#ifndef NOFAIR
	uchar o_cnt = now._cnt[1];
#endif
#ifdef FULLSTACK
#ifndef MA
	struct H_el *sv = trpt->ostate; /* save */
#else
	uchar prov = trpt->proviso; /* save */
#endif
#endif
#ifdef DEBUG
	{ int i; uchar *v = (uchar *) &now;
	  printf("	set Seed state ");
#ifndef NOFAIR
	  if (fairness) printf("(cnt = %d:%d, nrpr=%d) ",
		now._cnt[0], now._cnt[1], now._nr_pr);
#endif
	  /* for (i = 0; i < n; i++) printf("%d,", v[i]); */
	  printf("\n");
	}
	printf("%d: cycle check starts\n", depth);
#endif
	now._a_t |= (1|16|32);
	/* 1 = 2nd DFS; (16|32) to help hasher */
#ifndef NOFAIR
	now._cnt[1] = now._cnt[0];
#endif
	memcpy((char *)&A_Root, (char *)&now, vsize);	/* seed state */
	A_depth = depthfound = depth;
#if NCORE>1
	mem_put_acc();	/* hand the accept state to another core */
#else
#ifdef SCHED
	o_limit = trpt->sched_limit;
	trpt->sched_limit = 0;
#endif
	new_state();	/* start 2nd DFS */
#ifdef SCHED
	trpt->sched_limit = o_limit;
#endif
#endif
	now._a_t = o_a_t;
#ifndef NOFAIR
	now._cnt[1] = o_cnt;
#endif
	A_depth = 0; depthfound = -1;
#ifdef DEBUG
	printf("%d: cycle check returns\n", depth);
#endif
#ifdef FULLSTACK
#ifndef MA
	trpt->ostate = sv; /* restore */
#else
	trpt->proviso = prov;
#endif
#endif
}
8810 #endif
8811
8812 #if defined(FULLSTACK) && defined(BITSTATE)
/* free-list of stack-state records recycled by onstack_zap() */
struct H_el *Free_list = (struct H_el *) 0;
/*
 * Allocate the separate hash table (S_Tab) used to remember which
 * states are on the DFS stack during a bitstate search.
 */
void
onstack_init(void) /* to store stack states in a bitstate search */
{	S_Tab = (struct H_el **) emalloc(maxdepth*sizeof(struct H_el *));
}
/*
 * Allocate a hash-table record with an n-byte state payload.  When
 * allocating for the stack table (H_tab == S_Tab), first try to reuse
 * a record from Free_list whose payload (recorded in ->tagged by
 * onstack_zap) is at least n bytes; the list is kept sorted by
 * decreasing size, so scanning stops at the first record smaller
 * than n.  Falls back to emalloc() otherwise.
 */
struct H_el *
grab_state(int n)
{	struct H_el *v, *last = 0;
	if (H_tab == S_Tab)
	{	for (v = Free_list; v && ((int) v->tagged >= n); v=v->nxt)
		{	if ((int) v->tagged == n)	/* exact fit: unlink and reuse */
			{	if (last)
					last->nxt = v->nxt;
				else
gotcha:				 	Free_list = v->nxt;
				v->tagged = 0;
				v->nxt = 0;
#ifdef COLLAPSE
				v->ln = 0;
#endif
				return v;
			}
			Fh++; last=v;
		}
		/* new: second try -- accept any big-enough head record */
		v = Free_list;
		if (v && ((int) v->tagged >= n))
			goto gotcha;
		ngrabs++;
	}
	return (struct H_el *)
		emalloc(sizeof(struct H_el)+n-sizeof(unsigned));
}
8846
8847 #else
8848 #if NCORE>1
/*
 * Multi-core variant: hash-table records must live in the shared
 * memory segment, so delegate to grab_shared() instead of emalloc().
 */
struct H_el *
grab_state(int n)
{	struct H_el *grab_shared(int);
	return grab_shared(sizeof(struct H_el)+n-sizeof(unsigned));
}
8854 #else
8855 #ifndef AUTO_RESIZE
8856 #define grab_state(n) (struct H_el *) \
8857 emalloc(sizeof(struct H_el)+n-sizeof(unsigned long));
8858 #else
/*
 * AUTO_RESIZE variant: carve hash-table records out of the memory
 * reclaimed from the old table after a resize (reclaim_mem), falling
 * back to emalloc() when that pool is exhausted.  Reclaimed slices
 * are rounded up to word (WS) alignment and zeroed before use.
 */
struct H_el *
grab_state(int n)
{	struct H_el *p;
	int cnt = sizeof(struct H_el)+n-sizeof(unsigned long);

	if (reclaim_size >= cnt+WS)
	{	if ((cnt & (WS-1)) != 0) /* alignment */
		{	cnt += WS - (cnt & (WS-1));
		}
		p = (struct H_el *) reclaim_mem;
		reclaim_mem  += cnt;
		reclaim_size -= cnt;
		memset(p, 0, cnt);
	} else
	{	p = (struct H_el *) emalloc(cnt);
	}
	return p;
}
8877 #endif
8878 #endif
8879 #endif
8880 #ifdef COLLAPSE
/*
 * COLLAPSE support: map an n-byte component (a process, queue, or the
 * globals, selected by tp) to a small ordinal number.  Components are
 * kept in hash chains sorted by (length, content); a new component is
 * inserted in order and assigned the next ordinal for its type.
 * Returns the component's ordinal.
 */
unsigned long
ordinal(char *v, long n, short tp)
{	struct H_el *tmp, *ntmp; long m;
	struct H_el *olst = (struct H_el *) 0;
	s_hash((uchar *)v, n);	/* sets chain index j1 */
#if NCORE>1 && !defined(SEP_STATE)
	enter_critical(CS_ID);	/* uses spinlock - 1..128 */
#endif
	tmp = H_tab[j1];
	if (!tmp)
	{	tmp = grab_state(n);
		H_tab[j1] = tmp;
	} else
	for ( ;; olst = tmp, tmp = tmp->nxt)
	{	m = memcmp(((char *)&(tmp->state)), v, n);
		if (n == tmp->ln)
		{
			if (m == 0)
				goto done;	/* component already known */
			if (m < 0)
			{
Insert:				ntmp = grab_state(n);	/* insert before tmp */
				ntmp->nxt = tmp;
				if (!olst)
					H_tab[j1] = ntmp;
				else
					olst->nxt = ntmp;
				tmp = ntmp;
				break;
			} else if (!tmp->nxt)
			{
Append:				tmp->nxt = grab_state(n);	/* insert at tail */
				tmp = tmp->nxt;
				break;
			}
			continue;
		}
		if (n < tmp->ln)
			goto Insert;
		else if (!tmp->nxt)
			goto Append;
	}
	m = ++ncomps[tp];	/* fresh ordinal for this component type */
#ifdef FULLSTACK
	tmp->tagged = m;
#else
	tmp->st_id = m;
#endif
#if defined(AUTO_RESIZE) && !defined(BITSTATE)
	tmp->m_K1 = K1;
#endif
	memcpy(((char *)&(tmp->state)), v, n);
	tmp->ln = n;
done:
#if NCORE>1 && !defined(SEP_STATE)
	leave_critical(CS_ID);	/* uses spinlock */
#endif
#ifdef FULLSTACK
	return tmp->tagged;
#else
	return tmp->st_id;
#endif
}
8944
/*
 * COLLAPSE compression: build the compressed state in comp_now by
 * replacing each component (each process, optionally each queue, and
 * the global section) with its small ordinal from ordinal().  The
 * ordinals are emitted in 1..4 variable-length bytes; the per-
 * component byte counts are packed 2 bits each into trailer bytes of
 * the globals buffer.  Returns the compressed length in bytes.
 * Note: vin/nin are unused here -- the routine reads 'now' directly.
 */
int
compress(char *vin, int nin) /* collapse compression */
{	char *w, *v = (char *) &comp_now;
	int i, j;
	unsigned long n;
	static char *x;
	static uchar nbytes[513]; /* 1 + 256 + 256 */
	static unsigned short nbytelen;
	long col_q(int, char *);
	long col_p(int, char *);
#ifndef SAFETY
	if (a_cycles)	/* liveness bits stay uncompressed up front */
		*v++ = now._a_t;
#ifndef NOFAIR
	if (fairness)
	for (i = 0; i < NFAIR; i++)
		*v++ = now._cnt[i];
#endif
#endif
	nbytelen = 0;
#ifndef JOINPROCS
	for (i = 0; i < (int) now._nr_pr; i++)	/* one ordinal per process */
	{	n = col_p(i, (char *) 0);
#ifdef NOFIX
		nbytes[nbytelen] = 0;
#else
		nbytes[nbytelen] = 1;	/* proctype byte precedes ordinal */
		*v++ = ((P0 *) pptr(i))->_t;
#endif
		*v++ = n&255;	/* variable-length ordinal, low byte first */
		if (n >= (1<<8))
		{	nbytes[nbytelen]++;
			*v++ = (n>>8)&255;
		}
		if (n >= (1<<16))
		{	nbytes[nbytelen]++;
			*v++ = (n>>16)&255;
		}
		if (n >= (1<<24))
		{	nbytes[nbytelen]++;
			*v++ = (n>>24)&255;
		}
		nbytelen++;
	}
#else
	x = scratch;	/* all processes collapse into one ordinal */
	for (i = 0; i < (int) now._nr_pr; i++)
		x += col_p(i, x);
	n = ordinal(scratch, x-scratch, 2); /* procs */
	*v++ = n&255;
	nbytes[nbytelen] = 0;
	if (n >= (1<<8))
	{	nbytes[nbytelen]++;
		*v++ = (n>>8)&255;
	}
	if (n >= (1<<16))
	{	nbytes[nbytelen]++;
		*v++ = (n>>16)&255;
	}
	if (n >= (1<<24))
	{	nbytes[nbytelen]++;
		*v++ = (n>>24)&255;
	}
	nbytelen++;
#endif
#ifdef SEPQS
	for (i = 0; i < (int) now._nr_qs; i++)	/* one ordinal per queue */
	{	n = col_q(i, (char *) 0);
		nbytes[nbytelen] = 0;
		*v++ = n&255;
		if (n >= (1<<8))
		{	nbytes[nbytelen]++;
			*v++ = (n>>8)&255;
		}
		if (n >= (1<<16))
		{	nbytes[nbytelen]++;
			*v++ = (n>>16)&255;
		}
		if (n >= (1<<24))
		{	nbytes[nbytelen]++;
			*v++ = (n>>24)&255;
		}
		nbytelen++;
	}
#endif
#ifdef NOVSZ
	/* 3 = _a_t, _nr_pr, _nr_qs */
	w = (char *) &now + 3 * sizeof(uchar);
#ifndef NOFAIR
	w += NFAIR;
#endif
#else
#if VECTORSZ<65536
	w = (char *) &(now._vsz) + sizeof(unsigned short);
#else
	w = (char *) &(now._vsz) + sizeof(unsigned long);
#endif
#endif
	x = scratch;	/* now collect the global section */
	*x++ = now._nr_pr;
	*x++ = now._nr_qs;
	if (now._nr_qs > 0 && qptr(0) < pptr(0))
		n = qptr(0) - (uchar *) w;
	else
		n = pptr(0) - (uchar *) w;
	j = w - (char *) &now;
	for (i = 0; i < (int) n; i++, w++)
		if (!Mask[j++]) *x++ = *w;	/* skip masked (padding) bytes */
#ifndef SEPQS
	for (i = 0; i < (int) now._nr_qs; i++)
		x += col_q(i, x);
#endif
	x--;
	for (i = 0, j = 6; i < nbytelen; i++)	/* pack byte counts, 2 bits each */
	{	if (j == 6)
		{	j = 0;
			*(++x) = 0;
		} else
			j += 2;
		*x |= (nbytes[i] << j);
	}
	x++;
	for (j = 0; j < WS-1; j++)	/* zero-pad to word size */
		*x++ = 0;
	x -= j; j = 0;
	n = ordinal(scratch, x-scratch, 0); /* globals */
	*v++ = n&255;
	if (n >= (1<< 8)) { *v++ = (n>> 8)&255; j++; }
	if (n >= (1<<16)) { *v++ = (n>>16)&255; j++; }
	if (n >= (1<<24)) { *v++ = (n>>24)&255; j++; }
	*v++ = j; /* add last count as a byte */
	for (i = 0; i < WS-1; i++)
		*v++ = 0;
	v -= i;
#if 0
	printf("collapse %d -> %d\n",
		vsize, v - (char *)&comp_now);
#endif
	return v - (char *)&comp_now;
}
9085 #else
9086 #if !defined(NOCOMP)
/*
 * Default compression: copy the n-byte state vector vin into comp_now
 * while dropping every byte whose Mask[] entry is set (alignment and
 * padding bytes), then zero-pad the result to a word boundary.
 * With -DHC the state is instead replaced by its hash signature
 * (K1 and HC bytes of K2), keeping only the liveness bits.
 * Returns the compressed length in bytes.
 */
int
compress(char *vin, int n) /* default compression */
{
#ifdef HC
	int delta = 0;
	s_hash((uchar *)vin, n); /* sets K1 and K2 */
#ifndef SAFETY
	if (S_A)
	{	delta++; /* _a_t */
#ifndef NOFAIR
		if (S_A > NFAIR)
			delta += NFAIR; /* _cnt[] */
#endif
	}
#endif
	memcpy((char *) &comp_now + delta, (char *) &K1, WS);
	delta += WS;
#if HC>0
	memcpy((char *) &comp_now + delta, (char *) &K2, HC);
	delta += HC;
#endif
	return delta;
#else
	char *vv = vin;
	char *v = (char *) &comp_now;
	int i;
#ifndef NO_FAST_C
	int r = 0, unroll = n/8;
	if (unroll > 0)
	{	i = 0;
		while (r++ < unroll)
		{	/* unroll 8 times, avoid ifs */
			/* Mask[i] is 0 or 1, so 'v += 1 - Mask[i]' keeps
			 * the byte exactly when it is not masked */
			/* 1 */	*v = *vv++;
				v += 1 - Mask[i++];
			/* 2 */	*v = *vv++;
				v += 1 - Mask[i++];
			/* 3 */	*v = *vv++;
				v += 1 - Mask[i++];
			/* 4 */	*v = *vv++;
				v += 1 - Mask[i++];
			/* 5 */	*v = *vv++;
				v += 1 - Mask[i++];
			/* 6 */	*v = *vv++;
				v += 1 - Mask[i++];
			/* 7 */	*v = *vv++;
				v += 1 - Mask[i++];
			/* 8 */	*v = *vv++;
				v += 1 - Mask[i++];
		}
		r = n - i; /* the rest, at most 7 */
		switch (r) {	/* deliberate fallthrough on every case */
		case 7: *v = *vv++; v += 1 - Mask[i++];
		case 6: *v = *vv++; v += 1 - Mask[i++];
		case 5: *v = *vv++; v += 1 - Mask[i++];
		case 4: *v = *vv++; v += 1 - Mask[i++];
		case 3: *v = *vv++; v += 1 - Mask[i++];
		case 2: *v = *vv++; v += 1 - Mask[i++];
		case 1: *v = *vv++; v += 1 - Mask[i++];
		case 0: break;
		}
		r = (n+WS-1)/WS; /* words rounded up */
		r *= WS; /* bytes */
		i = r - i; /* remainder */
		switch (i) {	/* zero-pad to word boundary, fallthrough */
		case 7: *v++ = 0; /* fall thru */
		case 6: *v++ = 0;
		case 5: *v++ = 0;
		case 4: *v++ = 0;
		case 3: *v++ = 0;
		case 2: *v++ = 0;
		case 1: *v++ = 0;
		case 0: break;
		default: Uerror("unexpected wordsize");
		}
		v -= i;
	} else
#endif
	{	for (i = 0; i < n; i++, vv++)
			if (!Mask[i]) *v++ = *vv;
		for (i = 0; i < WS-1; i++)
			*v++ = 0;
		v -= i;
	}
#if 0
	printf("compress %d -> %d\n",
		n, v - (char *)&comp_now);
#endif
	return v - (char *)&comp_now;
#endif
}
9177 #endif
9178 #endif
9179 #if defined(FULLSTACK) && defined(BITSTATE)
9180 #if defined(MA)
#if !defined(onstack_now)
/* Under MA the stack membership test lives in gstore(); this stub only
 * satisfies remaining references.  Report "not on the stack".  The
 * original empty body fell off the end of a value-returning function,
 * which is undefined behavior when the result is used.
 */
int onstack_now(void) { return 0; }
#endif
#if !defined(onstack_put)
void onstack_put(void) {}	/* no-op: MA mode records the stack in gstore() */
#endif
#if !defined(onstack_zap)
void onstack_zap(void) {}	/* no-op counterpart of onstack_put() */
#endif
9190 #else
/*
 * Remove the current state from the stack table S_Tab (it is being
 * popped off the DFS stack) and move its record onto Free_list, which
 * is kept sorted by decreasing payload size so grab_state() can reuse
 * it.  A lookup miss is reported once as a warning rather than a
 * fatal error (observed, apparently harmless, in multi-core bitstate
 * runs).
 */
void
onstack_zap(void)
{	struct H_el *v, *w, *last = 0;
	struct H_el **tmp = H_tab;
	char *nv; int n, m;

	static char warned = 0;

	H_tab = S_Tab;	/* compress/hash against the stack table */
#ifndef NOCOMP
	nv = (char *) &comp_now;
	n = compress((char *)&now, vsize);
#else
#if defined(BITSTATE) && defined(LC)
	nv = (char *) &comp_now;
	n = compact_stack((char *)&now, vsize);
#else
	nv = (char *) &now;
	n = vsize;
#endif
#endif
#if !defined(HC) && !(defined(BITSTATE) && defined(LC))
	s_hash((uchar *)nv, n);	/* sets chain index j1 */
#endif
	H_tab = tmp;
	for (v = S_Tab[j1]; v; Zh++, last=v, v=v->nxt)
	{	m = memcmp(&(v->state), nv, n);
		if (m == 0)
			goto Found;
		if (m < 0)	/* chain is sorted: state cannot follow */
			break;
	}
/* NotFound: */
#ifndef ZAPH
#if defined(BITSTATE) && NCORE>1
	/* seen this happen, likely harmless, but not yet understood */
	if (warned == 0)
#endif
	{	/* Uerror("stack out of wack - zap"); */
		cpu_printf("pan: warning, stack incomplete\n");
		warned = 1;
	}
#endif
	return;
Found:
	ZAPS++;
	if (last)
		last->nxt = v->nxt;
	else
		S_Tab[j1] = v->nxt;
	v->tagged = (unsigned) n;	/* remember payload size for reuse */
#if !defined(NOREDUCE) && !defined(SAFETY)
	v->proviso = 0;
#endif
	v->nxt = last = (struct H_el *) 0;
	for (w = Free_list; w; Fa++, last=w, w = w->nxt)
	{	if ((int) w->tagged <= n)	/* insert before first smaller record */
		{	if (last)
			{	v->nxt = w;
				last->nxt = v;
			} else
			{	v->nxt = Free_list;
				Free_list = v;
			}
			return;
		}
		if (!w->nxt)
		{	w->nxt = v;
			return;
	}	}
	Free_list = v;
}
/*
 * Record the current state in the stack table S_Tab (it has just been
 * pushed onto the DFS stack) and remember its record in trpt->ostate
 * so the proviso bit can be updated later.  A duplicate entry is a
 * warning in LC mode and an internal error otherwise.
 */
void
onstack_put(void)
{	struct H_el **tmp = H_tab;
	H_tab = S_Tab;	/* store into the stack table, not the state table */
	if (hstore((char *)&now, vsize) != 0)
#if defined(BITSTATE) && defined(LC)
		printf("pan: warning, double stack entry\n");
#else
#ifndef ZAPH
		Uerror("cannot happen - unstack_put");
#endif
#endif
	H_tab = tmp;
	trpt->ostate = Lstate;	/* set as a side effect of hstore() */
	PUT++;
}
/*
 * Return 1 iff the current state is on the DFS stack, by looking it
 * up in the stack table S_Tab.  Also leaves the matching (or first
 * not-smaller) record in Lstate for the proviso bookkeeping.  The
 * sorted chain lets the scan stop at the first m <= 0 comparison.
 */
int
onstack_now(void)
{	struct H_el *tmp;
	struct H_el **tmp2 = H_tab;
	char *v; int n, m = 1;

	H_tab = S_Tab;	/* compress/hash against the stack table */
#ifdef NOCOMP
#if defined(BITSTATE) && defined(LC)
	v = (char *) &comp_now;
	n = compact_stack((char *)&now, vsize);
#else
	v = (char *) &now;
	n = vsize;
#endif
#else
	v = (char *) &comp_now;
	n = compress((char *)&now, vsize);
#endif
#if !defined(HC) && !(defined(BITSTATE) && defined(LC))
	s_hash((uchar *)v, n);	/* sets chain index j1 */
#endif
	H_tab = tmp2;
	for (tmp = S_Tab[j1]; tmp; Zn++, tmp = tmp->nxt)
	{	m = memcmp(((char *)&(tmp->state)),v,n);
		if (m <= 0)
		{	Lstate = (struct H_el *) tmp;
			break;
	}	}
	PROBE++;
	return (m == 0);
}
9311 #endif
9312 #endif
9313 #ifndef BITSTATE
/*
 * Initialize state storage for a non-bitstate search: either restore
 * or create the minimized-automaton store (-DMA), and/or allocate the
 * 2^ssize-entry hash table -- in shared memory when running
 * multi-core, unless a trail is merely being replayed.
 */
void
hinit(void)
{
#ifdef MA
#ifdef R_XPT
	{ void r_xpoint(void);	/* restore MA store from checkpoint */
	  r_xpoint();
	}
#else
	dfa_init((unsigned short) (MA+a_cycles));
#if NCORE>1 && !defined(COLLAPSE)
	if (!readtrail)
	{ void init_HT(unsigned long);
	  init_HT(0L);
	}
#endif
#endif
#endif
#if !defined(MA) || defined(COLLAPSE)
#if NCORE>1
	if (!readtrail)
	{ void init_HT(unsigned long);
	  init_HT((unsigned long) (ONE_L<<ssize)*sizeof(struct H_el *));
	} else
#endif
	H_tab = (struct H_el **)
		emalloc((ONE_L<<ssize)*sizeof(struct H_el *));
#endif
}
9343 #endif
9344
9345 #if !defined(BITSTATE) || defined(FULLSTACK)
9346 #ifdef DEBUG
/*
 * Debug helper: print the cycle-detection tags of a stored state and,
 * with -DSDUMP, both the raw state vector (masked bytes starred) and
 * the compressed vector v of length n.
 */
void
dumpstate(int wasnew, char *v, int n, int tag)
{	int i;	/* used only when SDUMP is defined */
#ifndef SAFETY
	if (S_A)
	{	printf("	state tags %d (%d::%d): ",
			V_A, wasnew, v[0]);
#ifdef FULLSTACK
		printf(" %d ", tag);
#endif
		printf("\n");
	}
#endif
#ifdef SDUMP
#ifndef NOCOMP
	printf("	State: ");
	for (i = 0; i < vsize; i++) printf("%d%s,",
		((char *)&now)[i], Mask[i]?"*":"");
#endif
	printf("\n	Vector: ");
	for (i = 0; i < n; i++) printf("%d,", v[i]);
	printf("\n");
#endif
}
9371 #endif
9372 #ifdef MA
/* store the current state in the minimized-automaton (MA) DFA store.
 * Returns 0 = new state, 1 = old state (off-stack), 2 = on-stack,
 * 3 = intersected the first-dfs stack (acceptance-cycle detection).
 * pbit is stored in the last payload byte (Info[MA-1]).
 */
int
gstore(char *vin, int nin, uchar pbit)
{	int n, i;
	int ret_val = 1;
	uchar *v;
	static uchar	Info[MA+1];
#ifndef NOCOMP
	n = compress(vin, nin);
	v = (uchar *) &comp_now;
#else
	n = nin;
	v = vin;
#endif
	if (n >= MA)
	{	printf("pan: error, MA too small, recompile pan.c");
		printf(" with -DMA=N with N>%d\n", n);
		Uerror("aborting");
	}
	if (n > (int) maxgs)	/* track largest vector stored so far */
	{	maxgs = (unsigned int) n;
	}
	for (i = 0; i < n; i++)
	{	Info[i] = v[i];
	}
	for (   ; i < MA-1; i++)	/* zero-pad up to the pbit slot */
	{	Info[i] = 0;
	}
	Info[MA-1] = pbit;
	if (a_cycles)		/* place _a_t at the end */
	{	Info[MA] = Info[0];
		Info[0] = 0;
	}

#if NCORE>1 && !defined(SEP_STATE)
	enter_critical(GLOBAL_LOCK);	/* crude, but necessary */
	/* to make this mode work, also replace emalloc with grab_shared inside store MA routines */
#endif

	if (!dfa_store(Info))	/* not seen before */
	{	if (pbit == 0	/* in 2nd dfs: check 1st-dfs stack overlap */
		&&  (now._a_t&1)
		&&  depth > A_depth)
		{	Info[MA] &= ~(1|16|32);	/* _a_t */
			if (dfa_member(MA))
			{	Info[MA-1] = 4;	/* off-stack bit */
				nShadow++;
				if (!dfa_member(MA-1))
				{	ret_val = 3;
#ifdef VERBOSE
	printf("intersected 1st dfs stack\n");
#endif
					goto done;
	}	}	}
		ret_val = 0;
#ifdef VERBOSE
		printf("new state\n");
#endif
		goto done;
	}
#ifdef FULLSTACK
	if (pbit == 0)	/* probe the stored copy's stack status */
	{	Info[MA-1] = 1;	/* proviso bit */
#ifndef BFS
		trpt->proviso = dfa_member(MA-1);
#endif
		Info[MA-1] = 4;	/* off-stack bit */
		if (dfa_member(MA-1))
		{	ret_val = 1;	/* off-stack */
#ifdef VERBOSE
		printf("old state\n");
#endif
		} else
		{	ret_val = 2;	/* on-stack */
#ifdef VERBOSE
		printf("on-stack\n");
#endif
		}
		goto done;
	}
#endif
	ret_val = 1;
#ifdef VERBOSE
	printf("old state\n");
#endif
done:
#if NCORE>1 && !defined(SEP_STATE)
	leave_critical(GLOBAL_LOCK);
#endif
	return ret_val; /* old state */
}
9463 #endif
9464 #if defined(BITSTATE) && defined(LC)
9465 int
9466 compact_stack(char *vin, int n)
9467 { int delta = 0;
9468 s_hash((uchar *)vin, n); /* sets K1 and K2 */
9469 #ifndef SAFETY
9470 delta++; /* room for state[0] |= 128 */
9471 #endif
9472 memcpy((char *) &comp_now + delta, (char *) &K1, WS);
9473 delta += WS;
9474 memcpy((char *) &comp_now + delta, (char *) &K2, WS);
9475 delta += WS; /* use all available bits */
9476 return delta;
9477 }
9478 #endif
/* central hash-table store: look up (and if absent insert) the current
 * state vector.  Returns 0 = new state inserted, 1 = old state matched
 * outside the dfs stack, 2 = matched a state still on the dfs stack,
 * 3 = intersected the first-dfs stack (acceptance-cycle detected).
 * Buckets are sorted chains; j1/K1 are set as a side effect of s_hash
 * or compress (assumed from the visible call pattern — confirm in pan.h).
 */
int
hstore(char *vin, int nin)	/* hash table storage */
{	struct H_el *ntmp;
	struct H_el *tmp, *olst = (struct H_el *) 0;
	char	*v; int n, m=0;
#ifdef HC
	uchar	rem_a;
#endif
#ifdef NOCOMP
#if defined(BITSTATE) && defined(LC)
	if (S_Tab == H_tab)	/* storing a stack state: compact it */
	{	v = (char *) &comp_now;
		n = compact_stack(vin, nin);
	} else
	{	v = vin; n = nin;
	}
#else
	v = vin; n = nin;
#endif
#else
	v = (char *) &comp_now;
#ifdef HC
	rem_a = now._a_t;	/* compress with _a_t masked out */
	now._a_t = 0;
#endif
	n = compress(vin, nin);
#ifdef HC
	now._a_t = rem_a;
#endif
#ifndef SAFETY
	if (S_A)
	{	v[0] = 0;	/* _a_t */
#ifndef NOFAIR
		if (S_A > NFAIR)
		for (m = 0; m < NFAIR; m++)
			v[m+1] = 0; /* _cnt[] */
#endif
		m = 0;
	}
#endif
#endif
#if !defined(HC) && !(defined(BITSTATE) && defined(LC))
	s_hash((uchar *)v, n);
#endif
#if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
	enter_critical(CS_ID);	/* uses spinlock */
#endif
	tmp = H_tab[j1];
	if (!tmp)	/* empty bucket: insert as its first entry */
	{	tmp = grab_state(n);
#if NCORE>1
		if (!tmp)
		{	/* if we get here -- we've already issued a warning */
			/* but we want to allow the normal distributed termination */
			/* to collect the stats on all cpus in the wrapup */
#if !defined(SEP_STATE) && !defined(BITSTATE)
			leave_critical(CS_ID);
#endif
			return 1; /* allow normal termination */
		}
#endif
		H_tab[j1] = tmp;
	} else
	{	for (;; hcmp++, olst = tmp, tmp = tmp->nxt)
		{	/* skip the _a_t and the _cnt bytes */
#ifdef COLLAPSE
			if (tmp->ln != 0)
			{	if (!tmp->nxt) goto Append;
				continue;
			}
#endif
			m = memcmp(((char *)&(tmp->state)) + S_A,
				v + S_A, n - S_A);
			if (m == 0) {	/* state already in the table */
#ifdef SAFETY
#define wasnew	0
#else
				int wasnew = 0;
#endif
#ifndef SAFETY
#ifndef NOCOMP
				if (S_A)
				{	/* "new" here means: not yet seen with
					 * the current dfs-phase tag bits V_A */
					if ((((char *)&(tmp->state))[0] & V_A) != V_A)
					{	wasnew = 1; nShadow++;
						((char *)&(tmp->state))[0] |= V_A;
					}
#ifndef NOFAIR
					if (S_A > NFAIR)
					{	/* 0 <= now._cnt[now._a_t&1] < MAXPROC */
						unsigned ci, bp;	/* index, bit pos */
						ci = (now._cnt[now._a_t&1] / 8);
						bp = (now._cnt[now._a_t&1] - 8*ci);
						if (now._a_t&1)	/* use tail-bits in _cnt */
						{	ci = (NFAIR - 1) - ci;
							bp = 7 - bp;	/* bp = 0..7 */
						}
						ci++;	/* skip over _a_t */
						bp = 1 << bp;	/* the bit mask */
						if ((((char *)&(tmp->state))[ci] & bp)==0)
						{	if (!wasnew)
							{	wasnew = 1;
								nShadow++;
							}
							((char *)&(tmp->state))[ci] |= bp;
						}
					}
					/* else: wasnew == 0, i.e., old state */
#endif
				}
#endif
#endif
#if NCORE>1
				Lstate = (struct H_el *) tmp;
#endif
#ifdef FULLSTACK
#ifndef SAFETY
				if (wasnew)
				{	Lstate = (struct H_el *) tmp;
					tmp->tagged |= V_A;
					/* in 2nd dfs and also tagged by the 1st
					 * dfs below the seed: cycle found */
					if ((now._a_t&1)
					&&  (tmp->tagged&A_V)
					&&  depth > A_depth)
					{
intersect:
#ifdef CHECK
#if NCORE>1
						printf("cpu%d: ", core_id);
#endif
						printf("1st dfs-stack intersected on state %d+\n",
							(int) tmp->st_id);
#endif
#if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
						leave_critical(CS_ID);
#endif
						return 3;
					}
#ifdef CHECK
#if NCORE>1
					printf("cpu%d: ", core_id);
#endif
					printf("	New state %d+\n", (int) tmp->st_id);
#endif
#ifdef DEBUG
					dumpstate(1, (char *)&(tmp->state),n,tmp->tagged);
#endif
#if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
					leave_critical(CS_ID);
#endif
					return 0;
				} else
#endif
				if ((S_A)?(tmp->tagged&V_A):tmp->tagged)
				{	Lstate = (struct H_el *) tmp;
#ifndef SAFETY
					/* already on current dfs stack */
					/* but may also be on 1st dfs stack */
					if ((now._a_t&1)
					&&  (tmp->tagged&A_V)
					&&  depth > A_depth
#ifndef NOFAIR
					&& (!fairness || now._cnt[1] <= 1)
#endif
					)
						goto intersect;
#endif
#ifdef CHECK
#if NCORE>1
					printf("cpu%d: ", core_id);
#endif
					printf("	Stack state %d\n", (int) tmp->st_id);
#endif
#ifdef DEBUG
					dumpstate(0, (char *)&(tmp->state),n,tmp->tagged);
#endif
#if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
					leave_critical(CS_ID);
#endif
					return 2; /* match on stack */
				}
#else
				if (wasnew)
				{
#ifdef CHECK
#if NCORE>1
					printf("cpu%d: ", core_id);
#endif
					printf("	New state %d+\n", (int) tmp->st_id);
#endif
#ifdef DEBUG
					dumpstate(1, (char *)&(tmp->state), n, 0);
#endif
#if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
					leave_critical(CS_ID);
#endif
					return 0;
				}
#endif
#ifdef CHECK
#if NCORE>1
				printf("cpu%d: ", core_id);
#endif
				printf("	Old state %d\n", (int) tmp->st_id);
#endif
#ifdef DEBUG
				dumpstate(0, (char *)&(tmp->state), n, 0);
#endif
#ifdef REACH
				/* revisit old states reached via a shorter
				 * path, so depth bounds stay accurate */
				if (tmp->D > depth)
				{	tmp->D = depth;
#ifdef CHECK
#if NCORE>1
					printf("cpu%d: ", core_id);
#endif
					printf("	ReVisiting (from smaller depth)\n");
#endif
					nstates--;
#if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
					leave_critical(CS_ID);
#endif
					return 0;
				}
#endif
#if (defined(BFS) && defined(Q_PROVISO)) || NCORE>1
				Lstate = (struct H_el *) tmp;
#endif
#if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
				leave_critical(CS_ID);
#endif
				return 1; /* match outside stack */
			} else if (m < 0)
			{	/* insert state before tmp */
				ntmp = grab_state(n);
#if NCORE>1
				if (!ntmp)
				{
#if !defined(SEP_STATE) && !defined(BITSTATE)
					leave_critical(CS_ID);
#endif
					return 1; /* allow normal termination */
				}
#endif
				ntmp->nxt = tmp;
				if (!olst)
					H_tab[j1] = ntmp;
				else
					olst->nxt = ntmp;
				tmp = ntmp;
				break;
			} else if (!tmp->nxt)
			{	/* append after tmp */
#ifdef COLLAPSE
Append:
#endif
				tmp->nxt = grab_state(n);
#if NCORE>1
				if (!tmp->nxt)
				{
#if !defined(SEP_STATE) && !defined(BITSTATE)
					leave_critical(CS_ID);
#endif
					return 1; /* allow normal termination */
				}
#endif
				tmp = tmp->nxt;
				break;
		}	}
	}
	/* fall through: tmp points at the freshly grabbed slot */
#ifdef CHECK
	tmp->st_id = (unsigned) nstates;
#if NCORE>1
	printf("cpu%d: ", core_id);
#endif
#ifdef BITSTATE
	printf("	Push state %d\n", ((int) nstates) - 1);
#else
	printf("	New state %d\n", (int) nstates);
#endif
#endif
#if !defined(SAFETY) || defined(REACH)
	tmp->D = depth;
#endif
#ifndef SAFETY
#ifndef NOCOMP
	if (S_A)
	{	v[0] = V_A;	/* mark with the current dfs-phase tag */
#ifndef NOFAIR
		if (S_A > NFAIR)
		{	unsigned ci, bp; /* as above */
			ci = (now._cnt[now._a_t&1] / 8);
			bp = (now._cnt[now._a_t&1] - 8*ci);
			if (now._a_t&1)
			{	ci = (NFAIR - 1) - ci;
				bp = 7 - bp; /* bp = 0..7 */
			}
			v[1+ci] = 1 << bp;
		}
#endif
	}
#endif
#endif
#if defined(AUTO_RESIZE) && !defined(BITSTATE)
	tmp->m_K1 = K1;	/* remember full hash for cheap table resize */
#endif
	memcpy(((char *)&(tmp->state)), v, n);
#ifdef FULLSTACK
	tmp->tagged = (S_A)?V_A:(depth+1);	/* nonzero = on dfs stack */
#ifdef DEBUG
	dumpstate(-1, v, n, tmp->tagged);
#endif
	Lstate = (struct H_el *) tmp;
#else
#ifdef DEBUG
	dumpstate(-1, v, n, 0);
#endif
#if NCORE>1
	Lstate = (struct H_el *) tmp;
#endif
#endif
	/* #if NCORE>1 && !defined(SEP_STATE) */
#if NCORE>1
#ifdef V_PROVISO
	tmp->cpu_id = core_id;
#endif
#if !defined(SEP_STATE) && !defined(BITSTATE)
	leave_critical(CS_ID);
#endif
#endif
	return 0;
}
9808 #endif
9809 #include TRANSITIONS
/* after the search: report unreached transitions for each of the
 * five generated proctypes (tables reached0..4 / nstates0..4)
 */
void
do_reach(void)
{
	r_ck(reached0, nstates0, 0, src_ln0, src_file0);
	r_ck(reached1, nstates1, 1, src_ln1, src_file1);
	r_ck(reached2, nstates2, 2, src_ln2, src_file2);
	r_ck(reached3, nstates3, 3, src_ln3, src_file3);
	r_ck(reached4, nstates4, 4, src_ln4, src_file4);
}
9819
/* initialize the global variables of the model (the buffer.spin globals
 * embedded in the state vector 'now'), and register their value ranges
 * when compiled with -DVAR_RANGES
 */
void
iniglobals(void)
{
	deliver = 0;
	{	int l_in;
		for (l_in = 0; l_in < 4; l_in++)
		{
			now.buffer_use[l_in] = 0;
		}
	}
	now.write_off = 0;
	{	int l_in;
		for (l_in = 0; l_in < 2; l_in++)
		{
			now.commit_count[l_in] = 0;
		}
	}
	now.read_off = 0;
	{	int l_in;
		for (l_in = 0; l_in < 2; l_in++)
		{
			now.retrieve_count[l_in] = 0;
		}
	}
	now.events_lost = 0;
	now.refcount = 0;
#ifdef VAR_RANGES
	{	int l_in;
		for (l_in = 0; l_in < 4; l_in++)
		{
			logval("buffer_use[l_in]", now.buffer_use[l_in]);
		}
	}
	logval("write_off", now.write_off);
	{	int l_in;
		for (l_in = 0; l_in < 2; l_in++)
		{
			logval("commit_count[l_in]", now.commit_count[l_in]);
		}
	}
	logval("read_off", now.read_off);
	{	int l_in;
		for (l_in = 0; l_in < 2; l_in++)
		{
			logval("retrieve_count[l_in]", now.retrieve_count[l_in]);
		}
	}
	logval("events_lost", now.events_lost);
	logval("refcount", now.refcount);
#endif
	/* globals precede the per-process/queue area in the vector */
	Maxbody = max(Maxbody, sizeof(State)-VECTORSZ);
}
9872
/* create queue of type n in the state vector; returns the 1-based queue
 * id.  This model declares no channels, so any n hits the Uerror default.
 * j stays 0 (no per-queue payload); the rest aligns/extends the vector.
 */
int
addqueue(int n, int is_rv)
{	int j=0, i = now._nr_qs;
#ifndef NOCOMP
	int k;
#endif
	if (i >= MAXQ)
		Uerror("too many queues");
	switch (n) {
	default: Uerror("bad queue - addqueue");
	}
	if (vsize%WS)	/* pad vector to word-size alignment */
		q_skip[i] = WS-(vsize%WS);
	else
		q_skip[i] = 0;
#ifndef NOCOMP
	k = vsize;
#ifndef BFS
	if (is_rv) k += j;
#endif
	for (k += (int) q_skip[i]; k > vsize; k--)
		Mask[k-1] = 1;	/* exclude padding from state compares */
#endif
	vsize += (int) q_skip[i];
	q_offset[i] = vsize;
	now._nr_qs += 1;
	vsize += j;
#ifndef NOVSZ
	now._vsz = vsize;
#endif
	hmax = max(hmax, vsize);
	if (vsize >= VECTORSZ)
		Uerror("VECTORSZ is too small, edit pan.h");
	memset((char *)qptr(i), 0, j);
	((Q0 *)qptr(i))->_t = n;
	return i+1;
}
9910
9911 #if NQS>0
/* send a message into channel 'into' (1-based id).  The model has no
 * channel types, so the type switch always ends in Uerror; the case-0
 * print deliberately falls through into the default error.
 */
void
qsend(int into, int sorted, int args_given)
{	int j; uchar *z;

#ifdef HAS_SORTED
	int k;
#endif
	if (!into--)	/* convert to 0-based; 0 means uninitialized */
		uerror("ref to uninitialized chan name (sending)");
	if (into >= (int) now._nr_qs || into < 0)
		Uerror("qsend bad queue#");
	z = qptr(into);
	j = ((Q0 *)qptr(into))->Qlen;
	switch (((Q0 *)qptr(into))->_t) {
	case 0: printf("queue %d was deleted\n", into+1);
		/* fallthrough */
	default: Uerror("bad queue - qsend");
	}
#ifdef EVENT_TRACE
	if (in_s_scope(into+1))
		require('s', into);
#endif
}
9934 #endif
9935
9936 #if SYNC
/* return nonzero iff channel 'from' is a rendezvous (zero-length)
 * channel; with no channel types in this model every valid id falls
 * through the switch into Uerror
 */
int
q_zero(int from)
{	if (!from--)
	{	uerror("ref to uninitialized chan name (q_zero)");
		return 0;
	}
	switch(((Q0 *)qptr(from))->_t) {
	case 0: printf("queue %d was deleted\n", from+1);
	}
	Uerror("bad queue q-zero");
	return -1;	/* not reached when Uerror aborts */
}
/* guard used when polling a channel's contents: polling a rendezvous
 * channel is flagged as an error; always evaluates to true otherwise
 */
int
not_RV(int from)
{	if (!q_zero(from))
	{	return 1;
	}
	printf("==>> a test of the contents of a rv ");
	printf("channel always returns FALSE\n");
	uerror("error to poll rendezvous channel");
	return 1;
}
9958 #endif
9959 #ifndef XUSAFE
/* register an exclusive-use (xr/xs) claim: m&2 = send claim, m&1 =
 * receive claim, on channel x by process y; names are kept for the
 * diagnostic messages printed on violations
 */
void
setq_claim(int x, int m, char *s, int y, char *p)
{	if (x == 0)
		uerror("x[rs] claim on uninitialized channel");
	if (x < 0 || x > MAXQ)
		Uerror("cannot happen setq_claim");
	q_claim[x] |= m;
	p_name[y] = p;
	q_name[x] = s;
	if (m&2) q_S_check(x, y);
	if (m&1) q_R_check(x, y);
}
short q_sender[MAXQ+1];	/* 1+id of the unique claimed sender per channel */
/* verify the exclusive-send (xs) claim: at most one process may send on
 * channel x; a second distinct sender invalidates the partial-order
 * reduction and is reported as an assertion violation
 */
int
q_S_check(int x, int who)
{	if (!q_sender[x])
	{	q_sender[x] = who+1;
#if SYNC
		if (q_zero(x))	/* xs makes no sense on rendezvous chans */
		{	printf("chan %s (%d), ",
				q_name[x], x-1);
			printf("sndr proc %s (%d)\n",
				p_name[who], who);
			uerror("xs chans cannot be used for rv");
		}
#endif
	} else
	if (q_sender[x] != who+1)
	{	printf("pan: xs assertion violated: ");
		printf("access to chan <%s> (%d)\npan: by ",
			q_name[x], x-1);
		if (q_sender[x] > 0 && p_name[q_sender[x]-1])
			printf("%s (proc %d) and by ",
			p_name[q_sender[x]-1], q_sender[x]-1);
		printf("%s (proc %d)\n",
			p_name[who], who);
		uerror("error, partial order reduction invalid");
	}
	return 1;
}
short q_recver[MAXQ+1];	/* 1+id of the unique claimed receiver per channel */
/* verify the exclusive-receive (xr) claim, mirror image of q_S_check */
int
q_R_check(int x, int who)
{	if (!q_recver[x])
	{	q_recver[x] = who+1;
#if SYNC
		if (q_zero(x))	/* xr makes no sense on rendezvous chans */
		{	printf("chan %s (%d), ",
				q_name[x], x-1);
			printf("recv proc %s (%d)\n",
				p_name[who], who);
			uerror("xr chans cannot be used for rv");
		}
#endif
	} else
	if (q_recver[x] != who+1)
	{	printf("pan: xr assertion violated: ");
		printf("access to chan %s (%d)\npan: ",
			q_name[x], x-1);
		if (q_recver[x] > 0 && p_name[q_recver[x]-1])
			printf("by %s (proc %d) and ",
			p_name[q_recver[x]-1], q_recver[x]-1);
		printf("by %s (proc %d)\n",
			p_name[who], who);
		uerror("error, partial order reduction invalid");
	}
	return 1;
}
10028 #endif
10029 int
10030 q_len(int x)
10031 { if (!x--)
10032 uerror("ref to uninitialized chan name (len)");
10033 return ((Q0 *)qptr(x))->Qlen;
10034 }
10035
/* true iff channel 'from' is filled to capacity; with no channel types
 * in this model the switch falls through to Uerror for any valid id
 */
int
q_full(int from)
{	if (!from--)
		uerror("ref to uninitialized chan name (qfull)");
	switch(((Q0 *)qptr(from))->_t) {
	case 0: printf("queue %d was deleted\n", from+1);
	}
	Uerror("bad queue - q_full");
	return 0;	/* not reached when Uerror aborts */
}
10046
10047 #ifdef HAS_UNLESS
/* true when channel 'from' is either empty or full (used by unless) */
int
q_e_f(int from)
{	if (q_len(from) == 0)
	{	return 1;
	}
	return q_full(from);
}
10053 #endif
10054 #if NQS>0
/* receive field 'fld' from message 'slot' of channel 'from'; done=1
 * marks a completed (destructive) receive.  No channel types exist in
 * this model, so the type switch always reaches Uerror.
 */
int
qrecv(int from, int slot, int fld, int done)
{	uchar *z;
	int j, k, r=0;

	if (!from--)	/* convert to 0-based; 0 means uninitialized */
		uerror("ref to uninitialized chan name (receiving)");
	if (from >= (int) now._nr_qs || from < 0)
		Uerror("qrecv bad queue#");
	z = qptr(from);
#ifdef EVENT_TRACE
	if (done && (in_r_scope(from+1)))
		require('r', from);
#endif
	switch (((Q0 *)qptr(from))->_t) {
	case 0: printf("queue %d was deleted\n", from+1);
		/* fallthrough */
	default: Uerror("bad queue - qrecv");
	}
	return r;
}
10075 #endif
10076
10077 #ifndef BITSTATE
10078 #ifdef COLLAPSE
/* COLLAPSE mode: copy the unmasked bytes of queue i into z (or into the
 * scratch buffer) skipping empty trailing slots, pad to word size, and
 * return either the byte count (z given) or the queue's ordinal number
 */
long
col_q(int i, char *z)
{	int j=0, k;
	char *x, *y;
	Q0 *ptr = (Q0 *) qptr(i);
	switch (ptr->_t) {	/* no queue types in this model */
	default: Uerror("bad qtype - collapse");
	}
	if (z) x = z; else x = scratch;
	y = (char *) ptr; k = q_offset[i];
	/* no need to store the empty slots at the end */
	j -= (q_max[ptr->_t] - ptr->Qlen) * ((j - 2)/q_max[ptr->_t]);
	for ( ; j > 0; j--, y++)
		if (!Mask[k++]) *x++ = *y;
	for (j = 0; j < WS-1; j++)	/* word-align the copy */
		*x++ = 0;
	x -= j;
	if (z) return (long) (x - z);
	return ordinal(scratch, x-scratch, 1); /* chan */
}
10099 #endif
10100 #endif
/* undo a send on channel 'into' during backtracking: pop the last
 * message and return the value needed to restore the transition.
 * No channel types exist in this model, so the switch hits Uerror.
 */
int
unsend(int into)
{	int _m=0, j; uchar *z;

#ifdef HAS_SORTED
	int k;
#endif
	if (!into--)
		uerror("ref to uninitialized chan (unsend)");
	z = qptr(into);
	j = ((Q0 *)z)->Qlen;
	((Q0 *)z)->Qlen = --j;	/* drop the most recent message */
	switch (((Q0 *)qptr(into))->_t) {
	default: Uerror("bad queue - unsend");
	}
	return _m;
}
10118
/* undo a receive on channel 'from' during backtracking; strt=1 restores
 * the message count, then the per-type code (none in this model) would
 * restore the message fields
 */
void
unrecv(int from, int slot, int fld, int fldvar, int strt)
{	int j; uchar *z;

	if (!from--)
		uerror("ref to uninitialized chan (unrecv)");
	z = qptr(from);
	j = ((Q0 *)z)->Qlen;
	if (strt) ((Q0 *)z)->Qlen = j+1;	/* re-grow the queue */
	switch (((Q0 *)qptr(from))->_t) {
	default: Uerror("bad queue - qrecv");
	}
}
/* check whether transition t of process II could become executable
 * through a queue operation or timeout/alpha move; used by the partial
 * order reduction to decide if a transition may be deferred
 */
int
q_cond(short II, Trans *t)
{	int i = 0;
	for (i = 0; i < 6; i++)
	{	if (t->ty[i] == TIMEOUT_F) return 1;
		if (t->ty[i] == ALPHA_F)	/* process creation */
#ifdef GLOB_ALPHA
			return 0;
#else
			return (II+1 == (short) now._nr_pr && II+1 < MAXPROC);
#endif
		switch (t->qu[i]) {	/* no queues in this model */
		case 0: break;
		default: Uerror("unknown qid - q_cond");
			return 0;
		}
	}
	return 1;
}
/* print the cc command line that reproduces this verifier's compile-time
 * configuration.  Fix: the original emitted "-DCHECK " twice (the
 * #ifdef CHECK block appeared both near the top and again near the
 * bottom of the flag list); the duplicate has been removed.
 */
void
to_compile(void)
{	char ctd[1024], carg[64];
#ifdef BITSTATE
	strcpy(ctd, "-DBITSTATE ");
#else
	strcpy(ctd, "");
#endif
#ifdef NOVSZ
	strcat(ctd, "-DNOVSZ ");
#endif
#ifdef REVERSE
	strcat(ctd, "-DREVERSE ");
#endif
#ifdef T_REVERSE
	strcat(ctd, "-DT_REVERSE ");
#endif
#ifdef RANDOMIZE
#if RANDOMIZE>0
	sprintf(carg, "-DRANDOMIZE=%d ", RANDOMIZE);
	strcat(ctd, carg);
#else
	strcat(ctd, "-DRANDOMIZE ");
#endif
#endif
#ifdef SCHED
	sprintf(carg, "-DSCHED=%d ", SCHED);
	strcat(ctd, carg);
#endif
#ifdef BFS
	strcat(ctd, "-DBFS ");
#endif
#ifdef MEMLIM
	sprintf(carg, "-DMEMLIM=%d ", MEMLIM);
	strcat(ctd, carg);
#else
#ifdef MEMCNT
	sprintf(carg, "-DMEMCNT=%d ", MEMCNT);
	strcat(ctd, carg);
#endif
#endif
#ifdef NOCLAIM
	strcat(ctd, "-DNOCLAIM ");
#endif
#ifdef SAFETY
	strcat(ctd, "-DSAFETY ");
#else
#ifdef NOFAIR
	strcat(ctd, "-DNOFAIR ");
#else
#ifdef NFAIR
	if (NFAIR != 2)
	{	sprintf(carg, "-DNFAIR=%d ", NFAIR);
		strcat(ctd, carg);
	}
#endif
#endif
#endif
#ifdef NOREDUCE
	strcat(ctd, "-DNOREDUCE ");
#else
#ifdef XUSAFE
	strcat(ctd, "-DXUSAFE ");
#endif
#endif
#ifdef NP
	strcat(ctd, "-DNP ");
#endif
#ifdef PEG
	strcat(ctd, "-DPEG ");
#endif
#ifdef VAR_RANGES
	strcat(ctd, "-DVAR_RANGES ");
#endif
#ifdef HC0
	strcat(ctd, "-DHC0 ");
#endif
#ifdef HC1
	strcat(ctd, "-DHC1 ");
#endif
#ifdef HC2
	strcat(ctd, "-DHC2 ");
#endif
#ifdef HC3
	strcat(ctd, "-DHC3 ");
#endif
#ifdef HC4
	strcat(ctd, "-DHC4 ");
#endif
#ifdef CHECK
	strcat(ctd, "-DCHECK ");
#endif
#ifdef CTL
	strcat(ctd, "-DCTL ");
#endif
#ifdef NIBIS
	strcat(ctd, "-DNIBIS ");
#endif
#ifdef NOBOUNDCHECK
	strcat(ctd, "-DNOBOUNDCHECK ");
#endif
#ifdef NOSTUTTER
	strcat(ctd, "-DNOSTUTTER ");
#endif
#ifdef REACH
	strcat(ctd, "-DREACH ");
#endif
#ifdef PRINTF
	strcat(ctd, "-DPRINTF ");
#endif
#ifdef OTIM
	strcat(ctd, "-DOTIM ");
#endif
#ifdef COLLAPSE
	strcat(ctd, "-DCOLLAPSE ");
#endif
#ifdef MA
	sprintf(carg, "-DMA=%d ", MA);
	strcat(ctd, carg);
#endif
#ifdef SVDUMP
	strcat(ctd, "-DSVDUMP ");
#endif
#ifdef VECTORSZ
	if (VECTORSZ != 1024)
	{	sprintf(carg, "-DVECTORSZ=%d ", VECTORSZ);
		strcat(ctd, carg);
	}
#endif
#ifdef VERBOSE
	strcat(ctd, "-DVERBOSE ");
#endif
	/* note: the duplicate "#ifdef CHECK" block that was here has been
	 * removed; -DCHECK is already reported once above */
#ifdef SDUMP
	strcat(ctd, "-DSDUMP ");
#endif
#if NCORE>1
	sprintf(carg, "-DNCORE=%d ", NCORE);
	strcat(ctd, carg);
#endif
#ifdef SFH
	sprintf(carg, "-DSFH ");
	strcat(ctd, carg);
#endif
#ifdef VMAX
	if (VMAX != 256)
	{	sprintf(carg, "-DVMAX=%d ", VMAX);
		strcat(ctd, carg);
	}
#endif
#ifdef PMAX
	if (PMAX != 16)
	{	sprintf(carg, "-DPMAX=%d ", PMAX);
		strcat(ctd, carg);
	}
#endif
#ifdef QMAX
	if (QMAX != 16)
	{	sprintf(carg, "-DQMAX=%d ", QMAX);
		strcat(ctd, carg);
	}
#endif
#ifdef SET_WQ_SIZE
	sprintf(carg, "-DSET_WQ_SIZE=%d ", SET_WQ_SIZE);
	strcat(ctd, carg);
#endif
	printf("Compiled as: cc -o pan %span.c\n", ctd);
}
/* start the initially-active process(es) of the model.  The generated
 * code branched on 'permuted' but executed the identical Addproc(4)
 * call in both branches, so the branch was redundant and is collapsed.
 */
void
active_procs(void)
{
	Addproc(4);
}
10330 #ifdef MA
10331 /*
10332 #include <stdio.h>
10333 #define uchar unsigned char
10334 */
10335 #define ulong unsigned long
10336 #define ushort unsigned short
10337
10338 #define TWIDTH 256
10339 #define HASH(y,n) (n)*(((long)y))
10340 #define INRANGE(e,h) ((h>=e->From && h<=e->To)||(e->s==1 && e->S==h))
10341
10342 extern char *emalloc(unsigned long); /* imported routine */
10343 extern void dfa_init(ushort); /* 4 exported routines */
10344 extern int dfa_member(ulong);
10345 extern int dfa_store(uchar *);
10346 extern void dfa_stats(void);
10347
10348 typedef struct Edge {
10349 uchar From, To; /* max range 0..255 */
10350 uchar s, S; /* if s=1, S is singleton */
10351 struct Vertex *Dst;
10352 struct Edge *Nxt;
10353 } Edge;
10354
10355 typedef struct Vertex {
10356 ulong key, num; /* key for splay tree, nr incoming edges */
10357 uchar from[2], to[2]; /* in-node predefined edge info */
10358 struct Vertex *dst[2];/* most nodes have 2 or more edges */
10359 struct Edge *Succ; /* in case there are more edges */
10360 struct Vertex *lnk, *left, *right; /* splay tree plumbing */
10361 } Vertex;
10362
10363 static Edge *free_edges;
10364 static Vertex *free_vertices;
10365 static Vertex **layers; /* one splay tree of nodes per layer */
10366 static Vertex **path; /* run of word in the DFA */
10367 static Vertex *R, *F, *NF; /* Root, Final, Not-Final */
10368 static uchar *word, *lastword;/* string, and last string inserted */
10369 static int dfa_depth, iv=0, nv=0, pfrst=0, Tally;
10370
10371 static void insert_it(Vertex *, int); /* splay-tree code */
10372 static void delete_it(Vertex *, int);
10373 static Vertex *find_it(Vertex *, Vertex *, uchar, int);
10374
/* return an edge chain to the free list.  The recursion recycles the
 * tail first, so the head of the chain ends up at the head of
 * free_edges (order matters only for reuse locality).
 */
static void
recyc_edges(Edge *e)
{
	if (!e) return;
	recyc_edges(e->Nxt);
	e->Nxt = free_edges;
	free_edges = e;
}
10383
10384 static Edge *
10385 new_edge(Vertex *dst)
10386 { Edge *e;
10387
10388 if (free_edges)
10389 { e = free_edges;
10390 free_edges = e->Nxt;
10391 e->From = e->To = e->s = e->S = 0;
10392 e->Nxt = (Edge *) 0;
10393 } else
10394 e = (Edge *) emalloc(sizeof(Edge));
10395 e->Dst = dst;
10396
10397 return e;
10398 }
10399
/* return a vertex (and its overflow edges) to the free lists; the Succ
 * field doubles as the free-list link while the vertex is recycled
 */
static void
recyc_vertex(Vertex *v)
{
	recyc_edges(v->Succ);
	v->Succ = (Edge *) free_vertices;
	free_vertices = v;
	nr_states--;
}
10408
/* allocate a DFA vertex, reusing the free list when possible.
 * NOTE(review): only Succ and num are reset on reuse — the other fields
 * appear to be (re)initialized by callers; confirm against dfa users.
 */
static Vertex *
new_vertex(void)
{	Vertex *v;

	if (free_vertices)
	{	v = free_vertices;
		free_vertices = (Vertex *) v->Succ;
		v->Succ = (Edge *) 0;
		v->num  = 0;
	} else
		v = (Vertex *) emalloc(sizeof(Vertex));

	nr_states++;
	return v;
}
10424
/* give vertex v a single transition covering all 256 byte values to a
 * fresh destination vertex, insert v into layer n, and return the new
 * destination
 */
static Vertex *
allDelta(Vertex *v, int n)
{	Vertex *dst = new_vertex();

	v->from[0] = 0;
	v->to[0] = 255;
	v->dst[0] = dst;
	dst->num = 256;		/* one incoming edge per byte value */
	insert_it(v, n);
	return dst;
}
10436
/* attach edge e to vertex v, newest first: the two inline slots
 * (from/to/dst[0..1]) are preferred; when both are occupied the oldest
 * inline entry is shifted out into the overflow list v->Succ
 */
static void
insert_edge(Vertex *v, Edge *e)
{	/* put new edge first */
	if (!v->dst[0])
	{	v->dst[0] = e->Dst;
		v->from[0] = e->From;
		v->to[0] = e->To;
		recyc_edges(e);	/* contents copied, recycle the shell */
		return;
	}
	if (!v->dst[1])
	{	v->from[1] = v->from[0]; v->from[0] = e->From;
		v->to[1]   = v->to[0];   v->to[0]   = e->To;
		v->dst[1]  = v->dst[0];  v->dst[0]  = e->Dst;
		recyc_edges(e);
		return;
	} /* shift */
	{	int f = v->from[1];
		int t = v->to[1];
		Vertex *d = v->dst[1];
		v->from[1] = v->from[0]; v->from[0] = e->From;
		v->to[1]   = v->to[0];   v->to[0]   = e->To;
		v->dst[1]  = v->dst[0];  v->dst[0]  = e->Dst;
		e->From = f;	/* e now carries the evicted entry */
		e->To   = t;
		e->Dst  = d;
	}
	e->Nxt = v->Succ;
	v->Succ = e;
}
10467
/* duplicate the overflow edge list of e onto v; the recursion copies
 * the tail first so the copied list keeps the original order
 */
static void
copyRecursive(Vertex *v, Edge *e)
{	Edge *f;
	if (e->Nxt) copyRecursive(v, e->Nxt);
	f = new_edge(e->Dst);
	f->From = e->From;
	f->To   = e->To;
	f->s    = e->s;
	f->S    = e->S;
	f->Nxt  = v->Succ;
	v->Succ = f;
}
10480
10481 static void
10482 copyEdges(Vertex *to, Vertex *from)
10483 { int i;
10484 for (i = 0; i < 2; i++)
10485 { to->from[i] = from->from[i];
10486 to->to[i] = from->to[i];
10487 to->dst[i] = from->dst[i];
10488 }
10489 if (from->Succ) copyRecursive(to, from->Succ);
10490 }
10491
/* like Delta() but returns the matching Edge, with a one-entry cache:
 * unless 'first' is set, the previously returned edge is reused when it
 * still covers h.  Inline-slot hits are materialized into a static tmp
 * edge, so the returned pointer is only valid until the next call.
 */
static Edge *
cacheDelta(Vertex *v, int h, int first)
{	static Edge *ov, tmp;  int i;

	if (!first && INRANGE(ov,h))
		return ov; /* intercepts about 10% */
	for (i = 0; i < 2; i++)
		if (v->dst[i] && h >= v->from[i] && h <= v->to[i])
		{	tmp.From = v->from[i];
			tmp.To   = v->to[i];
			tmp.Dst  = v->dst[i];
			tmp.s    = tmp.S = 0;
			ov = &tmp;
			return ov;
		}
	for (ov = v->Succ; ov; ov = ov->Nxt)
		if (INRANGE(ov,h)) return ov;

	Uerror("cannot get here, cacheDelta");
	return (Edge *) 0;
}
10513
/* DFA transition function: the vertex reached from v on byte h; checks
 * the two inline slots first, then the overflow edge list
 */
static Vertex *
Delta(Vertex *v, int h)	/* v->delta[h] */
{	Edge *e;

	if (v->dst[0] && h >= v->from[0] && h <= v->to[0])
		return v->dst[0];	/* oldest edge */
	if (v->dst[1] && h >= v->from[1] && h <= v->to[1])
		return v->dst[1];
	for (e = v->Succ; e; e = e->Nxt)
		if (INRANGE(e,h))
			return e->Dst;
	Uerror("cannot happen Delta");
	return (Vertex *) 0;
}
10528
/* add d (+1 or -1) times the width of each outgoing edge of v to the
 * incoming-edge counters of the destinations; when incrementing, a
 * wrap-around of the counter is caught (note the goto jumps into the
 * second loop's error call)
 */
static void
numDelta(Vertex *v, int d)
{	Edge *e;
	ulong cnt;
	int i;

	for (i = 0; i < 2; i++)
	if (v->dst[i])
	{	cnt = v->dst[i]->num + d*(1 + v->to[i] - v->from[i]);
		if (d == 1 && cnt < v->dst[i]->num) goto bad;	/* overflow */
		v->dst[i]->num = cnt;
	}
	for (e = v->Succ; e; e = e->Nxt)
	{	cnt = e->Dst->num + d*(1 + e->To - e->From + e->s);
		if (d == 1 && cnt < e->Dst->num)
bad:			Uerror("too many incoming edges");
		e->Dst->num = cnt;
	}
}
10548
/* set v's transition on byte h to newdst (v->delta[h] = newdst):
 * part 1 removes h from whichever existing edge covers it (shrinking,
 * splitting, or deleting that edge); part 2 merges h into an existing
 * edge that already targets newdst (extending a range or using the
 * singleton slot s/S), or appends a fresh one-byte edge
 */
static void
setDelta(Vertex *v, int h, Vertex *newdst)	/* v->delta[h] = newdst; */
{	Edge *e, *f = (Edge *) 0, *g;
	int i;

	/* remove the old entry, if there */
	for (i = 0; i < 2; i++)
		if (v->dst[i] && h >= v->from[i] && h <= v->to[i])
		{	if (h == v->from[i])
			{	if (h == v->to[i])
				{	v->dst[i] = (Vertex *) 0;
					v->from[i] = v->to[i] = 0;
				} else
					v->from[i]++;	/* shrink from below */
			} else if (h == v->to[i])
			{	v->to[i]--;		/* shrink from above */
			} else
			{	g = new_edge(v->dst[i]);/* same dst */
				g->From = v->from[i];
				g->To = h-1;		/* left half */
				v->from[i] = h+1;	/* right half */
				insert_edge(v, g);
			}
			goto part2;
		}
	for (e = v->Succ; e; f = e, e = e->Nxt)
	{	if (e->s == 1 && e->S == h)
		{	e->s = e->S = 0;	/* drop the singleton */
			goto rem_tst;
		}
		if (h >= e->From && h <= e->To)
		{	if (h == e->From)
			{	if (h == e->To)
				{	if (e->s)	/* keep singleton as range */
					{	e->From = e->To = e->S;
						e->s = 0;
						break;
					} else
						goto rem_do;
				} else
					e->From++;
			} else if (h == e->To)
			{	e->To--;
			} else	/* split */
			{	g = new_edge(e->Dst);	/* same dst */
				g->From = e->From;
				g->To = h-1;	/* g=left half */
				e->From = h+1;	/* e=right half */
				g->Nxt = e->Nxt;	/* insert g */
				e->Nxt = g;		/* behind e */
				break;			/* done */
			}

rem_tst:		if (e->From > e->To)	/* range emptied out */
			{	if (e->s == 0) {
rem_do:				if (f)	/* unlink e; f is the predecessor */
						f->Nxt = e->Nxt;
					else
						v->Succ = e->Nxt;
					e->Nxt = (Edge *) 0;
					recyc_edges(e);
				} else
				{	e->From = e->To = e->S;
					e->s = 0;
			}	}
			break;
	}	}
part2:
	/* check if newdst is already there */
	for (i = 0; i < 2; i++)
		if (v->dst[i] == newdst)
		{	if (h+1 == (int) v->from[i])
			{	v->from[i] = h;	/* extend range downward */
				return;
			}
			if (h == (int) v->to[i]+1)
			{	v->to[i] = h;	/* extend range upward */
				return;
		}	}
	for (e = v->Succ; e; e = e->Nxt)
	{	if (e->Dst == newdst)
		{	if (h+1 == (int) e->From)
			{	e->From = h;
				if (e->s == 1 && e->S+1 == e->From)
				{	e->From = e->S;	/* absorb singleton */
					e->s = e->S = 0;
				}
				return;
			}
			if (h == (int) e->To+1)
			{	e->To = h;
				if (e->s == 1 && e->S == e->To+1)
				{	e->To = e->S;	/* absorb singleton */
					e->s = e->S = 0;
				}
				return;
			}
			if (e->s == 0)	/* park h in the singleton slot */
			{	e->s = 1;
				e->S = h;
				return;
	}	}	}
	/* add as a new edge */
	e = new_edge(newdst);
	e->From = e->To = h;
	insert_edge(v, e);
}
10656
/* return v's stored splay-tree key and, as a side effect, set the
 * global Tally to a bucket index derived from the highest destination
 * pointer among v's edges (same Tally scheme as mk_key)
 */
static ulong
cheap_key(Vertex *v)
{	ulong vk2 = 0;

	if (v->dst[0])
	{	vk2 = (ulong) v->dst[0];
		if ((ulong) v->dst[1] > vk2)
			vk2 = (ulong) v->dst[1];
	} else if (v->dst[1])
		vk2 = (ulong) v->dst[1];
	if (v->Succ)
	{	Edge *e;
		for (e = v->Succ; e; e = e->Nxt)
			if ((ulong) e->Dst > vk2)
				vk2 = (ulong) e->Dst;
	}
	Tally = (vk2>>2)&(TWIDTH-1);
	return v->key;
}
10676
/* compute a structural hash key for vertex v from its outgoing edges
 * (destination pointer x range width, summed), independent of edge
 * order; side effect: Tally is set from the highest destination pointer
 */
static ulong
mk_key(Vertex *v)	/* not sensitive to order */
{	ulong m = 0, vk2 = 0;
	Edge *e;

	if (v->dst[0])
	{	m += HASH(v->dst[0], v->to[0] - v->from[0] + 1);
		vk2 = (ulong) v->dst[0];
	}
	if (v->dst[1])
	{	m += HASH(v->dst[1], v->to[1] - v->from[1] + 1);
		if ((ulong) v->dst[1] > vk2) vk2 = (ulong) v->dst[1];
	}
	for (e = v->Succ; e; e = e->Nxt)
	{	m += HASH(e->Dst, e->To - e->From + 1 + e->s);
		if ((ulong) e->Dst > vk2) vk2 = (ulong) e->Dst;
	}
	Tally = (vk2>>2)&(TWIDTH-1);
	return m;
}
10697
/*
 * Hash vertex v as it would look if symbol sigma were redirected to
 * target n: any transition range that currently covers sigma is hashed
 * with its width reduced by one, and n is hashed in as a width-1
 * target.  Used by find_it() to locate an already-existing equivalent
 * vertex without building it first.  Side effect: sets the global
 * Tally hash bucket (largest destination pointer, n included).
 */
static ulong
mk_special(int sigma, Vertex *n, Vertex *v)
{	ulong m = 0, vk2 = 0;
	Edge *f;
	int i;

	for (i = 0; i < 2; i++)
		if (v->dst[i])
		{	if (sigma >= v->from[i] && sigma <= v->to[i])
			{	/* sigma leaves this range: width shrinks by 1 */
				m += HASH(v->dst[i], v->to[i]-v->from[i]);
				if ((ulong) v->dst[i] > vk2
				&&  v->to[i] > v->from[i])	/* range not emptied */
					vk2 = (ulong) v->dst[i];
			} else
			{	m += HASH(v->dst[i], v->to[i]-v->from[i]+1);
				if ((ulong) v->dst[i] > vk2)
					vk2 = (ulong) v->dst[i];
			} }
	for (f = v->Succ; f; f = f->Nxt)
	{	if (sigma >= f->From && sigma <= f->To)
		{	m += HASH(f->Dst, f->To - f->From + f->s);
			if ((ulong) f->Dst > vk2
			&&  f->To - f->From + f->s > 0)	/* edge not emptied */
				vk2 = (ulong) f->Dst;
		} else if (f->s == 1 && sigma == f->S)
		{	/* sigma was the extra single symbol: drop it */
			m += HASH(f->Dst, f->To - f->From + 1);
			if ((ulong) f->Dst > vk2) vk2 = (ulong) f->Dst;
		} else
		{	m += HASH(f->Dst, f->To - f->From + 1 + f->s);
			if ((ulong) f->Dst > vk2) vk2 = (ulong) f->Dst;
		} }

	if ((ulong) n > vk2) vk2 = (ulong) n;
	Tally = (vk2>>2)&(TWIDTH-1);
	m += HASH(n, 1);	/* the redirected sigma -> n transition */
	return m;
}
10735
/*
 * Build the initial chain automaton for MA state storage: one layer
 * per state-vector byte.  Allocates the per-layer work arrays (path,
 * layers, lastword), creates the root R and accept vertex F, then
 * links dfa_depth-1 intermediate layers with allDelta().  After the
 * loop i == dfa_depth, so the final allDelta() call builds the last
 * layer, whose result becomes NF (the non-accepting sink).
 */
void
dfa_init(ushort nr_layers)
{	int i; Vertex *r, *t;

	dfa_depth = nr_layers;	/* one byte per layer */
	path = (Vertex **) emalloc((dfa_depth+1)*sizeof(Vertex *));
	layers = (Vertex **) emalloc(TWIDTH*(dfa_depth+1)*sizeof(Vertex *));
	lastword = (uchar *) emalloc((dfa_depth+1)*sizeof(uchar));
	lastword[dfa_depth] = lastword[0] = 255;	/* force first compare to differ */
	path[0] = R = new_vertex(); F = new_vertex();

	for (i = 1, r = R; i < dfa_depth; i++, r = t)
		t = allDelta(r, i-1);
	NF = allDelta(r, i-1);
}
10751
10752 #if 0
10753 static void complement_dfa(void) { Vertex *tmp = F; F = NF; NF = tmp; }
10754 #endif
10755
10756 double
10757 tree_stats(Vertex *t)
10758 { Edge *e; double cnt=0.0;
10759 if (!t) return 0;
10760 if (!t->key) return 0;
10761 t->key = 0; /* precaution */
10762 if (t->dst[0]) cnt++;
10763 if (t->dst[1]) cnt++;
10764 for (e = t->Succ; e; e = e->Nxt)
10765 cnt++;
10766 cnt += tree_stats(t->lnk);
10767 cnt += tree_stats(t->left);
10768 cnt += tree_stats(t->right);
10769 return cnt;
10770 }
10771
10772 void
10773 dfa_stats(void)
10774 { int i, j; double cnt = 0.0;
10775 for (j = 0; j < TWIDTH; j++)
10776 for (i = 0; i < dfa_depth+1; i++)
10777 cnt += tree_stats(layers[i*TWIDTH+j]);
10778 printf("Minimized Automaton: %6d nodes and %6g edges\n",
10779 nr_states, cnt);
10780 }
10781
/*
 * Test whether the current word[] (suffix starting at position n) is
 * accepted by the DFA: re-walk the transitions from the vertex cached
 * in path[n], storing each intermediate vertex back into path[], and
 * return nonzero iff the walk ends in the accept vertex F.
 * Assumes path[0..n] is still valid from a preceding dfa_store().
 */
int
dfa_member(ulong n)
{	Vertex **p, **q;
	uchar *w = &word[n];
	int i;

	p = &path[n]; q = (p+1);
	for (i = n; i < dfa_depth; i++)
		*q++ = Delta(*p++, *w++);
	return (*p == F);
}
10793
/*
 * Insert the dfa_depth-byte state vector sv into the minimized DFA.
 * Returns 1 if the word was already accepted (state seen before),
 * 0 after inserting it.  Three phases, as labeled below:
 *   phase1 - walk the automaton along the new word, caching the
 *            vertex at every position in path[] (the walk can be
 *            resumed from the first byte that differs from the
 *            previously stored word, lastword[]);
 *   phase2 - working backwards from the end, find for each suffix
 *            position an existing equivalent vertex (find_it), so the
 *            new word can share the longest possible suffix;
 *   phase3 - split any prefix vertex shared with other words (num>1)
 *            by cloning it, then redirect the path into the suffix
 *            found in phase2, maintaining the num reference counts
 *            and freeing vertices that became unreachable.
 */
int
dfa_store(uchar *sv)
{	Vertex **p, **q, *s, *y, *old, *new = F;
	uchar *w, *u = lastword;
	int i, j, k;

	w = word = sv;
	while (*w++ == *u++)	/* find first byte that differs */
		;
	pfrst = (int) (u - lastword) - 1;
	memcpy(&lastword[pfrst], &sv[pfrst], dfa_depth-pfrst);
	/* cannot resume beyond the positions invalidated last time */
	if (pfrst > iv) pfrst = iv;
	if (pfrst > nv) pfrst = nv;
	/* phase1: */
	p = &path[pfrst]; q = (p+1); w = &word[pfrst];
	for (i = pfrst; i < dfa_depth; i++)
		*q++ = Delta(*p++, *w++);	/* (*p)->delta[*w++]; */

	if (*p == F) return 1;	/* it's already there */
	/* phase2: */
	iv = dfa_depth;
	do {	iv--;
		old = new;
		new = find_it(path[iv], old, word[iv], iv);
	} while (new && iv > 0);

	/* phase3: */
	nv = k = 0; s = path[0];
	for (j = 1; j <= iv; ++j)
		if (path[j]->num > 1)	/* shared: clone before modifying */
		{	y = new_vertex();
			copyEdges(y, path[j]);
			insert_it(y, j);
			numDelta(y, 1);
			delete_it(s, j-1);	/* s changes: re-key it */
			setDelta(s, word[j-1], y);
			insert_it(s, j-1);
			y->num = 1;	/* initial value 1 */
			s = y;
			path[j]->num--;	/* only 1 moved from j to y */
			k = 1;
		} else
		{	s = path[j];
			if (!k) nv = j;	/* longest still-private prefix */
		}
	/* redirect position iv into the equivalent suffix found above */
	y = Delta(s, word[iv]);
	y->num--;
	delete_it(s, iv);
	setDelta(s, word[iv], old);
	insert_it(s, iv);
	old->num++;

	/* reclaim vertices on the old path that lost their last reference */
	for (j = iv+1; j < dfa_depth; j++)
		if (path[j]->num == 0)
		{	numDelta(path[j], -1);
			delete_it(path[j], j);
			recyc_vertex(path[j]);
		} else
			break;
	return 0;
}
10855
/*
 * Top-down splay of the tree rooted at t for key i: the vertex with
 * key i (or the last one touched while searching for it) is rotated
 * to the root, which is returned.  N is a temporary header node; l/r
 * accumulate the left and right subtrees that are reassembled at the
 * end.  The inner rotations are the zig-zig cases of the classic
 * top-down splay algorithm.
 */
static Vertex *
splay(ulong i, Vertex *t)
{	Vertex N, *l, *r, *y;

	if (!t) return t;
	N.left = N.right = (Vertex *) 0;
	l = r = &N;
	for (;;)
	{	if (i < t->key)
		{	if (!t->left) break;
			if (i < t->left->key)
			{	y = t->left;	/* rotate right */
				t->left = y->right;
				y->right = t;
				t = y;
				if (!t->left) break;
			}
			r->left = t;	/* link right */
			r = t;
			t = t->left;
		} else if (i > t->key)
		{	if (!t->right) break;
			if (i > t->right->key)
			{	y = t->right;	/* rotate left */
				t->right = y->left;
				y->left = t;
				t = y;
				if (!t->right) break;
			}
			l->right = t;	/* link left */
			l = t;
			t = t->right;
		} else
			break;	/* found */
	}
	/* reassemble: accumulated subtrees flank the new root */
	l->right = t->left;
	r->left = t->right;
	t->left = N.right;
	t->right = N.left;
	return t;
}
10897
/*
 * Insert vertex v into the splay tree of layer L.  mk_key() computes
 * the (order-insensitive) hash key and, as a side effect, sets Tally
 * to the bucket index used to select among the TWIDTH trees of the
 * layer.  Vertices that hash to the same key are chained through the
 * lnk field instead of being duplicated in the tree.
 */
static void
insert_it(Vertex *v, int L)
{	Vertex *new, *t;
	ulong i; int nr;

	i = mk_key(v);
	nr = ((L*TWIDTH)+Tally);	/* Tally was set by mk_key */
	t = layers[nr];

	v->key = i;
	if (!t)
	{	layers[nr] = v;	/* first vertex in this bucket */
		return;
	}
	t = splay(i, t);
	if (i < t->key)
	{	new = v;	/* v becomes root, t goes right */
		new->left = t->left;
		new->right = t;
		t->left = (Vertex *) 0;
	} else if (i > t->key)
	{	new = v;	/* v becomes root, t goes left */
		new->right = t->right;
		new->left = t;
		t->right = (Vertex *) 0;
	} else /* it's already there */
	{	v->lnk = t->lnk; /* put in linked list off v */
		t->lnk = v;
		new = t;
	}
	layers[nr] = new;
}
10930
/*
 * Verify a hash hit from find_it(): return 1 iff existing vertex h
 * makes exactly the same transitions as v would make after redirecting
 * symbol sigma to target n.  Every symbol covered by h (cached edges
 * and overflow list) is compared against v's transition for that
 * symbol; sigma itself must map to n.
 * NOTE(review): cacheDelta(v, i, j) presumably returns v's edge
 * covering symbol i, with j==1 restarting the cached scan -- confirm
 * against the cacheDelta macro, which is defined outside this chunk.
 */
static int
checkit(Vertex *h, Vertex *v, Vertex *n, uchar sigma)
{	Edge *g, *f;
	int i, k, j = 1;

	for (k = 0; k < 2; k++)
		if (h->dst[k])
		{	if (sigma >= h->from[k] && sigma <= h->to[k])
			{	if (h->dst[k] != n) goto no_match;
			}
			for (i = h->from[k]; i <= h->to[k]; i++)
			{	if (i == sigma) continue;	/* handled above */
				g = cacheDelta(v, i, j); j = 0;
				if (h->dst[k] != g->Dst)
					goto no_match;
				if (g->s == 0 || g->S != i)
					i = g->To;	/* skip rest of g's range */
			} }
	for (f = h->Succ; f; f = f->Nxt)
	{	if (INRANGE(f,sigma))
		{	if (f->Dst != n) goto no_match;
		}
		for (i = f->From; i <= f->To; i++)
		{	if (i == sigma) continue;
			g = cacheDelta(v, i, j); j = 0;
			if (f->Dst != g->Dst)
				goto no_match;
			if (g->s == 1 && i == g->S)
				continue;	/* single extra symbol, keep scanning */
			i = g->To;	/* skip rest of g's range */
		}
		if (f->s && f->S != sigma)	/* f's extra single symbol */
		{	g = cacheDelta(v, f->S, 1);
			if (f->Dst != g->Dst)
				goto no_match;
		}
	}
	/* a vertex with no transitions at all never matches */
	if (h->Succ || h->dst[0] || h->dst[1]) return 1;
no_match:
	return 0;
}
10972
/*
 * Search layer L for an existing vertex equivalent to v with symbol
 * sigma redirected to n.  mk_special() computes the hypothetical key
 * (setting Tally to the bucket), the splay tree is searched for that
 * key, and on a key match the lnk chain of same-key vertices is
 * scanned with the exact structural check checkit().  Returns the
 * equivalent vertex, or NULL if none exists.
 */
static Vertex *
find_it(Vertex *v, Vertex *n, uchar sigma, int L)
{	Vertex *z, *t;
	ulong i; int nr;

	i = mk_special(sigma,n,v);
	nr = ((L*TWIDTH)+Tally);	/* Tally was set by mk_special */
	t = layers[nr];

	if (!t) return (Vertex *) 0;
	layers[nr] = t = splay(i, t);	/* keep the tree rooted at i */
	if (i == t->key)
		for (z = t; z; z = z->lnk)	/* same-key chain */
			if (checkit(z, v, n, sigma))
				return z;

	return (Vertex *) 0;
}
10991
/*
 * Remove vertex v from the splay tree of layer L.  cheap_key() yields
 * v's stored key and sets Tally to the bucket.  Three cases after
 * splaying v's key to the root:
 *   - v sits further down the lnk chain: unlink it from the chain;
 *   - v is the chain head but has successors: promote the next chain
 *     member into the tree in v's place;
 *   - v is alone: classic splay deletion (join left and right subtrees).
 * A vertex that cannot be found indicates corrupted bookkeeping and
 * aborts via Uerror().
 */
static void
delete_it(Vertex *v, int L)
{	Vertex *x, *t;
	ulong i; int nr;

	i = cheap_key(v);
	nr = ((L*TWIDTH)+Tally);	/* Tally was set by cheap_key */
	t = layers[nr];
	if (!t) return;

	t = splay(i, t);
	if (i == t->key)
	{	Vertex *z, *y = (Vertex *) 0;
		/* find v on the same-key chain, y trails one behind */
		for (z = t; z && z != v; y = z, z = z->lnk)
			;
		if (z != v) goto bad;
		if (y)	/* v is not the chain head */
		{	y->lnk = z->lnk;
			z->lnk = (Vertex *) 0;
			layers[nr] = t;
			return;
		} else if (z->lnk) /* z == t == v */
		{	y = z->lnk;	/* promote next chain member */
			y->left = t->left;
			y->right = t->right;
			t->left = t->right = t->lnk = (Vertex *) 0;
			layers[nr] = y;
			return;
		}
		/* delete the node itself */
		if (!t->left)
		{	x = t->right;
		} else
		{	x = splay(i, t->left);	/* max of left subtree to root */
			x->right = t->right;
		}
		t->left = t->right = t->lnk = (Vertex *) 0;
		layers[nr] = x;
		return;
	}
bad:	Uerror("cannot happen delete");
}
11034 #endif
11035 #if defined(MA) && (defined(W_XPT) || defined(R_XPT))
11036 static Vertex **temptree;
11037 static char wbuf[4096];
11038 static int WCNT = 4096, wcnt=0;
11039 static uchar stacker[MA+1];
11040 static ulong stackcnt = 0;
11041 extern double nstates, nlinks, truncs, truncs2;
11042
/*
 * Buffered checkpoint write: append n bytes from b to the 4096-byte
 * buffer wbuf, flushing it to fd first when it would overflow.
 * NOTE(review): assumes n <= 4096; a larger n would overflow wbuf
 * even after the flush.  All callers in this file write at most
 * sizeof(double) bytes per call -- confirm if new callers are added.
 */
static void
xwrite(int fd, char *b, int n)
{
	if (wcnt+n >= 4096)
	{	write(fd, wbuf, wcnt);	/* flush what is buffered */
		wcnt = 0;
	}
	memcpy(&wbuf[wcnt], b, n);
	wcnt += n;
}
11053
11054 static void
11055 wclose(fd)
11056 {
11057 if (wcnt > 0)
11058 write(fd, wbuf, wcnt);
11059 wcnt = 0;
11060 close(fd);
11061 }
11062
/*
 * Write one vertex to the checkpoint file in the format r_layer()
 * reads back: first the vertex's own address (used as its key on
 * reload), then for every transition a 3-byte record -- tag 0,
 * from-symbol, to-symbol -- followed by the destination address.
 * An edge's extra single symbol (e->s) is emitted as a separate
 * width-1 record with the same destination.
 */
static void
w_vertex(int fd, Vertex *v)
{	char t[3]; int i; Edge *e;

	xwrite(fd, (char *) &v, sizeof(Vertex *));	/* address == reload key */
	t[0] = 0;	/* edge-record tag */
	for (i = 0; i < 2; i++)
		if (v->dst[i])
		{	t[1] = v->from[i], t[2] = v->to[i];
			xwrite(fd, t, 3);
			xwrite(fd, (char *) &(v->dst[i]), sizeof(Vertex *));
		}
	for (e = v->Succ; e; e = e->Nxt)
	{	t[1] = e->From, t[2] = e->To;
		xwrite(fd, t, 3);
		xwrite(fd, (char *) &(e->Dst), sizeof(Vertex *));

		if (e->s)	/* extra single symbol as its own record */
		{	t[1] = t[2] = e->S;
			xwrite(fd, t, 3);
			xwrite(fd, (char *) &(e->Dst), sizeof(Vertex *));
		} }
}
11086
11087 static void
11088 w_layer(int fd, Vertex *v)
11089 { uchar c=1;
11090
11091 if (!v) return;
11092 xwrite(fd, (char *) &c, 1);
11093 w_vertex(fd, v);
11094 w_layer(fd, v->lnk);
11095 w_layer(fd, v->left);
11096 w_layer(fd, v->right);
11097 }
11098
11099 void
11100 w_xpoint(void)
11101 { int fd; char nm[64];
11102 int i, j; uchar c;
11103 static uchar xwarned = 0;
11104
11105 sprintf(nm, "%s.xpt", PanSource);
11106 if ((fd = creat(nm, 0666)) <= 0)
11107 if (!xwarned)
11108 { xwarned = 1;
11109 printf("cannot creat checkpoint file\n");
11110 return;
11111 }
11112 xwrite(fd, (char *) &nstates, sizeof(double));
11113 xwrite(fd, (char *) &truncs, sizeof(double));
11114 xwrite(fd, (char *) &truncs2, sizeof(double));
11115 xwrite(fd, (char *) &nlinks, sizeof(double));
11116 xwrite(fd, (char *) &dfa_depth, sizeof(int));
11117 xwrite(fd, (char *) &R, sizeof(Vertex *));
11118 xwrite(fd, (char *) &F, sizeof(Vertex *));
11119 xwrite(fd, (char *) &NF, sizeof(Vertex *));
11120
11121 for (j = 0; j < TWIDTH; j++)
11122 for (i = 0; i < dfa_depth+1; i++)
11123 { w_layer(fd, layers[i*TWIDTH+j]);
11124 c = 2; xwrite(fd, (char *) &c, 1);
11125 }
11126 wclose(fd);
11127 }
11128
/*
 * Buffered checkpoint read, the inverse of xwrite(): deliver n bytes
 * into b from wbuf, refilling the buffer from fd when fewer than n
 * bytes remain.  wcnt counts the unconsumed bytes at the tail of the
 * buffer, so the next unread byte is wbuf[WCNT-wcnt].  When a refill
 * is needed, the m leftover bytes are copied out first and the rest
 * of the request is satisfied from the fresh buffer.  Short reads
 * abort via Uerror().
 */
static void
xread(int fd, char *b, int n)
{	int m = wcnt; int delta = 0;
	if (m < n)	/* not enough buffered: drain, then refill */
	{	if (m > 0) memcpy(b, &wbuf[WCNT-m], m);
		delta = m;	/* bytes already delivered */
		WCNT = wcnt = read(fd, wbuf, 4096);
		if (wcnt < n-m)
			Uerror("xread failed -- insufficient data");
		n -= m;	/* remainder comes from the new buffer */
	}
	memcpy(&b[delta], &wbuf[WCNT-wcnt], n);
	wcnt -= n;
}
11143
11144 static void
11145 x_cleanup(Vertex *c)
11146 { Edge *e; /* remove the tree and edges from c */
11147 if (!c) return;
11148 for (e = c->Succ; e; e = e->Nxt)
11149 x_cleanup(e->Dst);
11150 recyc_vertex(c);
11151 }
11152
11153 static void
11154 x_remove(void)
11155 { Vertex *tmp; int i, s;
11156 int r, j;
11157 /* double-check: */
11158 stacker[dfa_depth-1] = 0; r = dfa_store(stacker);
11159 stacker[dfa_depth-1] = 4; j = dfa_member(dfa_depth-1);
11160 if (r != 1 || j != 0)
11161 { printf("%d: ", stackcnt);
11162 for (i = 0; i < dfa_depth; i++)
11163 printf("%d,", stacker[i]);
11164 printf(" -- not a stackstate <o:%d,4:%d>\n", r, j);
11165 return;
11166 }
11167 stacker[dfa_depth-1] = 1;
11168 s = dfa_member(dfa_depth-1);
11169
11170 { tmp = F; F = NF; NF = tmp; } /* complement */
11171 if (s) dfa_store(stacker);
11172 stacker[dfa_depth-1] = 0;
11173 dfa_store(stacker);
11174 stackcnt++;
11175 { tmp = F; F = NF; NF = tmp; }
11176 }
11177
/*
 * Enumerate every byte string spelled by the reversed tree t, filling
 * stacker[] from position k down to 1; when k reaches 0 a complete
 * stack state is in stacker[] and x_remove() deletes it from the
 * state tables.  Both an edge's symbol range and its optional extra
 * single symbol (e->s/e->S) are enumerated.
 */
static void
x_rm_stack(Vertex *t, int k)
{	int j; Edge *e;

	if (k == 0)
	{	x_remove();	/* stacker[] is complete */
		return;
	}
	if (t)
	for (e = t->Succ; e; e = e->Nxt)
	{	for (j = e->From; j <= (int) e->To; j++)
		{	stacker[k] = (uchar) j;
			x_rm_stack(e->Dst, k-1);
		}
		if (e->s)	/* the edge's extra single symbol */
		{	stacker[k] = e->S;
			x_rm_stack(e->Dst, k-1);
	}	}
}
11197
/*
 * Insert vertex v, keyed by the address saved in v->key, into the
 * temporary splay tree of checkpoint layer L.  A duplicate key is an
 * error except for the three distinguished vertices R, F and NF,
 * which legitimately appear twice during reload; in that case v is
 * recycled and the existing vertex is returned.  Returns the vertex
 * that now represents this key (v itself, or the survivor).
 */
static Vertex *
insert_withkey(Vertex *v, int L)
{	Vertex *new, *t = temptree[L];

	if (!t) { temptree[L] = v; return v; }	/* first entry in layer */
	t = splay(v->key, t);
	if (v->key < t->key)
	{	new = v;	/* v becomes root, t goes right */
		new->left = t->left;
		new->right = t;
		t->left = (Vertex *) 0;
	} else if (v->key > t->key)
	{	new = v;	/* v becomes root, t goes left */
		new->right = t->right;
		new->left = t;
		t->right = (Vertex *) 0;
	} else
	{	if (t != R && t != F && t != NF)
			Uerror("double insert, bad checkpoint data");
		else
		{	recyc_vertex(v);	/* keep the existing vertex */
			new = t;
	}	}
	temptree[L] = new;

	return new;
}
11225
11226 static Vertex *
11227 find_withkey(Vertex *v, int L)
11228 { Vertex *t = temptree[L];
11229 if (t)
11230 { temptree[L] = t = splay((ulong) v, t);
11231 if (t->key == (ulong) v)
11232 return t;
11233 }
11234 Uerror("not found error, bad checkpoint data");
11235 return (Vertex *) 0;
11236 }
11237
/*
 * Read one checkpoint layer back from fd, the inverse of w_layer()/
 * w_vertex(): tag 1 starts a new vertex (its saved address becomes
 * the key in temptree layer n), tag 0 adds an edge to the most recent
 * vertex, tag 2 ends the layer.
 * NOTE(review): v is only assigned by a tag-1 record, so a tag-0
 * record before the first vertex would use v uninitialized; the
 * writer always emits the vertex record first, so this presumably
 * cannot happen with well-formed data -- confirm.
 */
void
r_layer(int fd, int n)
{	Vertex *v;
	Edge *e;
	char c, t[2];

	for (;;)
	{	xread(fd, &c, 1);
		if (c == 2) break;	/* end-of-layer marker */
		if (c == 1)	/* vertex record */
		{	v = new_vertex();
			xread(fd, (char *) &(v->key), sizeof(Vertex *));
			v = insert_withkey(v, n);
		} else /* c == 0 */
		{	e = new_edge((Vertex *) 0);
			xread(fd, t, 2);	/* from, to symbols */
			e->From = t[0];
			e->To = t[1];
			xread(fd, (char *) &(e->Dst), sizeof(Vertex *));
			insert_edge(v, e);
	}	}
}
11260
11261 static void
11262 v_fix(Vertex *t, int nr)
11263 { int i; Edge *e;
11264
11265 if (!t) return;
11266
11267 for (i = 0; i < 2; i++)
11268 if (t->dst[i])
11269 t->dst[i] = find_withkey(t->dst[i], nr);
11270
11271 for (e = t->Succ; e; e = e->Nxt)
11272 e->Dst = find_withkey(e->Dst, nr);
11273
11274 v_fix(t->left, nr);
11275 v_fix(t->right, nr);
11276 }
11277
/*
 * Move the vertices of one temporary checkpoint tree into the regular
 * layers[] tables: post-order traversal so only leaves are detached,
 * each vertex is re-keyed and inserted via insert_it(), and the num
 * reference count of every destination is raised by the number of
 * symbols that reach it (range width plus the optional extra symbol).
 */
static void
v_insert(Vertex *t, int nr)
{	Edge *e; int i;

	if (!t) return;
	v_insert(t->left, nr);
	v_insert(t->right, nr);

	/* remove only leafs from temptree */
	t->left = t->right = t->lnk = (Vertex *) 0;
	insert_it(t, nr); /* into layers */
	for (i = 0; i < 2; i++)
		if (t->dst[i])
			t->dst[i]->num += (t->to[i] - t->from[i] + 1);
	for (e = t->Succ; e; e = e->Nxt)
		e->Dst->num += (e->To - e->From + 1 + e->s);
}
11295
11296 static void
11297 x_fixup(void)
11298 { int i;
11299
11300 for (i = 0; i < dfa_depth; i++)
11301 v_fix(temptree[i], (i+1));
11302
11303 for (i = dfa_depth; i >= 0; i--)
11304 v_insert(temptree[i], i);
11305 }
11306
/*
 * Search the splay tree rooted at t for a vertex that reaches the
 * target address `want' on symbol 0 but NOT on symbol 4 -- i.e. a
 * predecessor of the accept vertex corresponding to a stored stack
 * state (trailing byte 0) that is not also a matched state (trailing
 * byte 4).  Returns the first such vertex, or NULL.
 */
static Vertex *
x_tail(Vertex *t, ulong want)
{	int i, yes, no; Edge *e; Vertex *v = (Vertex *) 0;

	if (!t) return v;

	yes = no = 0;
	for (i = 0; i < 2; i++)
		if ((ulong) t->dst[i] == want)
		{	/* was t->from[i] <= 0 && t->to[i] >= 0 */
			/* but from and to are uchar */
			if (t->from[i] == 0)	/* range covers symbol 0 */
				yes = 1;
			else
			if (t->from[i] <= 4 && t->to[i] >= 4)	/* covers symbol 4 */
				no = 1;
		}

	for (e = t->Succ; e; e = e->Nxt)
		if ((ulong) e->Dst == want)
		{	/* was INRANGE(e,0) but From and To are uchar */
			if ((e->From == 0) || (e->s==1 && e->S==0))
				yes = 1;
			else if (INRANGE(e, 4))
				no = 1;
		}
	if (yes && !no) return t;
	v = x_tail(t->left, want); if (v) return v;
	v = x_tail(t->right, want);	if (v) return v;
	return (Vertex *) 0;
}
11338
/*
 * Build the reverse of the automaton below vertex c: for every vertex
 * in the splay tree t (layer nr) that has a transition into c->key,
 * create a fresh copy (keyed by the predecessor's key) and hang it as
 * a successor of c with the same symbol range, then recurse one layer
 * down.  The resulting reversed tree is walked by x_rm_stack().
 * NOTE(review): the edge-list loop below recurses into
 * temptree[nr-1] without the `nr > 0' guard that the dst[] loop has;
 * presumably nr is always positive when that branch is taken --
 * confirm before relying on it at layer 0.
 */
static void
x_anytail(Vertex *t, Vertex *c, int nr)
{	int i; Edge *e, *f; Vertex *v;

	if (!t) return;

	for (i = 0; i < 2; i++)
		if ((ulong) t->dst[i] == c->key)
		{	v = new_vertex(); v->key = t->key;	/* reversed copy */
			f = new_edge(v);
			f->From = t->from[i];
			f->To = t->to[i];
			f->Nxt = c->Succ;	/* prepend to c's successors */
			c->Succ = f;
			if (nr > 0)
				x_anytail(temptree[nr-1], v, nr-1);
		}

	for (e = t->Succ; e; e = e->Nxt)
		if ((ulong) e->Dst == c->key)
		{	v = new_vertex(); v->key = t->key;
			f = new_edge(v);
			f->From = e->From;
			f->To = e->To;
			f->s = e->s;
			f->S = e->S;
			f->Nxt = c->Succ;
			c->Succ = f;
			x_anytail(temptree[nr-1], v, nr-1);
		}

	x_anytail(t->left, c, nr);
	x_anytail(t->right, c, nr);
}
11373
/*
 * Start the reversed-automaton construction used to strip stack
 * states after a checkpoint reload: find a predecessor of F that is
 * reached on symbol 0 but not 4 (x_tail), then copy, with pointers
 * reversed, every chain of predecessors leading to it (x_anytail).
 * Returns the root of the reversed tree, or NULL if no stack states
 * are stored.
 */
static Vertex *
x_cpy_rev(void)
{	Vertex *c, *v;	/* find 0 and !4 predecessor of F */

	v = x_tail(temptree[dfa_depth-1], F->key);
	if (!v) return (Vertex *) 0;

	c = new_vertex(); c->key = v->key;

	/* every node on dfa_depth-2 that has v->key as succ */
	/* make copy and let c point to these (reversing ptrs) */

	x_anytail(temptree[dfa_depth-2], c, dfa_depth-2);

	return c;
}
11390
11391 void
11392 r_xpoint(void)
11393 { int fd; char nm[64]; Vertex *d;
11394 int i, j;
11395
11396 wcnt = 0;
11397 sprintf(nm, "%s.xpt", PanSource);
11398 if ((fd = open(nm, 0)) < 0) /* O_RDONLY */
11399 Uerror("cannot open checkpoint file");
11400
11401 xread(fd, (char *) &nstates, sizeof(double));
11402 xread(fd, (char *) &truncs, sizeof(double));
11403 xread(fd, (char *) &truncs2, sizeof(double));
11404 xread(fd, (char *) &nlinks, sizeof(double));
11405 xread(fd, (char *) &dfa_depth, sizeof(int));
11406
11407 if (dfa_depth != MA+a_cycles)
11408 Uerror("bad dfa_depth in checkpoint file");
11409
11410 path = (Vertex **) emalloc((dfa_depth+1)*sizeof(Vertex *));
11411 layers = (Vertex **) emalloc(TWIDTH*(dfa_depth+1)*sizeof(Vertex *));
11412 temptree = (Vertex **) emalloc((dfa_depth+2)*sizeof(Vertex *));
11413 lastword = (uchar *) emalloc((dfa_depth+1)*sizeof(uchar));
11414 lastword[dfa_depth] = lastword[0] = 255;
11415
11416 path[0] = R = new_vertex();
11417 xread(fd, (char *) &R->key, sizeof(Vertex *));
11418 R = insert_withkey(R, 0);
11419
11420 F = new_vertex();
11421 xread(fd, (char *) &F->key, sizeof(Vertex *));
11422 F = insert_withkey(F, dfa_depth);
11423
11424 NF = new_vertex();
11425 xread(fd, (char *) &NF->key, sizeof(Vertex *));
11426 NF = insert_withkey(NF, dfa_depth);
11427
11428 for (j = 0; j < TWIDTH; j++)
11429 for (i = 0; i < dfa_depth+1; i++)
11430 r_layer(fd, i);
11431
11432 if (wcnt != 0) Uerror("bad count in checkpoint file");
11433
11434 d = x_cpy_rev();
11435 x_fixup();
11436 stacker[dfa_depth-1] = 0;
11437 x_rm_stack(d, dfa_depth-2);
11438 x_cleanup(d);
11439 close(fd);
11440
11441 printf("pan: removed %d stackstates\n", stackcnt);
11442 nstates -= (double) stackcnt;
11443 }
11444 #endif
11445 #ifdef VERI
/*
 * Check never-claim state st after a claim move: report an error when
 * the claim automaton reached its end state (the claim's behavior was
 * matched, i.e. the correctness property is violated) or a declared
 * stop state.  uerror() records the error; both tests run in order.
 */
void
check_claim(int st)
{
	if (st == endclaim)
		uerror("claim violated!");
	if (stopstate[VERI][st])
		uerror("end state in claim reached");
}
11454 #endif
/*
 * Print all global (state-vector) variables of the buffer.spin model,
 * taken from the current state `now'.  Generated by Spin from the
 * model's global declarations; used when reproducing error trails.
 */
void
c_globals(void)
{	/* int i; */
	printf("global vars:\n");
	printf("	byte write_off:	%d\n", now.write_off);
	{	int l_in;
		for (l_in = 0; l_in < 2; l_in++)
		{
			printf("	byte commit_count[%d]:	%d\n", l_in, now.commit_count[l_in]);
		}
	}
	printf("	byte read_off:	%d\n", now.read_off);
	{	int l_in;
		for (l_in = 0; l_in < 2; l_in++)
		{
			printf("	byte retrieve_count[%d]:	%d\n", l_in, now.retrieve_count[l_in]);
		}
	}
	printf("	byte events_lost:	%d\n", now.events_lost);
	printf("	byte refcount:	%d\n", now.refcount);
	{	int l_in;
		for (l_in = 0; l_in < 4; l_in++)
		{
			printf("	bit buffer_use[%d]:	%d\n", l_in, now.buffer_use[l_in]);
		}
	}
}
/*
 * Print the local variables of process pid, whose proctype index is
 * tp: 4 = :init:, 3 = (no locals), 2 = reader, 1 = tracer,
 * 0 = switcher.  The P<n> structs and pptr() come from pan.h.
 * Generated by Spin; used when reproducing error trails.
 */
void
c_locals(int pid, int tp)
{	/* int i; */
	switch(tp) {
	case 4:
		printf("local vars proc %d (:init:):\n", pid);
		printf("	byte i:	%d\n", ((P4 *)pptr(pid))->i);
		printf("	byte j:	%d\n", ((P4 *)pptr(pid))->j);
		printf("	byte sum:	%d\n", ((P4 *)pptr(pid))->sum);
		printf("	byte commit_sum:	%d\n", ((P4 *)pptr(pid))->commit_sum);
		break;
	case 3:
		/* none */
		break;
	case 2:
		printf("local vars proc %d (reader):\n", pid);
		printf("	byte i:	%d\n", ((P2 *)pptr(pid))->i);
		printf("	byte j:	%d\n", ((P2 *)pptr(pid))->j);
		printf("	byte tmp_retrieve:	%d\n", ((P2 *)pptr(pid))->tmp_retrieve);
		printf("	byte lwrite_off:	%d\n", ((P2 *)pptr(pid))->lwrite_off);
		printf("	byte lcommit_count:	%d\n", ((P2 *)pptr(pid))->lcommit_count);
		break;
	case 1:
		printf("local vars proc %d (tracer):\n", pid);
		printf("	byte size:	%d\n", ((P1 *)pptr(pid))->size);
		printf("	byte prev_off:	%d\n", ((P1 *)pptr(pid))->prev_off);
		printf("	byte new_off:	%d\n", ((P1 *)pptr(pid))->new_off);
		printf("	byte tmp_commit:	%d\n", ((P1 *)pptr(pid))->tmp_commit);
		printf("	byte i:	%d\n", ((P1 *)pptr(pid))->i);
		printf("	byte j:	%d\n", ((P1 *)pptr(pid))->j);
		break;
	case 0:
		printf("local vars proc %d (switcher):\n", pid);
		printf("	byte prev_off:	%d\n", ((P0 *)pptr(pid))->prev_off);
		printf("	byte new_off:	%d\n", ((P0 *)pptr(pid))->new_off);
		printf("	byte tmp_commit:	%d\n", ((P0 *)pptr(pid))->tmp_commit);
		printf("	byte size:	%d\n", ((P0 *)pptr(pid))->size);
		break;
	}
}
/*
 * Print a symbolic (mtype) value; this model declares no mtype
 * constants, so every value falls through to the numeric default.
 */
void
printm(int x)
{
	switch (x) {
	default:
		Printf("%d", x);
		break;
	}
}
/*
 * No channel contents to dump for this model; the increment only
 * silences unused-parameter warnings.
 */
void
c_chandump(int unused)
{
	unused++; /* avoid complaints */
}
This page took 0.284801 seconds and 4 git commands to generate.