1 /*** Generated by Spin Version 5.1.6 -- 9 May 2008 ***/
2 /*** From source: buffer.spin ***/
5 #define _FILE_OFFSET_BITS 64
14 #if defined(WIN32) || defined(WIN64)
18 #include <sys/times.h>
20 #include <sys/types.h>
23 #define Offsetof(X, Y) ((unsigned long)(&(((X *)0)->Y)))
25 #define max(a,b) (((a)<(b)) ? (b) : (a))
28 int Printf(const char *fmt
, ...); /* prototype only */
34 State A_Root
; /* seed-state for cycles */
35 State now
; /* the full state-vector */
37 #if defined(C_States) && defined(HAS_TRACK)
39 c_update(uchar
*p_t_r
)
42 printf("c_update %u\n", p_t_r
);
46 c_revert(uchar
*p_t_r
)
49 printf("c_revert %u\n", p_t_r
);
78 #define onstack_now() (LL[trpt->j6] && LL[trpt->j7])
79 #define onstack_put() LL[trpt->j6]++; LL[trpt->j7]++
80 #define onstack_zap() LL[trpt->j6]--; LL[trpt->j7]--
82 #if !defined(SAFETY) && !defined(NOCOMP)
83 #define V_A (((now._a_t&1)?2:1) << (now._a_t&2))
84 #define A_V (((now._a_t&1)?1:2) << (now._a_t&2))
95 #define onstack_put() ;
96 #define onstack_zap() gstore((char *) &now, vsize, 4)
98 #if defined(FULLSTACK) && !defined(BITSTATE)
99 #define onstack_put() trpt->ostate = Lstate
100 #define onstack_zap() { \
102 trpt->ostate->tagged = \
103 (S_A)? (trpt->ostate->tagged&~V_A) : 0; \
110 #if !defined(NO_RESIZE) && !defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(SPACE) && NCORE==1
118 #if defined(BITSTATE) && !defined(NOREDUCE) && !defined(SAFETY)
119 unsigned int proviso
;
122 #if defined(CHECK) || (defined(COLLAPSE) && !defined(FULLSTACK))
125 #if !defined(SAFETY) || defined(REACH)
129 /* could cost 1 extra word: 4 bytes if 32-bit and 8 bytes if 64-bit */
131 uchar cpu_id
; /* id of cpu that created the state */
141 #if defined(AUTO_RESIZE) && !defined(BITSTATE)
147 typedef struct Trail
{
148 int st
; /* current state */
149 uchar pr
; /* process id */
150 uchar tau
; /* 8 bit-flags */
151 uchar o_pm
; /* 8 more bit-flags */
153 Meaning of bit
-flags
:
154 tau
&1 -> timeout enabled
155 tau
&2 -> request to enable timeout
1 level
up (in claim
)
156 tau
&4 -> current transition is a claim move
157 tau
&8 -> current transition is an atomic move
158 tau
&16 -> last move was truncated on stack
159 tau
&32 -> current transition is a preselected move
160 tau
&64 -> at least one next state is
not on the stack
161 tau
&128 -> current transition is a stutter move
162 o_pm
&1 -> the current pid moved
-- implements
else
163 o_pm
&2 -> this is an acceptance state
164 o_pm
&4 -> this is a progress state
165 o_pm
&8 -> fairness alg rule
1 undo mark
166 o_pm
&16 -> fairness alg rule
3 undo mark
167 o_pm
&32 -> fairness alg rule
2 undo mark
168 o_pm
&64 -> the current proc applied rule2
169 o_pm
&128 -> a fairness
, dummy move
- all procs blocked
172 uchar n_succ
; /* nr of successor states */
174 #if defined(FULLSTACK) && defined(MA) && !defined(BFS)
178 uchar o_n
, o_ot
; /* to save locals */
182 #if nstates_event<256
185 unsigned short o_event
;
195 #if defined(HAS_UNLESS) && !defined(BFS)
196 int e_state
; /* if escape trans - state of origin */
198 #if (defined(FULLSTACK) && !defined(MA)) || defined(BFS) || (NCORE>1)
199 struct H_el
*ostate
; /* pointer to stored state */
201 #if defined(CNTRSTACK) && !defined(BFS)
206 /* based on Qadeer&Rehof, Tacas 2005, LNCS 3440, pp. 93-107 */
208 #error "-DSCHED cannot be combined with -DNCORE (yet)"
224 long omaxdepth
=10000;
233 double quota
; /* time limit */
243 double memcnt
= (double) 0;
244 double memlim
= (double) (1<<30); /* 1 GB */
246 double mem_reserved
= (double) 0;
251 static long left
= 0L;
252 static double fragment
= (double) 0;
253 static unsigned long grow
;
255 unsigned int HASH_CONST
[] = {
256 /* asuming 4 bytes per int */
257 0x88888EEF, 0x00400007,
258 0x04c11db7, 0x100d4e63,
259 0x0fc22f87, 0x3ff0c3ff,
260 0x38e84cd7, 0x02b148e9,
261 0x98b2e49d, 0xb616d379,
262 0xa5247fd9, 0xbae92a15,
263 0xb91c8bc5, 0x8e5880f3,
264 0xacd7c069, 0xb4c44bb3,
265 0x2ead1fb7, 0x8e428171,
266 0xdbebd459, 0x828ae611,
267 0x6cb25933, 0x86cdd651,
268 0x9e8f5f21, 0xd5f8d8e7,
269 0x9c4e956f, 0xb5cf2c71,
270 0x2e805a6d, 0x33fc3a55,
271 0xaf203ed1, 0xe31f5909,
272 0x5276db35, 0x0c565ef7,
273 0x273d1aa5, 0x8923b1dd,
280 int done
=0, errors
=0, Nrun
=1;
282 char *c_stack_start
= (char *) 0;
283 double nstates
=0, nlinks
=0, truncs
=0, truncs2
=0;
284 double nlost
=0, nShadow
=0, hcmp
=0, ngrabs
=0;
285 #if defined(ZAPH) && defined(BITSTATE)
290 double midrv
=0, failedrv
=0, revrv
=0;
292 unsigned long nr_states
=0; /* nodes in DFA */
293 long Fa
=0, Fh
=0, Zh
=0, Zn
=0;
294 long PUT
=0, PROBE
=0, ZAPS
=0;
295 long Ccheck
=0, Cholds
=0;
296 int a_cycles
=0, upto
=1, strict
=0, verbose
= 0, signoff
= 0;
298 int gui
= 0, coltrace
= 0, readtrail
= 0;
299 int whichtrail
= 0, onlyproc
= -1, silent
= 0;
301 int state_tables
=0, fairness
=0, no_rck
=0, Nr_Trails
=0;
306 unsigned long mask
, nmask
;
308 int ssize
=23; /* 1 Mb */
310 int ssize
=19; /* 512K slots */
312 int hmax
=0, svmax
=0, smax
=0;
314 uchar
*noptr
; /* used by macro Pptr(x) */
316 void logval(char *, int);
317 void dumpranges(void);
321 extern void dfa_init(unsigned short);
322 extern int dfa_member(unsigned long);
323 extern int dfa_store(uchar
*);
324 unsigned int maxgs
= 0;
328 State comp_now
__attribute__ ((aligned (8)));
329 /* gcc 64-bit aligned for Itanium2 systems */
330 /* MAJOR runtime penalty if not used on those systems */
332 State comp_now
; /* compressed state vector */
336 uchar
*Mask
= (uchar
*) &comp_msk
;
339 static char *scratch
= (char *) &comp_tmp
;
341 Stack
*stack
; /* for queues, processes */
342 Svtack
*svtack
; /* for old state vectors */
344 static unsigned int hfns
= 3; /* new default */
346 static unsigned long j1
;
347 static unsigned long K1
, K2
;
348 static unsigned long j2
, j3
, j4
;
352 static long A_depth
= 0;
355 long nr_handoffs
= 0;
357 static uchar warned
= 0, iterative
= 0, exclusive
= 0, like_java
= 0, every_error
= 0;
358 static uchar noasserts
= 0, noends
= 0, bounded
= 0;
359 #if SYNC>0 && ASYNC==0
360 void set_recvs(void);
364 #define IfNotBlocked if (boq != -1) continue;
365 #define UnBlock boq = -1
367 #define IfNotBlocked /* cannot block */
368 #define UnBlock /* don't bother */
372 int (*bstore
)(char *, int);
373 int bstore_reg(char *, int);
374 int bstore_mod(char *, int);
376 void active_procs(void);
378 void do_the_search(void);
379 void find_shorter(int);
380 void iniglobals(void);
384 void ungrab_ints(int *, int);
386 #define Index(x, y) Boundcheck(x, y, II, tt, t)
388 #define Index(x, y) x
/* Air[i] = number of trailing pad bytes for proctype i, taken from the
 * generated constants Air0..Air5.  Used later (in the Addproc code) to
 * flag alignment-padding positions in Mask[]:
 *     for (k = 1; k <= Air[n]; k++) Mask[vsize - k] = 1;  ("pad" loop)
 * NOTE(review): values of Air0..Air5 are emitted elsewhere by Spin and
 * are not visible in this chunk. */
390 short Air
[] = { (short) Air0
, (short) Air1
, (short) Air2
, (short) Air3
, (short) Air4
, (short) Air5
};
393 { int j
, h
= now
._nr_pr
;
397 uchar
*o_this
= this;
400 if (TstOnly
) return (h
< MAXPROC
);
403 /* redefine Index only within this procedure */
405 #define Index(x, y) Boundcheck(x, y, 0, 0, 0)
408 Uerror("too many processes");
410 case 0: j
= sizeof(P0
); break;
411 case 1: j
= sizeof(P1
); break;
412 case 2: j
= sizeof(P2
); break;
413 case 3: j
= sizeof(P3
); break;
414 case 4: j
= sizeof(P4
); break;
415 case 5: j
= sizeof(P5
); break;
416 default: Uerror("bad proc - addproc");
419 proc_skip
[h
] = WS
-(vsize
%WS
);
423 for (k
= vsize
+ (int) proc_skip
[h
]; k
> vsize
; k
--)
424 Mask
[k
-1] = 1; /* align */
426 vsize
+= (int) proc_skip
[h
];
427 proc_offset
[h
] = vsize
;
431 write(svfd
, (uchar
*) &dummy
, sizeof(int)); /* mark */
432 write(svfd
, (uchar
*) &h
, sizeof(int));
433 write(svfd
, (uchar
*) &n
, sizeof(int));
435 write(svfd
, (uchar
*) &proc_offset
[h
], sizeof(int));
437 write(svfd
, (uchar
*) &proc_offset
[h
], sizeof(short));
439 write(svfd
, (uchar
*) &now
, vprefix
-4*sizeof(int)); /* padd */
443 if (fairness
&& ((int) now
._nr_pr
+ 1 >= (8*NFAIR
)/2))
444 { printf("pan: error: too many processes -- current");
445 printf(" max is %d procs (-DNFAIR=%d)\n",
446 (8*NFAIR
)/2 - 2, NFAIR
);
447 printf("\trecompile with -DNFAIR=%d\n",
456 for (k
= 1; k
<= Air
[n
]; k
++)
457 Mask
[vsize
- k
] = 1; /* pad */
458 Mask
[vsize
-j
] = 1; /* _pid */
460 hmax
= max(hmax
, vsize
);
461 if (vsize
>= VECTORSZ
)
462 { printf("pan: error, VECTORSZ too small, recompile pan.c");
463 printf(" with -DVECTORSZ=N with N>%d\n", (int) vsize
);
466 memset((char *)pptr(h
), 0, j
);
468 if (BASE
> 0 && h
> 0)
469 ((P0
*)this)->_pid
= h
-BASE
;
471 ((P0
*)this)->_pid
= h
;
474 ((P5
*)pptr(h
))->_t
= 5;
475 ((P5
*)pptr(h
))->_p
= 0;
480 ((P4
*)pptr(h
))->_t
= 4;
481 ((P4
*)pptr(h
))->_p
= 42; reached4
[42]=1;
484 ((P4
*)pptr(h
))->i
= 0;
485 ((P4
*)pptr(h
))->j
= 0;
486 ((P4
*)pptr(h
))->sum
= 0;
487 ((P4
*)pptr(h
))->commit_sum
= 0;
489 logval(":init::i", ((P4
*)pptr(h
))->i
);
490 logval(":init::j", ((P4
*)pptr(h
))->j
);
491 logval(":init::sum", ((P4
*)pptr(h
))->sum
);
492 logval(":init::commit_sum", ((P4
*)pptr(h
))->commit_sum
);
498 case 3: /* cleaner */
499 ((P3
*)pptr(h
))->_t
= 3;
500 ((P3
*)pptr(h
))->_p
= 8; reached3
[8]=1;
510 ((P2
*)pptr(h
))->_t
= 2;
511 ((P2
*)pptr(h
))->_p
= 28; reached2
[28]=1;
514 ((P2
*)pptr(h
))->i
= 0;
515 ((P2
*)pptr(h
))->j
= 0;
516 ((P2
*)pptr(h
))->tmp_retrieve
= 0;
517 ((P2
*)pptr(h
))->lwrite_off
= 0;
518 ((P2
*)pptr(h
))->lcommit_count
= 0;
520 logval("reader:i", ((P2
*)pptr(h
))->i
);
521 logval("reader:j", ((P2
*)pptr(h
))->j
);
522 logval("reader:tmp_retrieve", ((P2
*)pptr(h
))->tmp_retrieve
);
523 logval("reader:lwrite_off", ((P2
*)pptr(h
))->lwrite_off
);
524 logval("reader:lcommit_count", ((P2
*)pptr(h
))->lcommit_count
);
531 ((P1
*)pptr(h
))->_t
= 1;
532 ((P1
*)pptr(h
))->_p
= 3; reached1
[3]=1;
535 ((P1
*)pptr(h
))->size
= 1;
536 ((P1
*)pptr(h
))->prev_off
= 0;
537 ((P1
*)pptr(h
))->new_off
= 0;
538 ((P1
*)pptr(h
))->tmp_commit
= 0;
539 ((P1
*)pptr(h
))->i
= 0;
540 ((P1
*)pptr(h
))->j
= 0;
542 logval("tracer:size", ((P1
*)pptr(h
))->size
);
543 logval("tracer:prev_off", ((P1
*)pptr(h
))->prev_off
);
544 logval("tracer:new_off", ((P1
*)pptr(h
))->new_off
);
545 logval("tracer:tmp_commit", ((P1
*)pptr(h
))->tmp_commit
);
546 logval("tracer:i", ((P1
*)pptr(h
))->i
);
547 logval("tracer:j", ((P1
*)pptr(h
))->j
);
553 case 0: /* switcher */
554 ((P0
*)pptr(h
))->_t
= 0;
555 ((P0
*)pptr(h
))->_p
= 11; reached0
[11]=1;
558 ((P0
*)pptr(h
))->prev_off
= 0;
559 ((P0
*)pptr(h
))->new_off
= 0;
560 ((P0
*)pptr(h
))->tmp_commit
= 0;
561 ((P0
*)pptr(h
))->size
= 0;
563 logval("switcher:prev_off", ((P0
*)pptr(h
))->prev_off
);
564 logval("switcher:new_off", ((P0
*)pptr(h
))->new_off
);
565 logval("switcher:tmp_commit", ((P0
*)pptr(h
))->tmp_commit
);
566 logval("switcher:size", ((P0
*)pptr(h
))->size
);
577 #define Index(x, y) Boundcheck(x, y, II, tt, t)
581 #if defined(BITSTATE) && defined(COLLAPSE)
582 /* just to allow compilation, to generate the error */
/* Stub for COLLAPSE+BITSTATE builds: exists only so the file compiles
 * (that combination is rejected elsewhere); always reports ordinal 0. */
long col_p(int i, char *z)
{
	(void) i;	/* unused in this stub */
	(void) z;	/* unused in this stub */
	return 0L;
}
/* Stub for COLLAPSE+BITSTATE builds: queue counterpart of col_p;
 * present only to satisfy the linker, always yields 0. */
long col_q(int i, char *z)
{
	(void) i;	/* unused in this stub */
	(void) z;	/* unused in this stub */
	return 0L;
}
589 col_p(int i
, char *z
)
590 { int j
, k
; unsigned long ordinal(char *, long, short);
592 P0
*ptr
= (P0
*) pptr(i
);
594 case 0: j
= sizeof(P0
); break;
595 case 1: j
= sizeof(P1
); break;
596 case 2: j
= sizeof(P2
); break;
597 case 3: j
= sizeof(P3
); break;
598 case 4: j
= sizeof(P4
); break;
599 case 5: j
= sizeof(P5
); break;
600 default: Uerror("bad proctype - collapse");
602 if (z
) x
= z
; else x
= scratch
;
603 y
= (char *) ptr
; k
= proc_offset
[i
];
604 for ( ; j
> 0; j
--, y
++)
605 if (!Mask
[k
++]) *x
++ = *y
;
606 for (j
= 0; j
< WS
-1; j
++)
609 if (z
) return (long) (x
- z
);
610 return ordinal(scratch
, x
-scratch
, (short) (2+ptr
->_t
));
617 memset((char *)&now
, 0, sizeof(State
));
618 vsize
= (unsigned long) (sizeof(State
) - VECTORSZ
);
622 /* optional provisioning statements, e.g. to */
623 /* set hidden variables, used as constants */
628 Maxbody
= max(Maxbody
, ((int) sizeof(P0
)));
629 Maxbody
= max(Maxbody
, ((int) sizeof(P1
)));
630 Maxbody
= max(Maxbody
, ((int) sizeof(P2
)));
631 Maxbody
= max(Maxbody
, ((int) sizeof(P3
)));
632 Maxbody
= max(Maxbody
, ((int) sizeof(P4
)));
633 Maxbody
= max(Maxbody
, ((int) sizeof(P5
)));
634 reached
[0] = reached0
;
635 reached
[1] = reached1
;
636 reached
[2] = reached2
;
637 reached
[3] = reached3
;
638 reached
[4] = reached4
;
639 reached
[5] = reached5
;
640 accpstate
[0] = (uchar
*) emalloc(nstates0
);
641 accpstate
[1] = (uchar
*) emalloc(nstates1
);
642 accpstate
[2] = (uchar
*) emalloc(nstates2
);
643 accpstate
[3] = (uchar
*) emalloc(nstates3
);
644 accpstate
[4] = (uchar
*) emalloc(nstates4
);
645 accpstate
[5] = (uchar
*) emalloc(nstates5
);
646 progstate
[0] = (uchar
*) emalloc(nstates0
);
647 progstate
[1] = (uchar
*) emalloc(nstates1
);
648 progstate
[2] = (uchar
*) emalloc(nstates2
);
649 progstate
[3] = (uchar
*) emalloc(nstates3
);
650 progstate
[4] = (uchar
*) emalloc(nstates4
);
651 progstate
[5] = (uchar
*) emalloc(nstates5
);
652 loopstate0
= loopstate
[0] = (uchar
*) emalloc(nstates0
);
653 loopstate1
= loopstate
[1] = (uchar
*) emalloc(nstates1
);
654 loopstate2
= loopstate
[2] = (uchar
*) emalloc(nstates2
);
655 loopstate3
= loopstate
[3] = (uchar
*) emalloc(nstates3
);
656 loopstate4
= loopstate
[4] = (uchar
*) emalloc(nstates4
);
657 loopstate5
= loopstate
[5] = (uchar
*) emalloc(nstates5
);
658 stopstate
[0] = (uchar
*) emalloc(nstates0
);
659 stopstate
[1] = (uchar
*) emalloc(nstates1
);
660 stopstate
[2] = (uchar
*) emalloc(nstates2
);
661 stopstate
[3] = (uchar
*) emalloc(nstates3
);
662 stopstate
[4] = (uchar
*) emalloc(nstates4
);
663 stopstate
[5] = (uchar
*) emalloc(nstates5
);
664 visstate
[0] = (uchar
*) emalloc(nstates0
);
665 visstate
[1] = (uchar
*) emalloc(nstates1
);
666 visstate
[2] = (uchar
*) emalloc(nstates2
);
667 visstate
[3] = (uchar
*) emalloc(nstates3
);
668 visstate
[4] = (uchar
*) emalloc(nstates4
);
669 visstate
[5] = (uchar
*) emalloc(nstates5
);
670 mapstate
[0] = (short *) emalloc(nstates0
* sizeof(short));
671 mapstate
[1] = (short *) emalloc(nstates1
* sizeof(short));
672 mapstate
[2] = (short *) emalloc(nstates2
* sizeof(short));
673 mapstate
[3] = (short *) emalloc(nstates3
* sizeof(short));
674 mapstate
[4] = (short *) emalloc(nstates4
* sizeof(short));
675 mapstate
[5] = (short *) emalloc(nstates5
* sizeof(short));
682 NrStates
[0] = nstates0
;
683 NrStates
[1] = nstates1
;
684 NrStates
[2] = nstates2
;
685 NrStates
[3] = nstates3
;
686 NrStates
[4] = nstates4
;
687 NrStates
[5] = nstates5
;
694 stopstate
[0][endstate0
] = 1;
695 stopstate
[1][endstate1
] = 1;
696 stopstate
[2][endstate2
] = 1;
697 stopstate
[3][endstate3
] = 1;
698 stopstate
[4][endstate4
] = 1;
699 stopstate
[5][endstate5
] = 1;
700 stopstate
[1][48] = 1;
701 retrans(0, nstates0
, start0
, src_ln0
, reached0
, loopstate0
);
702 retrans(1, nstates1
, start1
, src_ln1
, reached1
, loopstate1
);
703 retrans(2, nstates2
, start2
, src_ln2
, reached2
, loopstate2
);
704 retrans(3, nstates3
, start3
, src_ln3
, reached3
, loopstate3
);
705 retrans(4, nstates4
, start4
, src_ln4
, reached4
, loopstate4
);
707 { printf("\nTransition Type: ");
708 printf("A=atomic; D=d_step; L=local; G=global\n");
709 printf("Source-State Labels: ");
710 printf("p=progress; e=end; a=accept;\n");
712 printf("Note: statement merging was used. Only the first\n");
713 printf(" stmnt executed in each merge sequence is shown\n");
714 printf(" (use spin -a -o3 to disable statement merging)\n");
719 #if defined(VERI) && !defined(NOREDUCE) && !defined(NP)
728 { printf("warning: for p.o. reduction to be valid ");
729 printf("the never claim must be stutter-invariant\n");
730 printf("(never claims generated from LTL ");
731 printf("formulae are stutter-invariant)\n");
734 UnBlock
; /* disable rendez-vous */
737 { udmem
*= 1024L*1024L;
740 { void init_SS(unsigned long);
741 init_SS((unsigned long) udmem
);
744 SS
= (uchar
*) emalloc(udmem
);
748 { void init_SS(unsigned long);
749 init_SS(ONE_L
<<(ssize
-3));
752 SS
= (uchar
*) emalloc(ONE_L
<<(ssize
-3));
757 #if defined(FULLSTACK) && defined(BITSTATE)
760 #if defined(CNTRSTACK) && !defined(BFS)
761 LL
= (uchar
*) emalloc(ONE_L
<<(ssize
-3));
763 stack
= ( Stack
*) emalloc(sizeof(Stack
));
764 svtack
= (Svtack
*) emalloc(sizeof(Svtack
));
765 /* a place to point for Pptr of non-running procs: */
766 noptr
= (uchar
*) emalloc(Maxbody
* sizeof(char));
769 write(svfd
, (uchar
*) &vprefix
, sizeof(int));
772 Addproc(VERI
); /* never - pid = 0 */
774 active_procs(); /* started after never */
776 now
._event
= start_event
;
777 reached
[EVENT_TRACE
][start_event
] = 1;
787 if (--Nrun
> 0 && HASH_CONST
[++HASH_NR
])
788 { printf("Run %d:\n", HASH_NR
);
791 memset(SS
, 0, ONE_L
<<(ssize
-3));
793 memset(LL
, 0, ONE_L
<<(ssize
-3));
796 memset((uchar
*) S_Tab
, 0,
797 maxdepth
*sizeof(struct H_el
*));
799 nstates
=nlinks
=truncs
=truncs2
=ngrabs
= 0;
800 nlost
=nShadow
=hcmp
= 0;
802 PUT
=PROBE
=ZAPS
=Ccheck
=Cholds
= 0;
808 int provided(int, uchar
, int, Trans
*);
811 #define GLOBAL_LOCK (0)
813 #define CS_N (256*NCORE)
816 #define NR_QS (NCORE)
817 #define CS_NR (CS_N+1) /* 2^N + 1, nr critical sections */
818 #define GQ_RD GLOBAL_LOCK
819 #define GQ_WR GLOBAL_LOCK
820 #define CS_ID (1 + (int) (j1 & (CS_N-1))) /* mask: 2^N - 1, zero reserved */
821 #define QLOCK(n) (1+n)
823 #define NR_QS (NCORE+1)
824 #define CS_NR (CS_N+3)
827 #define CS_ID (3 + (int) (j1 & (CS_N-1)))
828 #define QLOCK(n) (3+n)
831 void e_critical(int);
832 void x_critical(int);
835 #define enter_critical(w) e_critical(w)
836 #define leave_critical(w) x_critical(w)
839 #define enter_critical(w) { if (w < 1+NCORE) e_critical(w); }
840 #define leave_critical(w) { if (w < 1+NCORE) x_critical(w); }
842 #define enter_critical(w) { if (w < 3+NCORE) e_critical(w); }
843 #define leave_critical(w) { if (w < 3+NCORE) x_critical(w); }
848 cpu_printf(const char *fmt
, ...)
850 enter_critical(GLOBAL_LOCK
); /* printing */
851 printf("cpu%d: ", core_id
);
857 leave_critical(GLOBAL_LOCK
);
862 cpu_printf(const char *fmt
, ...)
871 Printf(const char *fmt
, ...)
872 { /* Make sure the args to Printf
873 * are always evaluated (e.g., they
874 * could contain a run stmnt)
875 * but do not generate the output
876 * during verification runs
877 * unless explicitly wanted
878 * If this fails on your system
879 * compile SPIN itself -DPRINTF
880 * and this code is not generated
899 extern void printm(int);
901 #define getframe(i) &trail[i];
903 static long HHH
, DDD
, hiwater
;
904 static long CNT1
, CNT2
;
905 static int stackwrite
;
906 static int stackread
;
907 static Trail frameptr
;
914 if (d
>= (CNT1
-CNT2
)*DDD
)
915 return &trail
[d
- (CNT1
-CNT2
)*DDD
];
918 && (stackread
= open(stackfile
, 0)) < 0)
919 { printf("getframe: cannot open %s\n", stackfile
);
922 if (lseek(stackread
, d
* (off_t
) sizeof(Trail
), SEEK_SET
) == -1
923 || read(stackread
, &frameptr
, sizeof(Trail
)) != sizeof(Trail
))
924 { printf("getframe: frame read error\n");
930 #if !defined(SAFETY) && !defined(BITSTATE)
931 #if !defined(FULLSTACK) || defined(MA)
932 #define depth_of(x) A_depth /* an estimate */
935 depth_of(struct H_el
*s
)
937 for (d
= 0; d
<= A_depth
; d
++)
942 printf("pan: cannot happen, depth_of\n");
948 extern void cleanup_shm(int);
949 volatile unsigned int *search_terminated
; /* to signal early termination */
953 { void stop_timer(void);
955 { printf("--end of output--\n");
958 if (search_terminated
!= NULL
)
959 { *search_terminated
|= 1; /* pan_exit */
962 { void dsk_stats(void);
966 if (!state_tables
&& !readtrail
)
979 transmognify(char *s
)
981 static char buf
[2][2048];
983 if (!s
|| strlen(s
) > 2047) return s
;
984 memset(buf
[0], 0, 2048);
985 memset(buf
[1], 0, 2048);
986 strcpy(buf
[toggle
], s
);
987 while ((v
= strstr(buf
[toggle
], "{c_code")))
989 strcpy(buf
[1-toggle
], buf
[toggle
]);
990 for (w
= v
; *w
!= '}' && *w
!= '\0'; w
++) /* skip */;
991 if (*w
!= '}') return s
;
993 for (i
= 0; code_lookup
[i
].c
; i
++)
994 if (strcmp(v
, code_lookup
[i
].c
) == 0
995 && strlen(v
) == strlen(code_lookup
[i
].c
))
996 { if (strlen(buf
[1-toggle
])
997 + strlen(code_lookup
[i
].t
)
1000 strcat(buf
[1-toggle
], code_lookup
[i
].t
);
1003 strcat(buf
[1-toggle
], w
);
1004 toggle
= 1 - toggle
;
1006 buf
[toggle
][2047] = '\0';
/* Identity fallback for builds without embedded c_code lookup tables:
 * returns its argument unmodified (no "{c_code...}" substitution). */
char *
transmognify(char *s)
{
	return s;
}
1014 add_src_txt(int ot
, int tt
)
1018 for (t
= trans
[ot
][tt
]; t
; t
= t
->nxt
)
1020 q
= transmognify(t
->tp
);
1021 for ( ; q
&& *q
; q
++)
1031 { static int wrap_in_progress
= 0;
1035 if (wrap_in_progress
++) return;
1037 printf("spin: trail ends after %ld steps\n", depth
);
1039 { if (onlyproc
>= now
._nr_pr
) { pan_exit(0); }
1042 printf("%3ld: proc %d (%s) ",
1043 depth
, II
, procname
[z
->_t
]);
1044 for (i
= 0; src_all
[i
].src
; i
++)
1045 if (src_all
[i
].tp
== (int) z
->_t
)
1046 { printf(" line %3d",
1047 src_all
[i
].src
[z
->_p
]);
1050 printf(" (state %2d)", z
->_p
);
1051 if (!stopstate
[z
->_t
][z
->_p
])
1052 printf(" (invalid end state)");
1054 add_src_txt(z
->_t
, z
->_p
);
1057 printf("#processes %d:\n", now
._nr_pr
);
1058 if (depth
< 0) depth
= 0;
1059 for (II
= 0; II
< now
._nr_pr
; II
++)
1060 { z
= (P0
*)pptr(II
);
1061 printf("%3ld: proc %d (%s) ",
1062 depth
, II
, procname
[z
->_t
]);
1063 for (i
= 0; src_all
[i
].src
; i
++)
1064 if (src_all
[i
].tp
== (int) z
->_t
)
1065 { printf(" line %3d",
1066 src_all
[i
].src
[z
->_p
]);
1069 printf(" (state %2d)", z
->_p
);
1070 if (!stopstate
[z
->_t
][z
->_p
])
1071 printf(" (invalid end state)");
1073 add_src_txt(z
->_t
, z
->_p
);
1076 for (II
= 0; II
< now
._nr_pr
; II
++)
1077 { z
= (P0
*)pptr(II
);
1078 c_locals(II
, z
->_t
);
1092 int candidate_files
;
1094 if (trailfilename
!= NULL
)
1095 { fd
= fopen(trailfilename
, "r");
1097 { printf("pan: cannot find %s\n", trailfilename
);
1104 candidate_files
= 0;
1106 strcpy(MyFile
, TrailFile
);
1107 do { /* see if there's more than one possible trailfile */
1109 { sprintf(fnm
, "%s%d.%s",
1110 MyFile
, whichtrail
, tprefix
);
1111 fd
= fopen(fnm
, "r");
1113 { candidate_files
++;
1115 printf("trail%d: %s\n",
1116 candidate_files
, fnm
);
1119 if ((q
= strchr(MyFile
, '.')) != NULL
)
1121 sprintf(fnm
, "%s%d.%s",
1122 MyFile
, whichtrail
, tprefix
);
1124 fd
= fopen(fnm
, "r");
1126 { candidate_files
++;
1128 printf("trail%d: %s\n",
1129 candidate_files
, fnm
);
1133 { sprintf(fnm
, "%s.%s", MyFile
, tprefix
);
1134 fd
= fopen(fnm
, "r");
1136 { candidate_files
++;
1138 printf("trail%d: %s\n",
1139 candidate_files
, fnm
);
1142 if ((q
= strchr(MyFile
, '.')) != NULL
)
1144 sprintf(fnm
, "%s.%s", MyFile
, tprefix
);
1146 fd
= fopen(fnm
, "r");
1148 { candidate_files
++;
1150 printf("trail%d: %s\n",
1151 candidate_files
, fnm
);
1155 sprintf(tprefix
, "cpu%d_trail", try_core
++);
1156 } while (try_core
<= NCORE
);
1158 if (candidate_files
!= 1)
1159 { if (verbose
!= 100)
1160 { printf("error: there are %d trail files:\n",
1165 { printf("pan: rm or mv all except one\n");
1169 strcpy(MyFile
, TrailFile
); /* restore */
1173 { sprintf(fnm
, "%s%d.%s", MyFile
, whichtrail
, tprefix
);
1174 fd
= fopen(fnm
, "r");
1175 if (fd
== NULL
&& (q
= strchr(MyFile
, '.')))
1177 sprintf(fnm
, "%s%d.%s",
1178 MyFile
, whichtrail
, tprefix
);
1180 fd
= fopen(fnm
, "r");
1183 { sprintf(fnm
, "%s.%s", MyFile
, tprefix
);
1184 fd
= fopen(fnm
, "r");
1185 if (fd
== NULL
&& (q
= strchr(MyFile
, '.')))
1187 sprintf(fnm
, "%s.%s", MyFile
, tprefix
);
1189 fd
= fopen(fnm
, "r");
1192 { if (try_core
< NCORE
)
1193 { tprefix
= MySuffix
;
1194 sprintf(tprefix
, "cpu%d_trail", try_core
++);
1197 printf("pan: cannot find trailfile %s\n", fnm
);
1201 #if NCORE>1 && defined(SEP_STATE)
1202 { void set_root(void); /* for partial traces from local root */
1209 uchar
do_transit(Trans
*, short);
1215 int i
, t_id
, lastnever
=-1; short II
;
1219 fd
= findtrail(); /* exits if unsuccessful */
1220 while (fscanf(fd
, "%ld:%d:%d\n", &depth
, &i
, &t_id
) == 3)
1222 printf("<<<<<START OF CYCLE>>>>>\n");
1226 { printf("pan: Error, proc %d invalid pid ", i
);
1227 printf("transition %d\n", t_id
);
1232 for (t
= trans
[z
->_t
][z
->_p
]; t
; t
= t
->nxt
)
1233 if (t
->t_id
== (T_ID
) t_id
)
1236 { for (i
= 0; i
< NrStates
[z
->_t
]; i
++)
1237 { t
= trans
[z
->_t
][i
];
1238 if (t
&& t
->t_id
== (T_ID
) t_id
)
1239 { printf("\tRecovered at state %d\n", i
);
1243 printf("pan: Error, proc %d type %d state %d: ",
1245 printf("transition %d not found\n", t_id
);
1246 printf("pan: list of possible transitions in this process:\n");
1247 if (z
->_t
>= 0 && z
->_t
<= _NP_
)
1248 for (t
= trans
[z
->_t
][z
->_p
]; t
; t
= t
->nxt
)
1249 printf(" t_id %d -- case %d, [%s]\n",
1250 t
->t_id
, t
->forw
, t
->tp
);
1251 break; /* pan_exit(1); */
1254 q
= transmognify(t
->tp
);
1255 if (gui
) simvals
[0] = '\0';
1258 if (!do_transit(t
, II
))
1259 { if (onlyproc
>= 0 && II
!= onlyproc
)
1261 printf("pan: error, next transition UNEXECUTABLE on replay\n");
1262 printf(" most likely causes: missing c_track statements\n");
1263 printf(" or illegal side-effects in c_expr statements\n");
1265 if (onlyproc
>= 0 && II
!= onlyproc
)
1268 { printf("%3ld: proc %2d (%s) ", depth
, II
, procname
[z
->_t
]);
1269 for (i
= 0; src_all
[i
].src
; i
++)
1270 if (src_all
[i
].tp
== (int) z
->_t
)
1271 { printf(" line %3d \"%s\" ",
1272 src_all
[i
].src
[z
->_p
], PanSource
);
1275 printf("(state %d) trans {%d,%d} [%s]\n",
1276 z
->_p
, t_id
, t
->forw
, q
?q
:"");
1278 for (i
= 0; i
< now
._nr_pr
; i
++)
1279 { c_locals(i
, ((P0
*)pptr(i
))->_t
);
1282 if (strcmp(procname
[z
->_t
], ":never:") == 0)
1283 { if (lastnever
!= (int) z
->_p
)
1284 { for (i
= 0; src_all
[i
].src
; i
++)
1285 if (src_all
[i
].tp
== (int) z
->_t
)
1286 { printf("MSC: ~G %d\n",
1287 src_all
[i
].src
[z
->_p
]);
1290 if (!src_all
[i
].src
)
1291 printf("MSC: ~R %d\n", z
->_p
);
1296 if (strcmp(procname
[z
->_t
], ":np_:") != 0)
1298 sameas
: if (no_rck
) goto moveon
;
1300 { printf("%ld: ", depth
);
1301 for (i
= 0; i
< II
; i
++)
1303 printf("%s(%d):", procname
[z
->_t
], II
);
1304 printf("[%s]\n", q
?q
:"");
1306 { if (strlen(simvals
) > 0) {
1307 printf("%3ld: proc %2d (%s)",
1308 depth
, II
, procname
[z
->_t
]);
1309 for (i
= 0; src_all
[i
].src
; i
++)
1310 if (src_all
[i
].tp
== (int) z
->_t
)
1311 { printf(" line %3d \"%s\" ",
1312 src_all
[i
].src
[z
->_p
], PanSource
);
1315 printf("(state %d) [values: %s]\n", z
->_p
, simvals
);
1317 printf("%3ld: proc %2d (%s)",
1318 depth
, II
, procname
[z
->_t
]);
1319 for (i
= 0; src_all
[i
].src
; i
++)
1320 if (src_all
[i
].tp
== (int) z
->_t
)
1321 { printf(" line %3d \"%s\" ",
1322 src_all
[i
].src
[z
->_p
], PanSource
);
1325 printf("(state %d) [%s]\n", z
->_p
, q
?q
:"");
1328 moveon
: z
->_p
= t
->st
;
1337 for (i
= 0; i
< now
._nr_pr
; i
++)
1338 { z
= (P0
*)pptr(i
);
1339 if (z
->_t
== (unsigned) pt
)
1340 return BASE
+z
->_pid
;
1345 void check_claim(int);
1348 #if !defined(HASH64) && !defined(HASH32)
1351 #if defined(HASH32) && defined(SAFETY) && !defined(SFH) && !defined(SPACE)
1354 #if defined(SFH) && (defined(BITSTATE) || defined(COLLAPSE) || defined(HC) || defined(HASH64))
1357 #if defined(SFH) && !defined(NOCOMP)
1358 #define NOCOMP /* go for speed */
1360 #if NCORE>1 && !defined(GLOB_HEAP)
1361 #define SEP_HEAP /* version 5.1.2 */
1366 bstore_mod(char *v
, int n
) /* hasharray size not a power of two */
1367 { unsigned long x
, y
;
1370 d_hash((uchar
*) v
, n
); /* sets j3, j4, K1, K2 */
1373 { if (!(SS
[x
%udmem
]&(1<<y
))) break;
1376 printf("Old bitstate\n");
1385 if (rand()%100 > RANDSTOR
) return 0;
1388 { SS
[x
%udmem
] |= (1<<y
);
1389 if (i
== hfns
) break;
1395 printf("New bitstate\n");
1403 bstore_reg(char *v
, int n
) /* extended hashing, Peter Dillinger, 2004 */
1404 { unsigned long x
, y
;
1407 d_hash((uchar
*) v
, n
); /* sets j1-j4 */
1410 { if (!(SS
[x
]&(1<<y
))) break;
1413 printf("Old bitstate\n");
1417 x
= (x
+ j1
+ i
) & nmask
;
1422 if (rand()%100 > RANDSTOR
) return 0;
1426 if (i
== hfns
) break;
1427 x
= (x
+ j1
+ i
) & nmask
;
1432 printf("New bitstate\n");
1440 unsigned long TMODE
= 0666; /* file permission bits for trail files */
1443 char snap
[64], fnm
[512];
1450 int w_flags
= O_CREAT
|O_WRONLY
|O_TRUNC
;
1452 if (exclusive
== 1 && iterative
== 0)
1453 { w_flags
|= O_EXCL
;
1456 q
= strrchr(TrailFile
, '/');
1457 if (q
== NULL
) q
= TrailFile
; else q
++;
1458 strcpy(MyFile
, q
); /* TrailFile is not a writable string */
1460 if (iterative
== 0 && Nr_Trails
++ > 0)
1461 { sprintf(fnm
, "%s%d.%s",
1462 MyFile
, Nr_Trails
-1, tprefix
);
1466 sprintf(fnm
, "%s%d.%s", MyFile
, getpid(), tprefix
);
1468 sprintf(fnm
, "%s.%s", MyFile
, tprefix
);
1471 if ((fd
= open(fnm
, w_flags
, TMODE
)) < 0)
1472 { if ((q
= strchr(MyFile
, '.')))
1474 if (iterative
== 0 && Nr_Trails
-1 > 0)
1475 sprintf(fnm
, "%s%d.%s",
1476 MyFile
, Nr_Trails
-1, tprefix
);
1478 sprintf(fnm
, "%s.%s", MyFile
, tprefix
);
1480 fd
= open(fnm
, w_flags
, TMODE
);
1483 { printf("pan: cannot create %s\n", fnm
);
1487 #if NCORE>1 && (defined(SEP_STATE) || !defined(FULL_TRAIL))
1488 void write_root(void);
1491 printf("pan: wrote %s\n", fnm
);
1498 #define FREQ (1000000)
1506 typedef struct SV_Hold
{
1509 struct SV_Hold
*nxt
;
1512 typedef struct EV_Hold
{
1520 struct EV_Hold
*nxt
;
1523 typedef struct BFS_Trail
{
1528 struct H_el
*lstate
;
1531 struct BFS_Trail
*nxt
;
1534 BFS_Trail
*bfs_trail
, *bfs_bot
, *bfs_free
;
1536 SV_Hold
*svhold
, *svfree
;
1540 #define BFS_LIMIT 100000
1542 #ifndef BFS_DSK_LIMIT
1543 #define BFS_DSK_LIMIT 1000000
1545 #if defined(WIN32) || defined(WIN64)
1546 #define RFLAGS (O_RDONLY|O_BINARY)
1547 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)
1549 #define RFLAGS (O_RDONLY)
1550 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC)
1552 long bfs_size_limit
;
1553 int bfs_dsk_write
= -1;
1554 int bfs_dsk_read
= -1;
1555 long bfs_dsk_writes
, bfs_dsk_reads
;
1556 int bfs_dsk_seqno_w
, bfs_dsk_seqno_r
;
1559 uchar
do_reverse(Trans
*, short, uchar
);
1560 void snapshot(void);
1564 { SV_Hold
*h
= (SV_Hold
*) 0, *oh
;
1567 for (h
= svfree
; h
; oh
= h
, h
= h
->nxt
)
1573 h
->nxt
= (SV_Hold
*) 0;
1577 { h
= (SV_Hold
*) 0;
1584 { h
= (SV_Hold
*) emalloc(sizeof(SV_Hold
));
1587 if (bfs_size_limit
>= BFS_LIMIT
)
1588 { h
->sv
= (State
*) 0; /* means: read disk */
1589 bfs_dsk_writes
++; /* count */
1590 if (bfs_dsk_write
< 0 /* file descriptor */
1591 || bfs_dsk_writes
%BFS_DSK_LIMIT
== 0)
1593 if (bfs_dsk_write
>= 0)
1594 { (void) close(bfs_dsk_write
);
1596 sprintf(dsk_nm
, "pan_bfs_%d.tmp", bfs_dsk_seqno_w
++);
1597 bfs_dsk_write
= open(dsk_nm
, WFLAGS
, 0644);
1598 if (bfs_dsk_write
< 0)
1599 { Uerror("could not create tmp disk file");
1601 printf("pan: created disk file %s\n", dsk_nm
);
1603 if (write(bfs_dsk_write
, (char *) &now
, n
) != n
)
1604 { Uerror("aborting -- disk write failed (disk full?)");
1606 return h
; /* no memcpy */
1610 h
->sv
= (State
*) emalloc(sizeof(State
) - VECTORSZ
+ n
);
1613 memcpy((char *)h
->sv
, (char *)&now
, n
);
1620 static EV_Hold
*kept
= (EV_Hold
*) 0;
1622 for (h
= kept
; h
; h
= h
->nxt
)
1624 && (memcmp((char *) Mask
, (char *) h
->sv
, n
) == 0)
1625 && (now
._nr_pr
== h
->nrpr
)
1626 && (now
._nr_qs
== h
->nrqs
)
1628 && (memcmp((char *) proc_offset
, (char *) h
->po
, now
._nr_pr
* sizeof(int)) == 0)
1629 && (memcmp((char *) q_offset
, (char *) h
->qo
, now
._nr_qs
* sizeof(int)) == 0)
1631 && (memcmp((char *) proc_offset
, (char *) h
->po
, now
._nr_pr
* sizeof(short)) == 0)
1632 && (memcmp((char *) q_offset
, (char *) h
->qo
, now
._nr_qs
* sizeof(short)) == 0)
1634 && (memcmp((char *) proc_skip
, (char *) h
->ps
, now
._nr_pr
* sizeof(uchar
)) == 0)
1635 && (memcmp((char *) q_skip
, (char *) h
->qs
, now
._nr_qs
* sizeof(uchar
)) == 0))
1638 { h
= (EV_Hold
*) emalloc(sizeof(EV_Hold
));
1640 h
->nrpr
= now
._nr_pr
;
1641 h
->nrqs
= now
._nr_qs
;
1643 h
->sv
= (char *) emalloc(n
* sizeof(char));
1644 memcpy((char *) h
->sv
, (char *) Mask
, n
);
1647 { h
->ps
= (char *) emalloc(now
._nr_pr
* sizeof(int));
1648 memcpy((char *) h
->ps
, (char *) proc_skip
, now
._nr_pr
* sizeof(uchar
));
1650 h
->po
= (char *) emalloc(now
._nr_pr
* sizeof(int));
1651 memcpy((char *) h
->po
, (char *) proc_offset
, now
._nr_pr
* sizeof(int));
1653 h
->po
= (char *) emalloc(now
._nr_pr
* sizeof(short));
1654 memcpy((char *) h
->po
, (char *) proc_offset
, now
._nr_pr
* sizeof(short));
1658 { h
->qs
= (char *) emalloc(now
._nr_qs
* sizeof(int));
1659 memcpy((char *) h
->qs
, (char *) q_skip
, now
._nr_qs
* sizeof(uchar
));
1661 h
->qo
= (char *) emalloc(now
._nr_qs
* sizeof(int));
1662 memcpy((char *) h
->qo
, (char *) q_offset
, now
._nr_qs
* sizeof(int));
1664 h
->qo
= (char *) emalloc(now
._nr_qs
* sizeof(short));
1665 memcpy((char *) h
->qo
, (char *) q_offset
, now
._nr_qs
* sizeof(short));
1680 for (h
= svfree
; h
; oh
= h
, h
= h
->nxt
)
1699 bfs_free
= bfs_free
->nxt
;
1700 t
->nxt
= (BFS_Trail
*) 0;
1702 { t
= (BFS_Trail
*) emalloc(sizeof(BFS_Trail
));
1704 t
->frame
= (Trail
*) emalloc(sizeof(Trail
));
1709 push_bfs(Trail
*f
, int d
)
1712 t
= get_bfs_frame();
1713 memcpy((char *)t
->frame
, (char *)f
, sizeof(Trail
));
1714 t
->frame
->o_tt
= d
; /* depth */
1717 t
->onow
= getsv(vsize
);
1718 t
->omask
= getsv_mask(vsize
);
1719 #if defined(FULLSTACK) && defined(Q_PROVISO)
1723 { bfs_bot
= bfs_trail
= t
;
1729 printf("PUSH %u (%d)\n", t
->frame
, d
);
1743 bfs_bot
= (BFS_Trail
*) 0;
1744 #if defined(Q_PROVISO) && !defined(BITSTATE) && !defined(NOREDUCE)
1745 if (t
->lstate
) t
->lstate
->tagged
= 0;
1751 vsize
= t
->onow
->sz
;
1754 if (t
->onow
->sv
== (State
*) 0)
1756 bfs_dsk_reads
++; /* count */
1757 if (bfs_dsk_read
>= 0 /* file descriptor */
1758 && bfs_dsk_reads
%BFS_DSK_LIMIT
== 0)
1759 { (void) close(bfs_dsk_read
);
1760 sprintf(dsk_nm
, "pan_bfs_%d.tmp", bfs_dsk_seqno_r
-1);
1761 (void) unlink(dsk_nm
);
1764 if (bfs_dsk_read
< 0)
1765 { sprintf(dsk_nm
, "pan_bfs_%d.tmp", bfs_dsk_seqno_r
++);
1766 bfs_dsk_read
= open(dsk_nm
, RFLAGS
);
1767 if (bfs_dsk_read
< 0)
1768 { Uerror("could not open temp disk file");
1770 if (read(bfs_dsk_read
, (char *) &now
, vsize
) != vsize
)
1771 { Uerror("bad bfs disk file read");
1774 if (now
._vsz
!= vsize
)
1775 { Uerror("disk read vsz mismatch");
1780 memcpy((uchar
*) &now
, (uchar
*) t
->onow
->sv
, vsize
);
1781 memcpy((uchar
*) Mask
, (uchar
*) t
->omask
->sv
, vsize
);
1784 { memcpy((char *)proc_offset
, (char *)t
->omask
->po
, now
._nr_pr
* sizeof(int));
1786 { memcpy((char *)proc_offset
, (char *)t
->omask
->po
, now
._nr_pr
* sizeof(short));
1788 memcpy((char *)proc_skip
, (char *)t
->omask
->ps
, now
._nr_pr
* sizeof(uchar
));
1792 { memcpy((uchar
*)q_offset
, (uchar
*)t
->omask
->qo
, now
._nr_qs
* sizeof(int));
1794 { memcpy((uchar
*)q_offset
, (uchar
*)t
->omask
->qo
, now
._nr_qs
* sizeof(short));
1796 memcpy((uchar
*)q_skip
, (uchar
*)t
->omask
->qs
, now
._nr_qs
* sizeof(uchar
));
1799 if (t
->onow
->sv
!= (State
*) 0)
1801 freesv(t
->onow
); /* omask not freed */
1803 printf("POP %u (%d)\n", t
->frame
, t
->frame
->o_tt
);
1809 store_state(Trail
*ntrpt
, int shortcut
, short oboq
)
1812 Trans
*t2
= (Trans
*) 0;
1813 uchar ot
; int tt
, E_state
;
1814 uchar o_opm
= trpt
->o_pm
, *othis
= this;
1819 printf("claim: shortcut\n");
1821 goto store_it
; /* no claim move */
1824 this = (((uchar
*)&now
)+proc_offset
[0]); /* 0 = never claim */
1827 tt
= (int) ((P0
*)this)->_p
;
1828 ot
= (uchar
) ((P0
*)this)->_t
;
1833 for (t2
= trans
[ot
][tt
]; t2
; t2
= t2
?t2
->nxt
:(Trans
*)0)
1837 && E_state
!= t2
->e_trans
)
1840 if (do_transit(t2
, 0))
1843 if (!reached
[ot
][t2
->st
])
1844 printf("depth: %d -- claim move from %d -> %d\n",
1845 trpt
->o_tt
, ((P0
*)this)->_p
, t2
->st
);
1848 E_state
= t2
->e_trans
;
1851 { ((P0
*)this)->_p
= t2
->st
;
1852 reached
[ot
][t2
->st
] = 1;
1854 check_claim(t2
->st
);
1857 if (now
._nr_pr
== 0) /* claim terminated */
1858 uerror("end state in claim reached");
1865 Uerror("atomic in claim not supported in BFS mode");
1871 if (!bstore((char *)&now
, vsize
))
1874 if (!gstore((char *)&now
, vsize
, 0))
1876 if (!hstore((char *)&now
, vsize
))
1879 { static long sdone
= (long) 0; long ndone
;
1884 ndone
= (unsigned long) (nstates
/((double) FREQ
));
1885 if (ndone
!= sdone
&& mreached
%10 != 0)
1888 #if defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(MA)
1889 if (nstates
> ((double)(1<<(ssize
+1))))
1890 { void resize_hashtable(void);
1898 else if (oboq
!= -1)
1900 x
= (Trail
*) trpt
->ostate
; /* pre-rv state */
1901 if (x
) x
->o_pm
|= 4; /* mark success */
1904 push_bfs(ntrpt
, trpt
->o_tt
+1);
1907 #if !defined(NOREDUCE) && defined(FULLSTACK) && defined(Q_PROVISO)
1908 #if !defined(BITSTATE)
1909 if (Lstate
&& Lstate
->tagged
) trpt
->tau
|= 64;
1913 for (tprov
= bfs_trail
; tprov
; tprov
= tprov
->nxt
)
1914 if (tprov
->onow
->sv
!= (State
*) 0
1915 && memcmp((uchar
*)&now
, (uchar
*)tprov
->onow
->sv
, vsize
) == 0)
1917 break; /* state is in queue */
1923 ((P0
*)this)->_p
= tt
; /* reset claim */
1925 do_reverse(t2
, 0, 0);
1938 { Trans
*t
; Trail
*otrpt
, *x
;
1939 uchar _n
, _m
, ot
, nps
= 0;
1941 short II
, From
= (short) (now
._nr_pr
-1), To
= BASE
;
1944 ntrpt
= (Trail
*) emalloc(sizeof(Trail
));
1945 trpt
->ostate
= (struct H_el
*) 0;
1949 store_state(ntrpt
, 0, oboq
); /* initial state */
1951 while ((otrpt
= pop_bfs())) /* also restores now */
1952 { memcpy((char *) trpt
, (char *) otrpt
, sizeof(Trail
));
1953 #if defined(C_States) && (HAS_TRACK==1)
1954 c_revert((uchar
*) &(now
.c_state
[0]));
1959 printf("Revisit of atomic not needed (%d)\n",
1967 if (trpt
->o_pm
== 8)
1972 printf("Break atomic (pm:%d,tau:%d)\n",
1973 trpt
->o_pm
, trpt
->tau
);
1978 else if (trpt
->tau
&32)
1981 printf("Void preselection (pm:%d,tau:%d)\n",
1982 trpt
->o_pm
, trpt
->tau
);
1985 nps
= 1; /* no preselection in repeat */
1989 trpt
->o_pm
&= ~(4|8);
1990 if (trpt
->o_tt
> mreached
)
1991 { mreached
= trpt
->o_tt
;
1992 if (mreached
%10 == 0)
1996 if (depth
>= maxdepth
)
2001 { x
= (Trail
*) trpt
->ostate
;
2002 if (x
) x
->o_pm
|= 4; /* not failing */
2008 printf("error: max search depth too small\n");
2011 uerror("depth limit reached");
2015 if (boq
== -1 && !(trpt
->tau
&8) && nps
== 0)
2016 for (II
= now
._nr_pr
-1; II
>= BASE
; II
-= 1)
2018 Pickup
: this = pptr(II
);
2019 tt
= (int) ((P0
*)this)->_p
;
2020 ot
= (uchar
) ((P0
*)this)->_t
;
2021 if (trans
[ot
][tt
]->atom
& 8)
2022 { t
= trans
[ot
][tt
];
2030 trpt
->tau
|= 32; /* preselect marker */
2032 printf("%3d: proc %d PreSelected (tau=%d)\n",
2033 depth
, II
, trpt
->tau
);
2040 if (trpt
->tau
&8) /* atomic */
2041 { From
= To
= (short ) trpt
->pr
;
2044 { From
= now
._nr_pr
-1;
2049 for (II
= From
; II
>= To
; II
-= 1)
2051 this = (((uchar
*)&now
)+proc_offset
[II
]);
2052 tt
= (int) ((P0
*)this)->_p
;
2053 ot
= (uchar
) ((P0
*)this)->_t
;
2055 /* no rendezvous with same proc */
2056 if (boq
!= -1 && trpt
->pr
== II
) continue;
2058 ntrpt
->pr
= (uchar
) II
;
2060 trpt
->o_pm
&= ~1; /* no move yet */
2062 trpt
->o_event
= now
._event
;
2065 if (!provided(II
, ot
, tt
, t
)) continue;
2070 for (t
= trans
[ot
][tt
]; t
; t
= t
->nxt
)
2074 && E_state
!= t
->e_trans
)
2081 if (!(_m
= do_transit(t
, II
)))
2084 trpt
->o_pm
|= 1; /* we moved */
2085 (trpt
+1)->o_m
= _m
; /* for unsend */
2090 printf("%3d: proc %d exec %d, ",
2091 depth
, II
, t
->forw
);
2092 printf("%d to %d, %s %s %s",
2094 (t
->atom
&2)?"atomic":"",
2095 (boq
!= -1)?"rendez-vous":"");
2098 printf(" (escapes to state %d)", t
->st
);
2100 printf(" %saccepting [tau=%d]\n",
2101 (trpt
->o_pm
&2)?"":"non-", trpt
->tau
);
2104 E_state
= t
->e_trans
;
2106 if (t
->e_trans
> 0 && (boq
!= -1 /* || oboq != -1 */))
2107 { fprintf(efd
, "error: the use of rendezvous stmnt in the escape clause\n");
2108 fprintf(efd
, " of an unless stmnt is not compatible with -DBFS\n");
2113 if (t
->st
> 0) ((P0
*)this)->_p
= t
->st
;
2115 /* ptr to pred: */ ntrpt
->ostate
= (struct H_el
*) otrpt
;
2117 if (boq
== -1 && (t
->atom
&2)) /* atomic */
2118 ntrpt
->tau
= 8; /* record for next move */
2122 store_state(ntrpt
, (boq
!= -1 || (t
->atom
&2)), oboq
);
2124 now
._event
= trpt
->o_event
;
2127 /* undo move and continue */
2128 trpt
++; /* this is where ovals and ipt are set */
2129 do_reverse(t
, II
, _m
); /* restore now. */
2133 enter_critical(GLOBAL_LOCK
); /* in verbose mode only */
2134 printf("cpu%d: ", core_id
);
2136 printf("%3d: proc %d ", depth
, II
);
2137 printf("reverses %d, %d to %d,",
2138 t
->forw
, tt
, t
->st
);
2139 printf(" %s [abit=%d,adepth=%d,",
2140 t
->tp
, now
._a_t
, A_depth
);
2141 printf("tau=%d,%d]\n",
2142 trpt
->tau
, (trpt
-1)->tau
);
2144 leave_critical(GLOBAL_LOCK
);
2147 reached
[ot
][t
->st
] = 1;
2148 reached
[ot
][tt
] = 1;
2150 ((P0
*)this)->_p
= tt
;
2154 /* preselected - no succ definitely outside stack */
2155 if ((trpt
->tau
&32) && !(trpt
->tau
&64))
2156 { From
= now
._nr_pr
-1; To
= BASE
;
2158 cpu_printf("%3d: proc %d UnSelected (_n=%d, tau=%d)\n",
2159 depth
, II
+1, (int) _n
, trpt
->tau
);
2161 _n
= 0; trpt
->tau
&= ~32;
2166 trpt
->tau
&= ~(32|64);
2171 printf("%3d: no move [II=%d, tau=%d, boq=%d, _nr_pr=%d]\n",
2172 depth
, II
, trpt
->tau
, boq
, now
._nr_pr
);
2176 x
= (Trail
*) trpt
->ostate
; /* pre-rv state */
2177 if (!x
) continue; /* root state */
2178 if ((x
->tau
&8) || (x
->tau
&32)) /* break atomic or preselect at parent */
2179 { x
->o_pm
|= 8; /* mark failure */
2180 this = (((uchar
*)&now
)+proc_offset
[otrpt
->pr
]);
2182 printf("\treset state of %d from %d to %d\n",
2183 otrpt
->pr
, ((P0
*)this)->_p
, otrpt
->st
);
2185 ((P0
*)this)->_p
= otrpt
->st
;
2186 unsend(boq
); /* retract rv offer */
2188 push_bfs(x
, x
->o_tt
);
2190 printf("failed rv, repush with %d\n", x
->o_pm
);
2194 else printf("failed rv, tau at parent: %d\n", x
->tau
);
2196 } else if (now
._nr_pr
> 0)
2198 if ((trpt
->tau
&8)) /* atomic */
2199 { trpt
->tau
&= ~(1|8); /* 1=timeout, 8=atomic */
2201 printf("%3d: atomic step proc %d blocks\n",
2207 if (!(trpt
->tau
&1)) /* didn't try timeout yet */
2210 printf("%d: timeout\n", depth
);
2215 if (!noends
&& !a_cycles
&& !endstate())
2216 uerror("invalid end state");
2222 putter(Trail
*trpt
, int fd
)
2227 if (trpt
!= (Trail
*) trpt
->ostate
)
2228 putter((Trail
*) trpt
->ostate
, fd
);
2231 { sprintf(snap
, "%d:%d:%d\n",
2232 trcnt
++, trpt
->pr
, trpt
->o_t
->t_id
);
2234 if (write(fd
, snap
, j
) != j
)
2235 { printf("pan: error writing %s\n", fnm
);
2242 { int fd
= make_trail();
2247 sprintf(snap
, "-2:%d:-2\n", VERI
);
2248 write(fd
, snap
, strlen(snap
));
2251 sprintf(snap
, "-4:-4:-4\n");
2252 write(fd
, snap
, strlen(snap
));
2257 { sprintf(snap
, "%d:%d:%d\n",
2258 trcnt
++, ntrpt
->pr
, ntrpt
->o_t
->t_id
);
2260 if (write(fd
, snap
, j
) != j
)
2261 { printf("pan: error writing %s\n", fnm
);
2265 if (errors
>= upto
&& upto
!= 0)
2271 #if defined(WIN32) || defined(WIN64)
2278 #include <windows.h>
2281 #define long long long
2284 #include <sys/ipc.h>
2285 #include <sys/sem.h>
2286 #include <sys/shm.h>
2289 /* code common to cygwin/linux and win32/win64: */
2292 #define VVERBOSE (1)
2294 #define VVERBOSE (0)
2297 /* the following values must be larger than 256 and must fit in an int */
2298 #define QUIT 1024 /* terminate now command */
2299 #define QUERY 512 /* termination status query message */
2300 #define QUERY_F 513 /* query failed, cannot quit */
2302 #define GN_FRAMES (int) (GWQ_SIZE / (double) sizeof(SM_frame))
2303 #define LN_FRAMES (int) (LWQ_SIZE / (double) sizeof(SM_frame))
2306 #define VMAX VECTORSZ
2322 /* no longer usefule -- being recomputed for local heap size anyway */
2323 double SEG_SIZE
= (((double) SET_SEG_SIZE
) * 1048576.);
2325 double SEG_SIZE
= (1048576.*1024.); /* 1GB default shared memory pool segments */
2328 double LWQ_SIZE
= 0.; /* initialized in main */
2332 #warning SET_WQ_SIZE applies to global queue -- ignored
2333 double GWQ_SIZE
= 0.;
2335 double GWQ_SIZE
= (((double) SET_WQ_SIZE
) * 1048576.);
2336 /* must match the value in pan_proxy.c, if used */
2340 double GWQ_SIZE
= 0.;
2342 double GWQ_SIZE
= (128.*1048576.); /* 128 MB default queue sizes */
2346 /* Crash Detection Parameters */
2348 #define ONESECOND (1<<25)
2351 #define SHORT_T (0.1)
2354 #define LONG_T (600)
2357 double OneSecond
= (double) (ONESECOND
); /* waiting for a free slot -- checks crash */
2358 double TenSeconds
= 10. * (ONESECOND
); /* waiting for a lock -- check for a crash */
2360 /* Termination Detection Params -- waiting for new state input in Get_Full_Frame */
2361 double Delay
= ((double) SHORT_T
) * (ONESECOND
); /* termination detection trigger */
2362 double OneHour
= ((double) LONG_T
) * (ONESECOND
); /* timeout termination detection */
2364 typedef struct SM_frame SM_frame
;
2365 typedef struct SM_results SM_results
;
2366 typedef struct sh_Allocater sh_Allocater
;
2368 struct SM_frame
{ /* about 6K per slot */
2369 volatile int m_vsize
; /* 0 means free slot */
2370 volatile int m_boq
; /* >500 is a control message */
2372 volatile struct Stack_Tree
*m_stack
; /* ptr to previous state */
2374 volatile uchar m_tau
;
2375 volatile uchar m_o_pm
;
2376 volatile int nr_handoffs
; /* to compute real_depth */
2377 volatile char m_now
[VMAX
];
2378 volatile char m_Mask
[(VMAX
+ 7)/8];
2379 volatile OFFT m_p_offset
[PMAX
];
2380 volatile OFFT m_q_offset
[QMAX
];
2381 volatile uchar m_p_skip
[PMAX
];
2382 volatile uchar m_q_skip
[QMAX
];
2383 #if defined(C_States) && (HAS_TRACK==1) && (HAS_STACK==1)
2384 volatile uchar m_c_stack
[StackSize
];
2388 int proxy_pid
; /* id of proxy if nonzero -- receive half */
2389 int store_proxy_pid
;
2391 int proxy_pid_snd
; /* id of proxy if nonzero -- send half */
2392 char o_cmdline
[512]; /* to pass options to children */
2394 int iamin
[CS_NR
+NCORE
]; /* non-shared */
2396 #if defined(WIN32) || defined(WIN64)
2397 int tas(volatile LONG
*);
2399 HANDLE proxy_handle_snd
; /* for Windows Create and Terminate */
2401 struct sh_Allocater
{ /* shared memory for states */
2402 volatile char *dc_arena
; /* to allocate states from */
2403 volatile long pattern
; /* to detect overruns */
2404 volatile long dc_size
; /* nr of bytes left */
2405 volatile void *dc_start
; /* where memory segment starts */
2406 volatile void *dc_id
; /* to attach, detach, remove shared memory segments */
2407 volatile sh_Allocater
*nxt
; /* linked list of pools */
2409 DWORD worker_pids
[NCORE
]; /* root mem of pids of all workers created */
2410 HANDLE worker_handles
[NCORE
]; /* for windows Create and Terminate */
2411 void * shmid
[NR_QS
]; /* return value from CreateFileMapping */
2412 void * shmid_M
; /* shared mem for state allocation in hashtable */
2417 void *shmid_S
; /* shared bitstate arena or hashtable */
2420 int tas(volatile int *);
2422 struct sh_Allocater
{ /* shared memory for states */
2423 volatile char *dc_arena
; /* to allocate states from */
2424 volatile long pattern
; /* to detect overruns */
2425 volatile long dc_size
; /* nr of bytes left */
2426 volatile char *dc_start
; /* where memory segment starts */
2427 volatile int dc_id
; /* to attach, detach, remove shared memory segments */
2428 volatile sh_Allocater
*nxt
; /* linked list of pools */
2431 int worker_pids
[NCORE
]; /* root mem of pids of all workers created */
2432 int shmid
[NR_QS
]; /* return value from shmget */
2433 int nibis
= 0; /* set after shared mem has been released */
2434 int shmid_M
; /* shared mem for state allocation in hashtable */
2438 int shmid_S
; /* shared bitstate arena or hashtable */
2439 volatile sh_Allocater
*first_pool
; /* of shared state memory */
2440 volatile sh_Allocater
*last_pool
;
2444 struct SM_results
{ /* for shuttling back final stats */
2445 volatile int m_vsize
; /* avoid conflicts with frames */
2446 volatile int m_boq
; /* these 2 fields are not written in record_info */
2447 /* probably not all fields really need to be volatile */
2448 volatile double m_memcnt
;
2449 volatile double m_nstates
;
2450 volatile double m_truncs
;
2451 volatile double m_truncs2
;
2452 volatile double m_nShadow
;
2453 volatile double m_nlinks
;
2454 volatile double m_ngrabs
;
2455 volatile double m_nlost
;
2456 volatile double m_hcmp
;
2457 volatile double m_frame_wait
;
2458 volatile int m_hmax
;
2459 volatile int m_svmax
;
2460 volatile int m_smax
;
2461 volatile int m_mreached
;
2462 volatile int m_errors
;
2463 volatile int m_VMAX
;
2464 volatile short m_PMAX
;
2465 volatile short m_QMAX
;
2466 volatile uchar m_R
; /* reached info for all proctypes */
2469 int core_id
= 0; /* internal process nr, to know which q to use */
2470 unsigned long nstates_put
= 0; /* statistics */
2471 unsigned long nstates_get
= 0;
2472 int query_in_progress
= 0; /* termination detection */
2474 double free_wait
= 0.; /* waiting for a free frame */
2475 double frame_wait
= 0.; /* waiting for a full frame */
2476 double lock_wait
= 0.; /* waiting for access to cs */
2477 double glock_wait
[3]; /* waiting for access to global lock */
2479 char *sprefix
= "rst";
2480 uchar was_interrupted
, issued_kill
, writing_trail
;
2482 static SM_frame cur_Root
; /* current root, to be safe with error trails */
2484 SM_frame
*m_workq
[NR_QS
]; /* per cpu work queues + global q */
2485 char *shared_mem
[NR_QS
]; /* return value from shmat */
2490 volatile sh_Allocater
*dc_shared
; /* assigned at initialization */
2492 static int vmax_seen
, pmax_seen
, qmax_seen
;
2493 static double gq_tries
, gq_hasroom
, gq_hasnoroom
;
2495 volatile int *prfree
;
2496 volatile int *prfull
;
2497 volatile int *prcnt
;
2498 volatile int *prmax
;
2500 volatile int *sh_lock
; /* mutual exclusion locks - in shared memory */
2501 volatile double *is_alive
; /* to detect when processes crash */
2502 volatile int *grfree
, *grfull
, *grcnt
, *grmax
; /* access to shared global q */
2503 volatile double *gr_readmiss
, *gr_writemiss
;
2504 static int lrfree
; /* used for temporary recording of slot */
2505 static int dfs_phase2
;
2507 void mem_put(int); /* handoff state to other cpu */
2508 void mem_put_acc(void); /* liveness mode */
2509 void mem_get(void); /* get state from work queue */
2510 void sudden_stop(char *);
2512 void enter_critical(int);
2513 void leave_critical(int);
2517 record_info(SM_results
*r
)
2523 { cpu_printf("nstates %g nshadow %g -- memory %-6.3f Mb\n",
2524 nstates
, nShadow
, memcnt
/(1048576.));
2529 r
->m_memcnt
= 0; /* it's shared */
2531 r
->m_memcnt
= memcnt
;
2533 if (a_cycles
&& core_id
== 1)
2534 { r
->m_nstates
= nstates
;
2535 r
->m_nShadow
= nstates
;
2537 { r
->m_nstates
= nstates
;
2538 r
->m_nShadow
= nShadow
;
2540 r
->m_truncs
= truncs
;
2541 r
->m_truncs2
= truncs2
;
2542 r
->m_nlinks
= nlinks
;
2543 r
->m_ngrabs
= ngrabs
;
2546 r
->m_frame_wait
= frame_wait
;
2550 r
->m_mreached
= mreached
;
2551 r
->m_errors
= errors
;
2552 r
->m_VMAX
= vmax_seen
;
2553 r
->m_PMAX
= (short) pmax_seen
;
2554 r
->m_QMAX
= (short) qmax_seen
;
2555 ptr
= (uchar
*) &(r
->m_R
);
2556 for (i
= 0; i
<= _NP_
; i
++) /* all proctypes */
2557 { memcpy(ptr
, reached
[i
], NrStates
[i
]*sizeof(uchar
));
2558 ptr
+= NrStates
[i
]*sizeof(uchar
);
2561 { cpu_printf("Put Results nstates %g (sz %d)\n", nstates
, ptr
- &(r
->m_R
));
2565 void snapshot(void);
2568 retrieve_info(SM_results
*r
)
2570 volatile uchar
*ptr
;
2572 snapshot(); /* for a final report */
2574 enter_critical(GLOBAL_LOCK
);
2577 { printf("cpu%d: local heap-left %ld KB (%d MB)\n",
2578 core_id
, (int) (my_size
/1024), (int) (my_size
/1048576));
2581 if (verbose
&& core_id
== 0)
2583 for (i
= 0; i
< NCORE
; i
++)
2584 { printf("%d ", prmax
[i
]);
2587 printf("G: %d", *grmax
);
2591 leave_critical(GLOBAL_LOCK
);
2593 memcnt
+= r
->m_memcnt
;
2594 nstates
+= r
->m_nstates
;
2595 nShadow
+= r
->m_nShadow
;
2596 truncs
+= r
->m_truncs
;
2597 truncs2
+= r
->m_truncs2
;
2598 nlinks
+= r
->m_nlinks
;
2599 ngrabs
+= r
->m_ngrabs
;
2600 nlost
+= r
->m_nlost
;
2602 /* frame_wait += r->m_frame_wait; */
2603 errors
+= r
->m_errors
;
2605 if (hmax
< r
->m_hmax
) hmax
= r
->m_hmax
;
2606 if (svmax
< r
->m_svmax
) svmax
= r
->m_svmax
;
2607 if (smax
< r
->m_smax
) smax
= r
->m_smax
;
2608 if (mreached
< r
->m_mreached
) mreached
= r
->m_mreached
;
2610 if (vmax_seen
< r
->m_VMAX
) vmax_seen
= r
->m_VMAX
;
2611 if (pmax_seen
< (int) r
->m_PMAX
) pmax_seen
= (int) r
->m_PMAX
;
2612 if (qmax_seen
< (int) r
->m_QMAX
) qmax_seen
= (int) r
->m_QMAX
;
2615 for (i
= 0; i
<= _NP_
; i
++) /* all proctypes */
2616 { for (j
= 0; j
< NrStates
[i
]; j
++)
2617 { if (*(ptr
+ j
) != 0)
2618 { reached
[i
][j
] = 1;
2620 ptr
+= NrStates
[i
]*sizeof(uchar
);
2623 { cpu_printf("Got Results (%d)\n", ptr
- &(r
->m_R
));
2628 #if !defined(WIN32) && !defined(WIN64)
2630 rm_shared_segments(void)
2632 volatile sh_Allocater
*nxt_pool
;
2634 * mark all shared memory segments for removal
2635 * the actual removes wont happen intil last process dies or detaches
2636 * the shmctl calls can return -1 if not all procs have detached yet
2638 for (m
= 0; m
< NR_QS
; m
++) /* +1 for global q */
2639 { if (shmid
[m
] != -1)
2640 { (void) shmctl(shmid
[m
], IPC_RMID
, NULL
);
2644 { (void) shmctl(shmid_M
, IPC_RMID
, NULL
);
2648 { (void) shmctl(shmid_S
, IPC_RMID
, NULL
);
2650 for (last_pool
= first_pool
; last_pool
!= NULL
; last_pool
= nxt_pool
)
2651 { shmid_M
= (int) (last_pool
->dc_id
);
2652 nxt_pool
= last_pool
->nxt
; /* as a pre-caution only */
2654 { (void) shmctl(shmid_M
, IPC_RMID
, NULL
);
2661 sudden_stop(char *s
)
2665 printf("cpu%d: stop - %s\n", core_id
, s
);
2666 #if !defined(WIN32) && !defined(WIN64)
2668 { rm_shared_segments();
2671 if (search_terminated
!= NULL
)
2672 { if (*search_terminated
!= 0)
2674 { printf("cpu%d: termination initiated (%d)\n",
2675 core_id
, *search_terminated
);
2679 { printf("cpu%d: initiated termination\n", core_id
);
2681 *search_terminated
|= 8; /* sudden_stop */
2684 { if (((*search_terminated
) & 4) /* uerror in one of the cpus */
2685 && !((*search_terminated
) & (8|32|128|256))) /* abnormal stop */
2686 { if (errors
== 0) errors
++; /* we know there is at least 1 */
2688 wrapup(); /* incomplete stats, but at least something */
2691 } /* else: should rarely happen, take more drastic measures */
2693 if (core_id
== 0) /* local root process */
2694 { for (i
= 1; i
< NCORE
; i
++) /* not for 0 of course */
2696 #if defined(WIN32) || defined(WIN64)
2697 DWORD dwExitCode
= 0;
2698 GetExitCodeProcess(worker_handles
[i
], &dwExitCode
);
2699 if (dwExitCode
== STILL_ACTIVE
)
2700 { TerminateProcess(worker_handles
[i
], 0);
2702 printf("cpu0: terminate %d %d\n",
2703 worker_pids
[i
], (dwExitCode
== STILL_ACTIVE
));
2705 sprintf(b
, "kill -%d %d", SIGKILL
, worker_pids
[i
]);
2706 system(b
); /* if this is a proxy: receive half */
2707 printf("cpu0: %s\n", b
);
2712 { /* on WIN32/WIN64 -- these merely kills the root process... */
2713 if (was_interrupted
== 0)
2714 { sprintf(b
, "kill -%d %d", SIGINT
, worker_pids
[0]);
2715 system(b
); /* warn the root process */
2716 printf("cpu%d: %s\n", core_id
, b
);
2721 #define iam_alive() is_alive[core_id]++
2723 extern int crash_test(double);
2724 extern void crash_reset(void);
2727 someone_crashed(int wait_type
)
2728 { static double last_value
= 0.0;
2729 static int count
= 0;
2731 if (search_terminated
== NULL
2732 || *search_terminated
!= 0)
2734 if (!(*search_terminated
& (8|32|128|256)))
2735 { if (count
++ < 100*NCORE
)
2740 /* check left neighbor only */
2741 if (last_value
== is_alive
[(core_id
+ NCORE
- 1) % NCORE
])
2742 { if (count
++ >= 100) /* to avoid unnecessary checks */
2747 last_value
= is_alive
[(core_id
+ NCORE
- 1) % NCORE
];
2756 enter_critical(GLOBAL_LOCK
);
2760 printf("cpu%d: locks: global %g\tother %g\t",
2761 core_id
, glock_wait
[0], lock_wait
- glock_wait
[0]);
2763 printf("cpu%d: locks: GL %g, RQ %g, WQ %g, HT %g\t",
2764 core_id
, glock_wait
[0], glock_wait
[1], glock_wait
[2],
2765 lock_wait
- glock_wait
[0] - glock_wait
[1] - glock_wait
[2]);
2767 printf("waits: states %g slots %g\n", frame_wait
, free_wait
);
2769 printf("cpu%d: gq [tries %g, room %g, noroom %g]\n", core_id
, gq_tries
, gq_hasroom
, gq_hasnoroom
);
2770 if (core_id
== 0 && (*gr_readmiss
>= 1.0 || *gr_readmiss
>= 1.0 || *grcnt
!= 0))
2771 printf("cpu0: gq [readmiss: %g, writemiss: %g cnt %d]\n", *gr_readmiss
, *gr_writemiss
, *grcnt
);
2774 if (free_wait
> 1000000.)
2777 { printf("hint: this search may be faster with a larger work-queue\n");
2778 printf(" (-DSET_WQ_SIZE=N with N>%g), and/or with -DUSE_DISK\n",
2779 GWQ_SIZE
/sizeof(SM_frame
));
2780 printf(" or with a larger value for -zN (N>%d)\n", z_handoff
);
2782 { printf("hint: this search may be faster if compiled without -DNGQ, with -DUSE_DISK, ");
2783 printf("or with a larger -zN (N>%d)\n", z_handoff
);
2786 leave_critical(GLOBAL_LOCK
);
2789 #ifndef MAX_DSK_FILE
2790 #define MAX_DSK_FILE 1000000 /* default is max 1M states per file */
2794 multi_usage(FILE *fd
)
2795 { static int warned
= 0;
2796 if (warned
> 0) { return; } else { warned
++; }
2798 fprintf(fd
, "Defining multi-core mode:\n\n");
2799 fprintf(fd
, " -DDUAL_CORE --> same as -DNCORE=2\n");
2800 fprintf(fd
, " -DQUAD_CORE --> same as -DNCORE=4\n");
2801 fprintf(fd
, " -DNCORE=N --> enables multi_core verification if N>1\n");
2803 fprintf(fd
, "Additional directives supported in multi-core mode:\n\n");
2804 fprintf(fd
, " -DSEP_STATE --> forces separate statespaces instead of a single shared state space\n");
2805 fprintf(fd
, " -DNUSE_DISK --> use disk for storing states when a work queue overflows\n");
2806 fprintf(fd
, " -DMAX_DSK_FILE --> max nr of states per diskfile (%d)\n", MAX_DSK_FILE
);
2807 fprintf(fd
, " -DFULL_TRAIL --> support full error trails (increases memory use)\n");
2809 fprintf(fd
, "More advanced use (should rarely need changing):\n\n");
2810 fprintf(fd
, " To change the nr of states that can be stored in the global queue\n");
2811 fprintf(fd
, " (lower numbers allow for more states to be stored, prefer multiples of 8):\n");
2812 fprintf(fd
, " -DVMAX=N --> upperbound on statevector for handoffs (N=%d)\n", VMAX
);
2813 fprintf(fd
, " -DPMAX=N --> upperbound on nr of procs (default: N=%d)\n", PMAX
);
2814 fprintf(fd
, " -DQMAX=N --> upperbound on nr of channels (default: N=%d)\n", QMAX
);
2816 fprintf(fd
, " To set the total amount of memory reserved for the global workqueue:\n");
2817 fprintf(fd
, " -DSET_WQ_SIZE=N --> default: N=128 (defined in MBytes)\n\n");
2818 fprintf(fd
, " To force the use of a single global heap, instead of separate heaps:\n");
2819 fprintf(fd
, " -DGLOB_HEAP\n");
2821 fprintf(fd
, " To define a fct to initialize data before spawning processes (use quotes):\n");
2822 fprintf(fd
, " \"-DC_INIT=fct()\"\n");
2824 fprintf(fd
, " Timer settings for termination and crash detection:\n");
2825 fprintf(fd
, " -DSHORT_T=N --> timeout for termination detection trigger (N=%g)\n", (double) SHORT_T
);
2826 fprintf(fd
, " -DLONG_T=N --> timeout for giving up on termination detection (N=%g)\n", (double) LONG_T
);
2827 fprintf(fd
, " -DONESECOND --> (1<<29) --> timeout waiting for a free slot -- to check for crash\n");
2828 fprintf(fd
, " -DT_ALERT --> collect stats on crash alert timeouts\n\n");
2829 fprintf(fd
, "Help with Linux/Windows/Cygwin configuration for multi-core:\n");
2830 fprintf(fd
, " http://spinroot.com/spin/multicore/V5_Readme.html\n");
2833 #if NCORE>1 && defined(FULL_TRAIL)
2834 typedef struct Stack_Tree
{
2835 uchar pr
; /* process that made transition */
2836 T_ID t_id
; /* id of transition */
2837 volatile struct Stack_Tree
*prv
; /* backward link towards root */
2840 struct H_el
*grab_shared(int);
2841 volatile Stack_Tree
**stack_last
; /* in shared memory */
2842 char *stack_cache
= NULL
; /* local */
2843 int nr_cached
= 0; /* local */
2846 #define CACHE_NR 1024
2849 volatile Stack_Tree
*
2850 stack_prefetch(void)
2851 { volatile Stack_Tree
*st
;
2854 { stack_cache
= (char *) grab_shared(CACHE_NR
* sizeof(Stack_Tree
));
2855 nr_cached
= CACHE_NR
;
2857 st
= (volatile Stack_Tree
*) stack_cache
;
2858 stack_cache
+= sizeof(Stack_Tree
);
2864 Push_Stack_Tree(short II
, T_ID t_id
)
2865 { volatile Stack_Tree
*st
;
2867 st
= (volatile Stack_Tree
*) stack_prefetch();
2870 st
->prv
= (Stack_Tree
*) stack_last
[core_id
];
2871 stack_last
[core_id
] = st
;
2875 Pop_Stack_Tree(void)
2876 { volatile Stack_Tree
*cf
= stack_last
[core_id
];
2879 { stack_last
[core_id
] = cf
->prv
;
2880 } else if (nr_handoffs
* z_handoff
+ depth
> 0)
2881 { printf("cpu%d: error pop_stack_tree (depth %d)\n",
2888 e_critical(int which
)
2891 if (readtrail
|| iamin
[which
] > 0)
2892 { if (!readtrail
&& verbose
)
2893 { printf("cpu%d: Double Lock on %d (now %d)\n",
2894 core_id
, which
, iamin
[which
]+1);
2897 iamin
[which
]++; /* local variable */
2901 cnt_start
= lock_wait
;
2903 while (sh_lock
!= NULL
) /* as long as we have shared memory */
2904 { int r
= tas(&sh_lock
[which
]);
2907 return; /* locked */
2912 if (which
< 3) { glock_wait
[which
]++; }
2914 if (which
== 0) { glock_wait
[which
]++; }
2918 if (lock_wait
- cnt_start
> TenSeconds
)
2919 { printf("cpu%d: lock timeout on %d\n", core_id
, which
);
2920 cnt_start
= lock_wait
;
2921 if (someone_crashed(1))
2922 { sudden_stop("lock timeout");
2928 x_critical(int which
)
2930 if (iamin
[which
] != 1)
2931 { if (iamin
[which
] > 1)
2932 { iamin
[which
]--; /* this is thread-local - no races on this one */
2933 if (!readtrail
&& verbose
)
2934 { printf("cpu%d: Partial Unlock on %d (%d more needed)\n",
2935 core_id
, which
, iamin
[which
]);
2939 } else /* iamin[which] <= 0 */
2941 { printf("cpu%d: Invalid Unlock iamin[%d] = %d\n",
2942 core_id
, which
, iamin
[which
]);
2948 if (sh_lock
!= NULL
)
2950 sh_lock
[which
] = 0; /* unlock */
2955 #if defined(WIN32) || defined(WIN64)
2956 start_proxy(char *s
, DWORD r_pid
)
2958 start_proxy(char *s
, int r_pid
)
2960 { char Q_arg
[16], Z_arg
[16], Y_arg
[16];
2961 char *args
[32], *ptr
;
2964 sprintf(Q_arg
, "-Q%d", getpid());
2965 sprintf(Y_arg
, "-Y%d", r_pid
);
2966 sprintf(Z_arg
, "-Z%d", proxy_pid
/* core_id */);
2968 args
[argcnt
++] = "proxy";
2969 args
[argcnt
++] = s
; /* -r or -s */
2970 args
[argcnt
++] = Q_arg
;
2971 args
[argcnt
++] = Z_arg
;
2972 args
[argcnt
++] = Y_arg
;
2974 if (strlen(o_cmdline
) > 0)
2975 { ptr
= o_cmdline
; /* assume args separated by spaces */
2976 do { args
[argcnt
++] = ptr
++;
2977 if ((ptr
= strchr(ptr
, ' ')) != NULL
)
2978 { while (*ptr
== ' ')
2984 } while (argcnt
< 31);
2986 args
[argcnt
] = NULL
;
2987 #if defined(WIN32) || defined(WIN64)
2988 execvp("pan_proxy", args
); /* no return */
2990 execvp("./pan_proxy", args
); /* no return */
2992 Uerror("pan_proxy exec failed");
2994 /*** end of common code fragment ***/
2996 #if !defined(WIN32) && !defined(WIN64)
2998 init_shm(void) /* initialize shared work-queues - linux/cygwin */
3003 if (core_id
== 0 && verbose
)
3004 { printf("cpu0: step 3: allocate shared workqueues %g MB\n",
3005 ((double) NCORE
* LWQ_SIZE
+ GWQ_SIZE
) / (1048576.) );
3007 for (m
= 0; m
< NR_QS
; m
++) /* last q is the global q */
3008 { double qsize
= (m
== NCORE
) ? GWQ_SIZE
: LWQ_SIZE
;
3009 key
[m
] = ftok(PanSource
, m
+1);
3011 { perror("ftok shared queues"); must_exit
= 1; break;
3014 if (core_id
== 0) /* root creates */
3015 { /* check for stale copy */
3016 shmid
[m
] = shmget(key
[m
], (size_t) qsize
, 0600);
3017 if (shmid
[m
] != -1) /* yes there is one; remove it */
3018 { printf("cpu0: removing stale q%d, status: %d\n",
3019 m
, shmctl(shmid
[m
], IPC_RMID
, NULL
));
3021 shmid
[m
] = shmget(key
[m
], (size_t) qsize
, 0600|IPC_CREAT
|IPC_EXCL
);
3023 } else /* workers attach */
3024 { shmid
[m
] = shmget(key
[m
], (size_t) qsize
, 0600);
3025 /* never called, since we create shm *before* we fork */
3028 { perror("shmget shared queues"); must_exit
= 1; break;
3031 shared_mem
[m
] = (char *) shmat(shmid
[m
], (void *) 0, 0); /* attach */
3032 if (shared_mem
[m
] == (char *) -1)
3033 { fprintf(stderr
, "error: cannot attach shared wq %d (%d Mb)\n",
3034 m
+1, (int) (qsize
/(1048576.)));
3035 perror("shmat shared queues"); must_exit
= 1; break;
3038 m_workq
[m
] = (SM_frame
*) shared_mem
[m
];
3040 { int nframes
= (m
== NCORE
) ? GN_FRAMES
: LN_FRAMES
;
3041 for (n
= 0; n
< nframes
; n
++)
3042 { m_workq
[m
][n
].m_vsize
= 0;
3043 m_workq
[m
][n
].m_boq
= 0;
3047 { rm_shared_segments();
3048 fprintf(stderr
, "pan: check './pan --' for usage details\n");
3049 pan_exit(1); /* calls cleanup_shm */
3054 prep_shmid_S(size_t n
) /* either sets SS or H_tab, linux/cygwin */
3059 if (verbose
&& core_id
== 0)
3062 printf("cpu0: step 1: allocate shared bitstate %g Mb\n",
3063 (double) n
/ (1048576.));
3065 printf("cpu0: step 1: allocate shared hastable %g Mb\n",
3066 (double) n
/ (1048576.));
3070 if (memcnt
+ (double) n
> memlim
)
3071 { printf("cpu0: S %8g + %d Kb exceeds memory limit of %8g Mb\n",
3072 memcnt
/1024., n
/1024, memlim
/(1048576.));
3073 printf("cpu0: insufficient memory -- aborting\n");
3078 key
= ftok(PanSource
, NCORE
+2); /* different from queues */
3080 { perror("ftok shared bitstate or hashtable");
3081 fprintf(stderr
, "pan: check './pan --' for usage details\n");
3085 if (core_id
== 0) /* root */
3086 { shmid_S
= shmget(key
, n
, 0600);
3088 { printf("cpu0: removing stale segment, status: %d\n",
3089 shmctl(shmid_S
, IPC_RMID
, NULL
));
3091 shmid_S
= shmget(key
, n
, 0600 | IPC_CREAT
| IPC_EXCL
);
3092 memcnt
+= (double) n
;
3094 { shmid_S
= shmget(key
, n
, 0600);
3097 { perror("shmget shared bitstate or hashtable too large?");
3098 fprintf(stderr
, "pan: check './pan --' for usage details\n");
3102 rval
= (char *) shmat(shmid_S
, (void *) 0, 0); /* attach */
3103 if ((char *) rval
== (char *) -1)
3104 { perror("shmat shared bitstate or hashtable");
3105 fprintf(stderr
, "pan: check './pan --' for usage details\n");
3109 rval
= (char *) emalloc(n
);
3111 return (uchar
*) rval
;
3117 static char shm_prep_result
;
3120 prep_state_mem(size_t n
) /* sets memory arena for states linux/cygwin */
3123 static int cnt
= 3; /* start larger than earlier ftok calls */
3125 shm_prep_result
= NOT_AGAIN
; /* default */
3126 if (verbose
&& core_id
== 0)
3127 { printf("cpu0: step 2+: pre-allocate memory arena %d of %6.2g Mb\n",
3128 cnt
-3, (double) n
/ (1048576.));
3131 if (memcnt
+ (double) n
> memlim
)
3132 { printf("cpu0: error: M %.0f + %.0f Kb exceeds memory limit of %.0f Mb\n",
3133 memcnt
/1024.0, (double) n
/1024.0, memlim
/(1048576.));
3138 key
= ftok(PanSource
, NCORE
+cnt
); cnt
++;
3141 printf("pan: check './pan --' for usage details\n");
3146 { shmid_M
= shmget(key
, n
, 0600);
3148 { printf("cpu0: removing stale memory segment %d, status: %d\n",
3149 cnt
-3, shmctl(shmid_M
, IPC_RMID
, NULL
));
3151 shmid_M
= shmget(key
, n
, 0600 | IPC_CREAT
| IPC_EXCL
);
3152 /* memcnt += (double) n; -- only amount actually used is counted */
3154 { shmid_M
= shmget(key
, n
, 0600);
3159 { printf("error: failed to get pool of shared memory %d of %.0f Mb\n",
3160 cnt
-3, ((double)n
)/(1048576.));
3161 perror("state mem");
3162 printf("pan: check './pan --' for usage details\n");
3164 shm_prep_result
= TRY_AGAIN
;
3167 rval
= (char *) shmat(shmid_M
, (void *) 0, 0); /* attach */
3169 if ((char *) rval
== (char *) -1)
3170 { printf("cpu%d error: failed to attach pool of shared memory %d of %.0f Mb\n",
3171 core_id
, cnt
-3, ((double)n
)/(1048576.));
3172 perror("state mem");
3175 return (uchar
*) rval
;
3179 init_HT(unsigned long n
) /* cygwin/linux version */
3183 volatile char *dc_mem_start
;
3184 double need_mem
, got_mem
= 0.;
3190 { printf("cpu0: steps 0,1: no -DMEMLIM set\n");
3194 { printf("cpu0: steps 0,1: -DMEMLIM=%d Mb - (hashtable %g Mb + workqueues %g Mb)\n",
3195 MEMLIM
, ((double)n
/(1048576.)), (((double) NCORE
* LWQ_SIZE
) + GWQ_SIZE
) /(1048576.) );
3198 get_mem
= NCORE
* sizeof(double) + (1 + CS_NR
) * sizeof(void *) + 4*sizeof(void *) + 2*sizeof(double);
3199 /* NCORE * is_alive + search_terminated + CS_NR * sh_lock + 6 gr vars */
3200 get_mem
+= 4 * NCORE
* sizeof(void *); /* prfree, prfull, prcnt, prmax */
3202 get_mem
+= (NCORE
) * sizeof(Stack_Tree
*); /* NCORE * stack_last */
3204 x
= (volatile char *) prep_state_mem((size_t) get_mem
); /* work queues and basic structs */
3207 { printf("cpu0: could not allocate shared memory, see ./pan --\n");
3210 search_terminated
= (volatile unsigned int *) x
; /* comes first */
3211 x
+= sizeof(void *); /* maintain alignment */
3213 is_alive
= (volatile double *) x
;
3214 x
+= NCORE
* sizeof(double);
3216 sh_lock
= (volatile int *) x
;
3217 x
+= CS_NR
* sizeof(void *);
3219 grfree
= (volatile int *) x
;
3220 x
+= sizeof(void *);
3221 grfull
= (volatile int *) x
;
3222 x
+= sizeof(void *);
3223 grcnt
= (volatile int *) x
;
3224 x
+= sizeof(void *);
3225 grmax
= (volatile int *) x
;
3226 x
+= sizeof(void *);
3227 prfree
= (volatile int *) x
;
3228 x
+= NCORE
* sizeof(void *);
3229 prfull
= (volatile int *) x
;
3230 x
+= NCORE
* sizeof(void *);
3231 prcnt
= (volatile int *) x
;
3232 x
+= NCORE
* sizeof(void *);
3233 prmax
= (volatile int *) x
;
3234 x
+= NCORE
* sizeof(void *);
3235 gr_readmiss
= (volatile double *) x
;
3236 x
+= sizeof(double);
3237 gr_writemiss
= (volatile double *) x
;
3238 x
+= sizeof(double);
3241 stack_last
= (volatile Stack_Tree
**) x
;
3242 x
+= NCORE
* sizeof(Stack_Tree
*);
3246 H_tab
= (struct H_el
**) emalloc(n
);
3250 #warning MEMLIM not set
3251 #define MEMLIM (2048)
3254 if (core_id
== 0 && verbose
)
3255 { printf("cpu0: step 0: -DMEMLIM=%d Mb minus hashtable+workqs (%g + %g Mb) leaves %g Mb\n",
3256 MEMLIM
, ((double)n
/(1048576.)), (NCORE
* LWQ_SIZE
+ GWQ_SIZE
)/(1048576.),
3257 (memlim
- memcnt
- (double) n
- (NCORE
* LWQ_SIZE
+ GWQ_SIZE
))/(1048576.));
3260 H_tab
= (struct H_el
**) prep_shmid_S((size_t) n
); /* hash_table */
3262 need_mem
= memlim
- memcnt
- ((double) NCORE
* LWQ_SIZE
) - GWQ_SIZE
;
3264 { Uerror("internal error -- shared state memory");
3267 if (core_id
== 0 && verbose
)
3268 { printf("cpu0: step 2: pre-allocate shared state memory %g Mb\n",
3269 need_mem
/(1048576.));
3272 SEG_SIZE
= need_mem
/ NCORE
;
3273 if (verbose
&& core_id
== 0)
3274 { printf("cpu0: setting segsize to %6g MB\n",
3275 SEG_SIZE
/(1048576.));
3277 #if defined(CYGWIN) || defined(__CYGWIN__)
3278 if (SEG_SIZE
> 512.*1024.*1024.)
3279 { printf("warning: reducing SEG_SIZE of %g MB to 512MB (exceeds max for Cygwin)\n",
3280 SEG_SIZE
/(1024.*1024.));
3281 SEG_SIZE
= 512.*1024.*1024.;
3285 mem_reserved
= need_mem
;
3286 while (need_mem
> 1024.)
3287 { get_mem
= need_mem
;
3289 if (get_mem
> (double) SEG_SIZE
)
3290 { get_mem
= (double) SEG_SIZE
;
3292 if (get_mem
<= 0.0) break;
3294 /* for allocating states: */
3295 x
= dc_mem_start
= (volatile char *) prep_state_mem((size_t) get_mem
);
3297 { if (shm_prep_result
== NOT_AGAIN
3298 || first_pool
!= NULL
3299 || SEG_SIZE
< (16. * 1048576.))
3304 { printf("pan: lowered segsize to 0.000000\n", SEG_SIZE
);
3306 if (SEG_SIZE
>= 1024.)
3312 need_mem
-= get_mem
;
3314 if (first_pool
== NULL
)
3315 { search_terminated
= (volatile unsigned int *) x
; /* comes first */
3316 x
+= sizeof(void *); /* maintain alignment */
3318 is_alive
= (volatile double *) x
;
3319 x
+= NCORE
* sizeof(double);
3321 sh_lock
= (volatile int *) x
;
3322 x
+= CS_NR
* sizeof(void *);
3324 grfree
= (volatile int *) x
;
3325 x
+= sizeof(void *);
3326 grfull
= (volatile int *) x
;
3327 x
+= sizeof(void *);
3328 grcnt
= (volatile int *) x
;
3329 x
+= sizeof(void *);
3330 grmax
= (volatile int *) x
;
3331 x
+= sizeof(void *);
3332 prfree
= (volatile int *) x
;
3333 x
+= NCORE
* sizeof(void *);
3334 prfull
= (volatile int *) x
;
3335 x
+= NCORE
* sizeof(void *);
3336 prcnt
= (volatile int *) x
;
3337 x
+= NCORE
* sizeof(void *);
3338 prmax
= (volatile int *) x
;
3339 x
+= NCORE
* sizeof(void *);
3340 gr_readmiss
= (volatile double *) x
;
3341 x
+= sizeof(double);
3342 gr_writemiss
= (volatile double *) x
;
3343 x
+= sizeof(double);
3345 stack_last
= (volatile Stack_Tree
**) x
;
3346 x
+= NCORE
* sizeof(Stack_Tree
*);
3348 if (((long)x
)&(sizeof(void *)-1)) /* 64-bit word alignment */
3349 { x
+= sizeof(void *)-(((long)x
)&(sizeof(void *)-1));
3353 ncomps
= (unsigned long *) x
;
3354 x
+= (256+2) * sizeof(unsigned long);
3358 dc_shared
= (sh_Allocater
*) x
; /* must be in shared memory */
3359 x
+= sizeof(sh_Allocater
);
3361 if (core_id
== 0) /* root only */
3362 { dc_shared
->dc_id
= shmid_M
;
3363 dc_shared
->dc_start
= dc_mem_start
;
3364 dc_shared
->dc_arena
= x
;
3365 dc_shared
->pattern
= 1234567; /* protection */
3366 dc_shared
->dc_size
= (long) get_mem
- (long) (x
- dc_mem_start
);
3367 dc_shared
->nxt
= (long) 0;
3369 if (last_pool
== NULL
)
3370 { first_pool
= last_pool
= dc_shared
;
3372 { last_pool
->nxt
= dc_shared
;
3373 last_pool
= dc_shared
;
3375 } else if (first_pool
== NULL
)
3376 { first_pool
= dc_shared
;
3379 if (need_mem
> 1024.)
3380 { printf("cpu0: could allocate only %g Mb of shared memory (wanted %g more)\n",
3381 got_mem
/(1048576.), need_mem
/(1048576.));
3385 { printf("cpu0: insufficient memory -- aborting.\n");
3388 /* we are still single-threaded at this point, with core_id 0 */
3389 dc_shared
= first_pool
;
3394 /* Test and Set assembly code */
3396 #if defined(i386) || defined(__i386__) || defined(__x86_64__)
3398 tas(volatile int *s
) /* tested */
3400 __asm__
__volatile__(
3408 #elif defined(__arm__)
3410 tas(volatile int *s
) /* not tested */
3412 __asm__
__volatile__(
3413 "swpb %0, %0, [%3] \n"
3419 #elif defined(sparc) || defined(__sparc__)
3421 tas(volatile int *s
) /* not tested */
3423 __asm__
__volatile__(
3424 " ldstub [%2], %0 \n"
3430 #elif defined(ia64) || defined(__ia64__)
3433 tas(volatile int *s
) /* tested */
3435 __asm__
__volatile__(
3436 " xchg4 %0=%1,%2 \n"
3443 #error missing definition of test and set operation for this platform
3447 cleanup_shm(int val
)
3448 { volatile sh_Allocater
*nxt_pool
;
3449 unsigned long cnt
= 0;
3453 { printf("cpu%d: Redundant call to cleanup_shm(%d)\n", core_id
, val
);
3458 if (search_terminated
!= NULL
)
3459 { *search_terminated
|= 16; /* cleanup_shm */
3462 for (m
= 0; m
< NR_QS
; m
++)
3463 { if (shmdt((void *) shared_mem
[m
]) > 0)
3464 { perror("shmdt detaching from shared queues");
3468 if (shmdt((void *) shmid_X
) != 0)
3469 { perror("shmdt detaching from shared state memory");
3473 if (SS
> 0 && shmdt((void *) SS
) != 0)
3475 { perror("shmdt detaching from shared bitstate arena");
3479 { /* before detaching: */
3480 for (nxt_pool
= dc_shared
; nxt_pool
!= NULL
; nxt_pool
= nxt_pool
->nxt
)
3481 { cnt
+= nxt_pool
->dc_size
;
3484 { printf("cpu0: done, %ld Mb of shared state memory left\n",
3485 cnt
/ (long)(1048576));
3488 if (shmdt((void *) H_tab
) != 0)
3489 { perror("shmdt detaching from shared hashtable");
3492 for (last_pool
= first_pool
; last_pool
!= NULL
; last_pool
= nxt_pool
)
3493 { nxt_pool
= last_pool
->nxt
;
3494 if (shmdt((void *) last_pool
->dc_start
) != 0)
3495 { perror("shmdt detaching from shared state memory");
3497 first_pool
= last_pool
= NULL
; /* precaution */
3500 /* detached from shared memory - so cannot use cpu_printf */
3502 { printf("cpu%d: done -- got %d states from queue\n",
3503 core_id
, nstates_get
);
3507 extern void give_up(int);
3508 extern void Read_Queue(int);
3515 #if defined(MA) && !defined(SEP_STATE)
3516 #error MA without SEP_STATE is not supported with multi-core
3519 #error BFS is not supported with multi-core
3522 #error SC is not supported with multi-core
3524 init_shm(); /* we are single threaded when this starts */
3526 if (core_id
== 0 && verbose
)
3527 { printf("cpu0: step 4: calling fork()\n");
3531 /* if NCORE > 1 the child or the parent should fork N-1 more times
3532 * the parent is the only process with core_id == 0 and is_parent > 0
3533 * the workers have is_parent = 0 and core_id = 1..NCORE-1
3536 { worker_pids
[0] = getpid(); /* for completeness */
3537 while (++core_id
< NCORE
) /* first worker sees core_id = 1 */
3538 { is_parent
= fork();
3539 if (is_parent
== -1)
3540 { Uerror("fork failed");
3542 if (is_parent
== 0) /* this is a worker process */
3543 { if (proxy_pid
== core_id
) /* always non-zero */
3544 { start_proxy("-r", 0); /* no return */
3546 goto adapt
; /* root process continues spawning */
3548 worker_pids
[core_id
] = is_parent
;
3550 /* note that core_id is now NCORE */
3551 if (proxy_pid
> 0 && proxy_pid
< NCORE
)
3552 { proxy_pid_snd
= fork();
3553 if (proxy_pid_snd
== -1)
3554 { Uerror("proxy fork failed");
3556 if (proxy_pid_snd
== 0)
3557 { start_proxy("-s", worker_pids
[proxy_pid
]); /* no return */
3558 } } /* else continue */
3560 { core_id
= 0; /* reset core_id for root process */
3563 { static char db0
[16]; /* good for up to 10^6 cores */
3564 static char db1
[16];
3565 adapt
: tprefix
= db0
; sprefix
= db1
;
3566 sprintf(tprefix
, "cpu%d_trail", core_id
);
3567 sprintf(sprefix
, "cpu%d_rst", core_id
);
3568 memcnt
= 0; /* count only additionally allocated memory */
3570 signal(SIGINT
, give_up
);
3572 if (proxy_pid
== 0) /* not in a cluster setup, pan_proxy must attach */
3573 { rm_shared_segments(); /* mark all shared segments for removal on exit */
3576 { cpu_printf("starting core_id %d -- pid %d\n", core_id
, getpid());
3578 #if defined(SEP_HEAP) && !defined(SEP_STATE)
3580 volatile sh_Allocater
*ptr
;
3582 for (i
= 0; i
< NCORE
&& ptr
!= NULL
; i
++)
3584 { my_heap
= (char *) ptr
->dc_arena
;
3585 my_size
= (long) ptr
->dc_size
;
3587 cpu_printf("local heap %ld MB\n", my_size
/(1048576));
3590 ptr
= ptr
->nxt
; /* local */
3592 if (my_heap
== NULL
)
3593 { printf("cpu%d: no local heap\n", core_id
);
3596 #if defined(CYGWIN) || defined(__CYGWIN__)
3598 for (i
= 0; i
< NCORE
&& ptr
!= NULL
; i
++)
3599 { ptr
= ptr
->nxt
; /* local */
3601 dc_shared
= ptr
; /* any remainder */
3603 dc_shared
= NULL
; /* used all mem for local heaps */
3607 if (core_id
== 0 && !remote_party
)
3608 { new_state(); /* cpu0 explores root */
3610 cpu_printf("done with 1st dfs, nstates %g (put %d states), read q\n",
3611 nstates
, nstates_put
);
3614 Read_Queue(core_id
); /* all cores */
3617 { cpu_printf("put %6d states into queue -- got %6d\n",
3618 nstates_put
, nstates_get
);
3621 { rm_shared_segments();
3629 int unpack_state(SM_frame
*, int);
3636 char *rval
= (char *) 0;
3639 { printf("cpu%d: grab shared zero\n", core_id
); fflush(stdout
);
3640 return (struct H_el
*) rval
;
3641 } else if (n
&(sizeof(void *)-1))
3642 { n
+= sizeof(void *)-(n
&(sizeof(void *)-1)); /* alignment */
3647 if (my_heap
!= NULL
&& my_size
> n
)
3656 { sudden_stop("pan: out of memory");
3659 /* another lock is always already in effect when this is called */
3660 /* but not always the same lock -- i.e., on different parts of the hashtable */
3661 enter_critical(GLOBAL_LOCK
); /* this must be independently mutex */
3662 #if defined(SEP_HEAP) && !defined(WIN32) && !defined(WIN64)
3663 { static int noted
= 0;
3666 printf("cpu%d: global heap has %ld bytes left, needed %d\n",
3667 core_id
, dc_shared
?dc_shared
->dc_size
:0, n
);
3671 if (dc_shared
->pattern
!= 1234567)
3672 { leave_critical(GLOBAL_LOCK
);
3673 Uerror("overrun -- memory corruption");
3676 if (dc_shared
->dc_size
< n
)
3678 { printf("Next Pool %g Mb + %d\n", memcnt
/(1048576.), n
);
3680 if (dc_shared
->nxt
== NULL
3681 || dc_shared
->nxt
->dc_arena
== NULL
3682 || dc_shared
->nxt
->dc_size
< n
)
3683 { printf("cpu%d: memcnt %g Mb + wanted %d bytes more\n",
3684 core_id
, memcnt
/ (1048576.), n
);
3685 leave_critical(GLOBAL_LOCK
);
3686 sudden_stop("out of memory -- aborting");
3687 wrapup(); /* exits */
3689 { dc_shared
= (sh_Allocater
*) dc_shared
->nxt
;
3692 rval
= (char *) dc_shared
->dc_arena
;
3693 dc_shared
->dc_arena
+= n
;
3694 dc_shared
->dc_size
-= (long) n
;
3697 printf("cpu%d grab shared (%d bytes) -- %ld left\n",
3698 core_id
, n
, dc_shared
->dc_size
);
3700 leave_critical(GLOBAL_LOCK
);
3703 memcnt
+= (double) n
;
3705 return (struct H_el
*) rval
;
3707 return (struct H_el
*) emalloc(n
);
3712 Get_Full_Frame(int n
)
3714 double cnt_start
= frame_wait
;
3716 f
= &m_workq
[n
][prfull
[n
]];
3717 while (f
->m_vsize
== 0) /* await full slot LOCK : full frame */
3721 if (!a_cycles
|| core_id
!= 0)
3723 if (*grcnt
> 0) /* accessed outside lock, but safe even if wrong */
3724 { enter_critical(GQ_RD
); /* gq - read access */
3725 if (*grcnt
> 0) /* could have changed */
3726 { f
= &m_workq
[NCORE
][*grfull
]; /* global q */
3727 if (f
->m_vsize
== 0)
3728 { /* writer is still filling the slot */
3730 f
= &m_workq
[n
][prfull
[n
]]; /* reset */
3732 { *grfull
= (*grfull
+1) % (GN_FRAMES
);
3733 enter_critical(GQ_WR
);
3734 *grcnt
= *grcnt
- 1;
3735 leave_critical(GQ_WR
);
3736 leave_critical(GQ_RD
);
3739 leave_critical(GQ_RD
);
3742 if (frame_wait
++ - cnt_start
> Delay
)
3744 { cpu_printf("timeout on q%d -- %u -- query %d\n",
3745 n
, f
, query_in_progress
);
3747 return (SM_frame
*) 0; /* timeout */
3750 if (VVERBOSE
) cpu_printf("got frame from q%d\n", n
);
3751 prfull
[n
] = (prfull
[n
] + 1) % (LN_FRAMES
);
3752 enter_critical(QLOCK(n
));
3753 prcnt
[n
]--; /* lock out increments */
3754 leave_critical(QLOCK(n
));
3759 Get_Free_Frame(int n
)
3761 double cnt_start
= free_wait
;
3763 if (VVERBOSE
) { cpu_printf("get free frame from q%d\n", n
); }
3765 if (n
== NCORE
) /* global q */
3766 { f
= &(m_workq
[n
][lrfree
]);
3768 { f
= &(m_workq
[n
][prfree
[n
]]);
3770 while (f
->m_vsize
!= 0) /* await free slot LOCK : free slot */
3772 if (free_wait
++ - cnt_start
> OneSecond
)
3774 { cpu_printf("timeout waiting for free slot q%d\n", n
);
3776 cnt_start
= free_wait
;
3777 if (someone_crashed(1))
3778 { printf("cpu%d: search terminated\n", core_id
);
3779 sudden_stop("get free frame");
3783 { prfree
[n
] = (prfree
[n
] + 1) % (LN_FRAMES
);
3784 enter_critical(QLOCK(n
));
3785 prcnt
[n
]++; /* lock out decrements */
3786 if (prmax
[n
] < prcnt
[n
])
3787 { prmax
[n
] = prcnt
[n
];
3789 leave_critical(QLOCK(n
));
3795 GlobalQ_HasRoom(void)
3799 if (*grcnt
< GN_FRAMES
) /* there seems to be room */
3800 { enter_critical(GQ_WR
); /* gq write access */
3801 if (*grcnt
< GN_FRAMES
)
3802 { if (m_workq
[NCORE
][*grfree
].m_vsize
!= 0)
3803 { /* can happen if reader is slow emptying slot */
3805 goto out
; /* dont wait: release lock and return */
3807 lrfree
= *grfree
; /* Get_Free_Frame use lrfree in this mode */
3808 *grfree
= (*grfree
+ 1) % GN_FRAMES
;
3809 *grcnt
= *grcnt
+ 1; /* count nr of slots filled -- no additional lock needed */
3810 if (*grmax
< *grcnt
) *grmax
= *grcnt
;
3811 leave_critical(GQ_WR
); /* for short lock duration */
3813 mem_put(NCORE
); /* copy state into reserved slot */
3814 rval
= 1; /* successfull handoff */
3817 out
: leave_critical(GQ_WR
);
3824 unpack_state(SM_frame
*f
, int from_q
)
3826 static struct H_el D_State
;
3831 { cpu_printf("saw control %d, expected state\n", boq
);
3836 memcpy((uchar
*) &now
, (uchar
*) f
->m_now
, vsize
);
3837 for (i
= j
= 0; i
< VMAX
; i
++, j
= (j
+1)%8)
3838 { Mask
[i
] = (f
->m_Mask
[i
/8] & (1<<j
)) ? 1 : 0;
3841 { memcpy((uchar
*) proc_offset
, (uchar
*) f
->m_p_offset
, now
._nr_pr
* sizeof(OFFT
));
3842 memcpy((uchar
*) proc_skip
, (uchar
*) f
->m_p_skip
, now
._nr_pr
* sizeof(uchar
));
3845 { memcpy((uchar
*) q_offset
, (uchar
*) f
->m_q_offset
, now
._nr_qs
* sizeof(OFFT
));
3846 memcpy((uchar
*) q_skip
, (uchar
*) f
->m_q_skip
, now
._nr_qs
* sizeof(uchar
));
3849 if (vsize
!= now
._vsz
)
3850 { cpu_printf("vsize %d != now._vsz %d (type %d) %d\n",
3851 vsize
, now
._vsz
, f
->m_boq
, f
->m_vsize
);
3853 goto correct
; /* rare event: a race */
3856 hmax
= max(hmax
, vsize
);
3859 { memcpy((uchar
*) &cur_Root
, (uchar
*) f
, sizeof(SM_frame
));
3862 if (((now
._a_t
) & 1) == 1) /* i.e., when starting nested DFS */
3863 { A_depth
= depthfound
= 0;
3864 memcpy((uchar
*)&A_Root
, (uchar
*)&now
, vsize
);
3866 nr_handoffs
= f
->nr_handoffs
;
3868 { cpu_printf("pan: state empty\n");
3873 trpt
->tau
= f
->m_tau
;
3874 trpt
->o_pm
= f
->m_o_pm
;
3876 (trpt
-1)->ostate
= &D_State
; /* stub */
3877 trpt
->ostate
= &D_State
;
3881 { stack_last
[core_id
] = (Stack_Tree
*) f
->m_stack
;
3883 #if defined(VERBOSE)
3884 if (stack_last
[core_id
])
3885 { cpu_printf("%d: UNPACK -- SET m_stack %u (%d,%d)\n",
3886 depth
, stack_last
[core_id
], stack_last
[core_id
]->pr
,
3887 stack_last
[core_id
]->t_id
);
3893 { static Trans D_Trans
;
3894 trpt
->o_t
= &D_Trans
;
3898 if ((trpt
->tau
& 4) != 4)
3899 { trpt
->tau
|= 4; /* the claim moves first */
3900 cpu_printf("warning: trpt was not up to date\n");
3904 for (i
= 0; i
< (int) now
._nr_pr
; i
++)
3905 { P0
*ptr
= (P0
*) pptr(i
);
3907 if (accpstate
[ptr
->_t
][ptr
->_p
])
3911 if (progstate
[ptr
->_t
][ptr
->_p
])
3919 if (accpstate
[EVENT_TRACE
][now
._event
])
3923 if (progstate
[EVENT_TRACE
][now
._event
])
3929 #if defined(C_States) && (HAS_TRACK==1)
3930 /* restore state of tracked C objects */
3931 c_revert((uchar
*) &(now
.c_state
[0]));
3933 c_unstack((uchar
*) f
->m_c_stack
); /* unmatched tracked data */
3940 write_root(void) /* for trail file */
3943 if (iterative
== 0 && Nr_Trails
> 1)
3944 sprintf(fnm
, "%s%d.%s", TrailFile
, Nr_Trails
-1, sprefix
);
3946 sprintf(fnm
, "%s.%s", TrailFile
, sprefix
);
3948 if (cur_Root
.m_vsize
== 0)
3949 { (void) unlink(fnm
); /* remove possible old copy */
3950 return; /* its the default initial state */
3953 if ((fd
= creat(fnm
, TMODE
)) < 0)
3955 if ((q
= strchr(TrailFile
, '.')))
3956 { *q
= '\0'; /* strip .pml */
3957 if (iterative
== 0 && Nr_Trails
-1 > 0)
3958 sprintf(fnm
, "%s%d.%s", TrailFile
, Nr_Trails
-1, sprefix
);
3960 sprintf(fnm
, "%s.%s", TrailFile
, sprefix
);
3962 fd
= creat(fnm
, TMODE
);
3965 { cpu_printf("pan: cannot create %s\n", fnm
);
3970 if (write(fd
, &cur_Root
, sizeof(SM_frame
)) != sizeof(SM_frame
))
3971 { cpu_printf("pan: error writing %s\n", fnm
);
3973 { cpu_printf("pan: wrote %s\n", fnm
);
3984 char *ssuffix
= "rst";
3987 strcpy(MyFile
, TrailFile
);
3990 { sprintf(fnm
, "%s%d.%s", MyFile
, whichtrail
, ssuffix
);
3991 fd
= open(fnm
, O_RDONLY
, 0);
3992 if (fd
< 0 && (q
= strchr(MyFile
, '.')))
3993 { *q
= '\0'; /* strip .pml */
3994 sprintf(fnm
, "%s%d.%s", MyFile
, whichtrail
, ssuffix
);
3996 fd
= open(fnm
, O_RDONLY
, 0);
3999 { sprintf(fnm
, "%s.%s", MyFile
, ssuffix
);
4000 fd
= open(fnm
, O_RDONLY
, 0);
4001 if (fd
< 0 && (q
= strchr(MyFile
, '.')))
4002 { *q
= '\0'; /* strip .pml */
4003 sprintf(fnm
, "%s.%s", MyFile
, ssuffix
);
4005 fd
= open(fnm
, O_RDONLY
, 0);
4009 { if (try_core
< NCORE
)
4010 { ssuffix
= MySuffix
;
4011 sprintf(ssuffix
, "cpu%d_rst", try_core
++);
4014 cpu_printf("no file '%s.rst' or '%s' (not an error)\n", MyFile
, fnm
);
4016 { if (read(fd
, &cur_Root
, sizeof(SM_frame
)) != sizeof(SM_frame
))
4017 { cpu_printf("read error %s\n", fnm
);
4022 (void) unpack_state(&cur_Root
, -2);
4024 cpu_printf("partial trail -- last few steps only\n");
4026 cpu_printf("restored root from '%s'\n", fnm
);
4027 printf("=====State:=====\n");
4029 for (i
= 0; i
< now
._nr_pr
; i
++)
4030 { z
= (P0
*)pptr(i
);
4031 printf("proc %2d (%s) ", i
, procname
[z
->_t
]);
4032 for (j
= 0; src_all
[j
].src
; j
++)
4033 if (src_all
[j
].tp
== (int) z
->_t
)
4034 { printf(" line %3d \"%s\" ",
4035 src_all
[j
].src
[z
->_p
], PanSource
);
4038 printf("(state %d)\n", z
->_p
);
4043 printf("================\n");
4048 unsigned long dsk_written
, dsk_drained
;
4049 void mem_drain(void);
4053 m_clear_frame(SM_frame
*f
)
4054 { int i
, clr_sz
= sizeof(SM_results
);
4056 for (i
= 0; i
<= _NP_
; i
++) /* all proctypes */
4057 { clr_sz
+= NrStates
[i
]*sizeof(uchar
);
4059 memset(f
, 0, clr_sz
);
4060 /* caution if sizeof(SM_results) > sizeof(SM_frame) */
4063 #define TargetQ_Full(n) (m_workq[n][prfree[n]].m_vsize != 0)
4064 #define TargetQ_NotFull(n) (m_workq[n][prfree[n]].m_vsize == 0)
4067 AllQueuesEmpty(void)
4074 for (q
= 0; q
< NCORE
; q
++)
4075 { if (prcnt
[q
] != 0)
4084 int remember
, target_q
;
4086 double patience
= 0.0;
4088 target_q
= (q
+ 1) % NCORE
;
4091 { f
= Get_Full_Frame(q
);
4092 if (!f
) /* 1 second timeout -- and trigger for Query */
4093 { if (someone_crashed(2))
4094 { printf("cpu%d: search terminated [code %d]\n",
4095 core_id
, search_terminated
?*search_terminated
:-1);
4100 /* to profile with cc -pg and gprof pan.exe -- set handoff depth beyond maxdepth */
4104 if (core_id
== 0 /* root can initiate termination */
4105 && remote_party
== 0 /* and only the original root */
4106 && query_in_progress
== 0 /* unless its already in progress */
4107 && AllQueuesEmpty())
4108 { f
= Get_Free_Frame(target_q
);
4109 query_in_progress
= 1; /* only root process can do this */
4110 if (!f
) { Uerror("Fatal1: no free slot"); }
4111 f
->m_boq
= QUERY
; /* initiate Query */
4113 { cpu_printf("snd QUERY to q%d (%d) into slot %d\n",
4114 target_q
, nstates_get
+ 1, prfree
[target_q
]-1);
4116 f
->m_vsize
= remember
+ 1;
4117 /* number will not change unless we receive more states */
4118 } else if (patience
++ > OneHour
) /* one hour watchdog timer */
4119 { cpu_printf("timeout -- giving up\n");
4120 sudden_stop("queue timeout");
4123 if (0) cpu_printf("timed out -- try again\n");
4126 patience
= 0.0; /* reset watchdog */
4128 if (f
->m_boq
== QUERY
)
4130 { cpu_printf("got QUERY on q%d (%d <> %d) from slot %d\n",
4131 q
, f
->m_vsize
, nstates_put
+ 1, prfull
[q
]-1);
4134 remember
= f
->m_vsize
;
4135 f
->m_vsize
= 0; /* release slot */
4137 if (core_id
== 0 && remote_party
== 0) /* original root cpu0 */
4138 { if (query_in_progress
== 1 /* didn't send more states in the interim */
4139 && *grfree
+ 1 == remember
) /* no action on global queue meanwhile */
4140 { if (verbose
) cpu_printf("Termination detected\n");
4141 if (TargetQ_Full(target_q
))
4143 cpu_printf("warning: target q is full\n");
4145 f
= Get_Free_Frame(target_q
);
4146 if (!f
) { Uerror("Fatal2: no free slot"); }
4148 f
->m_boq
= QUIT
; /* send final Quit, collect stats */
4149 f
->m_vsize
= 111; /* anything non-zero will do */
4151 cpu_printf("put QUIT on q%d\n", target_q
);
4153 { if (verbose
) cpu_printf("Stale Query\n");
4158 query_in_progress
= 0;
4160 { if (TargetQ_Full(target_q
))
4162 cpu_printf("warning: forward query - target q full\n");
4164 f
= Get_Free_Frame(target_q
);
4166 cpu_printf("snd QUERY response to q%d (%d <> %d) in slot %d\n",
4167 target_q
, remember
, *grfree
+ 1, prfree
[target_q
]-1);
4168 if (!f
) { Uerror("Fatal4: no free slot"); }
4170 if (*grfree
+ 1 == remember
) /* no action on global queue */
4171 { f
->m_boq
= QUERY
; /* forward query, to root */
4172 f
->m_vsize
= remember
;
4174 { f
->m_boq
= QUERY_F
; /* no match -- busy */
4175 f
->m_vsize
= 112; /* anything non-zero */
4177 if (dsk_written
!= dsk_drained
)
4185 if (f
->m_boq
== QUERY_F
)
4187 { cpu_printf("got QUERY_F on q%d from slot %d\n", q
, prfull
[q
]-1);
4189 f
->m_vsize
= 0; /* release slot */
4191 if (core_id
== 0 && remote_party
== 0) /* original root cpu0 */
4192 { if (verbose
) cpu_printf("No Match on Query\n");
4193 query_in_progress
= 0;
4195 { if (TargetQ_Full(target_q
))
4196 { if (verbose
) cpu_printf("warning: forwarding query_f, target queue full\n");
4198 f
= Get_Free_Frame(target_q
);
4199 if (verbose
) cpu_printf("forward QUERY_F to q%d into slot %d\n",
4200 target_q
, prfree
[target_q
]-1);
4201 if (!f
) { Uerror("Fatal5: no free slot"); }
4202 f
->m_boq
= QUERY_F
; /* cannot terminate yet */
4203 f
->m_vsize
= 113; /* anything non-zero */
4206 if (dsk_written
!= dsk_drained
)
4213 if (f
->m_boq
== QUIT
)
4214 { if (0) cpu_printf("done -- local memcnt %g Mb\n", memcnt
/(1048576.));
4215 retrieve_info((SM_results
*) f
); /* collect and combine stats */
4217 { cpu_printf("received Quit\n");
4220 f
->m_vsize
= 0; /* release incoming slot */
4222 { f
= Get_Free_Frame(target_q
); /* new outgoing slot */
4223 if (!f
) { Uerror("Fatal6: no free slot"); }
4224 m_clear_frame(f
); /* start with zeroed stats */
4225 record_info((SM_results
*) f
);
4226 f
->m_boq
= QUIT
; /* forward combined results */
4227 f
->m_vsize
= 114; /* anything non-zero */
4229 cpu_printf("fwd Results to q%d\n", target_q
);
4231 break; /* successful termination */
4234 /* else: 0<= boq <= 255, means STATE transfer */
4235 if (unpack_state(f
, q
) != 0)
4237 f
->m_vsize
= 0; /* release slot */
4238 if (VVERBOSE
) cpu_printf("Got state\n");
4240 if (search_terminated
!= NULL
4241 && *search_terminated
== 0)
4242 { new_state(); /* explore successors */
4243 memset((uchar
*) &cur_Root
, 0, sizeof(SM_frame
)); /* avoid confusion */
4250 if (verbose
) cpu_printf("done got %d put %d\n", nstates_get
, nstates_put
);
4255 give_up(int unused_x
)
4257 if (search_terminated
!= NULL
)
4258 { *search_terminated
|= 32; /* give_up */
4261 { was_interrupted
= 1;
4263 cpu_printf("Give Up\n");
4266 } else /* we are already terminating */
4267 { cpu_printf("SIGINT\n");
4272 check_overkill(void)
4274 vmax_seen
= (vmax_seen
+ 7)/ 8;
4275 vmax_seen
*= 8; /* round up to a multiple of 8 */
4280 && VMAX
- vmax_seen
> 8)
4283 printf("cpu0: max VMAX value seen in this run: ");
4285 printf("cpu0: recommend recompiling with ");
4287 printf("-DVMAX=%d\n", vmax_seen
);
4292 mem_put(int q
) /* handoff state to other cpu, workq q */
4297 { vsize
= (vsize
+ 7)/8; vsize
*= 8; /* round up */
4298 printf("pan: recompile with -DVMAX=N with N >= %d\n", vsize
);
4301 if (now
._nr_pr
> PMAX
)
4302 { printf("pan: recompile with -DPMAX=N with N >= %d\n", now
._nr_pr
);
4305 if (now
._nr_qs
> QMAX
)
4306 { printf("pan: recompile with -DQMAX=N with N >= %d\n", now
._nr_qs
);
4309 if (vsize
> vmax_seen
) vmax_seen
= vsize
;
4310 if (now
._nr_pr
> pmax_seen
) pmax_seen
= now
._nr_pr
;
4311 if (now
._nr_qs
> qmax_seen
) qmax_seen
= now
._nr_qs
;
4313 f
= Get_Free_Frame(q
); /* not called in likely deadlock states */
4314 if (!f
) { Uerror("Fatal3: no free slot"); }
4316 if (VVERBOSE
) cpu_printf("putting state into q%d\n", q
);
4318 memcpy((uchar
*) f
->m_now
, (uchar
*) &now
, vsize
);
4319 memset((uchar
*) f
->m_Mask
, 0, (VMAX
+7)/8 * sizeof(char));
4320 for (i
= j
= 0; i
< VMAX
; i
++, j
= (j
+1)%8)
4322 { f
->m_Mask
[i
/8] |= (1<<j
);
4326 { memcpy((uchar
*) f
->m_p_offset
, (uchar
*) proc_offset
, now
._nr_pr
* sizeof(OFFT
));
4327 memcpy((uchar
*) f
->m_p_skip
, (uchar
*) proc_skip
, now
._nr_pr
* sizeof(uchar
));
4330 { memcpy((uchar
*) f
->m_q_offset
, (uchar
*) q_offset
, now
._nr_qs
* sizeof(OFFT
));
4331 memcpy((uchar
*) f
->m_q_skip
, (uchar
*) q_skip
, now
._nr_qs
* sizeof(uchar
));
4333 #if defined(C_States) && (HAS_TRACK==1) && (HAS_STACK==1)
4334 c_stack((uchar
*) f
->m_c_stack
); /* save unmatched tracked data */
4337 f
->m_stack
= stack_last
[core_id
];
4339 f
->nr_handoffs
= nr_handoffs
+1;
4340 f
->m_tau
= trpt
->tau
;
4341 f
->m_o_pm
= trpt
->o_pm
;
4343 f
->m_vsize
= vsize
; /* must come last - now the other cpu can see it */
4345 if (query_in_progress
== 1)
4346 query_in_progress
= 2; /* make sure we know, if a query makes the rounds */
4351 int Dsk_W_Nr
, Dsk_R_Nr
;
4352 int dsk_file
= -1, dsk_read
= -1;
4353 unsigned long dsk_written
, dsk_drained
;
4357 #if defined(WIN32) || defined(WIN64)
4358 #define RFLAGS (O_RDONLY|O_BINARY)
4359 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)
4361 #define RFLAGS (O_RDONLY)
4362 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC)
4370 if (dsk_written
> 0)
4371 { cpu_printf("dsk_written %d states in %d files\ncpu%d: dsk_drained %6d states\n",
4372 dsk_written
, Dsk_W_Nr
, core_id
, dsk_drained
);
4375 for (i
= 0; i
< Dsk_W_Nr
; i
++)
4376 { sprintf(dsk_name
, "Q%.3d_%.3d.tmp", i
, core_id
);
4384 int q
= (core_id
+ 1) % NCORE
; /* target q */
4388 || dsk_written
<= dsk_drained
)
4392 while (dsk_written
> dsk_drained
4393 && TargetQ_NotFull(q
))
4394 { f
= Get_Free_Frame(q
);
4395 if (!f
) { Uerror("Fatal: unhandled condition"); }
4397 if ((dsk_drained
+1)%MAX_DSK_FILE
== 0) /* 100K states max per file */
4398 { (void) close(dsk_read
); /* close current read handle */
4399 sprintf(dsk_name
, "Q%.3d_%.3d.tmp", Dsk_R_Nr
++, core_id
);
4400 (void) unlink(dsk_name
); /* remove current file */
4401 sprintf(dsk_name
, "Q%.3d_%.3d.tmp", Dsk_R_Nr
, core_id
);
4402 cpu_printf("reading %s\n", dsk_name
);
4403 dsk_read
= open(dsk_name
, RFLAGS
); /* open next file */
4405 { Uerror("could not open dsk file");
4407 if (read(dsk_read
, &g
, sizeof(SM_frame
)) != sizeof(SM_frame
))
4408 { Uerror("bad dsk file read");
4412 memcpy(f
, &g
, sizeof(SM_frame
));
4413 f
->m_vsize
= sz
; /* last */
4422 int i
, j
, q
= (core_id
+ 1) % NCORE
; /* target q */
4425 { printf("pan: recompile with -DVMAX=N with N >= %d\n", vsize
);
4428 if (now
._nr_pr
> PMAX
)
4429 { printf("pan: recompile with -DPMAX=N with N >= %d\n", now
._nr_pr
);
4432 if (now
._nr_qs
> QMAX
)
4433 { printf("pan: recompile with -DQMAX=N with N >= %d\n", now
._nr_qs
);
4437 if (VVERBOSE
) cpu_printf("filing state for q%d\n", q
);
4439 memcpy((uchar
*) f
.m_now
, (uchar
*) &now
, vsize
);
4440 memset((uchar
*) f
.m_Mask
, 0, (VMAX
+7)/8 * sizeof(char));
4441 for (i
= j
= 0; i
< VMAX
; i
++, j
= (j
+1)%8)
4443 { f
.m_Mask
[i
/8] |= (1<<j
);
4447 { memcpy((uchar
*)f
.m_p_offset
, (uchar
*)proc_offset
, now
._nr_pr
*sizeof(OFFT
));
4448 memcpy((uchar
*)f
.m_p_skip
, (uchar
*)proc_skip
, now
._nr_pr
*sizeof(uchar
));
4451 { memcpy((uchar
*) f
.m_q_offset
, (uchar
*) q_offset
, now
._nr_qs
*sizeof(OFFT
));
4452 memcpy((uchar
*) f
.m_q_skip
, (uchar
*) q_skip
, now
._nr_qs
*sizeof(uchar
));
4454 #if defined(C_States) && (HAS_TRACK==1) && (HAS_STACK==1)
4455 c_stack((uchar
*) f
.m_c_stack
); /* save unmatched tracked data */
4458 f
.m_stack
= stack_last
[core_id
];
4460 f
.nr_handoffs
= nr_handoffs
+1;
4461 f
.m_tau
= trpt
->tau
;
4462 f
.m_o_pm
= trpt
->o_pm
;
4466 if (query_in_progress
== 1)
4467 { query_in_progress
= 2;
4470 { sprintf(dsk_name
, "Q%.3d_%.3d.tmp", Dsk_W_Nr
, core_id
);
4471 dsk_file
= open(dsk_name
, WFLAGS
, 0644);
4472 dsk_read
= open(dsk_name
, RFLAGS
);
4473 if (dsk_file
< 0 || dsk_read
< 0)
4474 { cpu_printf("File: <%s>\n", dsk_name
);
4475 Uerror("cannot open diskfile");
4477 Dsk_W_Nr
++; /* nr of next file to open */
4478 cpu_printf("created temporary diskfile %s\n", dsk_name
);
4479 } else if ((dsk_written
+1)%MAX_DSK_FILE
== 0)
4480 { close(dsk_file
); /* close write handle */
4481 sprintf(dsk_name
, "Q%.3d_%.3d.tmp", Dsk_W_Nr
++, core_id
);
4482 dsk_file
= open(dsk_name
, WFLAGS
, 0644);
4484 { cpu_printf("File: <%s>\n", dsk_name
);
4485 Uerror("aborting: cannot open new diskfile");
4487 cpu_printf("created temporary diskfile %s\n", dsk_name
);
4489 if (write(dsk_file
, &f
, sizeof(SM_frame
)) != sizeof(SM_frame
))
4490 { Uerror("aborting -- disk write failed (disk full?)");
4500 if (search_terminated
== NULL
4501 || *search_terminated
!= 0) /* not a full crash check */
4504 iam_alive(); /* on every transition of Down */
4506 mem_drain(); /* maybe call this also on every Up */
4508 if (depth
> z_handoff
/* above handoff limit */
4510 && !a_cycles
/* not in liveness mode */
4513 && boq
== -1 /* not mid-rv */
4516 && (trpt
->tau
&4) /* claim moves first */
4517 && !((trpt
-1)->tau
&128) /* not a stutter move */
4519 && !(trpt
->tau
&8)) /* not an atomic move */
4520 { int q
= (core_id
+ 1) % NCORE
; /* circular handoff */
4522 if (prcnt
[q
] < LN_FRAMES
)
4524 if (TargetQ_NotFull(q
)
4525 && (dfs_phase2
== 0 || prcnt
[core_id
] > 0))
4532 rval
= GlobalQ_HasRoom();
4538 { void mem_file(void);
4546 return 0; /* i.e., no handoff */
4550 mem_put_acc(void) /* liveness mode */
4551 { int q
= (core_id
+ 1) % NCORE
;
4553 if (search_terminated
== NULL
4554 || *search_terminated
!= 0)
4560 /* some tortured use of preprocessing: */
4561 #if !defined(NGQ) || defined(USE_DISK)
4562 if (TargetQ_Full(q
))
4566 if (GlobalQ_HasRoom())
4574 #if !defined(NGQ) || defined(USE_DISK)
4582 #if defined(WIN32) || defined(WIN64)
4584 init_shm(void) /* initialize shared work-queues */
4589 if (core_id
== 0 && verbose
)
4590 { printf("cpu0: step 3: allocate shared work-queues %g Mb\n",
4591 ((double) NCORE
* LWQ_SIZE
+ GWQ_SIZE
) / (1048576.));
4593 for (m
= 0; m
< NR_QS
; m
++) /* last q is global 1 */
4594 { double qsize
= (m
== NCORE
) ? GWQ_SIZE
: LWQ_SIZE
;
4595 sprintf(key
, "Global\\pan_%s_%.3d", PanSource
, m
);
4597 { shmid
[m
] = CreateFileMapping(
4598 INVALID_HANDLE_VALUE
, /* use paging file */
4599 NULL
, /* default security */
4600 PAGE_READWRITE
, /* access permissions */
4601 0, /* high-order 4 bytes */
4602 qsize
, /* low-order bytes, size in bytes */
4604 } else /* worker nodes just open these segments */
4605 { shmid
[m
] = OpenFileMapping(
4606 FILE_MAP_ALL_ACCESS
, /* read/write access */
4607 FALSE
, /* children do not inherit handle */
4610 if (shmid
[m
] == NULL
)
4611 { fprintf(stderr
, "cpu%d: could not create or open shared queues\n",
4617 shared_mem
[m
] = (char *) MapViewOfFile(shmid
[m
], FILE_MAP_ALL_ACCESS
, 0, 0, 0);
4618 if (shared_mem
[m
] == NULL
)
4619 { fprintf(stderr
, "cpu%d: cannot attach shared q%d (%d Mb)\n",
4620 core_id
, m
+1, (int) (qsize
/(1048576.)));
4627 m_workq
[m
] = (SM_frame
*) shared_mem
[m
];
4629 { int nframes
= (m
== NCORE
) ? GN_FRAMES
: LN_FRAMES
;
4630 for (n
= 0; n
< nframes
; n
++)
4631 { m_workq
[m
][n
].m_vsize
= 0;
4632 m_workq
[m
][n
].m_boq
= 0;
4636 { fprintf(stderr
, "pan: check './pan --' for usage details\n");
4637 pan_exit(1); /* calls cleanup_shm */
4642 prep_shmid_S(size_t n
) /* either sets SS or H_tab, WIN32/WIN64 */
4647 if (verbose
&& core_id
== 0)
4650 printf("cpu0: step 1: allocate shared bitstate %g Mb\n",
4651 (double) n
/ (1048576.));
4653 printf("cpu0: step 1: allocate shared hastable %g Mb\n",
4654 (double) n
/ (1048576.));
4658 if (memcnt
+ (double) n
> memlim
)
4659 { printf("cpu%d: S %8g + %d Kb exceeds memory limit of %8g Mb\n",
4660 core_id
, memcnt
/1024., n
/1024, memlim
/(1048576.));
4661 printf("cpu%d: insufficient memory -- aborting\n", core_id
);
4666 /* make key different from queues: */
4667 sprintf(key
, "Global\\pan_%s_%.3d", PanSource
, NCORE
+2); /* different from qs */
4669 if (core_id
== 0) /* root */
4670 { shmid_S
= CreateFileMapping(INVALID_HANDLE_VALUE
, NULL
,
4672 PAGE_READWRITE
, (n
>>32), (n
& 0xffffffff), key
);
4674 PAGE_READWRITE
, 0, n
, key
);
4676 memcnt
+= (double) n
;
4678 { shmid_S
= OpenFileMapping(FILE_MAP_ALL_ACCESS
, FALSE
, key
);
4680 if (shmid_S
== NULL
)
4683 fprintf(stderr
, "cpu%d: cannot %s shared bitstate",
4684 core_id
, core_id
?"open":"create");
4686 fprintf(stderr
, "cpu%d: cannot %s shared hashtable",
4687 core_id
, core_id
?"open":"create");
4689 fprintf(stderr
, "pan: check './pan --' for usage details\n");
4693 rval
= (char *) MapViewOfFile(shmid_S
, FILE_MAP_ALL_ACCESS
, 0, 0, 0); /* attach */
4694 if ((char *) rval
== NULL
)
4695 { fprintf(stderr
, "cpu%d: cannot attach shared bitstate or hashtable\n", core_id
);
4696 fprintf(stderr
, "pan: check './pan --' for usage details\n");
4700 rval
= (char *) emalloc(n
);
4702 return (uchar
*) rval
;
4706 prep_state_mem(size_t n
) /* WIN32/WIN64 sets memory arena for states */
4709 static int cnt
= 3; /* start larger than earlier ftok calls */
4711 if (verbose
&& core_id
== 0)
4712 { printf("cpu0: step 2+: pre-allocate memory arena %d of %g Mb\n",
4713 cnt
-3, (double) n
/ (1048576.));
4716 if (memcnt
+ (double) n
> memlim
)
4717 { printf("cpu%d: error: M %.0f + %.0f exceeds memory limit of %.0f Kb\n",
4718 core_id
, memcnt
/1024.0, (double) n
/1024.0, memlim
/1024.0);
4723 sprintf(key
, "Global\\pan_%s_%.3d", PanSource
, NCORE
+cnt
); cnt
++;
4726 { shmid_M
= CreateFileMapping(INVALID_HANDLE_VALUE
, NULL
,
4728 PAGE_READWRITE
, (n
>>32), (n
& 0xffffffff), key
);
4730 PAGE_READWRITE
, 0, n
, key
);
4733 { shmid_M
= OpenFileMapping(FILE_MAP_ALL_ACCESS
, FALSE
, key
);
4735 if (shmid_M
== NULL
)
4736 { printf("cpu%d: failed to get pool of shared memory nr %d of size %d\n",
4738 printf("pan: check './pan --' for usage details\n");
4741 rval
= (char *) MapViewOfFile(shmid_M
, FILE_MAP_ALL_ACCESS
, 0, 0, 0); /* attach */
4744 { printf("cpu%d: failed to attach pool of shared memory nr %d of size %d\n",
4748 return (uchar
*) rval
;
4752 init_HT(unsigned long n
) /* WIN32/WIN64 version */
4758 if (verbose
) printf("cpu%d: initialization for Windows\n", core_id
);
4763 { printf("cpu0: steps 0,1: no -DMEMLIM set\n");
4767 printf("cpu0: steps 0,1: -DMEMLIM=%d Mb - (hashtable %g Mb + workqueues %g Mb)\n",
4768 MEMLIM
, ((double)n
/(1048576.)), ((double) NCORE
* LWQ_SIZE
+ GWQ_SIZE
)/(1048576.));
4770 get_mem
= NCORE
* sizeof(double) + (1 + CS_NR
) * sizeof(void *)+ 4*sizeof(void *) + 2*sizeof(double);
4771 /* NCORE * is_alive + search_terminated + CS_NR * sh_lock + 6 gr vars */
4772 get_mem
+= 4 * NCORE
* sizeof(void *);
4774 get_mem
+= (NCORE
) * sizeof(Stack_Tree
*);
4775 /* NCORE * stack_last */
4777 x
= (volatile char *) prep_state_mem((size_t) get_mem
);
4778 shmid_X
= (void *) x
;
4780 { printf("cpu0: could not allocate shared memory, see ./pan --\n");
4783 search_terminated
= (volatile unsigned int *) x
; /* comes first */
4784 x
+= sizeof(void *); /* maintain alignment */
4786 is_alive
= (volatile double *) x
;
4787 x
+= NCORE
* sizeof(double);
4789 sh_lock
= (volatile int *) x
;
4790 x
+= CS_NR
* sizeof(void *); /* allow 1 word per entry */
4792 grfree
= (volatile int *) x
;
4793 x
+= sizeof(void *);
4794 grfull
= (volatile int *) x
;
4795 x
+= sizeof(void *);
4796 grcnt
= (volatile int *) x
;
4797 x
+= sizeof(void *);
4798 grmax
= (volatile int *) x
;
4799 x
+= sizeof(void *);
4800 prfree
= (volatile int *) x
;
4801 x
+= NCORE
* sizeof(void *);
4802 prfull
= (volatile int *) x
;
4803 x
+= NCORE
* sizeof(void *);
4804 prcnt
= (volatile int *) x
;
4805 x
+= NCORE
* sizeof(void *);
4806 prmax
= (volatile int *) x
;
4807 x
+= NCORE
* sizeof(void *);
4808 gr_readmiss
= (volatile double *) x
;
4809 x
+= sizeof(double);
4810 gr_writemiss
= (volatile double *) x
;
4811 x
+= sizeof(double);
4814 stack_last
= (volatile Stack_Tree
**) x
;
4815 x
+= NCORE
* sizeof(Stack_Tree
*);
4819 H_tab
= (struct H_el
**) emalloc(n
);
4823 #warning MEMLIM not set
4824 #define MEMLIM (2048)
4827 if (core_id
== 0 && verbose
)
4828 printf("cpu0: step 0: -DMEMLIM=%d Mb - (hashtable %g Mb + workqueues %g Mb) = %g Mb for state storage\n",
4829 MEMLIM
, ((double)n
/(1048576.)), ((double) NCORE
* LWQ_SIZE
+ GWQ_SIZE
)/(1048576.),
4830 (memlim
- memcnt
- (double) n
- ((double) NCORE
* LWQ_SIZE
+ GWQ_SIZE
))/(1048576.));
4832 H_tab
= (struct H_el
**) prep_shmid_S((size_t) n
); /* hash_table */
4834 get_mem
= memlim
- memcnt
- ((double) NCORE
) * LWQ_SIZE
- GWQ_SIZE
;
4836 { Uerror("internal error -- shared state memory");
4839 if (core_id
== 0 && verbose
)
4840 { printf("cpu0: step 2: shared state memory %g Mb\n",
4841 get_mem
/(1048576.));
4843 x
= dc_mem_start
= (char *) prep_state_mem((size_t) get_mem
); /* for states */
4845 { printf("cpu%d: insufficient memory -- aborting\n", core_id
);
4849 search_terminated
= (volatile unsigned int *) x
; /* comes first */
4850 x
+= sizeof(void *); /* maintain alignment */
4852 is_alive
= (volatile double *) x
;
4853 x
+= NCORE
* sizeof(double);
4855 sh_lock
= (volatile int *) x
;
4856 x
+= CS_NR
* sizeof(int);
4858 grfree
= (volatile int *) x
;
4859 x
+= sizeof(void *);
4860 grfull
= (volatile int *) x
;
4861 x
+= sizeof(void *);
4862 grcnt
= (volatile int *) x
;
4863 x
+= sizeof(void *);
4864 grmax
= (volatile int *) x
;
4865 x
+= sizeof(void *);
4866 prfree
= (volatile int *) x
;
4867 x
+= NCORE
* sizeof(void *);
4868 prfull
= (volatile int *) x
;
4869 x
+= NCORE
* sizeof(void *);
4870 prcnt
= (volatile int *) x
;
4871 x
+= NCORE
* sizeof(void *);
4872 prmax
= (volatile int *) x
;
4873 x
+= NCORE
* sizeof(void *);
4874 gr_readmiss
= (volatile double *) x
;
4875 x
+= sizeof(double);
4876 gr_writemiss
= (volatile double *) x
;
4877 x
+= sizeof(double);
4880 stack_last
= (volatile Stack_Tree
**) x
;
4881 x
+= NCORE
* sizeof(Stack_Tree
*);
4883 if (((long)x
)&(sizeof(void *)-1)) /* word alignment */
4884 { x
+= sizeof(void *)-(((long)x
)&(sizeof(void *)-1)); /* 64-bit align */
4888 ncomps
= (unsigned long *) x
;
4889 x
+= (256+2) * sizeof(unsigned long);
4892 dc_shared
= (sh_Allocater
*) x
; /* in shared memory */
4893 x
+= sizeof(sh_Allocater
);
4895 if (core_id
== 0) /* root only */
4896 { dc_shared
->dc_id
= shmid_M
;
4897 dc_shared
->dc_start
= (void *) dc_mem_start
;
4898 dc_shared
->dc_arena
= x
;
4899 dc_shared
->pattern
= 1234567;
4900 dc_shared
->dc_size
= (long) get_mem
- (long) (x
- dc_mem_start
);
4901 dc_shared
->nxt
= NULL
;
4906 #if defined(WIN32) || defined(WIN64) || defined(__i386__) || defined(__x86_64__)
4907 extern BOOLEAN
InterlockedBitTestAndSet(LONG
volatile* Base
, LONG Bit
);
4909 tas(volatile LONG
*s
)
4910 { return InterlockedBitTestAndSet(s
, 1);
4913 #error missing definition of test and set operation for this platform
4917 cleanup_shm(int val
)
4919 static int nibis
= 0;
4922 { printf("cpu%d: Redundant call to cleanup_shm(%d)\n", core_id
, val
);
4927 if (search_terminated
!= NULL
)
4928 { *search_terminated
|= 16; /* cleanup_shm */
4931 for (m
= 0; m
< NR_QS
; m
++)
4932 { if (shmid
[m
] != NULL
)
4933 { UnmapViewOfFile((char *) shared_mem
[m
]);
4934 CloseHandle(shmid
[m
]);
4937 UnmapViewOfFile((void *) shmid_X
);
4938 CloseHandle((void *) shmid_M
);
4941 if (shmid_S
!= NULL
)
4942 { UnmapViewOfFile(SS
);
4943 CloseHandle(shmid_S
);
4946 if (core_id
== 0 && verbose
)
4947 { printf("cpu0: done, %ld Mb of shared state memory left\n",
4948 dc_shared
->dc_size
/ (long)(1048576));
4950 if (shmid_S
!= NULL
)
4951 { UnmapViewOfFile(H_tab
);
4952 CloseHandle(shmid_S
);
4954 shmid_M
= (void *) (dc_shared
->dc_id
);
4955 UnmapViewOfFile((char *) dc_shared
->dc_start
);
4956 CloseHandle(shmid_M
);
4959 /* detached from shared memory - so cannot use cpu_printf */
4961 { printf("cpu%d: done -- got %d states from queue\n",
4962 core_id
, nstates_get
);
4971 #if defined(MA) && !defined(SEP_STATE)
4972 #error MA requires SEP_STATE in multi-core mode
4975 #error BFS is not supported in multi-core mode
4978 #error SC is not supported in multi-core mode
4980 init_shm(); /* we are single threaded when this starts */
4981 signal(SIGINT
, give_up
); /* windows control-c interrupt */
4983 if (core_id
== 0 && verbose
)
4984 { printf("cpu0: step 4: creating additional workers (proxy %d)\n",
4988 if NCORE
> 1 the child
or the parent should fork N
-1 more times
4989 the parent is the only process with core_id
== 0 and is_parent
> 0
4990 the
others (workers
) have is_parent
= 0 and core_id
= 1..NCORE
-1
4992 if (core_id
== 0) /* root starts up the workers */
4993 { worker_pids
[0] = (DWORD
) getpid(); /* for completeness */
4994 while (++core_id
< NCORE
) /* first worker sees core_id = 1 */
4996 STARTUPINFO si
= { sizeof(si
) };
4997 PROCESS_INFORMATION pi
;
4999 if (proxy_pid
== core_id
) /* always non-zero */
5000 { sprintf(cmdline
, "pan_proxy.exe -r %s-Q%d -Z%d",
5001 o_cmdline
, getpid(), core_id
);
5003 { sprintf(cmdline
, "pan.exe %s-Q%d -Z%d",
5004 o_cmdline
, getpid(), core_id
);
5006 if (verbose
) printf("cpu%d: spawn %s\n", core_id
, cmdline
);
5008 is_parent
= CreateProcess(0, cmdline
, 0, 0, FALSE
, 0, 0, 0, &si
, &pi
);
5010 { Uerror("fork failed");
5012 worker_pids
[core_id
] = pi
.dwProcessId
;
5013 worker_handles
[core_id
] = pi
.hProcess
;
5015 { cpu_printf("created core %d, pid %d\n",
5016 core_id
, pi
.dwProcessId
);
5018 if (proxy_pid
== core_id
) /* we just created the receive half */
5019 { /* add proxy send, store pid in proxy_pid_snd */
5020 sprintf(cmdline
, "pan_proxy.exe -s %s-Q%d -Z%d -Y%d",
5021 o_cmdline
, getpid(), core_id
, worker_pids
[proxy_pid
]);
5022 if (verbose
) printf("cpu%d: spawn %s\n", core_id
, cmdline
);
5023 is_parent
= CreateProcess(0, cmdline
, 0,0, FALSE
, 0,0,0, &si
, &pi
);
5025 { Uerror("fork failed");
5027 proxy_pid_snd
= pi
.dwProcessId
;
5028 proxy_handle_snd
= pi
.hProcess
;
5030 { cpu_printf("created core %d, pid %d (send proxy)\n",
5031 core_id
, pi
.dwProcessId
);
5033 core_id
= 0; /* reset core_id for root process */
5035 { static char db0
[16]; /* good for up to 10^6 cores */
5036 static char db1
[16];
5037 tprefix
= db0
; sprefix
= db1
;
5038 sprintf(tprefix
, "cpu%d_trail", core_id
); /* avoid conflicts on file access */
5039 sprintf(sprefix
, "cpu%d_rst", core_id
);
5040 memcnt
= 0; /* count only additionally allocated memory */
5043 { cpu_printf("starting core_id %d -- pid %d\n", core_id
, getpid());
5045 if (core_id
== 0 && !remote_party
)
5046 { new_state(); /* root starts the search */
5048 cpu_printf("done with 1st dfs, nstates %g (put %d states), start reading q\n",
5049 nstates
, nstates_put
);
5052 Read_Queue(core_id
); /* all cores */
5055 { cpu_printf("put %6d states into queue -- got %6d\n",
5056 nstates_put
, nstates_get
);
5066 init_SS(unsigned long n
)
5068 SS
= (uchar
*) prep_shmid_S((size_t) n
);
5076 clock_t crash_stamp
;
5078 #if !defined(WIN32) && !defined(WIN64)
5079 struct tms start_tm
;
5085 #if defined(WIN32) || defined(WIN64)
5086 start_time
= clock();
5088 start_time
= times(&start_tm
);
5094 { clock_t stop_time
;
5096 #if !defined(WIN32) && !defined(WIN64)
5098 stop_time
= times(&stop_tm
);
5099 delta_time
= ((double) (stop_time
- start_time
)) / ((double) sysconf(_SC_CLK_TCK
));
5101 stop_time
= clock();
5102 delta_time
= ((double) (stop_time
- start_time
)) / ((double) CLOCKS_PER_SEC
);
5104 if (readtrail
|| delta_time
< 0.00) return;
5106 if (core_id
== 0 && nstates
> (double) 0)
5107 { printf("\ncpu%d: elapsed time %.3g seconds (%g states visited)\n", core_id
, delta_time
, nstates
);
5108 if (delta_time
> 0.01)
5109 { printf("cpu%d: rate %g states/second\n", core_id
, nstates
/delta_time
);
5111 { void check_overkill(void);
5115 printf("\npan: elapsed time %.3g seconds\n", delta_time
);
5116 if (delta_time
> 0.01)
5117 { printf("pan: rate %9.8g states/second\n", nstates
/delta_time
);
5119 { printf("pan: avg transition delay %.5g usec\n",
5120 delta_time
/(nstates
+truncs
));
5127 double t_alerts
[17];
5132 printf("crash alert intervals:\n");
5133 for (i
= 0; i
< 17; i
++)
5134 { printf("%d\t%g\n", i
, t_alerts
[i
]);
5141 if (crash_stamp
!= (clock_t) 0)
5146 #if defined(WIN32) || defined(WIN64)
5147 delta_time
= ((double) (clock() - crash_stamp
)) / ((double) CLOCKS_PER_SEC
);
5149 delta_time
= ((double) (times(&start_tm
) - crash_stamp
)) / ((double) sysconf(_SC_CLK_TCK
));
5151 for (i
= 0; i
< 16; i
++)
5152 { if (delta_time
<= (i
*30))
5153 { t_alerts
[i
] = delta_time
;
5156 if (i
== 16) t_alerts
[i
] = delta_time
;
5159 printf("cpu%d: crash alert off\n", core_id
);
5161 crash_stamp
= (clock_t) 0;
5165 crash_test(double maxtime
)
5166 { double delta_time
;
5167 if (crash_stamp
== (clock_t) 0)
5168 { /* start timing */
5169 #if defined(WIN32) || defined(WIN64)
5170 crash_stamp
= clock();
5172 crash_stamp
= times(&start_tm
);
5175 { printf("cpu%d: crash detection\n", core_id
);
5179 #if defined(WIN32) || defined(WIN64)
5180 delta_time
= ((double) (clock() - crash_stamp
)) / ((double) CLOCKS_PER_SEC
);
5182 delta_time
= ((double) (times(&start_tm
) - crash_stamp
)) / ((double) sysconf(_SC_CLK_TCK
));
5184 return (delta_time
>= maxtime
);
5191 depth
= mreached
= 0;
5194 trpt
->tau
|= 4; /* the claim moves first */
5196 for (i
= 0; i
< (int) now
._nr_pr
; i
++)
5197 { P0
*ptr
= (P0
*) pptr(i
);
5200 && accpstate
[ptr
->_t
][ptr
->_p
])
5205 && progstate
[ptr
->_t
][ptr
->_p
])
5212 if (accpstate
[EVENT_TRACE
][now
._event
])
5216 if (progstate
[EVENT_TRACE
][now
._event
])
5222 Mask
[0] = Mask
[1] = 1; /* _nr_pr, _nr_qs */
5224 { i
= &(now
._a_t
) - (uchar
*) &now
;
5225 Mask
[i
] = 1; /* _a_t */
5230 i
= &(now
._cnt
[0]) - (uchar
*) &now
;
5232 Mask
[i
++] = 1; /* _cnt[] */
5238 && (a_cycles
&& (trpt
->o_pm
&2)))
5239 { now
._a_t
= 2; /* set the A-bit */
5240 now
._cnt
[0] = now
._nr_pr
+ 1;
5242 printf("%3d: fairness Rule 1, cnt=%d, _a_t=%d\n",
5243 depth
, now
._cnt
[now
._a_t
&1], now
._a_t
);
5247 c_stack_start
= (char *) &i
; /* meant to be read-only */
5248 #if defined(HAS_CODE) && defined (C_INIT)
5249 C_INIT
; /* initialization of data that must precede fork() */
5252 #if defined(C_States) && (HAS_TRACK==1)
5253 /* capture initial state of tracked C objects */
5254 c_update((uchar
*) &(now
.c_state
[0]));
5257 if (readtrail
) getrail(); /* no return */
5263 #if defined(C_States) && defined(HAS_STACK) && (HAS_TRACK==1)
5264 /* initial state of tracked & unmatched objects */
5265 c_stack((uchar
*) &(svtack
->c_stack
[0]));
5277 new_state(); /* start 1st DFS */
5283 do_reverse(Trans
*t
, short II
, uchar M
)
5285 int tt
= (int) ((P0
*)this)->_p
;
5286 #include REVERSE_MOVES
5292 static char _tp
= 'n'; static int _qid
= 0;
5295 do_transit(Trans
*t
, short II
)
5297 int tt
= (int) ((P0
*)this)->_p
;
5303 uchar ot
= (uchar
) ((P0
*)this)->_t
;
5304 if (ot
== EVENT_TRACE
) boq
= -1;
5305 #define continue { boq = oboq; return 0; }
5307 #define continue return 0
5309 uchar ot
= (uchar
) ((P0
*)this)->_t
;
5312 #include FORWARD_MOVES
5315 if (ot
== EVENT_TRACE
) boq
= oboq
;
5322 require(char tp
, int qid
)
5324 _tp
= tp
; _qid
= qid
;
5326 if (now
._event
!= endevent
)
5327 for (t
= trans
[EVENT_TRACE
][now
._event
]; t
; t
= t
->nxt
)
5328 { if (do_transit(t
, EVENT_TRACE
))
5329 { now
._event
= t
->st
;
5330 reached
[EVENT_TRACE
][t
->st
] = 1;
5332 printf(" event_trace move to -> %d\n", t
->st
);
5336 if (accpstate
[EVENT_TRACE
][now
._event
])
5337 (trpt
+1)->o_pm
|= 2;
5339 if (progstate
[EVENT_TRACE
][now
._event
])
5340 (trpt
+1)->o_pm
|= 4;
5343 #ifdef NEGATED_TRACE
5344 if (now
._event
== endevent
)
5349 uerror("event_trace error (all events matched)");
5356 for (t
= t
->nxt
; t
; t
= t
->nxt
)
5357 { if (do_transit(t
, EVENT_TRACE
))
5358 Uerror("non-determinism in event-trace");
5364 printf(" event_trace miss '%c' -- %d, %d, %d\n",
5365 tp
, qid
, now
._event
, t
->forw
);
5368 #ifdef NEGATED_TRACE
5369 now
._event
= endevent
; /* only 1st try will count -- fixed 4.2.6 */
5374 uerror("event_trace error (no matching event)");
5382 enabled(int iam
, int pid
)
5383 { Trans
*t
; uchar
*othis
= this;
5384 int res
= 0; int tt
; uchar ot
;
5386 /* if (pid > 0) */ pid
++;
5389 Uerror("used: enabled(pid=thisproc)");
5390 if (pid
< 0 || pid
>= (int) now
._nr_pr
)
5394 tt
= (int) ((P0
*)this)->_p
;
5395 ot
= (uchar
) ((P0
*)this)->_t
;
5396 for (t
= trans
[ot
][tt
]; t
; t
= t
->nxt
)
5397 if (do_transit(t
, (short) pid
))
5408 { clock_t stop_time
;
5410 #if !defined(WIN32) && !defined(WIN64)
5412 stop_time
= times(&stop_tm
);
5413 delta_time
= ((double) (stop_time
- start_time
)) / ((double) sysconf(_SC_CLK_TCK
));
5415 stop_time
= clock();
5416 delta_time
= ((double) (stop_time
- start_time
)) / ((double) CLOCKS_PER_SEC
);
5418 if (delta_time
> 0.01)
5419 { printf("t= %6.3g ", delta_time
);
5420 printf("R= %7.0g", nstates
/delta_time
);
5423 if (quota
> 0.1 && delta_time
> quota
)
5424 { printf("Time limit of %6.3g minutes exceeded\n", quota
/60.0);
5427 leave_critical(GLOBAL_LOCK
);
5428 sudden_stop("time-limit");
5438 enter_critical(GLOBAL_LOCK
); /* snapshot */
5439 printf("cpu%d: ", core_id
);
5441 printf("Depth= %7ld States= %8.3g ",
5443 (long) (nr_handoffs
* z_handoff
) +
5446 printf("Transitions= %8.3g ", nstates
+truncs
);
5448 printf("Nodes= %7d ", nr_states
);
5450 printf("Memory= %9.3f\t", memcnt
/1048576.);
5454 leave_critical(GLOBAL_LOCK
);
5462 && (stackwrite
= creat(stackfile
, TMODE
)) < 0)
5463 Uerror("cannot create stackfile");
5465 if (write(stackwrite
, trail
, DDD
*sizeof(Trail
))
5466 != DDD
*sizeof(Trail
))
5467 Uerror("stackfile write error -- disk is full?");
5469 memmove(trail
, &trail
[DDD
], (HHH
-DDD
+2)*sizeof(Trail
));
5470 memset(&trail
[HHH
-DDD
+2], 0, (omaxdepth
- HHH
+ DDD
- 2)*sizeof(Trail
));
5478 memmove(&trail
[DDD
], trail
, (HHH
-DDD
+2)*sizeof(Trail
));
5481 || lseek(stackwrite
, -DDD
* (off_t
) sizeof(Trail
), SEEK_CUR
) == -1)
5482 Uerror("disk2stack lseek error");
5485 && (stackread
= open(stackfile
, 0)) < 0)
5486 Uerror("cannot open stackfile");
5488 if (lseek(stackread
, (CNT1
-CNT2
)*DDD
* (off_t
) sizeof(Trail
), SEEK_SET
) == -1)
5489 Uerror("disk2stack lseek error");
5491 have
= read(stackread
, trail
, DDD
*sizeof(Trail
));
5492 if (have
!= DDD
*sizeof(Trail
))
5493 Uerror("stackfile read error");
5498 { if (x
< 0 || x
>= MAXPROC
|| !proc_offset
[x
])
5501 return (uchar
*) pptr(x
);
5505 * new_state() is the main DFS search routine in the verifier
5506 * it has a lot of code ifdef-ed together to support
5507 * different search modes, which makes it quite unreadable.
5508 * if you are studying the code, first use the C preprocessor
5509 * to generate a specific version from the pan.c source,
5511 * gcc -E -DNOREDUCE -DBITSTATE pan.c > ppan.c
5512 * and then study the resulting file, rather than this one
5514 #if !defined(BFS) && (!defined(BITSTATE) || !defined(MA))
5520 { if (cnt
< 512) N_succ
[cnt
]++;
5521 else printf("tally_succ: cnt %d exceeds range\n", cnt
);
5526 { int i
; double sum
= 0.0;
5528 printf("Successor counts:\n");
5529 for (i
= 0; i
< 512; i
++)
5530 { sum
+= (double) N_succ
[i
];
5532 for (i
= 0; i
< 512; i
++)
5533 { if (N_succ
[i
] > 0)
5534 { printf("%3d %10d (%.4g %% of total)\n",
5535 i
, N_succ
[i
], (100.0 * (double) N_succ
[i
])/sum
);
5536 w_avg
+= (double) i
* (double) N_succ
[i
];
5538 if (sum
> N_succ
[0])
5539 printf("mean %.4g (without 0: %.4g)\n", w_avg
/ sum
, w_avg
/ (sum
- (double) N_succ
[0]));
5553 short II
, JJ
= 0, kk
;
5556 short From
= BASE
, To
= now
._nr_pr
-1;
5558 short From
= now
._nr_pr
-1, To
= BASE
;
5562 cpu_printf("%d: Down - %s %saccepting [pids %d-%d]\n",
5563 depth
, (trpt
->tau
&4)?"claim":"program",
5564 (trpt
->o_pm
&2)?"":"non-", From
, To
);
5568 { trpt
->sched_limit
= (trpt
-1)->sched_limit
;
5570 { trpt
->sched_limit
= 0;
5574 if (depth
> hiwater
)
5580 printf("zap %d: %d (maxdepth now %d)\n",
5581 CNT1
, hiwater
, maxdepth
);
5584 trpt
->tau
&= ~(16|32|64); /* make sure these are off */
5585 #if defined(FULLSTACK) && defined(MA)
5595 (trpt
+1)->o_n
= 1; /* not a deadlock: as below */
5598 (trpt
-1)->tau
|= 16; /* worstcase guess: as below */
5600 #if NCORE>1 && defined(FULL_TRAIL)
5608 if (depth
>= maxdepth
)
5611 printf("error: max search depth too small\n");
5614 { uerror("depth limit reached");
5618 (trpt
+1)->o_n
= 1; /* not a deadlock */
5621 (trpt
-1)->tau
|= 16; /* worstcase guess */
5623 #if NCORE>1 && defined(FULL_TRAIL)
5631 #if (defined(FULLSTACK) && !defined(MA)) || NCORE>1
5632 /* if atomic or rv move, carry forward previous state */
5633 trpt
->ostate
= (trpt
-1)->ostate
;
5636 if ((trpt
->tau
&4) || ((trpt
-1)->tau
&128))
5638 if (boq
== -1) { /* if not mid-rv */
5640 /* this check should now be redundant
5641 * because the seed state also appears
5642 * on the 1st dfs stack and would be
5643 * matched in hstore below
5645 if ((now
._a_t
&1) && depth
> A_depth
)
5646 { if (!memcmp((char *)&A_Root
,
5647 (char *)&now
, vsize
))
5649 depthfound
= A_depth
;
5651 printf("matches seed\n");
5654 uerror("non-progress cycle");
5656 uerror("acceptance cycle");
5658 #if NCORE>1 && defined(FULL_TRAIL)
5666 printf("not seed\n");
5670 if (!(trpt
->tau
&8)) /* if no atomic move */
5674 II
= bstore((char *)&now
, vsize
);
5675 trpt
->j6
= j1
; trpt
->j7
= j2
;
5676 JJ
= LL
[j1
] && LL
[j2
];
5682 JJ
= II
; /* worstcase guess for p.o. */
5685 II
= bstore((char *)&now
, vsize
);
5689 II
= gstore((char *)&now
, vsize
, 0);
5696 II
= hstore((char *)&now
, vsize
);
5702 kk
= (II
== 1 || II
== 2);
5704 #if NCORE==1 || defined (SEP_STATE)
5705 if (II
== 2 && ((trpt
->o_pm
&2) || ((trpt
-1)->o_pm
&2)))
5708 if (!fairness
|| ((now
._a_t
&1) && now
._cnt
[1] == 1)) /* 5.1.4 */
5710 if (a_cycles
&& !fairness
) /* 5.1.6 -- example by Hirofumi Watanabe */
5714 II
= 3; /* Schwoon & Esparza 2005, Gastin&Moro 2004 */
5716 printf("state match on dfs stack\n");
5721 #if defined(FULLSTACK) && defined(BITSTATE)
5722 if (!JJ
&& (now
._a_t
&1) && depth
> A_depth
)
5724 uchar o_a_t
= now
._a_t
;
5725 now
._a_t
&= ~(1|16|32);
5729 printf("state match on 1st dfs stack\n");
5736 if (II
== 3 && a_cycles
&& (now
._a_t
&1))
5739 if (fairness
&& now
._cnt
[1] > 1) /* was != 0 */
5742 printf(" fairness count non-zero\n");
5751 same_case
: if (Lstate
) depthfound
= Lstate
->D
;
5753 uerror("non-progress cycle");
5755 uerror("acceptance cycle");
5757 #if NCORE>1 && defined(FULL_TRAIL)
5768 #if NCORE>1 && !defined(SEP_STATE) && defined(V_PROVISO)
5769 if (II
!= 0 && (!Lstate
|| Lstate
->cpu_id
< core_id
))
5770 { (trpt
-1)->tau
|= 16;
5773 if ((II
&& JJ
) || (II
== 3))
5774 { /* marker for liveness proviso */
5776 (trpt
-1)->tau
|= 16;
5781 #if NCORE>1 && !defined(SEP_STATE) && defined(V_PROVISO)
5782 if (!(II
!= 0 && (!Lstate
|| Lstate
->cpu_id
< core_id
)))
5783 { /* treat as stack state */
5784 (trpt
-1)->tau
|= 16;
5786 { /* treat as non-stack state */
5787 (trpt
-1)->tau
|= 64;
5791 { /* successor outside stack */
5792 (trpt
-1)->tau
|= 64;
5798 #if NCORE>1 && defined(FULL_TRAIL)
5808 { static long sdone
= (long) 0; long ndone
;
5810 #if defined(ZAPH) && defined(BITSTATE)
5811 zstates
+= (double) hfns
;
5813 ndone
= (unsigned long) (nstates
/((double) FREQ
));
5817 #if defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(MA)
5818 if (nstates
> ((double)(ONE_L
<<(ssize
+1))))
5819 { void resize_hashtable(void);
5823 #if defined(ZAPH) && defined(BITSTATE)
5824 if (zstates
> ((double)(ONE_L
<<(ssize
-2))))
5825 { /* more than half the bits set */
5826 void zap_hashtable(void);
5834 if (write(svfd
, (uchar
*) &now
, vprefix
) != vprefix
)
5835 { fprintf(efd
, "writing %s.svd failed\n", PanSource
);
5839 #if defined(MA) && defined(W_XPT)
5840 if ((unsigned long) nstates
%W_XPT
== 0)
5841 { void w_xpoint(void);
5846 #if defined(FULLSTACK) || defined(CNTRSTACK)
5849 #if defined(FULLSTACK) && !defined(MA)
5850 printf("%d: putting %u (%d)\n", depth
,
5852 (trpt
->ostate
)?trpt
->ostate
->tagged
:0);
5854 printf("%d: putting\n", depth
);
5859 trpt
->ostate
= Lstate
;
5863 if (depth
> mreached
)
5868 trpt
->tau
&= ~(1|2); /* timeout and -request off */
5874 if (now
._nr_pr
== 0) /* claim terminated */
5875 uerror("end state in claim reached");
5876 check_claim(((P0
*)pptr(0))->_p
);
5878 if (trpt
->tau
&4) /* must make a claimmove */
5881 if ((now
._a_t
&2) /* A-bit set */
5882 && now
._cnt
[now
._a_t
&1] == 1)
5884 now
._cnt
[now
._a_t
&1] = 0;
5887 printf("%3d: fairness Rule 3.: _a_t = %d\n",
5897 /* Look for a process with only safe transitions */
5898 /* (special rules apply in the 2nd dfs) */
5899 if (boq
== -1 && From
!= To
5903 && (depth
< z_handoff
)
5908 && ((a_cycles
) || (!a_cycles
&& depth
< z_handoff
))
5915 !((trpt
-1)->proviso
))
5922 !(((char *)&((trpt
-1)->ostate
->state
))[0] & 128))
5924 !(((char *)&(trpt
->ostate
->state
))[0] & 128))
5930 (trpt
-1)->ostate
->proviso
== 0)
5932 trpt
->ostate
->proviso
== 0)
5939 for (II
= From
; II
<= To
; II
++)
5941 for (II
= From
; II
>= To
; II
--)
5944 Resume
: /* pick up here if preselect fails */
5946 tt
= (int) ((P0
*)this)->_p
;
5947 ot
= (uchar
) ((P0
*)this)->_t
;
5948 if (trans
[ot
][tt
]->atom
& 8)
5949 { t
= trans
[ot
][tt
];
5956 From
= To
= II
; /* the process preselected */
5960 trpt
->tau
|= 32; /* preselect marker */
5963 printf("%3d: proc %d Pre", depth
, II
);
5964 printf("Selected (om=%d, tau=%d)\n",
5967 printf("%3d: proc %d PreSelected (tau=%d)\n",
5968 depth
, II
, trpt
->tau
);
5976 #if !defined(NOREDUCE) || (defined(ETIM) && !defined(VERI))
5979 /* The Main Expansion Loop over Processes */
5980 trpt
->o_pm
&= ~(8|16|32|64); /* fairness-marks */
5982 if (fairness
&& boq
== -1
5984 && (!(trpt
->tau
&4) && !((trpt
-1)->tau
&128))
5987 { /* A_bit = 1; Cnt = N in acc states with A_bit 0 */
5990 if (a_cycles
&& (trpt
->o_pm
&2))
5991 { /* Accepting state */
5993 now
._cnt
[now
._a_t
&1] = now
._nr_pr
+ 1;
5996 printf("%3d: fairness Rule 1: cnt=%d, _a_t=%d\n",
5997 depth
, now
._cnt
[now
._a_t
&1], now
._a_t
);
6001 { /* A_bit = 0 when Cnt 0 */
6002 if (now
._cnt
[now
._a_t
&1] == 1)
6004 now
._cnt
[now
._a_t
&1] = 0;
6007 printf("%3d: fairness Rule 3: _a_t = %d\n",
6014 for (II
= From
; II
<= To
; II
++)
6016 for (II
= From
; II
>= To
; II
--)
6020 /* no rendezvous with same proc */
6021 if (boq
!= -1 && trpt
->pr
== II
) continue;
6024 /* limit max nr of interleavings */
6030 && (trpt
-1)->pr
!= II
6031 && trpt
->sched_limit
>= sched_max
)
6039 tt
= (int) ((P0
*)this)->_p
;
6040 ot
= (uchar
) ((P0
*)this)->_t
;
6042 /* don't repeat a previous preselected expansion */
6043 /* could hit this if reduction proviso was false */
6054 if (_m
>_n
||(_n
>3&&_m
!=0)) _n
=_m
;
6055 continue; /* did it before */
6058 trpt
->o_pm
&= ~1; /* no move in this pid yet */
6060 (trpt
+1)->o_event
= now
._event
;
6062 /* Fairness: Cnt++ when Cnt == II */
6064 trpt
->o_pm
&= ~64; /* didn't apply rule 2 */
6069 && now
._cnt
[now
._a_t
&1] == II
+2)
6070 { now
._cnt
[now
._a_t
&1] -= 1;
6072 /* claim need not participate */
6074 now
._cnt
[now
._a_t
&1] = 1;
6077 printf("%3d: proc %d fairness ", depth
, II
);
6078 printf("Rule 2: --cnt to %d (%d)\n",
6079 now
._cnt
[now
._a_t
&1], now
._a_t
);
6081 trpt
->o_pm
|= (32|64);
6085 if (!provided(II
, ot
, tt
, t
)) continue;
6087 /* check all trans of proc II - escapes first */
6091 (trpt
+1)->pr
= (uchar
) II
;
6094 for (ooi
= eoi
= 0, t
= trans
[ot
][tt
]; t
; t
= t
->nxt
, ooi
++)
6095 { if (strcmp(t
->tp
, "else") == 0)
6100 { t
= trans
[ot
][tt
];
6102 printf("randomizer: suppressed, saw else\n");
6107 printf("randomizer: skip %d in %d\n", eoi
, ooi
);
6109 for (t
= trans
[ot
][tt
]; t
; t
= t
->nxt
)
6110 if (eoi
-- <= 0) break;
6113 for ( ; t
&& ooi
> 0; t
= t
->nxt
, ooi
--)
6115 for (t
= trans
[ot
][tt
]; t
; t
= t
->nxt
)
6119 /* exploring all transitions from
6120 * a single escape state suffices
6122 if (trpt
->e_state
> 0
6123 && trpt
->e_state
!= t
->e_trans
)
6126 printf("skip 2nd escape %d (did %d before)\n",
6127 t
->e_trans
, trpt
->e_state
);
6134 #include FORWARD_MOVES
6135 P999
: /* jumps here when move succeeds */
6137 if (!(_m
= do_transit(t
, II
))) continue;
6144 && (trpt
-1)->pr
!= II
)
6145 { trpt
->sched_limit
= 1 + (trpt
-1)->sched_limit
;
6150 /* for branching-time, can accept reduction only if */
6151 /* the persistent set contains just 1 transition */
6152 { if ((trpt
->tau
&32) && (trpt
->o_pm
&1))
6154 trpt
->o_pm
|= 1; /* we moved */
6157 trpt
->o_pm
|= 1; /* we moved */
6160 if (loopstate
[ot
][tt
])
6163 printf("exiting from loopstate:\n");
6172 #if defined(VERBOSE) || defined(CHECK)
6174 cpu_printf("%3d: proc %d exec %d \n", depth
, II
, t
->t_id
);
6176 cpu_printf("%3d: proc %d exec %d, %d to %d, %s %s %s %saccepting [tau=%d]\n",
6177 depth
, II
, t
->forw
, tt
, t
->st
, t
->tp
,
6178 (t
->atom
&2)?"atomic":"",
6179 (boq
!= -1)?"rendez-vous":"",
6180 (trpt
->o_pm
&2)?"":"non-", trpt
->tau
);
6183 cpu_printf("\t(escape to state %d)\n", t
->st
);
6187 cpu_printf("\t(randomizer %d)\n", ooi
);
6194 now
._last
= II
- BASE
;
6197 trpt
->e_state
= t
->e_trans
;
6200 trpt
->pr
= (uchar
) II
;
6202 trpt
->o_pm
&= ~(2|4);
6204 { ((P0
*)this)->_p
= t
->st
;
6205 /* moved down reached[ot][t->st] = 1; */
6210 #if (ACCEPT_LAB>0 && !defined(NP)) || (PROG_LAB>0 && defined(HAS_NP))
6213 #define P__Q ((P0 *)pptr(ii))
6216 /* state 1 of np_ claim is accepting */
6217 if (((P0
*)pptr(0))->_p
== 1)
6220 for (ii
= 0; ii
< (int) now
._nr_pr
; ii
++)
6221 { if (accpstate
[P__Q
->_t
][P__Q
->_p
])
6227 #if defined(HAS_NP) && PROG_LAB>0
6228 for (ii
= 0; ii
< (int) now
._nr_pr
; ii
++)
6229 { if (progstate
[P__Q
->_t
][P__Q
->_p
])
6237 trpt
->o_t
= t
; trpt
->o_n
= _n
;
6238 trpt
->o_ot
= ot
; trpt
->o_tt
= tt
;
6239 trpt
->o_To
= To
; trpt
->o_m
= _m
;
6244 if (boq
!= -1 || (t
->atom
&2))
6247 /* atomic sequence in claim */
6253 { if ((trpt
-1)->tau
&4)
6258 /* if claim allowed timeout, so */
6259 /* does the next program-step: */
6260 if (((trpt
-1)->tau
&1) && !(trpt
->tau
&4))
6266 if (boq
== -1 && (t
->atom
&2))
6267 { From
= To
= II
; nlinks
++;
6270 { From
= BASE
; To
= now
._nr_pr
-1;
6272 { From
= now
._nr_pr
-1; To
= BASE
;
6275 #if NCORE>1 && defined(FULL_TRAIL)
6277 { Push_Stack_Tree(II
, t
->t_id
);
6280 goto Down
; /* pseudo-recursion */
6283 cpu_printf("%d: Up - %s\n", depth
,
6284 (trpt
->tau
&4)?"claim":"program");
6292 #if defined(MA) || NCORE>1
6293 if (depth
<= 0) return;
6294 /* e.g., if first state is old, after a restart */
6298 && depth
< hiwater
- (HHH
-DDD
) + 2)
6305 printf("unzap %d: %d\n", CNT2
, hiwater
);
6309 if (trpt
->o_pm
&128) /* fairness alg */
6310 { now
._cnt
[now
._a_t
&1] = trpt
->bup
.oval
;
6311 _n
= 1; trpt
->o_pm
&= ~128;
6313 #if defined(VERBOSE) || defined(CHECK)
6314 printf("%3d: reversed fairness default move\n", depth
);
6321 { int d
; Trail
*trl
;
6323 for (d
= 1; d
< depth
; d
++)
6324 { trl
= getframe(depth
-d
); /* was (trpt-d) */
6326 { now
._last
= trl
->pr
- BASE
;
6330 now
._last
= (depth
<1)?0:(trpt
-1)->pr
;
6334 now
._event
= trpt
->o_event
;
6337 if ((now
._a_t
&1) && depth
<= A_depth
)
6338 return; /* to checkcycles() */
6340 t
= trpt
->o_t
; _n
= trpt
->o_n
;
6341 ot
= trpt
->o_ot
; II
= trpt
->pr
;
6342 tt
= trpt
->o_tt
; this = pptr(II
);
6343 To
= trpt
->o_To
; _m
= trpt
->o_m
;
6348 _m
= do_reverse(t
, II
, _m
);
6350 #include REVERSE_MOVES
6351 R999
: /* jumps here when done */
6354 cpu_printf("%3d: proc %d reverses %d, %d to %d\n",
6355 depth
, II
, t
->forw
, tt
, t
->st
);
6356 cpu_printf("\t%s [abit=%d,adepth=%d,tau=%d,%d]\n",
6357 t
->tp
, now
._a_t
, A_depth
, trpt
->tau
, (trpt
-1)->tau
);
6360 /* pass the proviso tags */
6361 if ((trpt
->tau
&8) /* rv or atomic */
6363 (trpt
-1)->tau
|= 16;
6365 if ((trpt
->tau
&8) /* rv or atomic */
6367 (trpt
-1)->tau
|= 64;
6376 (trans
[ot
][tt
])->om
= _m
; /* head of list */
6378 /* i.e., not set if rv fails */
6381 #if defined(VERI) && !defined(NP)
6382 if (II
== 0 && verbose
&& !reached
[ot
][t
->st
])
6384 printf("depth %d: Claim reached state %d (line %d)\n",
6385 depth
, t
->st
, src_claim
[t
->st
]);
6389 reached
[ot
][t
->st
] = 1;
6390 reached
[ot
][tt
] = 1;
6393 else trpt
->e_state
= 0; /* undo */
6395 if (_m
>_n
||(_n
>3&&_m
!=0)) _n
=_m
;
6396 ((P0
*)this)->_p
= tt
;
6400 { t
= trans
[ot
][tt
];
6402 printf("randomizer: continue for %d more\n", ooi
);
6408 printf("randomizer: done\n");
6412 /* Fairness: undo Rule 2 */
6418 if (now
._cnt
[now
._a_t
&1] == 1)
6419 now
._cnt
[now
._a_t
&1] = 2;
6421 now
._cnt
[now
._a_t
&1] += 1;
6423 printf("%3d: proc %d fairness ", depth
, II
);
6424 printf("undo Rule 2, cnt=%d, _a_t=%d\n",
6425 now
._cnt
[now
._a_t
&1], now
._a_t
);
6427 trpt
->o_pm
&= ~(32|64);
6440 if (II
== 0) break; /* never claim */
6442 } /* all processes */
6444 tally_succ(trpt
->n_succ
);
6447 if (_n
== 0 /* no process could move */
6452 && trpt
->sched_limit
>= sched_max
)
6453 { _n
= 1; /* not a deadlock */
6457 /* Fairness: undo Rule 2 */
6458 if (trpt
->o_pm
&32) /* remains if proc blocked */
6461 if (now
._cnt
[now
._a_t
&1] == 1)
6462 now
._cnt
[now
._a_t
&1] = 2;
6464 now
._cnt
[now
._a_t
&1] += 1;
6466 printf("%3d: proc -- fairness ", depth
);
6467 printf("undo Rule 2, cnt=%d, _a_t=%d\n",
6468 now
._cnt
[now
._a_t
&1], now
._a_t
);
6474 && _n
== 0 /* nobody moved */
6476 && !(trpt
->tau
&4) /* in program move */
6478 && !(trpt
->tau
&8) /* not an atomic one */
6480 && ((trpt
->tau
&1) || endstate())
6483 && (trpt
->tau
&1) /* already tried timeout */
6488 && !((trpt
->tau
&32) && (_n
== 0 || (trpt
->tau
&16)))
6490 && now
._cnt
[now
._a_t
&1] > 0) /* needed more procs */
6492 trpt
->o_pm
|= 128 | ((trpt
-1)->o_pm
&(2|4));
6493 trpt
->bup
.oval
= now
._cnt
[now
._a_t
&1];
6494 now
._cnt
[now
._a_t
&1] = 1;
6501 From
= BASE
; To
= now
._nr_pr
-1;
6503 From
= now
._nr_pr
-1; To
= BASE
;
6505 #if defined(VERBOSE) || defined(CHECK)
6506 printf("%3d: fairness default move ", depth
);
6507 printf("(all procs block)\n");
6512 Q999
: /* returns here with _n>0 when done */;
6515 now
._cnt
[now
._a_t
&1] = 0;
6518 printf("%3d: fairness undo Rule 1, _a_t=%d\n",
6524 now
._cnt
[now
._a_t
&1] = 1;
6527 printf("%3d: fairness undo Rule 3, _a_t=%d\n",
6535 /* at least one move that was preselected at this */
6536 /* level, blocked or was a loop control flow point */
6537 if ((trpt
->tau
&32) && (_n
== 0 || (trpt
->tau
&16)))
6539 /* preselected move - no successors outside stack */
6540 if ((trpt
->tau
&32) && !(trpt
->tau
&64))
6543 { From
= BASE
; To
= now
._nr_pr
-1;
6545 { From
= now
._nr_pr
-1; To
= BASE
;
6548 printf("%3d: proc %d UnSelected (_n=%d, tau=%d)\n",
6549 depth
, II
+1, _n
, trpt
->tau
);
6551 _n
= 0; trpt
->tau
&= ~(16|32|64);
6553 if (II
<= To
) /* II already decremented */
6555 if (II
>= BASE
) /* II already decremented */
6562 /* at least one move that was preselected at this */
6563 /* level, blocked or truncated at the next level */
6564 /* implied: #ifdef FULLSTACK */
6565 if ((trpt
->tau
&32) && (_n
== 0 || (trpt
->tau
&16)))
6568 printf("%3d: proc %d UnSelected (_n=%d, tau=%d)\n",
6569 depth
, II
+1, (int) _n
, trpt
->tau
);
6571 if (a_cycles
&& (trpt
->tau
&16))
6572 { if (!(now
._a_t
&1))
6575 printf("%3d: setting proviso bit\n", depth
);
6580 (trpt
-1)->proviso
= 1;
6586 if ((trpt
-1)->ostate
)
6587 ((char *)&((trpt
-1)->ostate
->state
))[0] |= 128;
6589 ((char *)&(trpt
->ostate
->state
))[0] |= 128;
6594 if ((trpt
-1)->ostate
)
6595 (trpt
-1)->ostate
->proviso
= 1;
6597 trpt
->ostate
->proviso
= 1;
6601 From
= BASE
; To
= now
._nr_pr
-1;
6603 From
= now
._nr_pr
-1; To
= BASE
;
6605 _n
= 0; trpt
->tau
&= ~(16|32|64);
6606 goto Again
; /* do full search */
6607 } /* else accept reduction */
6610 { From
= BASE
; To
= now
._nr_pr
-1;
6612 { From
= now
._nr_pr
-1; To
= BASE
;
6614 _n
= 0; trpt
->tau
&= ~(16|32|64);
6616 if (II
<= To
) /* already decremented */
6618 if (II
>= BASE
) /* already decremented */
6627 if (_n
== 0 || ((trpt
->tau
&4) && (trpt
->tau
&2)))
6630 cpu_printf("%3d: no move [II=%d, tau=%d, boq=%d]\n",
6631 depth
, II
, trpt
->tau
, boq
);
6634 /* ok if a rendez-vous fails: */
6635 if (boq
!= -1) goto Done
;
6637 /* ok if no procs or we're at maxdepth */
6638 if ((now
._nr_pr
== 0 && (!strict
|| qs_empty()))
6642 || depth
>= maxdepth
-1) goto Done
;
6643 if ((trpt
->tau
&8) && !(trpt
->tau
&4))
6644 { trpt
->tau
&= ~(1|8);
6645 /* 1=timeout, 8=atomic */
6647 From
= BASE
; To
= now
._nr_pr
-1;
6649 From
= now
._nr_pr
-1; To
= BASE
;
6652 cpu_printf("%3d: atomic step proc %d unexecutable\n", depth
, II
+1);
6655 trpt
->tau
|= 4; /* switch to claim */
6660 if (!(trpt
->tau
&1)) /* didn't try timeout yet */
6666 if (trpt
->tau
&2) /* requested */
6671 cpu_printf("%d: timeout\n", depth
);
6676 { /* only claim can enable timeout */
6678 && !((trpt
-1)->tau
&4))
6679 /* blocks inside an atomic */ goto BreakOut
;
6681 cpu_printf("%d: req timeout\n",
6684 (trpt
-1)->tau
|= 2; /* request */
6685 #if NCORE>1 && defined(FULL_TRAIL)
6694 cpu_printf("%d: timeout\n", depth
);
6705 { trpt
->tau
|= 4; /* claim stuttering */
6706 trpt
->tau
|= 128; /* stutter mark */
6708 cpu_printf("%d: claim stutter\n", depth
);
6716 if (!noends
&& !a_cycles
&& !endstate())
6717 { depth
--; trpt
--; /* new 4.2.3 */
6718 uerror("invalid end state");
6722 else if (a_cycles
&& (trpt
->o_pm
&2)) /* new 4.2.4 */
6724 uerror("accept stutter");
6731 if (!(trpt
->tau
&8)) /* not in atomic seqs */
6736 /* --after-- a program-step, i.e., */
6737 /* after backtracking a claim-step */
6739 /* with at least one running process */
6740 /* unless in a stuttered accept state */
6741 && ((now
._nr_pr
> 1) || (trpt
->o_pm
&2))
6749 cpu_printf("Consider check %d %d...\n",
6750 now
._a_t
, now
._cnt
[0]);
6752 if ((now
._a_t
&2) /* A-bit */
6753 && (now
._cnt
[0] == 1))
6757 if (a_cycles
&& (trpt
->o_pm
&2))
6762 #if defined(FULLSTACK) || defined(CNTRSTACK)
6765 && (((trpt
->tau
&4) && !(trpt
->tau
&128))
6766 || ( (trpt
-1)->tau
&128)))
6772 #if defined(FULLSTACK)
6773 printf("%d: zapping %u (%d)\n",
6774 depth
, trpt
->ostate
,
6775 (trpt
->ostate
)?trpt
->ostate
->tagged
:0);
6784 && (((trpt
->tau
&4) && !(trpt
->tau
&128))
6785 || ( (trpt
-1)->tau
&128)))
6791 printf("%d: zapping\n", depth
);
6796 gstore((char *) &now
, vsize
, 1);
6803 #if NCORE>1 && defined(FULL_TRAIL)
6813 void new_state(void) { /* place holder */ }
6817 assert(int a
, char *s
, int ii
, int tt
, Trans
*t
)
6819 if (!a
&& !noasserts
)
6821 strcpy(bad
, "assertion violated ");
6822 if (strlen(s
) > 1000)
6823 { strncpy(&bad
[19], (const char *) s
, 1000);
6826 strcpy(&bad
[19], s
);
6830 #ifndef NOBOUNDCHECK
6832 Boundcheck(int x
, int y
, int a1
, int a2
, Trans
*a3
)
6834 assert((x
>= 0 && x
< y
), "- invalid array index",
6843 printf("%9.8g states, stored (%g visited)\n",
6844 nstates
- nShadow
, nstates
);
6846 printf("%9.8g states, stored\n", nstates
);
6849 printf(" %8g nominal states (- rv and atomic)\n", nstates
-midrv
-nlinks
+revrv
);
6850 printf(" %8g rvs succeeded\n", midrv
-failedrv
);
6852 printf(" %8g nominal states (stored-atomic)\n", nstates
-nlinks
);
6855 printf(" %8g midrv\n", midrv
);
6856 printf(" %8g failedrv\n", failedrv
);
6857 printf(" %8g revrv\n", revrv
);
6860 printf("%9.8g states, matched\n", truncs
);
6862 printf("%9.8g matches within stack\n",truncs2
);
6865 printf("%9.8g transitions (= visited+matched)\n",
6868 printf("%9.8g transitions (= stored+matched)\n",
6870 printf("%9.8g atomic steps\n", nlinks
);
6871 if (nlost
) printf("%g lost messages\n", (double) nlost
);
6874 printf("hash conflicts: %9.8g (resolved)\n", hcmp
);
6876 if (hcmp
> (double) (1<<ssize
))
6877 { printf("hint: increase hashtable-size (-w) to reduce runtime\n");
6878 } /* in multi-core: also reduces lock delays on access to hashtable */
6882 printf("%8g states allocated for dfs stack\n", ngrabs
);
6885 printf("\nhash factor: %4g (best if > 100.)\n\n",
6886 (double)(((double) udmem
) * 8.0) / (double) nstates
);
6888 printf("\nhash factor: %4g (best if > 100.)\n\n",
6889 (double)(1<<(ssize
-8)) / (double) nstates
* 256.0);
6890 printf("bits set per state: %u (-k%u)\n", hfns
, hfns
);
6893 { printf("total bits available: %8g (-M%ld)\n",
6894 ((double) udmem
) * 8.0, udmem
/(1024L*1024L));
6896 printf("total bits available: %8g (-w%d)\n",
6897 ((double) (ONE_L
<< (ssize
-4)) * 16.0), ssize
);
6901 printf("bfs disk reads: %ld writes %ld -- diff %ld\n",
6902 bfs_dsk_reads
, bfs_dsk_writes
, bfs_dsk_writes
-bfs_dsk_reads
);
6903 if (bfs_dsk_read
>= 0) (void) close(bfs_dsk_read
);
6904 if (bfs_dsk_write
>= 0) (void) close(bfs_dsk_write
);
6905 (void) unlink("pan_bfs_dsk.tmp");
6912 #if defined(BITSTATE) || !defined(NOCOMP)
6913 double nr1
, nr2
, nr3
= 0.0, nr4
, nr5
= 0.0;
6914 #if !defined(MA) && (defined(MEMCNT) || defined(MEMLIM))
6917 int mverbose
= verbose
;
6921 if (verbose
) cpu_printf("wrapup -- %d error(s)\n", errors
);
6925 void dsk_stats(void);
6928 if (search_terminated
!= NULL
)
6929 { *search_terminated
|= 2; /* wrapup */
6931 exit(0); /* normal termination, not an error */
6934 #if !defined(WIN32) && !defined(WIN64)
6935 signal(SIGINT
, SIG_DFL
);
6937 printf("\n(%s)\n", SpinVersion
);
6938 if (!done
) printf("Warning: Search not completed\n");
6940 (void) unlink((const char *)stackfile
);
6944 { printf(" + Multi-Core (NCORE=%d)\n", NCORE
);
6946 { printf(" + Multi-Core (NCORE=%d -z%d)\n", NCORE
, z_handoff
);
6950 printf(" + Using Breadth-First Search\n");
6953 printf(" + Partial Order Reduction\n");
6956 printf(" + Reverse Depth-First Search Order\n");
6959 printf(" + Reverse Transition Ordering\n");
6962 printf(" + Randomized Transition Ordering\n");
6965 printf(" + Scheduling Restriction (-DSCHED=%d)\n", sched_max
);
6968 printf(" + Compression\n");
6971 printf(" + Graph Encoding (-DMA=%d)\n", MA
);
6973 printf(" Restarted from checkpoint %s.xpt\n", PanSource
);
6978 printf(" + FullStack Matching\n");
6981 printf(" + CntrStack Matching\n");
6985 printf("\nBit statespace search for:\n");
6988 printf("\nHash-Compact %d search for:\n", HC
);
6990 printf("\nFull statespace search for:\n");
6994 #ifdef NEGATED_TRACE
6995 printf(" notrace assertion +\n");
6997 printf(" trace assertion +\n");
7001 printf(" never claim +\n");
7002 printf(" assertion violations ");
7004 printf("- (disabled by -A flag)\n");
7006 printf("+ (if within scope of claim)\n");
7009 printf(" never claim - (not selected)\n");
7011 printf(" never claim - (none specified)\n");
7013 printf(" assertion violations ");
7015 printf("- (disabled by -A flag)\n");
7021 printf(" non-progress cycles ");
7023 printf(" acceptance cycles ");
7026 printf("+ (fairness %sabled)\n",
7027 fairness
?"en":"dis");
7028 else printf("- (not selected)\n");
7030 printf(" cycle checks - (disabled by -DSAFETY)\n");
7033 printf(" invalid end states - ");
7034 printf("(disabled by ");
7036 printf("-E flag)\n\n");
7038 printf("never claim)\n\n");
7040 printf(" invalid end states ");
7042 printf("- (disabled by -E flag)\n\n");
7046 printf("State-vector %d byte, depth reached %ld", hmax
,
7048 (nr_handoffs
* z_handoff
) +
7051 printf(", errors: %d\n", errors
);
7055 { extern void dfa_stats(void);
7056 if (maxgs
+a_cycles
+2 < MA
)
7057 printf("MA stats: -DMA=%d is sufficient\n",
7064 printf("stackframes: %d/%d\n\n", smax
, svmax
);
7065 printf("stats: fa %d, fh %d, zh %d, zn %d - ",
7067 printf("check %d holds %d\n", Ccheck
, Cholds
);
7068 printf("stack stats: puts %d, probes %d, zaps %d\n",
7074 #if defined(BITSTATE) || !defined(NOCOMP)
7075 nr1
= (nstates
-nShadow
)*
7076 (double)(hmax
+sizeof(struct H_el
)-sizeof(unsigned));
7080 nr2
= (double) ((maxdepth
+3)*sizeof(Trail
));
7083 #if !defined(MA) || defined(COLLAPSE)
7084 nr3
= (double) (ONE_L
<<ssize
)*sizeof(struct H_el
*);
7088 nr3
= (double) (udmem
);
7090 nr3
= (double) (ONE_L
<<(ssize
-3));
7092 nr5
= (double) (ONE_L
<<(ssize
-3));
7095 nr5
= (double) (maxdepth
*sizeof(struct H_el
*));
7098 nr4
= (double) (svmax
* (sizeof(Svtack
) + hmax
))
7099 + (double) (smax
* (sizeof(Stack
) + Maxbody
));
7101 if (mverbose
|| memcnt
< nr1
+nr2
+nr3
+nr4
+nr5
)
7103 { double remainder
= memcnt
;
7104 double tmp_nr
= memcnt
-nr3
-nr4
-(nr2
-fragment
)-nr5
;
7105 #if NCORE>1 && !defined(SEP_STATE)
7106 tmp_nr
-= ((double) NCORE
* LWQ_SIZE
) + GWQ_SIZE
;
7108 if (tmp_nr
< 0.0) tmp_nr
= 0.;
7109 printf("Stats on memory usage (in Megabytes):\n");
7110 printf("%9.3f equivalent memory usage for states",
7111 nr1
/1048576.); /* 1024*1024=1048576 */
7112 printf(" (stored*(State-vector + overhead))\n");
7113 #if NCORE>1 && !defined(WIN32) && !defined(WIN64)
7114 printf("%9.3f shared memory reserved for state storage\n",
7115 mem_reserved
/1048576.);
7117 printf(" in %d local heaps of %7.3f MB each\n",
7118 NCORE
, mem_reserved
/(NCORE
*1048576.));
7124 printf("%9.3f memory used for hash array (-M%ld)\n",
7125 nr3
/1048576., udmem
/(1024L*1024L));
7127 printf("%9.3f memory used for hash array (-w%d)\n",
7128 nr3
/1048576., ssize
);
7130 printf("%9.3f memory used for bit stack\n",
7132 remainder
= remainder
- nr3
- nr5
;
7134 printf("%9.3f actual memory usage for states",
7136 remainder
-= tmp_nr
;
7139 { if (tmp_nr
> nr1
) printf("unsuccessful ");
7140 printf("compression: %.2f%%)\n",
7141 (100.0*tmp_nr
)/nr1
);
7143 printf("less than 1k)\n");
7146 { printf(" state-vector as stored = %.0f byte",
7147 (tmp_nr
)/(nstates
-nShadow
) -
7148 (double) (sizeof(struct H_el
) - sizeof(unsigned)));
7149 printf(" + %ld byte overhead\n",
7150 (long int) sizeof(struct H_el
)-sizeof(unsigned));
7153 #if !defined(MA) || defined(COLLAPSE)
7154 printf("%9.3f memory used for hash table (-w%d)\n",
7155 nr3
/1048576., ssize
);
7160 printf("%9.3f memory used for DFS stack (-m%ld)\n",
7161 nr2
/1048576., maxdepth
);
7165 remainder
-= ((double) NCORE
* LWQ_SIZE
) + GWQ_SIZE
;
7166 printf("%9.3f shared memory used for work-queues\n",
7167 (GWQ_SIZE
+ (double) NCORE
* LWQ_SIZE
) /1048576.);
7168 printf(" in %d queues of %7.3f MB each",
7169 NCORE
, (double) LWQ_SIZE
/1048576.);
7171 printf(" + a global q of %7.3f MB\n",
7172 (double) GWQ_SIZE
/ 1048576.);
7177 if (remainder
- fragment
> 1048576.)
7178 printf("%9.3f other (proc and chan stacks)\n",
7179 (remainder
-fragment
)/1048576.);
7180 if (fragment
> 1048576.)
7181 printf("%9.3f memory lost to fragmentation\n",
7183 printf("%9.3f total actual memory usage\n\n",
7191 printf("%9.3f memory usage (Mbyte)\n\n",
7195 printf("nr of templates: [ globals chans procs ]\n");
7196 printf("collapse counts: [ ");
7197 { int i
; for (i
= 0; i
< 256+2; i
++)
7199 printf("%d ", ncomps
[i
]);
7203 if ((done
|| verbose
) && !no_rck
) do_reach();
7206 printf("\nPeg Counts (transitions executed):\n");
7207 for (i
= 1; i
< NTRANS
; i
++)
7208 { if (peg
[i
]) putpeg(i
, peg
[i
]);
7215 if (vprefix
> 0) close(svfd
);
7218 printf("%g loopstates hit\n", cnt_loops
);
7223 #if NCORE>1 && defined(T_ALERT)
7231 { printf("Interrupted\n");
7233 was_interrupted
= 1;
7241 * super fast hash, based on Paul Hsieh's function
7242 * http://www.azillionmonkeys.com/qed/hash.html
7246 #if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
7247 || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
7248 #define get16bits(d) (*((const uint16_t *) (d)))
7252 #define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8)\
7253 +(uint32_t)(((const uint8_t *)(d))[0]) )
7257 d_sfh(const char *s
, int len
)
7258 { uint32_t h
= len
, tmp
;
7264 for ( ; len
> 0; len
--)
7265 { h
+= get16bits(s
);
7266 tmp
= (get16bits(s
+2) << 11) ^ h
;
7267 h
= (h
<< 16) ^ tmp
;
7268 s
+= 2*sizeof(uint16_t);
7272 case 3: h
+= get16bits(s
);
7274 h
^= s
[sizeof(uint16_t)] << 18;
7277 case 2: h
+= get16bits(s
);
7298 #if defined(HASH64) || defined(WIN64)
7299 /* 64-bit Jenkins hash, 1997
7300 * http://burtleburtle.net/bob/c/lookup8.c
7302 #define mix(a,b,c) \
7303 { a -= b; a -= c; a ^= (c>>43); \
7304 b -= c; b -= a; b ^= (a<<9); \
7305 c -= a; c -= b; c ^= (b>>8); \
7306 a -= b; a -= c; a ^= (c>>38); \
7307 b -= c; b -= a; b ^= (a<<23); \
7308 c -= a; c -= b; c ^= (b>>5); \
7309 a -= b; a -= c; a ^= (c>>35); \
7310 b -= c; b -= a; b ^= (a<<49); \
7311 c -= a; c -= b; c ^= (b>>11); \
7312 a -= b; a -= c; a ^= (c>>12); \
7313 b -= c; b -= a; b ^= (a<<18); \
7314 c -= a; c -= b; c ^= (b>>22); \
7317 /* 32-bit Jenkins hash, 2006
7318 * http://burtleburtle.net/bob/c/lookup3.c
7320 #define rot(x,k) (((x)<<(k))|((x)>>(32-(k))))
7322 #define mix(a,b,c) \
7323 { a -= c; a ^= rot(c, 4); c += b; \
7324 b -= a; b ^= rot(a, 6); a += c; \
7325 c -= b; c ^= rot(b, 8); b += a; \
7326 a -= c; a ^= rot(c,16); c += b; \
7327 b -= a; b ^= rot(a,19); a += c; \
7328 c -= b; c ^= rot(b, 4); b += a; \
7331 #define final(a,b,c) \
7332 { c ^= b; c -= rot(b,14); \
7333 a ^= c; a -= rot(c,11); \
7334 b ^= a; b -= rot(a,25); \
7335 c ^= b; c -= rot(b,16); \
7336 a ^= c; a -= rot(c,4); \
7337 b ^= a; b -= rot(a,14); \
7338 c ^= b; c -= rot(b,24); \
7343 d_hash(uchar
*kb
, int nbytes
)
7345 #if defined(HASH64) || defined(WIN64)
7346 uint64_t a
= 0, b
, c
, n
;
7347 uint64_t *k
= (uint64_t *) kb
;
7349 uint32_t a
, b
, c
, n
;
7350 uint32_t *k
= (uint32_t *) kb
;
7352 /* extend to multiple of words, if needed */
7353 n
= nbytes
/WS
; /* nr of words */
7354 a
= nbytes
- (n
*WS
);
7359 case 3: *bp
++ = 0; /* fall thru */
7360 case 2: *bp
++ = 0; /* fall thru */
7364 #if defined(HASH64) || defined(WIN64)
7365 b
= HASH_CONST
[HASH_NR
];
7366 c
= 0x9e3779b97f4a7c13LL
; /* arbitrary value */
7375 c
+= (((uint64_t) nbytes
)<<3);
7383 a
= c
= 0xdeadbeef + (n
<<2);
7384 b
= HASH_CONST
[HASH_NR
];
7401 j1
= c
&nmask
; j3
= a
&7; /* 1st bit */
7402 j2
= b
&nmask
; j4
= (a
>>3)&7; /* 2nd bit */
7407 s_hash(uchar
*cp
, int om
)
7410 d_sfh((const char *) cp
, om
); /* sets K1 */
7412 d_hash(cp
, om
); /* sets K1 etc */
7416 j1
= K1
% omaxdepth
;
7429 srand(123); /* fixed startpoint */
7430 prerand
= (int *) emalloc((omaxdepth
+3)*sizeof(int));
7431 for (i
= 0; i
< omaxdepth
+3; i
++)
7432 prerand
[i
] = rand();
7436 { if (!prerand
) inirand();
7437 return prerand
[depth
];
7442 set_masks(void) /* 4.2.5 */
7444 if (WS
== 4 && ssize
>= 32)
7445 { mask
= 0xffffffff;
7448 case 34: nmask
= (mask
>>1); break;
7449 case 33: nmask
= (mask
>>2); break;
7450 default: nmask
= (mask
>>3); break;
7456 { mask
= ((ONE_L
<<ssize
)-1); /* hash init */
7463 { fprintf(stderr
, "pan: wordsize %ld not supported\n", (long int) WS
);
7465 } else /* WS == 4 and ssize < 32 */
7466 { mask
= ((ONE_L
<<ssize
)-1); /* hash init */
7471 static long reclaim_size
;
7472 static char *reclaim_mem
;
7473 #if defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(MA)
7475 #error cannot combine AUTO_RESIZE with NCORE>1 yet
7477 static struct H_el
**N_tab
;
7479 reverse_capture(struct H_el
*p
)
7481 reverse_capture(p
->nxt
);
7482 /* last element of list moves first */
7483 /* to preserve list-order */
7485 if (ssize
< 8*WS
) /* probably always true */
7492 resize_hashtable(void)
7494 if (WS
== 4 && ssize
>= 27 - 1)
7495 { return; /* canot increase further */
7498 ssize
+= 2; /* 4x size */
7500 printf("pan: resizing hashtable to -w%d.. ", ssize
);
7502 N_tab
= (struct H_el
**)
7503 emalloc((ONE_L
<<ssize
)*sizeof(struct H_el
*));
7505 set_masks(); /* they changed */
7507 for (j1
= 0; j1
< (ONE_L
<< (ssize
- 2)); j1
++)
7508 { reverse_capture(H_tab
[j1
]);
7510 reclaim_mem
= (char *) H_tab
;
7511 reclaim_size
= (ONE_L
<< (ssize
- 2));
7517 #if defined(ZAPH) && defined(BITSTATE)
7520 { cpu_printf("pan: resetting hashtable\n");
7522 { memset(SS
, 0, udmem
);
7524 { memset(SS
, 0, ONE_L
<<(ssize
-3));
7530 main(int argc
, char *argv
[])
7531 { void to_compile(void);
7533 efd
= stderr
; /* default */
7535 bstore
= bstore_reg
; /* default */
7539 strcpy(o_cmdline
, "");
7540 for (j
= 1; j
< argc
; j
++)
7541 { strcat(o_cmdline
, argv
[j
]);
7542 strcat(o_cmdline
, " ");
7544 /* printf("Command Line: %s\n", o_cmdline); */
7545 if (strlen(o_cmdline
) >= sizeof(o_cmdline
))
7546 { Uerror("option list too long");
7549 while (argc
> 1 && argv
[1][0] == '-')
7550 { switch (argv
[1][1]) {
7553 case 'a': fprintf(efd
, "error: -a disabled");
7556 case 'a': a_cycles
= 1; break;
7559 case 'A': noasserts
= 1; break;
7560 case 'b': bounded
= 1; break;
7562 case 'C': coltrace
= 1; goto samething
;
7564 case 'c': upto
= atoi(&argv
[1][2]); break;
7565 case 'd': state_tables
++; break;
7566 case 'e': every_error
= 1; Nr_Trails
= 1; break;
7567 case 'E': noends
= 1; break;
7569 case 'F': if (strlen(argv
[1]) > 2)
7570 stackfile
= &argv
[1][2];
7573 #if !defined(SAFETY) && !defined(NOFAIR)
7574 case 'f': fairness
= 1; break;
7577 case 'g': gui
= 1; goto samething
;
7579 case 'h': if (!argv
[1][2]) usage(efd
); else
7580 HASH_NR
= atoi(&argv
[1][2])%33; break;
7581 case 'I': iterative
= 2; every_error
= 1; break;
7582 case 'i': iterative
= 1; every_error
= 1; break;
7583 case 'J': like_java
= 1; break; /* Klaus Havelund */
7585 case 'k': hfns
= atoi(&argv
[1][2]); break;
7588 case 'L': sched_max
= atoi(&argv
[1][2]); break;
7592 case 'l': a_cycles
= 1; break;
7594 case 'l': fprintf(efd
, "error: -l disabled");
7599 case 'M': udmem
= atoi(&argv
[1][2]); break;
7600 case 'G': udmem
= atoi(&argv
[1][2]); udmem
*= 1024; break;
7603 fprintf(stderr
, "-M and -G affect only -DBITSTATE\n");
7606 case 'm': maxdepth
= atoi(&argv
[1][2]); break;
7607 case 'n': no_rck
= 1; break;
7608 case 'P': readtrail
= 1; onlyproc
= atoi(&argv
[1][2]);
7609 if (argv
[2][0] != '-') /* check next arg */
7610 { trailfilename
= argv
[2];
7611 argc
--; argv
++; /* skip next arg */
7615 case 'p': vprefix
= atoi(&argv
[1][2]); break;
7618 case 'Q': quota
= (double) 60.0 * (double) atoi(&argv
[1][2]); break;
7620 case 'q': strict
= 1; break;
7621 case 'R': Nrun
= atoi(&argv
[1][2]); break;
7624 samething
: readtrail
= 1;
7625 if (isdigit(argv
[1][2]))
7626 whichtrail
= atoi(&argv
[1][2]);
7627 else if (argc
> 2 && argv
[2][0] != '-') /* check next arg */
7628 { trailfilename
= argv
[2];
7629 argc
--; argv
++; /* skip next arg */
7632 case 'S': silent
= 1; goto samething
;
7635 case 's': hfns
= 1; break;
7637 case 'T': TMODE
= 0444; break;
7638 case 't': if (argv
[1][2]) tprefix
= &argv
[1][2]; break;
7639 case 'V': start_timer(); printf("Generated by %s\n", SpinVersion
);
7640 to_compile(); pan_exit(2); break;
7641 case 'v': verbose
++; break;
7642 case 'w': ssize
= atoi(&argv
[1][2]); break;
7643 case 'Y': signoff
= 1; break;
7644 case 'X': efd
= stdout
; break;
7645 case 'x': exclusive
= 1; break;
7647 /* -B ip is passthru to proxy of remote ip address: */
7648 case 'B': argc
--; argv
++; break;
7649 case 'Q': worker_pids
[0] = atoi(&argv
[1][2]); break;
7650 /* -Un means that the nth worker should be instantiated as a proxy */
7651 case 'U': proxy_pid
= atoi(&argv
[1][2]); break;
7652 /* -W means that this copy is started by a cluster-server as a remote */
7653 /* this flag is passed to ./pan_proxy, which interprets it */
7654 case 'W': remote_party
++; break;
7655 case 'Z': core_id
= atoi(&argv
[1][2]);
7657 { printf("cpu%d: pid %d parent %d\n",
7658 core_id
, getpid(), worker_pids
[0]);
7661 case 'z': z_handoff
= atoi(&argv
[1][2]); break;
7663 case 'z': break; /* ignored for single-core */
7665 default : fprintf(efd
, "saw option -%c\n", argv
[1][1]); usage(efd
); break;
7669 if (iterative
&& TMODE
!= 0666)
7671 fprintf(efd
, "warning: -T ignored when -i or -I is used\n");
7673 #if defined(HASH32) && !defined(SFH)
7675 { fprintf(efd
, "strong warning: compiling -DHASH32 on a 64-bit machine\n");
7676 fprintf(efd
, " without -DSFH can slow down performance a lot\n");
7679 #if defined(WIN32) || defined(WIN64)
7681 TMODE
= _S_IWRITE
| _S_IREAD
;
7686 store_proxy_pid
= proxy_pid
; /* for checks in mem_file() and someone_crashed() */
7687 if (core_id
!= 0) { proxy_pid
= 0; }
7689 if (core_id
== 0 && a_cycles
)
7690 { fprintf(efd
, "hint: this search may be more efficient ");
7691 fprintf(efd
, "if pan.c is compiled -DSEP_STATE\n");
7695 { z_handoff
= 20; /* conservative default - for non-liveness checks */
7697 #if defined(NGQ) || defined(LWQ_FIXED)
7698 LWQ_SIZE
= (double) (128.*1048576.);
7700 LWQ_SIZE
= (double) ( z_handoff
+ 2.) * (double) sizeof(SM_frame
);
7704 { fprintf(efd
, "warning: the intended nr of cores to be used in liveness mode is 2\n");
7706 fprintf(efd
, "warning: without -DSEP_STATE there is no guarantee that all liveness violations are found\n");
7711 #error cannot use hidden variables when compiling multi-core
7717 fprintf(efd
, "warning: using -k%d as minimal usable value\n", hfns
);
7720 omaxdepth
= maxdepth
;
7722 if (WS
== 4 && ssize
> 34)
7724 fprintf(efd
, "warning: using -w%d as max usable value\n", ssize
);
7726 * -w35 would not work: 35-3 = 32 but 1^31 is the largest
7727 * power of 2 that can be represented in an unsigned long
7731 if (WS
== 4 && ssize
> 27)
7733 fprintf(efd
, "warning: using -w%d as max usable value\n", ssize
);
7735 * for emalloc, the lookup table size multiplies by 4 for the pointers
7736 * the largest power of 2 that can be represented in a ulong is 1^31
7737 * hence the largest number of lookup table slots is 31-4 = 27
7742 hiwater
= HHH
= maxdepth
-10;
7745 { stackfile
= (char *) emalloc(strlen(PanSource
)+4+1);
7746 sprintf(stackfile
, "%s._s_", PanSource
);
7749 { fprintf(efd
, "error: cannot use -i or -I with -DSC\n");
7753 #if (defined(R_XPT) || defined(W_XPT)) && !defined(MA)
7754 #warning -DR_XPT and -DW_XPT assume -DMA (ignored)
7756 if (iterative
&& a_cycles
)
7757 fprintf(efd
, "warning: -i or -I work for safety properties only\n");
7760 #error -DBFS not compatible with -DSC
7763 #error -DBFS not compatible with _last
7766 #error cannot use c_track UnMatched with BFS
7769 #warning -DREACH is redundant when -DBFS is used
7772 #if defined(MERGED) && defined(PEG)
7773 #error to use -DPEG use: spin -o3 -a
7777 #error cannot combine -DHC and -DSFH
7778 /* use of NOCOMP is the real reason */
7781 #error cannot combine -DHC and -DNOCOMP
7785 #error cannot combine -DHC and -DBITSTATE
7788 #if defined(SAFETY) && defined(NP)
7789 #error cannot combine -DNP and -DBFS or -DSAFETY
7793 #error cannot combine -DMA and -DBITSTATE
7796 #error usage: -DMA=N with N > 0 and N < VECTORSZ
7801 #error cannot combine -DBITSTATE and -DCOLLAPSE
7804 #error cannot combine -DCOLLAPSE and -DSFH
7805 /* use of NOCOMP is the real reason */
7808 #error cannot combine -DCOLLAPSE and -DNOCOMP
7812 if (maxdepth
<= 0 || ssize
<= 1) usage(efd
);
7813 #if SYNC>0 && !defined(NOREDUCE)
7814 if (a_cycles
&& fairness
)
7815 { fprintf(efd
, "error: p.o. reduction not compatible with ");
7816 fprintf(efd
, "fairness (-f) in models\n");
7817 fprintf(efd
, " with rendezvous operations: ");
7818 fprintf(efd
, "recompile with -DNOREDUCE\n");
7822 #if defined(REM_VARS) && !defined(NOREDUCE)
7823 #warning p.o. reduction not compatible with remote varrefs (use -DNOREDUCE)
7825 #if defined(NOCOMP) && !defined(BITSTATE)
7827 { fprintf(efd
, "error: use of -DNOCOMP voids -l and -a\n");
7832 memlim
= ((double) MEMLIM
) * (double) (1<<20); /* size in Mbyte */
7835 if (Nrun
> 1) HASH_NR
= Nrun
- 1;
7837 if (Nrun
< 1 || Nrun
> 32)
7838 { fprintf(efd
, "error: invalid arg for -R\n");
7842 if (fairness
&& !a_cycles
)
7843 { fprintf(efd
, "error: -f requires -a or -l\n");
7848 { fprintf(efd
, "error: no accept labels defined ");
7849 fprintf(efd
, "in model (for option -a)\n");
7856 #error use of enabled() requires -DNOREDUCE
7859 #error use of pcvalue() requires -DNOREDUCE
7862 #error use of 'else' combined with i/o stmnts requires -DNOREDUCE
7865 #error use of _last requires -DNOREDUCE
7868 #if SYNC>0 && !defined(NOREDUCE)
7870 fprintf(efd
, "warning: use of a rendezvous stmnts in the escape\n");
7871 fprintf(efd
, " of an unless clause, if present, could make p.o. reduction\n");
7872 fprintf(efd
, " invalid (use -DNOREDUCE to avoid this)\n");
7874 fprintf(efd
, " (this type of rv is also not compatible with -DBFS)\n");
7878 #if SYNC>0 && defined(BFS)
7879 #warning use of rendezvous with BFS does not preserve all invalid endstates
7881 #if !defined(REACH) && !defined(BITSTATE)
7882 if (iterative
!= 0 && a_cycles
== 0)
7883 { fprintf(efd
, "warning: -i and -I need -DREACH to work accurately\n");
7886 #if defined(BITSTATE) && defined(REACH)
7887 #warning -DREACH is voided by -DBITSTATE
7889 #if defined(MA) && defined(REACH)
7890 #warning -DREACH is voided by -DMA
7892 #if defined(FULLSTACK) && defined(CNTRSTACK)
7893 #error cannot combine -DFULLSTACK and -DCNTRSTACK
7906 { fprintf(efd
, "warning: never claim + accept labels ");
7907 fprintf(efd
, "requires -a flag to fully verify\n");
7915 { fprintf(efd
, "warning: verification in BFS mode ");
7916 fprintf(efd
, "is restricted to safety properties\n");
7930 { fprintf(efd
, "hint: this search is more efficient ");
7931 fprintf(efd
, "if pan.c is compiled -DSAFETY\n");
7938 { S_A
= 1; /* _a_t */
7940 } else /* _a_t and _cnt[NFAIR] */
7941 { S_A
= (&(now
._cnt
[0]) - (uchar
*) &now
) + NFAIR
- 2;
7942 /* -2 because first two uchars in now are masked */
7947 signal(SIGINT
, stopped
);
7950 trail
= (Trail
*) emalloc(6*sizeof(Trail
));
7953 trail
= (Trail
*) emalloc((maxdepth
+3)*sizeof(Trail
));
7954 trail
++; /* protect trpt-1 refs at depth 0 */
7959 sprintf(nm
, "%s.svd", PanSource
);
7960 if ((svfd
= creat(nm
, TMODE
)) < 0)
7961 { fprintf(efd
, "couldn't create %s\n", nm
);
7968 #if SYNC>0 && ASYNC==0
7980 fprintf(fd
, "%s\n", SpinVersion
);
7981 fprintf(fd
, "Valid Options are:\n");
7984 fprintf(fd
, " -a -> is disabled by -DNP ");
7985 fprintf(fd
, "(-DNP compiles for -l only)\n");
7987 fprintf(fd
, " -a find acceptance cycles\n");
7990 fprintf(fd
, " -a,-l,-f -> are disabled by -DSAFETY\n");
7992 fprintf(fd
, " -A ignore assert() violations\n");
7993 fprintf(fd
, " -b consider it an error to exceed the depth-limit\n");
7994 fprintf(fd
, " -cN stop at Nth error ");
7995 fprintf(fd
, "(defaults to -c1)\n");
7996 fprintf(fd
, " -d print state tables and stop\n");
7997 fprintf(fd
, " -e create trails for all errors\n");
7998 fprintf(fd
, " -E ignore invalid end states\n");
8000 fprintf(fd
, " -Ffile use 'file' to store disk-stack\n");
8003 fprintf(fd
, " -f add weak fairness (to -a or -l)\n");
8005 fprintf(fd
, " -hN use different hash-seed N:1..32\n");
8006 fprintf(fd
, " -i search for shortest path to error\n");
8007 fprintf(fd
, " -I like -i, but approximate and faster\n");
8008 fprintf(fd
, " -J reverse eval order of nested unlesses\n");
8010 fprintf(fd
, " -kN set N bits per state (defaults to 3)\n");
8013 fprintf(fd
, " -LN set scheduling restriction to N (default 10)\n");
8017 fprintf(fd
, " -l find non-progress cycles\n");
8019 fprintf(fd
, " -l find non-progress cycles -> ");
8020 fprintf(fd
, "disabled, requires ");
8021 fprintf(fd
, "compilation with -DNP\n");
8025 fprintf(fd
, " -MN use N Megabytes for bitstate hash array\n");
8026 fprintf(fd
, " -GN use N Gigabytes for bitstate hash array\n");
8028 fprintf(fd
, " -mN max depth N steps (default=10k)\n");
8029 fprintf(fd
, " -n no listing of unreached states\n");
8031 fprintf(fd
, " -pN create svfile (save N bytes per state)\n");
8033 fprintf(fd
, " -QN set time-limit on execution of N minutes\n");
8034 fprintf(fd
, " -q require empty chans in valid end states\n");
8036 fprintf(fd
, " -r read and execute trail - can add -v,-n,-PN,-g,-C\n");
8037 fprintf(fd
, " -rN read and execute N-th error trail\n");
8038 fprintf(fd
, " -C read and execute trail - columnated output (can add -v,-n)\n");
8039 fprintf(fd
, " -PN read and execute trail - restrict trail output to proc N\n");
8040 fprintf(fd
, " -g read and execute trail + msc gui support\n");
8041 fprintf(fd
, " -S silent replay: only user defined printfs show\n");
8044 fprintf(fd
, " -RN repeat run Nx with N ");
8045 fprintf(fd
, "[1..32] independent hash functions\n");
8046 fprintf(fd
, " -s same as -k1 (single bit per state)\n");
8048 fprintf(fd
, " -T create trail files in read-only mode\n");
8049 fprintf(fd
, " -tsuf replace .trail with .suf on trailfiles\n");
8050 fprintf(fd
, " -V print SPIN version number\n");
8051 fprintf(fd
, " -v verbose -- filenames in unreached state listing\n");
8052 fprintf(fd
, " -wN hashtable of 2^N entries ");
8053 fprintf(fd
, "(defaults to -w%d)\n", ssize
);
8054 fprintf(fd
, " -x do not overwrite an existing trail file\n");
8056 fprintf(fd
, " -zN handoff states below depth N to 2nd cpu (multi_core)\n");
8059 fprintf(fd
, "\n options -r, -C, -PN, -g, and -S can optionally be followed by\n");
8060 fprintf(fd
, " a filename argument, as in '-r filename', naming the trailfile\n");
8069 Malloc(unsigned long n
)
8072 if (memcnt
+ (double) n
> memlim
) goto err
;
8075 tmp
= (char *) malloc(n
);
8078 tmp
= (char *) sbrk(n
);
8079 if (tmp
== (char *) -ONE_L
)
8085 printf("pan: out of memory\n");
8087 printf(" %g bytes used\n", memcnt
);
8088 printf(" %g bytes more needed\n", (double) n
);
8089 printf(" %g bytes limit\n",
8093 printf("hint: to reduce memory, recompile with\n");
8095 printf(" -DMA=%d # better/slower compression, or\n", hmax
);
8097 printf(" -DBITSTATE # supertrace, approximation\n");
8100 printf("hint: to reduce memory, recompile with\n");
8102 printf(" -DCOLLAPSE # good, fast compression, or\n");
8104 printf(" -DMA=%d # better/slower compression, or\n", hmax
);
8106 printf(" -DHC # hash-compaction, approximation\n");
8108 printf(" -DBITSTATE # supertrace, approximation\n");
8113 printf(" omit -DFULL_TRAIL or use pan -c0 to reduce memory\n");
8116 printf("hint: to reduce memory, recompile without\n");
8117 printf(" -DSEP_STATE # may be faster, but uses more memory\n");
8122 memcnt
+= (double) n
;
8126 #define CHUNK (100*VECTORSZ)
8129 emalloc(unsigned long n
) /* never released or reallocated */
8132 return (char *) NULL
;
8133 if (n
&(sizeof(void *)-1)) /* for proper alignment */
8134 n
+= sizeof(void *)-(n
&(sizeof(void *)-1));
8135 if ((unsigned long) left
< n
)
8136 { grow
= (n
< CHUNK
) ? CHUNK
: n
;
8137 have
= Malloc(grow
);
8138 fragment
+= (double) left
;
8149 { /* always fatal */
8152 sudden_stop("Uerror");
8157 #if defined(MA) && !defined(SAFETY)
8160 { Trans
*t
; uchar ot
, _m
; int tt
; short II
;
8164 uchar oat
= now
._a_t
;
8165 now
._a_t
&= ~(1|16|32);
8166 memcpy((char *) &comp_now
, (char *) &now
, vsize
);
8170 trpt
= getframe(depth
);
8173 printf("%d State: ", depth
);
8174 for (i
= 0; i
< vsize
; i
++) printf("%d%s,",
8175 ((char *)&now
)[i
], Mask
[i
]?"*":"");
8179 if (trpt
->o_pm
&128) /* fairness alg */
8180 { now
._cnt
[now
._a_t
&1] = trpt
->bup
.oval
;
8183 trpt
= getframe(depth
);
8192 { int d
; Trail
*trl
;
8194 for (d
= 1; d
< depth
; d
++)
8195 { trl
= getframe(depth
-d
); /* was trl = (trpt-d); */
8197 { now
._last
= trl
->pr
- BASE
;
8201 now
._last
= (depth
<1)?0:(trpt
-1)->pr
;
8205 now
._event
= trpt
->o_event
;
8207 if ((now
._a_t
&1) && depth
<= A_depth
)
8208 { now
._a_t
&= ~(1|16|32);
8209 if (fairness
) now
._a_t
|= 2; /* ? */
8211 goto CameFromHere
; /* checkcycles() */
8214 ot
= trpt
->o_ot
; II
= trpt
->pr
;
8215 tt
= trpt
->o_tt
; this = pptr(II
);
8216 _m
= do_reverse(t
, II
, trpt
->o_m
);
8218 printf("%3d: proc %d ", depth
, II
);
8219 printf("reverses %d, %d to %d,",
8220 t
->forw
, tt
, t
->st
);
8221 printf(" %s [abit=%d,adepth=%d,",
8222 t
->tp
, now
._a_t
, A_depth
);
8223 printf("tau=%d,%d] <unwind>\n",
8224 trpt
->tau
, (trpt
-1)->tau
);
8228 trpt
= getframe(depth
);
8232 /* reached[ot][t->st] = 1; 3.4.13 */
8233 ((P0
*)this)->_p
= tt
;
8235 if ((trpt
->o_pm
&32))
8238 if (now
._cnt
[now
._a_t
&1] == 0)
8239 now
._cnt
[now
._a_t
&1] = 1;
8241 now
._cnt
[now
._a_t
&1] += 1;
8246 now
._cnt
[now
._a_t
&1] = 0;
8252 if (memcmp((char *) &now
, (char *) &comp_now
, vsize
) == 0)
8254 if (depth
> 0) goto Up
;
8258 static char unwinding
;
8261 { static char laststr
[256];
8264 if (unwinding
) return; /* 1.4.2 */
8265 if (strncmp(str
, laststr
, 254))
8267 cpu_printf("pan: %s (at depth %ld)\n", str
,
8269 printf("pan: %s (at depth %ld)\n", str
,
8272 (nr_handoffs
* z_handoff
) +
8274 ((depthfound
==-1)?depth
:depthfound
));
8275 strncpy(laststr
, str
, 254);
8278 if (readtrail
) { wrap_trail(); return; }
8280 is_cycle
= (strstr(str
, " cycle") != (char *) 0);
8284 if ((every_error
!= 0)
8287 #if defined(MA) && !defined(SAFETY)
8291 depthfound
= Unwind();
8300 if (depth
> 1) trpt
--;
8302 if (depth
> 1) trpt
++;
8306 #if defined(MA) && !defined(SAFETY)
8307 if (strstr(str
, " cycle"))
8309 printf("sorry: MA writes 1 trail max\n");
8310 wrapup(); /* no recovery from unwind */
8314 if (search_terminated
!= NULL
)
8315 { *search_terminated
|= 4; /* uerror */
8321 { depth
--; trpt
--; /* undo */
8324 if (iterative
!= 0 && maxdepth
> 0)
8325 { maxdepth
= (iterative
== 1)?(depth
-1):(depth
/2);
8327 printf("pan: reducing search depth to %ld\n",
8331 if (errors
>= upto
&& upto
!= 0)
8334 sudden_stop("uerror");
8342 xrefsrc(int lno
, S_F_MAP
*mp
, int M
, int i
)
8343 { Trans
*T
; int j
, retval
=1;
8344 for (T
= trans
[M
][i
]; T
; T
= T
->nxt
)
8346 { if (strcmp(T
->tp
, ".(goto)") == 0
8347 || strncmp(T
->tp
, "goto :", 6) == 0)
8348 return 1; /* not reported */
8350 printf("\tline %d", lno
);
8352 for (j
= 0; j
< sizeof(mp
); j
++)
8353 if (i
>= mp
[j
].from
&& i
<= mp
[j
].upto
)
8354 { printf(", \"%s\"", mp
[j
].fnm
);
8357 printf(", state %d", i
);
8358 if (strcmp(T
->tp
, "") != 0)
8360 q
= transmognify(T
->tp
);
8361 printf(", \"%s\"", q
?q
:"");
8362 } else if (stopstate
[M
][i
])
8363 printf(", -end state-");
8365 retval
= 0; /* reported */
8371 r_ck(uchar
*which
, int N
, int M
, short *src
, S_F_MAP
*mp
)
8375 if (M
== VERI
&& !verbose
) return;
8377 printf("unreached in proctype %s\n", procname
[M
]);
8378 for (i
= 1; i
< N
; i
++)
8380 && (mapstate
[M
][i
] == 0
8381 || which
[mapstate
[M
][i
]] == 0))
8382 m
+= xrefsrc((int) src
[i
], mp
, M
, i
);
8385 printf(" (%d of %d states)\n", N
-1-m
, N
-1);
8387 #if NCORE>1 && !defined(SEP_STATE)
8388 static long rev_trail_cnt
;
8392 rev_trail(int fd
, volatile Stack_Tree
*st_tr
)
8393 { long j
; char snap
[64];
8398 rev_trail(fd
, st_tr
->prv
);
8400 printf("%d (%d) LRT [%d,%d] -- %9u (root %9u)\n",
8401 depth
, rev_trail_cnt
, st_tr
->pr
, st_tr
->t_id
, st_tr
, stack_last
[core_id
]);
8403 if (st_tr
->pr
!= 255)
8404 { sprintf(snap
, "%ld:%d:%d\n",
8405 rev_trail_cnt
++, st_tr
->pr
, st_tr
->t_id
);
8407 if (write(fd
, snap
, j
) != j
)
8408 { printf("pan: error writing trailfile\n");
8413 } else /* handoff point */
8415 { write(fd
, "-1:-1:-1\n", 9);
8424 #if defined VERI || defined(MERGED)
8427 #if NCORE==1 || defined(SEP_STATE) || !defined(FULL_TRAIL)
8434 sprintf(snap
, "-2:%d:-2\n", VERI
);
8435 write(fd
, snap
, strlen(snap
));
8438 sprintf(snap
, "-4:-4:-4\n");
8439 write(fd
, snap
, strlen(snap
));
8441 #if NCORE>1 && !defined(SEP_STATE) && defined(FULL_TRAIL)
8443 enter_critical(GLOBAL_LOCK
);
8444 rev_trail(fd
, stack_last
[core_id
]);
8445 leave_critical(GLOBAL_LOCK
);
8447 i
= 1; /* trail starts at position 1 */
8448 #if NCORE>1 && defined(SEP_STATE)
8449 if (cur_Root
.m_vsize
> 0) { i
++; depth
++; }
8451 for ( ; i
<= depth
; i
++)
8452 { if (i
== depthfound
+1)
8453 write(fd
, "-1:-1:-1\n", 9);
8455 if (!trl
->o_t
) continue;
8456 if (trl
->o_pm
&128) continue;
8457 sprintf(snap
, "%ld:%d:%d\n",
8458 i
, trl
->pr
, trl
->o_t
->t_id
);
8460 if (write(fd
, snap
, j
) != j
)
8461 { printf("pan: error writing trailfile\n");
8468 cpu_printf("pan: wrote trailfile\n");
8473 sv_save(void) /* push state vector onto save stack */
8475 { svtack
->nxt
= (Svtack
*) emalloc(sizeof(Svtack
));
8476 svtack
->nxt
->body
= emalloc(vsize
*sizeof(char));
8477 svtack
->nxt
->lst
= svtack
;
8478 svtack
->nxt
->m_delta
= vsize
;
8480 } else if (vsize
> svtack
->nxt
->m_delta
)
8481 { svtack
->nxt
->body
= emalloc(vsize
*sizeof(char));
8482 svtack
->nxt
->lst
= svtack
;
8483 svtack
->nxt
->m_delta
= vsize
;
8486 svtack
= svtack
->nxt
;
8488 svtack
->o_boq
= boq
;
8490 svtack
->o_delta
= vsize
; /* don't compress */
8491 memcpy((char *)(svtack
->body
), (char *) &now
, vsize
);
8492 #if defined(C_States) && defined(HAS_STACK) && (HAS_TRACK==1)
8493 c_stack((uchar
*) &(svtack
->c_stack
[0]));
8496 cpu_printf("%d: sv_save\n", depth
);
8501 sv_restor(void) /* pop state vector from save stack */
8503 memcpy((char *)&now
, svtack
->body
, svtack
->o_delta
);
8505 boq
= svtack
->o_boq
;
8507 #if defined(C_States) && (HAS_TRACK==1)
8509 c_unstack((uchar
*) &(svtack
->c_stack
[0]));
8511 c_revert((uchar
*) &(now
.c_state
[0]));
8513 if (vsize
!= svtack
->o_delta
)
8514 Uerror("sv_restor");
8516 Uerror("error: v_restor");
8517 svtack
= svtack
->lst
;
8519 cpu_printf(" sv_restor\n");
8525 { int i
; char *z
= (char *) &now
;
8527 proc_offset
[h
] = stack
->o_offset
;
8528 proc_skip
[h
] = (uchar
) stack
->o_skip
;
8530 p_name
[h
] = stack
->o_name
;
8533 for (i
= vsize
+ stack
->o_skip
; i
> vsize
; i
--)
8534 Mask
[i
-1] = 1; /* align */
8536 vsize
+= stack
->o_skip
;
8537 memcpy(z
+vsize
, stack
->body
, stack
->o_delta
);
8538 vsize
+= stack
->o_delta
;
8543 for (i
= 1; i
<= Air
[((P0
*)pptr(h
))->_t
]; i
++)
8544 Mask
[vsize
- i
] = 1; /* pad */
8545 Mask
[proc_offset
[h
]] = 1; /* _pid */
8547 if (BASE
> 0 && h
> 0)
8548 ((P0
*)pptr(h
))->_pid
= h
-BASE
;
8550 ((P0
*)pptr(h
))->_pid
= h
;
8553 if (!stack
->lst
) /* debugging */
8554 Uerror("error: p_restor");
8563 { char *z
= (char *) &now
;
8567 q_offset
[now
._nr_qs
] = stack
->o_offset
;
8568 q_skip
[now
._nr_qs
] = (uchar
) stack
->o_skip
;
8570 q_name
[now
._nr_qs
] = stack
->o_name
;
8572 vsize
+= stack
->o_skip
;
8573 memcpy(z
+vsize
, stack
->body
, stack
->o_delta
);
8574 vsize
+= stack
->o_delta
;
8580 k_end
= stack
->o_offset
;
8581 k
= k_end
- stack
->o_skip
;
8584 if (q_zero(now
._nr_qs
)) k_end
+= stack
->o_delta
;
8587 for ( ; k
< k_end
; k
++)
8590 if (!stack
->lst
) /* debugging */
8591 Uerror("error: q_restor");
8594 typedef struct IntChunks
{
8596 struct IntChunks
*nxt
;
8598 IntChunks
*filled_chunks
[512];
8599 IntChunks
*empty_chunks
[512];
8603 if (nr
>= 512) Uerror("cannot happen grab_int");
8604 if (filled_chunks
[nr
])
8605 { z
= filled_chunks
[nr
];
8606 filled_chunks
[nr
] = filled_chunks
[nr
]->nxt
;
8608 { z
= (IntChunks
*) emalloc(sizeof(IntChunks
));
8609 z
->ptr
= (int *) emalloc(nr
* sizeof(int));
8611 z
->nxt
= empty_chunks
[nr
];
8612 empty_chunks
[nr
] = z
;
8616 ungrab_ints(int *p
, int nr
)
8618 if (!empty_chunks
[nr
]) Uerror("cannot happen ungrab_int");
8619 z
= empty_chunks
[nr
];
8620 empty_chunks
[nr
] = empty_chunks
[nr
]->nxt
;
8622 z
->nxt
= filled_chunks
[nr
];
8623 filled_chunks
[nr
] = z
;
8626 delproc(int sav
, int h
)
8629 int o_vsize
= vsize
;
8631 if (h
+1 != (int) now
._nr_pr
) return 0;
8634 && q_offset
[now
._nr_qs
-1] > proc_offset
[h
])
8638 d
= vsize
- proc_offset
[h
];
8641 { stack
->nxt
= (Stack
*)
8642 emalloc(sizeof(Stack
));
8644 emalloc(Maxbody
*sizeof(char));
8645 stack
->nxt
->lst
= stack
;
8649 stack
->o_offset
= proc_offset
[h
];
8651 stack
->o_skip
= (int) proc_skip
[h
];
8653 stack
->o_skip
= (short) proc_skip
[h
];
8656 stack
->o_name
= p_name
[h
];
8660 memcpy(stack
->body
, (char *)pptr(h
), d
);
8662 vsize
= proc_offset
[h
];
8663 now
._nr_pr
= now
._nr_pr
- 1;
8664 memset((char *)pptr(h
), 0, d
);
8665 vsize
-= (int) proc_skip
[h
];
8670 for (i
= vsize
; i
< o_vsize
; i
++)
8671 Mask
[i
] = 0; /* reset */
8678 { int h
= now
._nr_qs
- 1;
8679 int d
= vsize
- q_offset
[now
._nr_qs
- 1];
8681 int k
, o_vsize
= vsize
;
8685 { stack
->nxt
= (Stack
*)
8686 emalloc(sizeof(Stack
));
8688 emalloc(Maxbody
*sizeof(char));
8689 stack
->nxt
->lst
= stack
;
8693 stack
->o_offset
= q_offset
[h
];
8695 stack
->o_skip
= (int) q_skip
[h
];
8697 stack
->o_skip
= (short) q_skip
[h
];
8700 stack
->o_name
= q_name
[h
];
8703 memcpy(stack
->body
, (char *)qptr(h
), d
);
8705 vsize
= q_offset
[h
];
8706 now
._nr_qs
= now
._nr_qs
- 1;
8707 memset((char *)qptr(h
), 0, d
);
8708 vsize
-= (int) q_skip
[h
];
8713 for (k
= vsize
; k
< o_vsize
; k
++)
8714 Mask
[k
] = 0; /* reset */
8721 for (i
= 0; i
< (int) now
._nr_qs
; i
++)
8731 for (i
= BASE
; i
< (int) now
._nr_pr
; i
++)
8732 { ptr
= (P0
*) pptr(i
);
8733 if (!stopstate
[ptr
->_t
][ptr
->_p
])
8736 if (strict
) return qs_empty();
8737 #if defined(EVENT_TRACE) && !defined(OTIM)
8738 if (!stopstate
[EVENT_TRACE
][now
._event
] && !a_cycles
)
8739 { printf("pan: event_trace not completed\n");
8749 { uchar o_a_t
= now
._a_t
;
8754 uchar o_cnt
= now
._cnt
[1];
8758 struct H_el
*sv
= trpt
->ostate
; /* save */
8760 uchar prov
= trpt
->proviso
; /* save */
8764 { int i
; uchar
*v
= (uchar
*) &now
;
8765 printf(" set Seed state ");
8767 if (fairness
) printf("(cnt = %d:%d, nrpr=%d) ",
8768 now
._cnt
[0], now
._cnt
[1], now
._nr_pr
);
8770 /* for (i = 0; i < n; i++) printf("%d,", v[i]); */
8773 printf("%d: cycle check starts\n", depth
);
8775 now
._a_t
|= (1|16|32);
8776 /* 1 = 2nd DFS; (16|32) to help hasher */
8778 now
._cnt
[1] = now
._cnt
[0];
8780 memcpy((char *)&A_Root
, (char *)&now
, vsize
);
8781 A_depth
= depthfound
= depth
;
8786 o_limit
= trpt
->sched_limit
;
8787 trpt
->sched_limit
= 0;
8789 new_state(); /* start 2nd DFS */
8791 trpt
->sched_limit
= o_limit
;
8796 now
._cnt
[1] = o_cnt
;
8798 A_depth
= 0; depthfound
= -1;
8800 printf("%d: cycle check returns\n", depth
);
8804 trpt
->ostate
= sv
; /* restore */
8806 trpt
->proviso
= prov
;
8812 #if defined(FULLSTACK) && defined(BITSTATE)
8813 struct H_el
*Free_list
= (struct H_el
*) 0;
8815 onstack_init(void) /* to store stack states in a bitstate search */
8816 { S_Tab
= (struct H_el
**) emalloc(maxdepth
*sizeof(struct H_el
*));
8820 { struct H_el
*v
, *last
= 0;
8822 { for (v
= Free_list
; v
&& ((int) v
->tagged
>= n
); v
=v
->nxt
)
8823 { if ((int) v
->tagged
== n
)
8827 gotcha
: Free_list
= v
->nxt
;
8837 /* new: second try */
8839 if (v
&& ((int) v
->tagged
>= n
))
8843 return (struct H_el
*)
8844 emalloc(sizeof(struct H_el
)+n
-sizeof(unsigned));
8851 { struct H_el
*grab_shared(int);
8852 return grab_shared(sizeof(struct H_el
)+n
-sizeof(unsigned));
8856 #define grab_state(n) (struct H_el *) \
8857 emalloc(sizeof(struct H_el)+n-sizeof(unsigned long));
8862 int cnt
= sizeof(struct H_el
)+n
-sizeof(unsigned long);
8864 if (reclaim_size
>= cnt
+WS
)
8865 { if ((cnt
& (WS
-1)) != 0) /* alignment */
8866 { cnt
+= WS
- (cnt
& (WS
-1));
8868 p
= (struct H_el
*) reclaim_mem
;
8870 reclaim_size
-= cnt
;
8873 { p
= (struct H_el
*) emalloc(cnt
);
8882 ordinal(char *v
, long n
, short tp
)
8883 { struct H_el
*tmp
, *ntmp
; long m
;
8884 struct H_el
*olst
= (struct H_el
*) 0;
8885 s_hash((uchar
*)v
, n
);
8886 #if NCORE>1 && !defined(SEP_STATE)
8887 enter_critical(CS_ID
); /* uses spinlock - 1..128 */
8891 { tmp
= grab_state(n
);
8894 for ( ;; olst
= tmp
, tmp
= tmp
->nxt
)
8895 { m
= memcmp(((char *)&(tmp
->state
)), v
, n
);
8902 Insert
: ntmp
= grab_state(n
);
8910 } else if (!tmp
->nxt
)
8912 Append
: tmp
->nxt
= grab_state(n
);
8929 #if defined(AUTO_RESIZE) && !defined(BITSTATE)
8932 memcpy(((char *)&(tmp
->state
)), v
, n
);
8935 #if NCORE>1 && !defined(SEP_STATE)
8936 leave_critical(CS_ID
); /* uses spinlock */
8946 compress(char *vin
, int nin
) /* collapse compression */
8947 { char *w
, *v
= (char *) &comp_now
;
8951 static uchar nbytes
[513]; /* 1 + 256 + 256 */
8952 static unsigned short nbytelen
;
8953 long col_q(int, char *);
8954 long col_p(int, char *);
8960 for (i
= 0; i
< NFAIR
; i
++)
8966 for (i
= 0; i
< (int) now
._nr_pr
; i
++)
8967 { n
= col_p(i
, (char *) 0);
8969 nbytes
[nbytelen
] = 0;
8971 nbytes
[nbytelen
] = 1;
8972 *v
++ = ((P0
*) pptr(i
))->_t
;
8976 { nbytes
[nbytelen
]++;
8980 { nbytes
[nbytelen
]++;
8984 { nbytes
[nbytelen
]++;
8991 for (i
= 0; i
< (int) now
._nr_pr
; i
++)
8993 n
= ordinal(scratch
, x
-scratch
, 2); /* procs */
8995 nbytes
[nbytelen
] = 0;
8997 { nbytes
[nbytelen
]++;
9001 { nbytes
[nbytelen
]++;
9005 { nbytes
[nbytelen
]++;
9011 for (i
= 0; i
< (int) now
._nr_qs
; i
++)
9012 { n
= col_q(i
, (char *) 0);
9013 nbytes
[nbytelen
] = 0;
9016 { nbytes
[nbytelen
]++;
9020 { nbytes
[nbytelen
]++;
9024 { nbytes
[nbytelen
]++;
9031 /* 3 = _a_t, _nr_pr, _nr_qs */
9032 w
= (char *) &now
+ 3 * sizeof(uchar
);
9038 w
= (char *) &(now
._vsz
) + sizeof(unsigned short);
9040 w
= (char *) &(now
._vsz
) + sizeof(unsigned long);
9046 if (now
._nr_qs
> 0 && qptr(0) < pptr(0))
9047 n
= qptr(0) - (uchar
*) w
;
9049 n
= pptr(0) - (uchar
*) w
;
9050 j
= w
- (char *) &now
;
9051 for (i
= 0; i
< (int) n
; i
++, w
++)
9052 if (!Mask
[j
++]) *x
++ = *w
;
9054 for (i
= 0; i
< (int) now
._nr_qs
; i
++)
9058 for (i
= 0, j
= 6; i
< nbytelen
; i
++)
9064 *x
|= (nbytes
[i
] << j
);
9067 for (j
= 0; j
< WS
-1; j
++)
9070 n
= ordinal(scratch
, x
-scratch
, 0); /* globals */
9072 if (n
>= (1<< 8)) { *v
++ = (n
>> 8)&255; j
++; }
9073 if (n
>= (1<<16)) { *v
++ = (n
>>16)&255; j
++; }
9074 if (n
>= (1<<24)) { *v
++ = (n
>>24)&255; j
++; }
9075 *v
++ = j
; /* add last count as a byte */
9076 for (i
= 0; i
< WS
-1; i
++)
9080 printf("collapse %d -> %d\n",
9081 vsize
, v
- (char *)&comp_now
);
9083 return v
- (char *)&comp_now
;
9086 #if !defined(NOCOMP)
9088 compress(char *vin
, int n
) /* default compression */
9092 s_hash((uchar
*)vin
, n
); /* sets K1 and K2 */
9095 { delta
++; /* _a_t */
9098 delta
+= NFAIR
; /* _cnt[] */
9102 memcpy((char *) &comp_now
+ delta
, (char *) &K1
, WS
);
9105 memcpy((char *) &comp_now
+ delta
, (char *) &K2
, HC
);
9111 char *v
= (char *) &comp_now
;
9114 int r
= 0, unroll
= n
/8;
9117 while (r
++ < unroll
)
9118 { /* unroll 8 times, avoid ifs */
9136 r
= n
- i
; /* the rest, at most 7 */
9138 case 7: *v
= *vv
++; v
+= 1 - Mask
[i
++];
9139 case 6: *v
= *vv
++; v
+= 1 - Mask
[i
++];
9140 case 5: *v
= *vv
++; v
+= 1 - Mask
[i
++];
9141 case 4: *v
= *vv
++; v
+= 1 - Mask
[i
++];
9142 case 3: *v
= *vv
++; v
+= 1 - Mask
[i
++];
9143 case 2: *v
= *vv
++; v
+= 1 - Mask
[i
++];
9144 case 1: *v
= *vv
++; v
+= 1 - Mask
[i
++];
9147 r
= (n
+WS
-1)/WS
; /* words rounded up */
9148 r
*= WS
; /* bytes */
9149 i
= r
- i
; /* remainder */
9151 case 7: *v
++ = 0; /* fall thru */
9159 default: Uerror("unexpected wordsize");
9164 { for (i
= 0; i
< n
; i
++, vv
++)
9165 if (!Mask
[i
]) *v
++ = *vv
;
9166 for (i
= 0; i
< WS
-1; i
++)
9171 printf("compress %d -> %d\n",
9172 n
, v
- (char *)&comp_now
);
9174 return v
- (char *)&comp_now
;
9179 #if defined(FULLSTACK) && defined(BITSTATE)
9181 #if !defined(onstack_now)
9182 int onstack_now(void) {}
9184 #if !defined(onstack_put)
9185 void onstack_put(void) {}
9187 #if !defined(onstack_zap)
9188 void onstack_zap(void) {}
9193 { struct H_el
*v
, *w
, *last
= 0;
9194 struct H_el
**tmp
= H_tab
;
9197 static char warned
= 0;
9201 nv
= (char *) &comp_now
;
9202 n
= compress((char *)&now
, vsize
);
9204 #if defined(BITSTATE) && defined(LC)
9205 nv
= (char *) &comp_now
;
9206 n
= compact_stack((char *)&now
, vsize
);
9212 #if !defined(HC) && !(defined(BITSTATE) && defined(LC))
9213 s_hash((uchar
*)nv
, n
);
9216 for (v
= S_Tab
[j1
]; v
; Zh
++, last
=v
, v
=v
->nxt
)
9217 { m
= memcmp(&(v
->state
), nv
, n
);
9225 #if defined(BITSTATE) && NCORE>1
9226 /* seen this happen, likely harmless, but not yet understood */
9229 { /* Uerror("stack out of wack - zap"); */
9230 cpu_printf("pan: warning, stack incomplete\n");
9241 v
->tagged
= (unsigned) n
;
9242 #if !defined(NOREDUCE) && !defined(SAFETY)
9245 v
->nxt
= last
= (struct H_el
*) 0;
9246 for (w
= Free_list
; w
; Fa
++, last
=w
, w
= w
->nxt
)
9247 { if ((int) w
->tagged
<= n
)
9252 { v
->nxt
= Free_list
;
9265 { struct H_el
**tmp
= H_tab
;
9267 if (hstore((char *)&now
, vsize
) != 0)
9268 #if defined(BITSTATE) && defined(LC)
9269 printf("pan: warning, double stack entry\n");
9272 Uerror("cannot happen - unstack_put");
9276 trpt
->ostate
= Lstate
;
9282 struct H_el
**tmp2
= H_tab
;
9283 char *v
; int n
, m
= 1;
9287 #if defined(BITSTATE) && defined(LC)
9288 v
= (char *) &comp_now
;
9289 n
= compact_stack((char *)&now
, vsize
);
9295 v
= (char *) &comp_now
;
9296 n
= compress((char *)&now
, vsize
);
9298 #if !defined(HC) && !(defined(BITSTATE) && defined(LC))
9299 s_hash((uchar
*)v
, n
);
9302 for (tmp
= S_Tab
[j1
]; tmp
; Zn
++, tmp
= tmp
->nxt
)
9303 { m
= memcmp(((char *)&(tmp
->state
)),v
,n
);
9305 { Lstate
= (struct H_el
*) tmp
;
9319 { void r_xpoint(void);
9323 dfa_init((unsigned short) (MA
+a_cycles
));
9324 #if NCORE>1 && !defined(COLLAPSE)
9326 { void init_HT(unsigned long);
9332 #if !defined(MA) || defined(COLLAPSE)
9335 { void init_HT(unsigned long);
9336 init_HT((unsigned long) (ONE_L
<<ssize
)*sizeof(struct H_el
*));
9339 H_tab
= (struct H_el
**)
9340 emalloc((ONE_L
<<ssize
)*sizeof(struct H_el
*));
9345 #if !defined(BITSTATE) || defined(FULLSTACK)
9348 dumpstate(int wasnew
, char *v
, int n
, int tag
)
9352 { printf(" state tags %d (%d::%d): ",
9355 printf(" %d ", tag
);
9363 for (i
= 0; i
< vsize
; i
++) printf("%d%s,",
9364 ((char *)&now
)[i
], Mask
[i
]?"*":"");
9366 printf("\n Vector: ");
9367 for (i
= 0; i
< n
; i
++) printf("%d,", v
[i
]);
9374 gstore(char *vin
, int nin
, uchar pbit
)
9378 static uchar Info
[MA
+1];
9380 n
= compress(vin
, nin
);
9381 v
= (uchar
*) &comp_now
;
9387 { printf("pan: error, MA too small, recompile pan.c");
9388 printf(" with -DMA=N with N>%d\n", n
);
9391 if (n
> (int) maxgs
)
9392 { maxgs
= (unsigned int) n
;
9394 for (i
= 0; i
< n
; i
++)
9397 for ( ; i
< MA
-1; i
++)
9401 if (a_cycles
) /* place _a_t at the end */
9402 { Info
[MA
] = Info
[0];
9406 #if NCORE>1 && !defined(SEP_STATE)
9407 enter_critical(GLOBAL_LOCK
); /* crude, but necessary */
9408 /* to make this mode work, also replace emalloc with grab_shared inside store MA routines */
9411 if (!dfa_store(Info
))
9415 { Info
[MA
] &= ~(1|16|32); /* _a_t */
9417 { Info
[MA
-1] = 4; /* off-stack bit */
9419 if (!dfa_member(MA
-1))
9422 printf("intersected 1st dfs stack\n");
9428 printf("new state\n");
9434 { Info
[MA
-1] = 1; /* proviso bit */
9436 trpt
->proviso
= dfa_member(MA
-1);
9438 Info
[MA
-1] = 4; /* off-stack bit */
9439 if (dfa_member(MA
-1))
9440 { ret_val
= 1; /* off-stack */
9442 printf("old state\n");
9445 { ret_val
= 2; /* on-stack */
9447 printf("on-stack\n");
9455 printf("old state\n");
9458 #if NCORE>1 && !defined(SEP_STATE)
9459 leave_critical(GLOBAL_LOCK
);
9461 return ret_val
; /* old state */
9464 #if defined(BITSTATE) && defined(LC)
9466 compact_stack(char *vin
, int n
)
9468 s_hash((uchar
*)vin
, n
); /* sets K1 and K2 */
9470 delta
++; /* room for state[0] |= 128 */
9472 memcpy((char *) &comp_now
+ delta
, (char *) &K1
, WS
);
9474 memcpy((char *) &comp_now
+ delta
, (char *) &K2
, WS
);
9475 delta
+= WS
; /* use all available bits */
9480 hstore(char *vin
, int nin
) /* hash table storage */
9481 { struct H_el
*ntmp
;
9482 struct H_el
*tmp
, *olst
= (struct H_el
*) 0;
9483 char *v
; int n
, m
=0;
9488 #if defined(BITSTATE) && defined(LC)
9490 { v
= (char *) &comp_now
;
9491 n
= compact_stack(vin
, nin
);
9499 v
= (char *) &comp_now
;
9504 n
= compress(vin
, nin
);
9510 { v
[0] = 0; /* _a_t */
9513 for (m
= 0; m
< NFAIR
; m
++)
9514 v
[m
+1] = 0; /* _cnt[] */
9520 #if !defined(HC) && !(defined(BITSTATE) && defined(LC))
9521 s_hash((uchar
*)v
, n
);
9523 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9524 enter_critical(CS_ID
); /* uses spinlock */
9528 { tmp
= grab_state(n
);
9531 { /* if we get here -- we've already issued a warning */
9532 /* but we want to allow the normal distributed termination */
9533 /* to collect the stats on all cpus in the wrapup */
9534 #if !defined(SEP_STATE) && !defined(BITSTATE)
9535 leave_critical(CS_ID
);
9537 return 1; /* allow normal termination */
9542 { for (;; hcmp
++, olst
= tmp
, tmp
= tmp
->nxt
)
9543 { /* skip the _a_t and the _cnt bytes */
9546 { if (!tmp
->nxt
) goto Append
;
9550 m
= memcmp(((char *)&(tmp
->state
)) + S_A
,
9561 { if ((((char *)&(tmp
->state
))[0] & V_A
) != V_A
)
9562 { wasnew
= 1; nShadow
++;
9563 ((char *)&(tmp
->state
))[0] |= V_A
;
9567 { /* 0 <= now._cnt[now._a_t&1] < MAXPROC */
9568 unsigned ci
, bp
; /* index, bit pos */
9569 ci
= (now
._cnt
[now
._a_t
&1] / 8);
9570 bp
= (now
._cnt
[now
._a_t
&1] - 8*ci
);
9571 if (now
._a_t
&1) /* use tail-bits in _cnt */
9572 { ci
= (NFAIR
- 1) - ci
;
9573 bp
= 7 - bp
; /* bp = 0..7 */
9575 ci
++; /* skip over _a_t */
9576 bp
= 1 << bp
; /* the bit mask */
9577 if ((((char *)&(tmp
->state
))[ci
] & bp
)==0)
9582 ((char *)&(tmp
->state
))[ci
] |= bp
;
9585 /* else: wasnew == 0, i.e., old state */
9591 Lstate
= (struct H_el
*) tmp
;
9596 { Lstate
= (struct H_el
*) tmp
;
9599 && (tmp
->tagged
&A_V
)
9605 printf("cpu%d: ", core_id
);
9607 printf("1st dfs-stack intersected on state %d+\n",
9610 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9611 leave_critical(CS_ID
);
9617 printf("cpu%d: ", core_id
);
9619 printf(" New state %d+\n", (int) tmp
->st_id
);
9622 dumpstate(1, (char *)&(tmp
->state
),n
,tmp
->tagged
);
9624 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9625 leave_critical(CS_ID
);
9630 if ((S_A
)?(tmp
->tagged
&V_A
):tmp
->tagged
)
9631 { Lstate
= (struct H_el
*) tmp
;
9633 /* already on current dfs stack */
9634 /* but may also be on 1st dfs stack */
9636 && (tmp
->tagged
&A_V
)
9639 && (!fairness
|| now
._cnt
[1] <= 1)
9646 printf("cpu%d: ", core_id
);
9648 printf(" Stack state %d\n", (int) tmp
->st_id
);
9651 dumpstate(0, (char *)&(tmp
->state
),n
,tmp
->tagged
);
9653 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9654 leave_critical(CS_ID
);
9656 return 2; /* match on stack */
9663 printf("cpu%d: ", core_id
);
9665 printf(" New state %d+\n", (int) tmp
->st_id
);
9668 dumpstate(1, (char *)&(tmp
->state
), n
, 0);
9670 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9671 leave_critical(CS_ID
);
9678 printf("cpu%d: ", core_id
);
9680 printf(" Old state %d\n", (int) tmp
->st_id
);
9683 dumpstate(0, (char *)&(tmp
->state
), n
, 0);
9690 printf("cpu%d: ", core_id
);
9692 printf(" ReVisiting (from smaller depth)\n");
9695 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9696 leave_critical(CS_ID
);
9701 #if (defined(BFS) && defined(Q_PROVISO)) || NCORE>1
9702 Lstate
= (struct H_el
*) tmp
;
9704 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9705 leave_critical(CS_ID
);
9707 return 1; /* match outside stack */
9709 { /* insert state before tmp */
9710 ntmp
= grab_state(n
);
9714 #if !defined(SEP_STATE) && !defined(BITSTATE)
9715 leave_critical(CS_ID
);
9717 return 1; /* allow normal termination */
9727 } else if (!tmp
->nxt
)
9728 { /* append after tmp */
9732 tmp
->nxt
= grab_state(n
);
9736 #if !defined(SEP_STATE) && !defined(BITSTATE)
9737 leave_critical(CS_ID
);
9739 return 1; /* allow normal termination */
9747 tmp
->st_id
= (unsigned) nstates
;
9749 printf("cpu%d: ", core_id
);
9752 printf(" Push state %d\n", ((int) nstates
) - 1);
9754 printf(" New state %d\n", (int) nstates
);
9757 #if !defined(SAFETY) || defined(REACH)
9766 { unsigned ci
, bp
; /* as above */
9767 ci
= (now
._cnt
[now
._a_t
&1] / 8);
9768 bp
= (now
._cnt
[now
._a_t
&1] - 8*ci
);
9770 { ci
= (NFAIR
- 1) - ci
;
9771 bp
= 7 - bp
; /* bp = 0..7 */
9779 #if defined(AUTO_RESIZE) && !defined(BITSTATE)
9782 memcpy(((char *)&(tmp
->state
)), v
, n
);
9784 tmp
->tagged
= (S_A
)?V_A
:(depth
+1);
9786 dumpstate(-1, v
, n
, tmp
->tagged
);
9788 Lstate
= (struct H_el
*) tmp
;
9791 dumpstate(-1, v
, n
, 0);
9794 Lstate
= (struct H_el
*) tmp
;
9797 /* #if NCORE>1 && !defined(SEP_STATE) */
9800 tmp
->cpu_id
= core_id
;
9802 #if !defined(SEP_STATE) && !defined(BITSTATE)
9803 leave_critical(CS_ID
);
9809 #include TRANSITIONS
9813 r_ck(reached0
, nstates0
, 0, src_ln0
, src_file0
);
9814 r_ck(reached1
, nstates1
, 1, src_ln1
, src_file1
);
9815 r_ck(reached2
, nstates2
, 2, src_ln2
, src_file2
);
9816 r_ck(reached3
, nstates3
, 3, src_ln3
, src_file3
);
9817 r_ck(reached4
, nstates4
, 4, src_ln4
, src_file4
);
9825 for (l_in
= 0; l_in
< 4; l_in
++)
9827 now
.buffer_use
[l_in
] = 0;
9832 for (l_in
= 0; l_in
< 2; l_in
++)
9834 now
.commit_count
[l_in
] = 0;
9839 for (l_in
= 0; l_in
< 2; l_in
++)
9841 now
.retrieve_count
[l_in
] = 0;
9844 now
.events_lost
= 0;
9848 for (l_in
= 0; l_in
< 4; l_in
++)
9850 logval("buffer_use[l_in]", now
.buffer_use
[l_in
]);
9853 logval("write_off", now
.write_off
);
9855 for (l_in
= 0; l_in
< 2; l_in
++)
9857 logval("commit_count[l_in]", now
.commit_count
[l_in
]);
9860 logval("read_off", now
.read_off
);
9862 for (l_in
= 0; l_in
< 2; l_in
++)
9864 logval("retrieve_count[l_in]", now
.retrieve_count
[l_in
]);
9867 logval("events_lost", now
.events_lost
);
9868 logval("refcount", now
.refcount
);
9870 Maxbody
= max(Maxbody
, sizeof(State
)-VECTORSZ
);
9874 addqueue(int n
, int is_rv
)
9875 { int j
=0, i
= now
._nr_qs
;
9880 Uerror("too many queues");
9882 default: Uerror("bad queue - addqueue");
9885 q_skip
[i
] = WS
-(vsize
%WS
);
9893 for (k
+= (int) q_skip
[i
]; k
> vsize
; k
--)
9896 vsize
+= (int) q_skip
[i
];
9897 q_offset
[i
] = vsize
;
9903 hmax
= max(hmax
, vsize
);
9904 if (vsize
>= VECTORSZ
)
9905 Uerror("VECTORSZ is too small, edit pan.h");
9906 memset((char *)qptr(i
), 0, j
);
9907 ((Q0
*)qptr(i
))->_t
= n
;
9913 qsend(int into
, int sorted
, int args_given
)
9920 uerror("ref to uninitialized chan name (sending)");
9921 if (into
>= (int) now
._nr_qs
|| into
< 0)
9922 Uerror("qsend bad queue#");
9924 j
= ((Q0
*)qptr(into
))->Qlen
;
9925 switch (((Q0
*)qptr(into
))->_t
) {
9926 case 0: printf("queue %d was deleted\n", into
+1);
9927 default: Uerror("bad queue - qsend");
9930 if (in_s_scope(into
+1))
9940 { uerror("ref to uninitialized chan name (q_zero)");
9943 switch(((Q0
*)qptr(from
))->_t
) {
9944 case 0: printf("queue %d was deleted\n", from
+1);
9946 Uerror("bad queue q-zero");
9952 { printf("==>> a test of the contents of a rv ");
9953 printf("channel always returns FALSE\n");
9954 uerror("error to poll rendezvous channel");
9961 setq_claim(int x
, int m
, char *s
, int y
, char *p
)
9963 uerror("x[rs] claim on uninitialized channel");
9964 if (x
< 0 || x
> MAXQ
)
9965 Uerror("cannot happen setq_claim");
9969 if (m
&2) q_S_check(x
, y
);
9970 if (m
&1) q_R_check(x
, y
);
9972 short q_sender
[MAXQ
+1];
9974 q_S_check(int x
, int who
)
9976 { q_sender
[x
] = who
+1;
9979 { printf("chan %s (%d), ",
9981 printf("sndr proc %s (%d)\n",
9983 uerror("xs chans cannot be used for rv");
9987 if (q_sender
[x
] != who
+1)
9988 { printf("pan: xs assertion violated: ");
9989 printf("access to chan <%s> (%d)\npan: by ",
9991 if (q_sender
[x
] > 0 && p_name
[q_sender
[x
]-1])
9992 printf("%s (proc %d) and by ",
9993 p_name
[q_sender
[x
]-1], q_sender
[x
]-1);
9994 printf("%s (proc %d)\n",
9996 uerror("error, partial order reduction invalid");
10000 short q_recver
[MAXQ
+1];
10002 q_R_check(int x
, int who
)
10003 { if (!q_recver
[x
])
10004 { q_recver
[x
] = who
+1;
10007 { printf("chan %s (%d), ",
10009 printf("recv proc %s (%d)\n",
10011 uerror("xr chans cannot be used for rv");
10015 if (q_recver
[x
] != who
+1)
10016 { printf("pan: xr assertion violated: ");
10017 printf("access to chan %s (%d)\npan: ",
10019 if (q_recver
[x
] > 0 && p_name
[q_recver
[x
]-1])
10020 printf("by %s (proc %d) and ",
10021 p_name
[q_recver
[x
]-1], q_recver
[x
]-1);
10022 printf("by %s (proc %d)\n",
10024 uerror("error, partial order reduction invalid");
10032 uerror("ref to uninitialized chan name (len)");
10033 return ((Q0
*)qptr(x
))->Qlen
;
10039 uerror("ref to uninitialized chan name (qfull)");
10040 switch(((Q0
*)qptr(from
))->_t
) {
10041 case 0: printf("queue %d was deleted\n", from
+1);
10043 Uerror("bad queue - q_full");
10050 { /* empty or full */
10051 return !q_len(from
) || q_full(from
);
10056 qrecv(int from
, int slot
, int fld
, int done
)
10061 uerror("ref to uninitialized chan name (receiving)");
10062 if (from
>= (int) now
._nr_qs
|| from
< 0)
10063 Uerror("qrecv bad queue#");
10066 if (done
&& (in_r_scope(from
+1)))
10067 require('r', from
);
10069 switch (((Q0
*)qptr(from
))->_t
) {
10070 case 0: printf("queue %d was deleted\n", from
+1);
10071 default: Uerror("bad queue - qrecv");
10080 col_q(int i
, char *z
)
10083 Q0
*ptr
= (Q0
*) qptr(i
);
10085 default: Uerror("bad qtype - collapse");
10087 if (z
) x
= z
; else x
= scratch
;
10088 y
= (char *) ptr
; k
= q_offset
[i
];
10089 /* no need to store the empty slots at the end */
10090 j
-= (q_max
[ptr
->_t
] - ptr
->Qlen
) * ((j
- 2)/q_max
[ptr
->_t
]);
10091 for ( ; j
> 0; j
--, y
++)
10092 if (!Mask
[k
++]) *x
++ = *y
;
10093 for (j
= 0; j
< WS
-1; j
++)
10096 if (z
) return (long) (x
- z
);
10097 return ordinal(scratch
, x
-scratch
, 1); /* chan */
10103 { int _m
=0, j
; uchar
*z
;
10109 uerror("ref to uninitialized chan (unsend)");
10111 j
= ((Q0
*)z
)->Qlen
;
10112 ((Q0
*)z
)->Qlen
= --j
;
10113 switch (((Q0
*)qptr(into
))->_t
) {
10114 default: Uerror("bad queue - unsend");
10120 unrecv(int from
, int slot
, int fld
, int fldvar
, int strt
)
10124 uerror("ref to uninitialized chan (unrecv)");
10126 j
= ((Q0
*)z
)->Qlen
;
10127 if (strt
) ((Q0
*)z
)->Qlen
= j
+1;
10128 switch (((Q0
*)qptr(from
))->_t
) {
10129 default: Uerror("bad queue - qrecv");
10133 q_cond(short II
, Trans
*t
)
10135 for (i
= 0; i
< 6; i
++)
10136 { if (t
->ty
[i
] == TIMEOUT_F
) return 1;
10137 if (t
->ty
[i
] == ALPHA_F
)
10141 return (II
+1 == (short) now
._nr_pr
&& II
+1 < MAXPROC
);
10143 switch (t
->qu
[i
]) {
10145 default: Uerror("unknown qid - q_cond");
10153 { char ctd
[1024], carg
[64];
10155 strcpy(ctd
, "-DBITSTATE ");
10160 strcat(ctd
, "-DNOVSZ ");
10163 strcat(ctd
, "-DREVERSE ");
10166 strcat(ctd
, "-DT_REVERSE ");
10170 sprintf(carg
, "-DRANDOMIZE=%d ", RANDOMIZE
);
10173 strcat(ctd
, "-DRANDOMIZE ");
10177 sprintf(carg
, "-DSCHED=%d ", SCHED
);
10181 strcat(ctd
, "-DBFS ");
10184 sprintf(carg
, "-DMEMLIM=%d ", MEMLIM
);
10188 sprintf(carg
, "-DMEMCNT=%d ", MEMCNT
);
10193 strcat(ctd
, "-DNOCLAIM ");
10196 strcat(ctd
, "-DSAFETY ");
10199 strcat(ctd
, "-DNOFAIR ");
10203 { sprintf(carg
, "-DNFAIR=%d ", NFAIR
);
10210 strcat(ctd
, "-DNOREDUCE ");
10213 strcat(ctd
, "-DXUSAFE ");
10217 strcat(ctd
, "-DNP ");
10220 strcat(ctd
, "-DPEG ");
10223 strcat(ctd
, "-DVAR_RANGES ");
10226 strcat(ctd
, "-DHC0 ");
10229 strcat(ctd
, "-DHC1 ");
10232 strcat(ctd
, "-DHC2 ");
10235 strcat(ctd
, "-DHC3 ");
10238 strcat(ctd
, "-DHC4 ");
10241 strcat(ctd
, "-DCHECK ");
10244 strcat(ctd
, "-DCTL ");
10247 strcat(ctd
, "-DNIBIS ");
10249 #ifdef NOBOUNDCHECK
10250 strcat(ctd
, "-DNOBOUNDCHECK ");
10253 strcat(ctd
, "-DNOSTUTTER ");
10256 strcat(ctd
, "-DREACH ");
10259 strcat(ctd
, "-DPRINTF ");
10262 strcat(ctd
, "-DOTIM ");
10265 strcat(ctd
, "-DCOLLAPSE ");
10268 sprintf(carg
, "-DMA=%d ", MA
);
10272 strcat(ctd
, "-DSVDUMP ");
10275 if (VECTORSZ
!= 1024)
10276 { sprintf(carg
, "-DVECTORSZ=%d ", VECTORSZ
);
10281 strcat(ctd
, "-DVERBOSE ");
10284 strcat(ctd
, "-DCHECK ");
10287 strcat(ctd
, "-DSDUMP ");
10290 sprintf(carg
, "-DNCORE=%d ", NCORE
);
10294 sprintf(carg
, "-DSFH ");
10299 { sprintf(carg
, "-DVMAX=%d ", VMAX
);
10305 { sprintf(carg
, "-DPMAX=%d ", PMAX
);
10311 { sprintf(carg
, "-DQMAX=%d ", QMAX
);
10316 sprintf(carg
, "-DSET_WQ_SIZE=%d ", SET_WQ_SIZE
);
10319 printf("Compiled as: cc -o pan %span.c\n", ctd
);
10333 #define uchar unsigned char
10335 #define ulong unsigned long
10336 #define ushort unsigned short
10339 #define HASH(y,n) (n)*(((long)y))
10340 #define INRANGE(e,h) ((h>=e->From && h<=e->To)||(e->s==1 && e->S==h))
10342 extern char *emalloc(unsigned long); /* imported routine */
10343 extern void dfa_init(ushort
); /* 4 exported routines */
10344 extern int dfa_member(ulong
);
10345 extern int dfa_store(uchar
*);
10346 extern void dfa_stats(void);
10348 typedef struct Edge
{
10349 uchar From
, To
; /* max range 0..255 */
10350 uchar s
, S
; /* if s=1, S is singleton */
10351 struct Vertex
*Dst
;
10355 typedef struct Vertex
{
10356 ulong key
, num
; /* key for splay tree, nr incoming edges */
10357 uchar from
[2], to
[2]; /* in-node predefined edge info */
10358 struct Vertex
*dst
[2];/* most nodes have 2 or more edges */
10359 struct Edge
*Succ
; /* in case there are more edges */
10360 struct Vertex
*lnk
, *left
, *right
; /* splay tree plumbing */
10363 static Edge
*free_edges
;
10364 static Vertex
*free_vertices
;
10365 static Vertex
**layers
; /* one splay tree of nodes per layer */
10366 static Vertex
**path
; /* run of word in the DFA */
10367 static Vertex
*R
, *F
, *NF
; /* Root, Final, Not-Final */
10368 static uchar
*word
, *lastword
;/* string, and last string inserted */
10369 static int dfa_depth
, iv
=0, nv
=0, pfrst
=0, Tally
;
10371 static void insert_it(Vertex
*, int); /* splay-tree code */
10372 static void delete_it(Vertex
*, int);
10373 static Vertex
*find_it(Vertex
*, Vertex
*, uchar
, int);
10376 recyc_edges(Edge
*e
)
10379 recyc_edges(e
->Nxt
);
10380 e
->Nxt
= free_edges
;
10385 new_edge(Vertex
*dst
)
10390 free_edges
= e
->Nxt
;
10391 e
->From
= e
->To
= e
->s
= e
->S
= 0;
10392 e
->Nxt
= (Edge
*) 0;
10394 e
= (Edge
*) emalloc(sizeof(Edge
));
10401 recyc_vertex(Vertex
*v
)
10403 recyc_edges(v
->Succ
);
10404 v
->Succ
= (Edge
*) free_vertices
;
10414 { v
= free_vertices
;
10415 free_vertices
= (Vertex
*) v
->Succ
;
10416 v
->Succ
= (Edge
*) 0;
10419 v
= (Vertex
*) emalloc(sizeof(Vertex
));
10426 allDelta(Vertex
*v
, int n
)
10427 { Vertex
*dst
= new_vertex();
10438 insert_edge(Vertex
*v
, Edge
*e
)
10439 { /* put new edge first */
10441 { v
->dst
[0] = e
->Dst
;
10442 v
->from
[0] = e
->From
;
10448 { v
->from
[1] = v
->from
[0]; v
->from
[0] = e
->From
;
10449 v
->to
[1] = v
->to
[0]; v
->to
[0] = e
->To
;
10450 v
->dst
[1] = v
->dst
[0]; v
->dst
[0] = e
->Dst
;
10454 { int f
= v
->from
[1];
10456 Vertex
*d
= v
->dst
[1];
10457 v
->from
[1] = v
->from
[0]; v
->from
[0] = e
->From
;
10458 v
->to
[1] = v
->to
[0]; v
->to
[0] = e
->To
;
10459 v
->dst
[1] = v
->dst
[0]; v
->dst
[0] = e
->Dst
;
10469 copyRecursive(Vertex
*v
, Edge
*e
)
10471 if (e
->Nxt
) copyRecursive(v
, e
->Nxt
);
10472 f
= new_edge(e
->Dst
);
10482 copyEdges(Vertex
*to
, Vertex
*from
)
10484 for (i
= 0; i
< 2; i
++)
10485 { to
->from
[i
] = from
->from
[i
];
10486 to
->to
[i
] = from
->to
[i
];
10487 to
->dst
[i
] = from
->dst
[i
];
10489 if (from
->Succ
) copyRecursive(to
, from
->Succ
);
10493 cacheDelta(Vertex
*v
, int h
, int first
)
10494 { static Edge
*ov
, tmp
; int i
;
10496 if (!first
&& INRANGE(ov
,h
))
10497 return ov
; /* intercepts about 10% */
10498 for (i
= 0; i
< 2; i
++)
10499 if (v
->dst
[i
] && h
>= v
->from
[i
] && h
<= v
->to
[i
])
10500 { tmp
.From
= v
->from
[i
];
10502 tmp
.Dst
= v
->dst
[i
];
10507 for (ov
= v
->Succ
; ov
; ov
= ov
->Nxt
)
10508 if (INRANGE(ov
,h
)) return ov
;
10510 Uerror("cannot get here, cacheDelta");
10515 Delta(Vertex
*v
, int h
) /* v->delta[h] */
10518 if (v
->dst
[0] && h
>= v
->from
[0] && h
<= v
->to
[0])
10519 return v
->dst
[0]; /* oldest edge */
10520 if (v
->dst
[1] && h
>= v
->from
[1] && h
<= v
->to
[1])
10522 for (e
= v
->Succ
; e
; e
= e
->Nxt
)
10525 Uerror("cannot happen Delta");
10526 return (Vertex
*) 0;
10530 numDelta(Vertex
*v
, int d
)
10535 for (i
= 0; i
< 2; i
++)
10537 { cnt
= v
->dst
[i
]->num
+ d
*(1 + v
->to
[i
] - v
->from
[i
]);
10538 if (d
== 1 && cnt
< v
->dst
[i
]->num
) goto bad
;
10539 v
->dst
[i
]->num
= cnt
;
10541 for (e
= v
->Succ
; e
; e
= e
->Nxt
)
10542 { cnt
= e
->Dst
->num
+ d
*(1 + e
->To
- e
->From
+ e
->s
);
10543 if (d
== 1 && cnt
< e
->Dst
->num
)
10544 bad
: Uerror("too many incoming edges");
10550 setDelta(Vertex
*v
, int h
, Vertex
*newdst
) /* v->delta[h] = newdst; */
10551 { Edge
*e
, *f
= (Edge
*) 0, *g
;
10554 /* remove the old entry, if there */
10555 for (i
= 0; i
< 2; i
++)
10556 if (v
->dst
[i
] && h
>= v
->from
[i
] && h
<= v
->to
[i
])
10557 { if (h
== v
->from
[i
])
10558 { if (h
== v
->to
[i
])
10559 { v
->dst
[i
] = (Vertex
*) 0;
10560 v
->from
[i
] = v
->to
[i
] = 0;
10563 } else if (h
== v
->to
[i
])
10566 { g
= new_edge(v
->dst
[i
]);/* same dst */
10567 g
->From
= v
->from
[i
];
10568 g
->To
= h
-1; /* left half */
10569 v
->from
[i
] = h
+1; /* right half */
10574 for (e
= v
->Succ
; e
; f
= e
, e
= e
->Nxt
)
10575 { if (e
->s
== 1 && e
->S
== h
)
10579 if (h
>= e
->From
&& h
<= e
->To
)
10580 { if (h
== e
->From
)
10583 { e
->From
= e
->To
= e
->S
;
10590 } else if (h
== e
->To
)
10593 { g
= new_edge(e
->Dst
); /* same dst */
10595 g
->To
= h
-1; /* g=left half */
10596 e
->From
= h
+1; /* e=right half */
10597 g
->Nxt
= e
->Nxt
; /* insert g */
10598 e
->Nxt
= g
; /* behind e */
10602 rem_tst
: if (e
->From
> e
->To
)
10608 e
->Nxt
= (Edge
*) 0;
10611 { e
->From
= e
->To
= e
->S
;
10617 /* check if newdst is already there */
10618 for (i
= 0; i
< 2; i
++)
10619 if (v
->dst
[i
] == newdst
)
10620 { if (h
+1 == (int) v
->from
[i
])
10624 if (h
== (int) v
->to
[i
]+1)
10628 for (e
= v
->Succ
; e
; e
= e
->Nxt
)
10629 { if (e
->Dst
== newdst
)
10630 { if (h
+1 == (int) e
->From
)
10632 if (e
->s
== 1 && e
->S
+1 == e
->From
)
10638 if (h
== (int) e
->To
+1)
10640 if (e
->s
== 1 && e
->S
== e
->To
+1)
10651 /* add as a new edge */
10652 e
= new_edge(newdst
);
10653 e
->From
= e
->To
= h
;
10658 cheap_key(Vertex
*v
)
10662 { vk2
= (ulong
) v
->dst
[0];
10663 if ((ulong
) v
->dst
[1] > vk2
)
10664 vk2
= (ulong
) v
->dst
[1];
10665 } else if (v
->dst
[1])
10666 vk2
= (ulong
) v
->dst
[1];
10669 for (e
= v
->Succ
; e
; e
= e
->Nxt
)
10670 if ((ulong
) e
->Dst
> vk2
)
10671 vk2
= (ulong
) e
->Dst
;
10673 Tally
= (vk2
>>2)&(TWIDTH
-1);
10678 mk_key(Vertex
*v
) /* not sensitive to order */
10679 { ulong m
= 0, vk2
= 0;
10683 { m
+= HASH(v
->dst
[0], v
->to
[0] - v
->from
[0] + 1);
10684 vk2
= (ulong
) v
->dst
[0];
10687 { m
+= HASH(v
->dst
[1], v
->to
[1] - v
->from
[1] + 1);
10688 if ((ulong
) v
->dst
[1] > vk2
) vk2
= (ulong
) v
->dst
[1];
10690 for (e
= v
->Succ
; e
; e
= e
->Nxt
)
10691 { m
+= HASH(e
->Dst
, e
->To
- e
->From
+ 1 + e
->s
);
10692 if ((ulong
) e
->Dst
> vk2
) vk2
= (ulong
) e
->Dst
;
10694 Tally
= (vk2
>>2)&(TWIDTH
-1);
10699 mk_special(int sigma
, Vertex
*n
, Vertex
*v
)
10700 { ulong m
= 0, vk2
= 0;
10704 for (i
= 0; i
< 2; i
++)
10706 { if (sigma
>= v
->from
[i
] && sigma
<= v
->to
[i
])
10707 { m
+= HASH(v
->dst
[i
], v
->to
[i
]-v
->from
[i
]);
10708 if ((ulong
) v
->dst
[i
] > vk2
10709 && v
->to
[i
] > v
->from
[i
])
10710 vk2
= (ulong
) v
->dst
[i
];
10712 { m
+= HASH(v
->dst
[i
], v
->to
[i
]-v
->from
[i
]+1);
10713 if ((ulong
) v
->dst
[i
] > vk2
)
10714 vk2
= (ulong
) v
->dst
[i
];
10716 for (f
= v
->Succ
; f
; f
= f
->Nxt
)
10717 { if (sigma
>= f
->From
&& sigma
<= f
->To
)
10718 { m
+= HASH(f
->Dst
, f
->To
- f
->From
+ f
->s
);
10719 if ((ulong
) f
->Dst
> vk2
10720 && f
->To
- f
->From
+ f
->s
> 0)
10721 vk2
= (ulong
) f
->Dst
;
10722 } else if (f
->s
== 1 && sigma
== f
->S
)
10723 { m
+= HASH(f
->Dst
, f
->To
- f
->From
+ 1);
10724 if ((ulong
) f
->Dst
> vk2
) vk2
= (ulong
) f
->Dst
;
10726 { m
+= HASH(f
->Dst
, f
->To
- f
->From
+ 1 + f
->s
);
10727 if ((ulong
) f
->Dst
> vk2
) vk2
= (ulong
) f
->Dst
;
10730 if ((ulong
) n
> vk2
) vk2
= (ulong
) n
;
10731 Tally
= (vk2
>>2)&(TWIDTH
-1);
10737 dfa_init(ushort nr_layers
)
10738 { int i
; Vertex
*r
, *t
;
10740 dfa_depth
= nr_layers
; /* one byte per layer */
10741 path
= (Vertex
**) emalloc((dfa_depth
+1)*sizeof(Vertex
*));
10742 layers
= (Vertex
**) emalloc(TWIDTH
*(dfa_depth
+1)*sizeof(Vertex
*));
10743 lastword
= (uchar
*) emalloc((dfa_depth
+1)*sizeof(uchar
));
10744 lastword
[dfa_depth
] = lastword
[0] = 255;
10745 path
[0] = R
= new_vertex(); F
= new_vertex();
10747 for (i
= 1, r
= R
; i
< dfa_depth
; i
++, r
= t
)
10748 t
= allDelta(r
, i
-1);
10749 NF
= allDelta(r
, i
-1);
10753 static void complement_dfa(void) { Vertex
*tmp
= F
; F
= NF
; NF
= tmp
; }
10757 tree_stats(Vertex
*t
)
10758 { Edge
*e
; double cnt
=0.0;
10760 if (!t
->key
) return 0;
10761 t
->key
= 0; /* precaution */
10762 if (t
->dst
[0]) cnt
++;
10763 if (t
->dst
[1]) cnt
++;
10764 for (e
= t
->Succ
; e
; e
= e
->Nxt
)
10766 cnt
+= tree_stats(t
->lnk
);
10767 cnt
+= tree_stats(t
->left
);
10768 cnt
+= tree_stats(t
->right
);
10774 { int i
, j
; double cnt
= 0.0;
10775 for (j
= 0; j
< TWIDTH
; j
++)
10776 for (i
= 0; i
< dfa_depth
+1; i
++)
10777 cnt
+= tree_stats(layers
[i
*TWIDTH
+j
]);
10778 printf("Minimized Automaton: %6d nodes and %6g edges\n",
10783 dfa_member(ulong n
)
10785 uchar
*w
= &word
[n
];
10788 p
= &path
[n
]; q
= (p
+1);
10789 for (i
= n
; i
< dfa_depth
; i
++)
10790 *q
++ = Delta(*p
++, *w
++);
10795 dfa_store(uchar
*sv
)
10796 { Vertex
**p
, **q
, *s
, *y
, *old
, *new = F
;
10797 uchar
*w
, *u
= lastword
;
10801 while (*w
++ == *u
++) /* find first byte that differs */
10803 pfrst
= (int) (u
- lastword
) - 1;
10804 memcpy(&lastword
[pfrst
], &sv
[pfrst
], dfa_depth
-pfrst
);
10805 if (pfrst
> iv
) pfrst
= iv
;
10806 if (pfrst
> nv
) pfrst
= nv
;
10808 p
= &path
[pfrst
]; q
= (p
+1); w
= &word
[pfrst
];
10809 for (i
= pfrst
; i
< dfa_depth
; i
++)
10810 *q
++ = Delta(*p
++, *w
++); /* (*p)->delta[*w++]; */
10812 if (*p
== F
) return 1; /* it's already there */
10817 new = find_it(path
[iv
], old
, word
[iv
], iv
);
10818 } while (new && iv
> 0);
10821 nv
= k
= 0; s
= path
[0];
10822 for (j
= 1; j
<= iv
; ++j
)
10823 if (path
[j
]->num
> 1)
10824 { y
= new_vertex();
10825 copyEdges(y
, path
[j
]);
10829 setDelta(s
, word
[j
-1], y
);
10831 y
->num
= 1; /* initial value 1 */
10833 path
[j
]->num
--; /* only 1 moved from j to y */
10839 y
= Delta(s
, word
[iv
]);
10842 setDelta(s
, word
[iv
], old
);
10846 for (j
= iv
+1; j
< dfa_depth
; j
++)
10847 if (path
[j
]->num
== 0)
10848 { numDelta(path
[j
], -1);
10849 delete_it(path
[j
], j
);
10850 recyc_vertex(path
[j
]);
10857 splay(ulong i
, Vertex
*t
)
10858 { Vertex N
, *l
, *r
, *y
;
10861 N
.left
= N
.right
= (Vertex
*) 0;
10865 { if (!t
->left
) break;
10866 if (i
< t
->left
->key
)
10868 t
->left
= y
->right
;
10871 if (!t
->left
) break;
10876 } else if (i
> t
->key
)
10877 { if (!t
->right
) break;
10878 if (i
> t
->right
->key
)
10880 t
->right
= y
->left
;
10883 if (!t
->right
) break;
10891 l
->right
= t
->left
;
10892 r
->left
= t
->right
;
10899 insert_it(Vertex
*v
, int L
)
10904 nr
= ((L
*TWIDTH
)+Tally
);
10915 new->left
= t
->left
;
10917 t
->left
= (Vertex
*) 0;
10918 } else if (i
> t
->key
)
10920 new->right
= t
->right
;
10922 t
->right
= (Vertex
*) 0;
10923 } else /* it's already there */
10924 { v
->lnk
= t
->lnk
; /* put in linked list off v */
10932 checkit(Vertex
*h
, Vertex
*v
, Vertex
*n
, uchar sigma
)
10936 for (k
= 0; k
< 2; k
++)
10938 { if (sigma
>= h
->from
[k
] && sigma
<= h
->to
[k
])
10939 { if (h
->dst
[k
] != n
) goto no_match
;
10941 for (i
= h
->from
[k
]; i
<= h
->to
[k
]; i
++)
10942 { if (i
== sigma
) continue;
10943 g
= cacheDelta(v
, i
, j
); j
= 0;
10944 if (h
->dst
[k
] != g
->Dst
)
10946 if (g
->s
== 0 || g
->S
!= i
)
10949 for (f
= h
->Succ
; f
; f
= f
->Nxt
)
10950 { if (INRANGE(f
,sigma
))
10951 { if (f
->Dst
!= n
) goto no_match
;
10953 for (i
= f
->From
; i
<= f
->To
; i
++)
10954 { if (i
== sigma
) continue;
10955 g
= cacheDelta(v
, i
, j
); j
= 0;
10956 if (f
->Dst
!= g
->Dst
)
10958 if (g
->s
== 1 && i
== g
->S
)
10962 if (f
->s
&& f
->S
!= sigma
)
10963 { g
= cacheDelta(v
, f
->S
, 1);
10964 if (f
->Dst
!= g
->Dst
)
10968 if (h
->Succ
|| h
->dst
[0] || h
->dst
[1]) return 1;
10974 find_it(Vertex
*v
, Vertex
*n
, uchar sigma
, int L
)
10978 i
= mk_special(sigma
,n
,v
);
10979 nr
= ((L
*TWIDTH
)+Tally
);
10982 if (!t
) return (Vertex
*) 0;
10983 layers
[nr
] = t
= splay(i
, t
);
10985 for (z
= t
; z
; z
= z
->lnk
)
10986 if (checkit(z
, v
, n
, sigma
))
10989 return (Vertex
*) 0;
10993 delete_it(Vertex
*v
, int L
)
10998 nr
= ((L
*TWIDTH
)+Tally
);
11004 { Vertex
*z
, *y
= (Vertex
*) 0;
11005 for (z
= t
; z
&& z
!= v
; y
= z
, z
= z
->lnk
)
11007 if (z
!= v
) goto bad
;
11010 z
->lnk
= (Vertex
*) 0;
11013 } else if (z
->lnk
) /* z == t == v */
11016 y
->right
= t
->right
;
11017 t
->left
= t
->right
= t
->lnk
= (Vertex
*) 0;
11021 /* delete the node itself */
11025 { x
= splay(i
, t
->left
);
11026 x
->right
= t
->right
;
11028 t
->left
= t
->right
= t
->lnk
= (Vertex
*) 0;
11032 bad
: Uerror("cannot happen delete");
11035 #if defined(MA) && (defined(W_XPT) || defined(R_XPT))
11036 static Vertex
**temptree
;
11037 static char wbuf
[4096];
11038 static int WCNT
= 4096, wcnt
=0;
11039 static uchar stacker
[MA
+1];
11040 static ulong stackcnt
= 0;
11041 extern double nstates
, nlinks
, truncs
, truncs2
;
11044 xwrite(int fd
, char *b
, int n
)
11046 if (wcnt
+n
>= 4096)
11047 { write(fd
, wbuf
, wcnt
);
11050 memcpy(&wbuf
[wcnt
], b
, n
);
11058 write(fd
, wbuf
, wcnt
);
11064 w_vertex(int fd
, Vertex
*v
)
11065 { char t
[3]; int i
; Edge
*e
;
11067 xwrite(fd
, (char *) &v
, sizeof(Vertex
*));
11069 for (i
= 0; i
< 2; i
++)
11071 { t
[1] = v
->from
[i
], t
[2] = v
->to
[i
];
11073 xwrite(fd
, (char *) &(v
->dst
[i
]), sizeof(Vertex
*));
11075 for (e
= v
->Succ
; e
; e
= e
->Nxt
)
11076 { t
[1] = e
->From
, t
[2] = e
->To
;
11078 xwrite(fd
, (char *) &(e
->Dst
), sizeof(Vertex
*));
11081 { t
[1] = t
[2] = e
->S
;
11083 xwrite(fd
, (char *) &(e
->Dst
), sizeof(Vertex
*));
11088 w_layer(int fd
, Vertex
*v
)
11092 xwrite(fd
, (char *) &c
, 1);
11094 w_layer(fd
, v
->lnk
);
11095 w_layer(fd
, v
->left
);
11096 w_layer(fd
, v
->right
);
11101 { int fd
; char nm
[64];
11103 static uchar xwarned
= 0;
11105 sprintf(nm
, "%s.xpt", PanSource
);
11106 if ((fd
= creat(nm
, 0666)) <= 0)
11109 printf("cannot creat checkpoint file\n");
11112 xwrite(fd
, (char *) &nstates
, sizeof(double));
11113 xwrite(fd
, (char *) &truncs
, sizeof(double));
11114 xwrite(fd
, (char *) &truncs2
, sizeof(double));
11115 xwrite(fd
, (char *) &nlinks
, sizeof(double));
11116 xwrite(fd
, (char *) &dfa_depth
, sizeof(int));
11117 xwrite(fd
, (char *) &R
, sizeof(Vertex
*));
11118 xwrite(fd
, (char *) &F
, sizeof(Vertex
*));
11119 xwrite(fd
, (char *) &NF
, sizeof(Vertex
*));
11121 for (j
= 0; j
< TWIDTH
; j
++)
11122 for (i
= 0; i
< dfa_depth
+1; i
++)
11123 { w_layer(fd
, layers
[i
*TWIDTH
+j
]);
11124 c
= 2; xwrite(fd
, (char *) &c
, 1);
11130 xread(int fd
, char *b
, int n
)
11131 { int m
= wcnt
; int delta
= 0;
11133 { if (m
> 0) memcpy(b
, &wbuf
[WCNT
-m
], m
);
11135 WCNT
= wcnt
= read(fd
, wbuf
, 4096);
11137 Uerror("xread failed -- insufficient data");
11140 memcpy(&b
[delta
], &wbuf
[WCNT
-wcnt
], n
);
11145 x_cleanup(Vertex
*c
)
11146 { Edge
*e
; /* remove the tree and edges from c */
11148 for (e
= c
->Succ
; e
; e
= e
->Nxt
)
11155 { Vertex
*tmp
; int i
, s
;
11157 /* double-check: */
11158 stacker
[dfa_depth
-1] = 0; r
= dfa_store(stacker
);
11159 stacker
[dfa_depth
-1] = 4; j
= dfa_member(dfa_depth
-1);
11160 if (r
!= 1 || j
!= 0)
11161 { printf("%d: ", stackcnt
);
11162 for (i
= 0; i
< dfa_depth
; i
++)
11163 printf("%d,", stacker
[i
]);
11164 printf(" -- not a stackstate <o:%d,4:%d>\n", r
, j
);
11167 stacker
[dfa_depth
-1] = 1;
11168 s
= dfa_member(dfa_depth
-1);
11170 { tmp
= F
; F
= NF
; NF
= tmp
; } /* complement */
11171 if (s
) dfa_store(stacker
);
11172 stacker
[dfa_depth
-1] = 0;
11173 dfa_store(stacker
);
11175 { tmp
= F
; F
= NF
; NF
= tmp
; }
11179 x_rm_stack(Vertex
*t
, int k
)
11187 for (e
= t
->Succ
; e
; e
= e
->Nxt
)
11188 { for (j
= e
->From
; j
<= (int) e
->To
; j
++)
11189 { stacker
[k
] = (uchar
) j
;
11190 x_rm_stack(e
->Dst
, k
-1);
11193 { stacker
[k
] = e
->S
;
11194 x_rm_stack(e
->Dst
, k
-1);
11199 insert_withkey(Vertex
*v
, int L
)
11200 { Vertex
*new, *t
= temptree
[L
];
11202 if (!t
) { temptree
[L
] = v
; return v
; }
11203 t
= splay(v
->key
, t
);
11204 if (v
->key
< t
->key
)
11206 new->left
= t
->left
;
11208 t
->left
= (Vertex
*) 0;
11209 } else if (v
->key
> t
->key
)
11211 new->right
= t
->right
;
11213 t
->right
= (Vertex
*) 0;
11215 { if (t
!= R
&& t
!= F
&& t
!= NF
)
11216 Uerror("double insert, bad checkpoint data");
11227 find_withkey(Vertex
*v
, int L
)
11228 { Vertex
*t
= temptree
[L
];
11230 { temptree
[L
] = t
= splay((ulong
) v
, t
);
11231 if (t
->key
== (ulong
) v
)
11234 Uerror("not found error, bad checkpoint data");
11235 return (Vertex
*) 0;
11239 r_layer(int fd
, int n
)
11245 { xread(fd
, &c
, 1);
11248 { v
= new_vertex();
11249 xread(fd
, (char *) &(v
->key
), sizeof(Vertex
*));
11250 v
= insert_withkey(v
, n
);
11251 } else /* c == 0 */
11252 { e
= new_edge((Vertex
*) 0);
11256 xread(fd
, (char *) &(e
->Dst
), sizeof(Vertex
*));
11262 v_fix(Vertex
*t
, int nr
)
11267 for (i
= 0; i
< 2; i
++)
11269 t
->dst
[i
] = find_withkey(t
->dst
[i
], nr
);
11271 for (e
= t
->Succ
; e
; e
= e
->Nxt
)
11272 e
->Dst
= find_withkey(e
->Dst
, nr
);
11274 v_fix(t
->left
, nr
);
11275 v_fix(t
->right
, nr
);
11279 v_insert(Vertex
*t
, int nr
)
11283 v_insert(t
->left
, nr
);
11284 v_insert(t
->right
, nr
);
11286 /* remove only leafs from temptree */
11287 t
->left
= t
->right
= t
->lnk
= (Vertex
*) 0;
11288 insert_it(t
, nr
); /* into layers */
11289 for (i
= 0; i
< 2; i
++)
11291 t
->dst
[i
]->num
+= (t
->to
[i
] - t
->from
[i
] + 1);
11292 for (e
= t
->Succ
; e
; e
= e
->Nxt
)
11293 e
->Dst
->num
+= (e
->To
- e
->From
+ 1 + e
->s
);
11300 for (i
= 0; i
< dfa_depth
; i
++)
11301 v_fix(temptree
[i
], (i
+1));
11303 for (i
= dfa_depth
; i
>= 0; i
--)
11304 v_insert(temptree
[i
], i
);
11308 x_tail(Vertex
*t
, ulong want
)
11309 { int i
, yes
, no
; Edge
*e
; Vertex
*v
= (Vertex
*) 0;
11314 for (i
= 0; i
< 2; i
++)
11315 if ((ulong
) t
->dst
[i
] == want
)
11316 { /* was t->from[i] <= 0 && t->to[i] >= 0 */
11317 /* but from and to are uchar */
11318 if (t
->from
[i
] == 0)
11321 if (t
->from
[i
] <= 4 && t
->to
[i
] >= 4)
11325 for (e
= t
->Succ
; e
; e
= e
->Nxt
)
11326 if ((ulong
) e
->Dst
== want
)
11327 { /* was INRANGE(e,0) but From and To are uchar */
11328 if ((e
->From
== 0) || (e
->s
==1 && e
->S
==0))
11330 else if (INRANGE(e
, 4))
11333 if (yes
&& !no
) return t
;
11334 v
= x_tail(t
->left
, want
); if (v
) return v
;
11335 v
= x_tail(t
->right
, want
); if (v
) return v
;
11336 return (Vertex
*) 0;
11340 x_anytail(Vertex
*t
, Vertex
*c
, int nr
)
11341 { int i
; Edge
*e
, *f
; Vertex
*v
;
11345 for (i
= 0; i
< 2; i
++)
11346 if ((ulong
) t
->dst
[i
] == c
->key
)
11347 { v
= new_vertex(); v
->key
= t
->key
;
11349 f
->From
= t
->from
[i
];
11354 x_anytail(temptree
[nr
-1], v
, nr
-1);
11357 for (e
= t
->Succ
; e
; e
= e
->Nxt
)
11358 if ((ulong
) e
->Dst
== c
->key
)
11359 { v
= new_vertex(); v
->key
= t
->key
;
11367 x_anytail(temptree
[nr
-1], v
, nr
-1);
11370 x_anytail(t
->left
, c
, nr
);
11371 x_anytail(t
->right
, c
, nr
);
11376 { Vertex
*c
, *v
; /* find 0 and !4 predecessor of F */
11378 v
= x_tail(temptree
[dfa_depth
-1], F
->key
);
11379 if (!v
) return (Vertex
*) 0;
11381 c
= new_vertex(); c
->key
= v
->key
;
11383 /* every node on dfa_depth-2 that has v->key as succ */
11384 /* make copy and let c point to these (reversing ptrs) */
11386 x_anytail(temptree
[dfa_depth
-2], c
, dfa_depth
-2);
11393 { int fd
; char nm
[64]; Vertex
*d
;
11397 sprintf(nm
, "%s.xpt", PanSource
);
11398 if ((fd
= open(nm
, 0)) < 0) /* O_RDONLY */
11399 Uerror("cannot open checkpoint file");
11401 xread(fd
, (char *) &nstates
, sizeof(double));
11402 xread(fd
, (char *) &truncs
, sizeof(double));
11403 xread(fd
, (char *) &truncs2
, sizeof(double));
11404 xread(fd
, (char *) &nlinks
, sizeof(double));
11405 xread(fd
, (char *) &dfa_depth
, sizeof(int));
11407 if (dfa_depth
!= MA
+a_cycles
)
11408 Uerror("bad dfa_depth in checkpoint file");
11410 path
= (Vertex
**) emalloc((dfa_depth
+1)*sizeof(Vertex
*));
11411 layers
= (Vertex
**) emalloc(TWIDTH
*(dfa_depth
+1)*sizeof(Vertex
*));
11412 temptree
= (Vertex
**) emalloc((dfa_depth
+2)*sizeof(Vertex
*));
11413 lastword
= (uchar
*) emalloc((dfa_depth
+1)*sizeof(uchar
));
11414 lastword
[dfa_depth
] = lastword
[0] = 255;
11416 path
[0] = R
= new_vertex();
11417 xread(fd
, (char *) &R
->key
, sizeof(Vertex
*));
11418 R
= insert_withkey(R
, 0);
11421 xread(fd
, (char *) &F
->key
, sizeof(Vertex
*));
11422 F
= insert_withkey(F
, dfa_depth
);
11425 xread(fd
, (char *) &NF
->key
, sizeof(Vertex
*));
11426 NF
= insert_withkey(NF
, dfa_depth
);
11428 for (j
= 0; j
< TWIDTH
; j
++)
11429 for (i
= 0; i
< dfa_depth
+1; i
++)
11432 if (wcnt
!= 0) Uerror("bad count in checkpoint file");
11436 stacker
[dfa_depth
-1] = 0;
11437 x_rm_stack(d
, dfa_depth
-2);
11441 printf("pan: removed %d stackstates\n", stackcnt
);
11442 nstates
-= (double) stackcnt
;
11447 check_claim(int st
)
11449 if (st
== endclaim
)
11450 uerror("claim violated!");
11451 if (stopstate
[VERI
][st
])
11452 uerror("end state in claim reached");
11458 printf("global vars:\n");
11459 printf(" byte write_off: %d\n", now
.write_off
);
11461 for (l_in
= 0; l_in
< 2; l_in
++)
11463 printf(" byte commit_count[%d]: %d\n", l_in
, now
.commit_count
[l_in
]);
11466 printf(" byte read_off: %d\n", now
.read_off
);
11468 for (l_in
= 0; l_in
< 2; l_in
++)
11470 printf(" byte retrieve_count[%d]: %d\n", l_in
, now
.retrieve_count
[l_in
]);
11473 printf(" byte events_lost: %d\n", now
.events_lost
);
11474 printf(" byte refcount: %d\n", now
.refcount
);
11476 for (l_in
= 0; l_in
< 4; l_in
++)
11478 printf(" bit buffer_use[%d]: %d\n", l_in
, now
.buffer_use
[l_in
]);
11483 c_locals(int pid
, int tp
)
11487 printf("local vars proc %d (:init:):\n", pid
);
11488 printf(" byte i: %d\n", ((P4
*)pptr(pid
))->i
);
11489 printf(" byte j: %d\n", ((P4
*)pptr(pid
))->j
);
11490 printf(" byte sum: %d\n", ((P4
*)pptr(pid
))->sum
);
11491 printf(" byte commit_sum: %d\n", ((P4
*)pptr(pid
))->commit_sum
);
11497 printf("local vars proc %d (reader):\n", pid
);
11498 printf(" byte i: %d\n", ((P2
*)pptr(pid
))->i
);
11499 printf(" byte j: %d\n", ((P2
*)pptr(pid
))->j
);
11500 printf(" byte tmp_retrieve: %d\n", ((P2
*)pptr(pid
))->tmp_retrieve
);
11501 printf(" byte lwrite_off: %d\n", ((P2
*)pptr(pid
))->lwrite_off
);
11502 printf(" byte lcommit_count: %d\n", ((P2
*)pptr(pid
))->lcommit_count
);
11505 printf("local vars proc %d (tracer):\n", pid
);
11506 printf(" byte size: %d\n", ((P1
*)pptr(pid
))->size
);
11507 printf(" byte prev_off: %d\n", ((P1
*)pptr(pid
))->prev_off
);
11508 printf(" byte new_off: %d\n", ((P1
*)pptr(pid
))->new_off
);
11509 printf(" byte tmp_commit: %d\n", ((P1
*)pptr(pid
))->tmp_commit
);
11510 printf(" byte i: %d\n", ((P1
*)pptr(pid
))->i
);
11511 printf(" byte j: %d\n", ((P1
*)pptr(pid
))->j
);
11514 printf("local vars proc %d (switcher):\n", pid
);
11515 printf(" byte prev_off: %d\n", ((P0
*)pptr(pid
))->prev_off
);
11516 printf(" byte new_off: %d\n", ((P0
*)pptr(pid
))->new_off
);
11517 printf(" byte tmp_commit: %d\n", ((P0
*)pptr(pid
))->tmp_commit
);
11518 printf(" byte size: %d\n", ((P0
*)pptr(pid
))->size
);
11526 default: Printf("%d", x
);
11530 c_chandump(int unused
) { unused
++; /* avoid complaints */ }