1 /*** Generated by Spin Version 5.1.6 -- 9 May 2008 ***/
2 /*** From source: buffer.spin ***/
5 #define _FILE_OFFSET_BITS 64
14 #if defined(WIN32) || defined(WIN64)
18 #include <sys/times.h>
20 #include <sys/types.h>
23 #define Offsetof(X, Y) ((unsigned long)(&(((X *)0)->Y)))
25 #define max(a,b) (((a)<(b)) ? (b) : (a))
28 int Printf(const char *fmt
, ...); /* prototype only */
34 State A_Root
; /* seed-state for cycles */
35 State now
; /* the full state-vector */
37 #if defined(C_States) && defined(HAS_TRACK)
39 c_update(uchar
*p_t_r
)
42 printf("c_update %u\n", p_t_r
);
46 c_revert(uchar
*p_t_r
)
49 printf("c_revert %u\n", p_t_r
);
78 #define onstack_now() (LL[trpt->j6] && LL[trpt->j7])
79 #define onstack_put() LL[trpt->j6]++; LL[trpt->j7]++
80 #define onstack_zap() LL[trpt->j6]--; LL[trpt->j7]--
82 #if !defined(SAFETY) && !defined(NOCOMP)
83 #define V_A (((now._a_t&1)?2:1) << (now._a_t&2))
84 #define A_V (((now._a_t&1)?1:2) << (now._a_t&2))
95 #define onstack_put() ;
96 #define onstack_zap() gstore((char *) &now, vsize, 4)
98 #if defined(FULLSTACK) && !defined(BITSTATE)
99 #define onstack_put() trpt->ostate = Lstate
100 #define onstack_zap() { \
102 trpt->ostate->tagged = \
103 (S_A)? (trpt->ostate->tagged&~V_A) : 0; \
110 #if !defined(NO_RESIZE) && !defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(SPACE) && NCORE==1
118 #if defined(BITSTATE) && !defined(NOREDUCE) && !defined(SAFETY)
119 unsigned int proviso
;
122 #if defined(CHECK) || (defined(COLLAPSE) && !defined(FULLSTACK))
125 #if !defined(SAFETY) || defined(REACH)
129 /* could cost 1 extra word: 4 bytes if 32-bit and 8 bytes if 64-bit */
131 uchar cpu_id
; /* id of cpu that created the state */
141 #if defined(AUTO_RESIZE) && !defined(BITSTATE)
147 typedef struct Trail
{
148 int st
; /* current state */
149 uchar pr
; /* process id */
150 uchar tau
; /* 8 bit-flags */
151 uchar o_pm
; /* 8 more bit-flags */
153 Meaning of bit
-flags
:
154 tau
&1 -> timeout enabled
155 tau
&2 -> request to enable timeout
1 level
up (in claim
)
156 tau
&4 -> current transition is a claim move
157 tau
&8 -> current transition is an atomic move
158 tau
&16 -> last move was truncated on stack
159 tau
&32 -> current transition is a preselected move
160 tau
&64 -> at least one next state is
not on the stack
161 tau
&128 -> current transition is a stutter move
162 o_pm
&1 -> the current pid moved
-- implements
else
163 o_pm
&2 -> this is an acceptance state
164 o_pm
&4 -> this is a progress state
165 o_pm
&8 -> fairness alg rule
1 undo mark
166 o_pm
&16 -> fairness alg rule
3 undo mark
167 o_pm
&32 -> fairness alg rule
2 undo mark
168 o_pm
&64 -> the current proc applied rule2
169 o_pm
&128 -> a fairness
, dummy move
- all procs blocked
172 uchar n_succ
; /* nr of successor states */
174 #if defined(FULLSTACK) && defined(MA) && !defined(BFS)
178 uchar o_n
, o_ot
; /* to save locals */
182 #if nstates_event<256
185 unsigned short o_event
;
195 #if defined(HAS_UNLESS) && !defined(BFS)
196 int e_state
; /* if escape trans - state of origin */
198 #if (defined(FULLSTACK) && !defined(MA)) || defined(BFS) || (NCORE>1)
199 struct H_el
*ostate
; /* pointer to stored state */
201 #if defined(CNTRSTACK) && !defined(BFS)
206 /* based on Qadeer&Rehof, Tacas 2005, LNCS 3440, pp. 93-107 */
208 #error "-DSCHED cannot be combined with -DNCORE (yet)"
224 long omaxdepth
=10000;
233 double quota
; /* time limit */
243 double memcnt
= (double) 0;
244 double memlim
= (double) (1<<30); /* 1 GB */
246 double mem_reserved
= (double) 0;
251 static long left
= 0L;
252 static double fragment
= (double) 0;
253 static unsigned long grow
;
255 unsigned int HASH_CONST
[] = {
256 /* assuming 4 bytes per int */
257 0x88888EEF, 0x00400007,
258 0x04c11db7, 0x100d4e63,
259 0x0fc22f87, 0x3ff0c3ff,
260 0x38e84cd7, 0x02b148e9,
261 0x98b2e49d, 0xb616d379,
262 0xa5247fd9, 0xbae92a15,
263 0xb91c8bc5, 0x8e5880f3,
264 0xacd7c069, 0xb4c44bb3,
265 0x2ead1fb7, 0x8e428171,
266 0xdbebd459, 0x828ae611,
267 0x6cb25933, 0x86cdd651,
268 0x9e8f5f21, 0xd5f8d8e7,
269 0x9c4e956f, 0xb5cf2c71,
270 0x2e805a6d, 0x33fc3a55,
271 0xaf203ed1, 0xe31f5909,
272 0x5276db35, 0x0c565ef7,
273 0x273d1aa5, 0x8923b1dd,
280 int done
=0, errors
=0, Nrun
=1;
282 char *c_stack_start
= (char *) 0;
283 double nstates
=0, nlinks
=0, truncs
=0, truncs2
=0;
284 double nlost
=0, nShadow
=0, hcmp
=0, ngrabs
=0;
285 #if defined(ZAPH) && defined(BITSTATE)
290 double midrv
=0, failedrv
=0, revrv
=0;
292 unsigned long nr_states
=0; /* nodes in DFA */
293 long Fa
=0, Fh
=0, Zh
=0, Zn
=0;
294 long PUT
=0, PROBE
=0, ZAPS
=0;
295 long Ccheck
=0, Cholds
=0;
296 int a_cycles
=0, upto
=1, strict
=0, verbose
= 0, signoff
= 0;
298 int gui
= 0, coltrace
= 0, readtrail
= 0;
299 int whichtrail
= 0, onlyproc
= -1, silent
= 0;
301 int state_tables
=0, fairness
=0, no_rck
=0, Nr_Trails
=0;
306 unsigned long mask
, nmask
;
308 int ssize
=23; /* 1 Mb */
310 int ssize
=19; /* 512K slots */
312 int hmax
=0, svmax
=0, smax
=0;
314 uchar
*noptr
; /* used by macro Pptr(x) */
316 void logval(char *, int);
317 void dumpranges(void);
321 extern void dfa_init(unsigned short);
322 extern int dfa_member(unsigned long);
323 extern int dfa_store(uchar
*);
324 unsigned int maxgs
= 0;
328 State comp_now
__attribute__ ((aligned (8)));
329 /* gcc 64-bit aligned for Itanium2 systems */
330 /* MAJOR runtime penalty if not used on those systems */
332 State comp_now
; /* compressed state vector */
336 uchar
*Mask
= (uchar
*) &comp_msk
;
339 static char *scratch
= (char *) &comp_tmp
;
341 Stack
*stack
; /* for queues, processes */
342 Svtack
*svtack
; /* for old state vectors */
344 static unsigned int hfns
= 3; /* new default */
346 static unsigned long j1
;
347 static unsigned long K1
, K2
;
348 static unsigned long j2
, j3
, j4
;
352 static long A_depth
= 0;
355 long nr_handoffs
= 0;
357 static uchar warned
= 0, iterative
= 0, exclusive
= 0, like_java
= 0, every_error
= 0;
358 static uchar noasserts
= 0, noends
= 0, bounded
= 0;
359 #if SYNC>0 && ASYNC==0
360 void set_recvs(void);
364 #define IfNotBlocked if (boq != -1) continue;
365 #define UnBlock boq = -1
367 #define IfNotBlocked /* cannot block */
368 #define UnBlock /* don't bother */
372 int (*bstore
)(char *, int);
373 int bstore_reg(char *, int);
374 int bstore_mod(char *, int);
376 void active_procs(void);
378 void do_the_search(void);
379 void find_shorter(int);
380 void iniglobals(void);
384 void ungrab_ints(int *, int);
386 #define Index(x, y) Boundcheck(x, y, II, tt, t)
388 #define Index(x, y) x
/* Air[n] = number of trailing bytes of proctype n's local state that must be
 * masked out as alignment padding; indexed by proctype id 0..5.  Used in the
 * Addproc pad loop: for (k = 1; k <= Air[n]; k++) Mask[vsize - k] = 1;
 * The Air0..Air5 constants are emitted elsewhere by the Spin code generator. */
390 short Air
[] = { (short) Air0
, (short) Air1
, (short) Air2
, (short) Air3
, (short) Air4
, (short) Air5
};
393 { int j
, h
= now
._nr_pr
;
397 uchar
*o_this
= this;
400 if (TstOnly
) return (h
< MAXPROC
);
403 /* redefine Index only within this procedure */
405 #define Index(x, y) Boundcheck(x, y, 0, 0, 0)
408 Uerror("too many processes");
410 case 0: j
= sizeof(P0
); break;
411 case 1: j
= sizeof(P1
); break;
412 case 2: j
= sizeof(P2
); break;
413 case 3: j
= sizeof(P3
); break;
414 case 4: j
= sizeof(P4
); break;
415 case 5: j
= sizeof(P5
); break;
416 default: Uerror("bad proc - addproc");
419 proc_skip
[h
] = WS
-(vsize
%WS
);
423 for (k
= vsize
+ (int) proc_skip
[h
]; k
> vsize
; k
--)
424 Mask
[k
-1] = 1; /* align */
426 vsize
+= (int) proc_skip
[h
];
427 proc_offset
[h
] = vsize
;
431 write(svfd
, (uchar
*) &dummy
, sizeof(int)); /* mark */
432 write(svfd
, (uchar
*) &h
, sizeof(int));
433 write(svfd
, (uchar
*) &n
, sizeof(int));
435 write(svfd
, (uchar
*) &proc_offset
[h
], sizeof(int));
437 write(svfd
, (uchar
*) &proc_offset
[h
], sizeof(short));
439 write(svfd
, (uchar
*) &now
, vprefix
-4*sizeof(int)); /* padd */
443 if (fairness
&& ((int) now
._nr_pr
+ 1 >= (8*NFAIR
)/2))
444 { printf("pan: error: too many processes -- current");
445 printf(" max is %d procs (-DNFAIR=%d)\n",
446 (8*NFAIR
)/2 - 2, NFAIR
);
447 printf("\trecompile with -DNFAIR=%d\n",
456 for (k
= 1; k
<= Air
[n
]; k
++)
457 Mask
[vsize
- k
] = 1; /* pad */
458 Mask
[vsize
-j
] = 1; /* _pid */
460 hmax
= max(hmax
, vsize
);
461 if (vsize
>= VECTORSZ
)
462 { printf("pan: error, VECTORSZ too small, recompile pan.c");
463 printf(" with -DVECTORSZ=N with N>%d\n", (int) vsize
);
466 memset((char *)pptr(h
), 0, j
);
468 if (BASE
> 0 && h
> 0)
469 ((P0
*)this)->_pid
= h
-BASE
;
471 ((P0
*)this)->_pid
= h
;
474 ((P5
*)pptr(h
))->_t
= 5;
475 ((P5
*)pptr(h
))->_p
= 0;
480 ((P4
*)pptr(h
))->_t
= 4;
481 ((P4
*)pptr(h
))->_p
= 41; reached4
[41]=1;
484 ((P4
*)pptr(h
))->i
= 0;
485 ((P4
*)pptr(h
))->j
= 0;
486 ((P4
*)pptr(h
))->sum
= 0;
487 ((P4
*)pptr(h
))->commit_sum
= 0;
489 logval(":init::i", ((P4
*)pptr(h
))->i
);
490 logval(":init::j", ((P4
*)pptr(h
))->j
);
491 logval(":init::sum", ((P4
*)pptr(h
))->sum
);
492 logval(":init::commit_sum", ((P4
*)pptr(h
))->commit_sum
);
498 case 3: /* cleaner */
499 ((P3
*)pptr(h
))->_t
= 3;
500 ((P3
*)pptr(h
))->_p
= 8; reached3
[8]=1;
510 ((P2
*)pptr(h
))->_t
= 2;
511 ((P2
*)pptr(h
))->_p
= 26; reached2
[26]=1;
514 ((P2
*)pptr(h
))->i
= 0;
515 ((P2
*)pptr(h
))->j
= 0;
517 logval("reader:i", ((P2
*)pptr(h
))->i
);
518 logval("reader:j", ((P2
*)pptr(h
))->j
);
525 ((P1
*)pptr(h
))->_t
= 1;
526 ((P1
*)pptr(h
))->_p
= 3; reached1
[3]=1;
529 ((P1
*)pptr(h
))->size
= 1;
530 ((P1
*)pptr(h
))->prev_off
= 0;
531 ((P1
*)pptr(h
))->new_off
= 0;
532 ((P1
*)pptr(h
))->tmp_commit
= 0;
533 ((P1
*)pptr(h
))->i
= 0;
534 ((P1
*)pptr(h
))->j
= 0;
536 logval("tracer:size", ((P1
*)pptr(h
))->size
);
537 logval("tracer:prev_off", ((P1
*)pptr(h
))->prev_off
);
538 logval("tracer:new_off", ((P1
*)pptr(h
))->new_off
);
539 logval("tracer:tmp_commit", ((P1
*)pptr(h
))->tmp_commit
);
540 logval("tracer:i", ((P1
*)pptr(h
))->i
);
541 logval("tracer:j", ((P1
*)pptr(h
))->j
);
547 case 0: /* switcher */
548 ((P0
*)pptr(h
))->_t
= 0;
549 ((P0
*)pptr(h
))->_p
= 11; reached0
[11]=1;
552 ((P0
*)pptr(h
))->prev_off
= 0;
553 ((P0
*)pptr(h
))->new_off
= 0;
554 ((P0
*)pptr(h
))->tmp_commit
= 0;
555 ((P0
*)pptr(h
))->size
= 0;
557 logval("switcher:prev_off", ((P0
*)pptr(h
))->prev_off
);
558 logval("switcher:new_off", ((P0
*)pptr(h
))->new_off
);
559 logval("switcher:tmp_commit", ((P0
*)pptr(h
))->tmp_commit
);
560 logval("switcher:size", ((P0
*)pptr(h
))->size
);
571 #define Index(x, y) Boundcheck(x, y, II, tt, t)
575 #if defined(BITSTATE) && defined(COLLAPSE)
576 /* just to allow compilation, to generate the error */
/* Stub of the process-collapse function: present only so that the invalid
 * BITSTATE+COLLAPSE combination still compiles and can then report the
 * configuration error at run time.  Arguments are deliberately ignored. */
long
col_p(int i, char *z)
{
	(void) i;	/* unused in stub */
	(void) z;	/* unused in stub */
	return 0L;
}
/* Stub of the queue-collapse function: companion to the col_p stub above in
 * the generated source; exists only to let BITSTATE+COLLAPSE compile so the
 * configuration error can be reported.  Arguments are deliberately ignored. */
long
col_q(int i, char *z)
{
	(void) i;	/* unused in stub */
	(void) z;	/* unused in stub */
	return 0L;
}
583 col_p(int i
, char *z
)
584 { int j
, k
; unsigned long ordinal(char *, long, short);
586 P0
*ptr
= (P0
*) pptr(i
);
588 case 0: j
= sizeof(P0
); break;
589 case 1: j
= sizeof(P1
); break;
590 case 2: j
= sizeof(P2
); break;
591 case 3: j
= sizeof(P3
); break;
592 case 4: j
= sizeof(P4
); break;
593 case 5: j
= sizeof(P5
); break;
594 default: Uerror("bad proctype - collapse");
596 if (z
) x
= z
; else x
= scratch
;
597 y
= (char *) ptr
; k
= proc_offset
[i
];
598 for ( ; j
> 0; j
--, y
++)
599 if (!Mask
[k
++]) *x
++ = *y
;
600 for (j
= 0; j
< WS
-1; j
++)
603 if (z
) return (long) (x
- z
);
604 return ordinal(scratch
, x
-scratch
, (short) (2+ptr
->_t
));
611 memset((char *)&now
, 0, sizeof(State
));
612 vsize
= (unsigned long) (sizeof(State
) - VECTORSZ
);
616 /* optional provisioning statements, e.g. to */
617 /* set hidden variables, used as constants */
622 Maxbody
= max(Maxbody
, ((int) sizeof(P0
)));
623 Maxbody
= max(Maxbody
, ((int) sizeof(P1
)));
624 Maxbody
= max(Maxbody
, ((int) sizeof(P2
)));
625 Maxbody
= max(Maxbody
, ((int) sizeof(P3
)));
626 Maxbody
= max(Maxbody
, ((int) sizeof(P4
)));
627 Maxbody
= max(Maxbody
, ((int) sizeof(P5
)));
628 reached
[0] = reached0
;
629 reached
[1] = reached1
;
630 reached
[2] = reached2
;
631 reached
[3] = reached3
;
632 reached
[4] = reached4
;
633 reached
[5] = reached5
;
634 accpstate
[0] = (uchar
*) emalloc(nstates0
);
635 accpstate
[1] = (uchar
*) emalloc(nstates1
);
636 accpstate
[2] = (uchar
*) emalloc(nstates2
);
637 accpstate
[3] = (uchar
*) emalloc(nstates3
);
638 accpstate
[4] = (uchar
*) emalloc(nstates4
);
639 accpstate
[5] = (uchar
*) emalloc(nstates5
);
640 progstate
[0] = (uchar
*) emalloc(nstates0
);
641 progstate
[1] = (uchar
*) emalloc(nstates1
);
642 progstate
[2] = (uchar
*) emalloc(nstates2
);
643 progstate
[3] = (uchar
*) emalloc(nstates3
);
644 progstate
[4] = (uchar
*) emalloc(nstates4
);
645 progstate
[5] = (uchar
*) emalloc(nstates5
);
646 loopstate0
= loopstate
[0] = (uchar
*) emalloc(nstates0
);
647 loopstate1
= loopstate
[1] = (uchar
*) emalloc(nstates1
);
648 loopstate2
= loopstate
[2] = (uchar
*) emalloc(nstates2
);
649 loopstate3
= loopstate
[3] = (uchar
*) emalloc(nstates3
);
650 loopstate4
= loopstate
[4] = (uchar
*) emalloc(nstates4
);
651 loopstate5
= loopstate
[5] = (uchar
*) emalloc(nstates5
);
652 stopstate
[0] = (uchar
*) emalloc(nstates0
);
653 stopstate
[1] = (uchar
*) emalloc(nstates1
);
654 stopstate
[2] = (uchar
*) emalloc(nstates2
);
655 stopstate
[3] = (uchar
*) emalloc(nstates3
);
656 stopstate
[4] = (uchar
*) emalloc(nstates4
);
657 stopstate
[5] = (uchar
*) emalloc(nstates5
);
658 visstate
[0] = (uchar
*) emalloc(nstates0
);
659 visstate
[1] = (uchar
*) emalloc(nstates1
);
660 visstate
[2] = (uchar
*) emalloc(nstates2
);
661 visstate
[3] = (uchar
*) emalloc(nstates3
);
662 visstate
[4] = (uchar
*) emalloc(nstates4
);
663 visstate
[5] = (uchar
*) emalloc(nstates5
);
664 mapstate
[0] = (short *) emalloc(nstates0
* sizeof(short));
665 mapstate
[1] = (short *) emalloc(nstates1
* sizeof(short));
666 mapstate
[2] = (short *) emalloc(nstates2
* sizeof(short));
667 mapstate
[3] = (short *) emalloc(nstates3
* sizeof(short));
668 mapstate
[4] = (short *) emalloc(nstates4
* sizeof(short));
669 mapstate
[5] = (short *) emalloc(nstates5
* sizeof(short));
676 NrStates
[0] = nstates0
;
677 NrStates
[1] = nstates1
;
678 NrStates
[2] = nstates2
;
679 NrStates
[3] = nstates3
;
680 NrStates
[4] = nstates4
;
681 NrStates
[5] = nstates5
;
688 stopstate
[0][endstate0
] = 1;
689 stopstate
[1][endstate1
] = 1;
690 stopstate
[2][endstate2
] = 1;
691 stopstate
[3][endstate3
] = 1;
692 stopstate
[4][endstate4
] = 1;
693 stopstate
[5][endstate5
] = 1;
694 stopstate
[1][48] = 1;
695 retrans(0, nstates0
, start0
, src_ln0
, reached0
, loopstate0
);
696 retrans(1, nstates1
, start1
, src_ln1
, reached1
, loopstate1
);
697 retrans(2, nstates2
, start2
, src_ln2
, reached2
, loopstate2
);
698 retrans(3, nstates3
, start3
, src_ln3
, reached3
, loopstate3
);
699 retrans(4, nstates4
, start4
, src_ln4
, reached4
, loopstate4
);
701 { printf("\nTransition Type: ");
702 printf("A=atomic; D=d_step; L=local; G=global\n");
703 printf("Source-State Labels: ");
704 printf("p=progress; e=end; a=accept;\n");
706 printf("Note: statement merging was used. Only the first\n");
707 printf(" stmnt executed in each merge sequence is shown\n");
708 printf(" (use spin -a -o3 to disable statement merging)\n");
713 #if defined(VERI) && !defined(NOREDUCE) && !defined(NP)
722 { printf("warning: for p.o. reduction to be valid ");
723 printf("the never claim must be stutter-invariant\n");
724 printf("(never claims generated from LTL ");
725 printf("formulae are stutter-invariant)\n");
728 UnBlock
; /* disable rendez-vous */
731 { udmem
*= 1024L*1024L;
734 { void init_SS(unsigned long);
735 init_SS((unsigned long) udmem
);
738 SS
= (uchar
*) emalloc(udmem
);
742 { void init_SS(unsigned long);
743 init_SS(ONE_L
<<(ssize
-3));
746 SS
= (uchar
*) emalloc(ONE_L
<<(ssize
-3));
751 #if defined(FULLSTACK) && defined(BITSTATE)
754 #if defined(CNTRSTACK) && !defined(BFS)
755 LL
= (uchar
*) emalloc(ONE_L
<<(ssize
-3));
757 stack
= ( Stack
*) emalloc(sizeof(Stack
));
758 svtack
= (Svtack
*) emalloc(sizeof(Svtack
));
759 /* a place to point for Pptr of non-running procs: */
760 noptr
= (uchar
*) emalloc(Maxbody
* sizeof(char));
763 write(svfd
, (uchar
*) &vprefix
, sizeof(int));
766 Addproc(VERI
); /* never - pid = 0 */
768 active_procs(); /* started after never */
770 now
._event
= start_event
;
771 reached
[EVENT_TRACE
][start_event
] = 1;
781 if (--Nrun
> 0 && HASH_CONST
[++HASH_NR
])
782 { printf("Run %d:\n", HASH_NR
);
785 memset(SS
, 0, ONE_L
<<(ssize
-3));
787 memset(LL
, 0, ONE_L
<<(ssize
-3));
790 memset((uchar
*) S_Tab
, 0,
791 maxdepth
*sizeof(struct H_el
*));
793 nstates
=nlinks
=truncs
=truncs2
=ngrabs
= 0;
794 nlost
=nShadow
=hcmp
= 0;
796 PUT
=PROBE
=ZAPS
=Ccheck
=Cholds
= 0;
802 int provided(int, uchar
, int, Trans
*);
805 #define GLOBAL_LOCK (0)
807 #define CS_N (256*NCORE)
810 #define NR_QS (NCORE)
811 #define CS_NR (CS_N+1) /* 2^N + 1, nr critical sections */
812 #define GQ_RD GLOBAL_LOCK
813 #define GQ_WR GLOBAL_LOCK
814 #define CS_ID (1 + (int) (j1 & (CS_N-1))) /* mask: 2^N - 1, zero reserved */
815 #define QLOCK(n) (1+n)
817 #define NR_QS (NCORE+1)
818 #define CS_NR (CS_N+3)
821 #define CS_ID (3 + (int) (j1 & (CS_N-1)))
822 #define QLOCK(n) (3+n)
825 void e_critical(int);
826 void x_critical(int);
829 #define enter_critical(w) e_critical(w)
830 #define leave_critical(w) x_critical(w)
833 #define enter_critical(w) { if (w < 1+NCORE) e_critical(w); }
834 #define leave_critical(w) { if (w < 1+NCORE) x_critical(w); }
836 #define enter_critical(w) { if (w < 3+NCORE) e_critical(w); }
837 #define leave_critical(w) { if (w < 3+NCORE) x_critical(w); }
842 cpu_printf(const char *fmt
, ...)
844 enter_critical(GLOBAL_LOCK
); /* printing */
845 printf("cpu%d: ", core_id
);
851 leave_critical(GLOBAL_LOCK
);
856 cpu_printf(const char *fmt
, ...)
865 Printf(const char *fmt
, ...)
866 { /* Make sure the args to Printf
867 * are always evaluated (e.g., they
868 * could contain a run stmnt)
869 * but do not generate the output
870 * during verification runs
871 * unless explicitly wanted
872 * If this fails on your system
873 * compile SPIN itself -DPRINTF
874 * and this code is not generated
893 extern void printm(int);
895 #define getframe(i) &trail[i];
897 static long HHH
, DDD
, hiwater
;
898 static long CNT1
, CNT2
;
899 static int stackwrite
;
900 static int stackread
;
901 static Trail frameptr
;
908 if (d
>= (CNT1
-CNT2
)*DDD
)
909 return &trail
[d
- (CNT1
-CNT2
)*DDD
];
912 && (stackread
= open(stackfile
, 0)) < 0)
913 { printf("getframe: cannot open %s\n", stackfile
);
916 if (lseek(stackread
, d
* (off_t
) sizeof(Trail
), SEEK_SET
) == -1
917 || read(stackread
, &frameptr
, sizeof(Trail
)) != sizeof(Trail
))
918 { printf("getframe: frame read error\n");
924 #if !defined(SAFETY) && !defined(BITSTATE)
925 #if !defined(FULLSTACK) || defined(MA)
926 #define depth_of(x) A_depth /* an estimate */
929 depth_of(struct H_el
*s
)
931 for (d
= 0; d
<= A_depth
; d
++)
936 printf("pan: cannot happen, depth_of\n");
942 extern void cleanup_shm(int);
943 volatile unsigned int *search_terminated
; /* to signal early termination */
947 { void stop_timer(void);
949 { printf("--end of output--\n");
952 if (search_terminated
!= NULL
)
953 { *search_terminated
|= 1; /* pan_exit */
956 { void dsk_stats(void);
960 if (!state_tables
&& !readtrail
)
973 transmognify(char *s
)
975 static char buf
[2][2048];
977 if (!s
|| strlen(s
) > 2047) return s
;
978 memset(buf
[0], 0, 2048);
979 memset(buf
[1], 0, 2048);
980 strcpy(buf
[toggle
], s
);
981 while ((v
= strstr(buf
[toggle
], "{c_code")))
983 strcpy(buf
[1-toggle
], buf
[toggle
]);
984 for (w
= v
; *w
!= '}' && *w
!= '\0'; w
++) /* skip */;
985 if (*w
!= '}') return s
;
987 for (i
= 0; code_lookup
[i
].c
; i
++)
988 if (strcmp(v
, code_lookup
[i
].c
) == 0
989 && strlen(v
) == strlen(code_lookup
[i
].c
))
990 { if (strlen(buf
[1-toggle
])
991 + strlen(code_lookup
[i
].t
)
994 strcat(buf
[1-toggle
], code_lookup
[i
].t
);
997 strcat(buf
[1-toggle
], w
);
1000 buf
[toggle
][2047] = '\0';
/* Identity fallback for transition-text rewriting: compiled in when no
 * c_code lookup table is generated, so the transition source string is
 * returned unchanged (including a NULL argument, which passes through). */
char *
transmognify(char *s)
{
	char *unchanged = s;

	return unchanged;
}
1008 add_src_txt(int ot
, int tt
)
1012 for (t
= trans
[ot
][tt
]; t
; t
= t
->nxt
)
1014 q
= transmognify(t
->tp
);
1015 for ( ; q
&& *q
; q
++)
1025 { static int wrap_in_progress
= 0;
1029 if (wrap_in_progress
++) return;
1031 printf("spin: trail ends after %ld steps\n", depth
);
1033 { if (onlyproc
>= now
._nr_pr
) { pan_exit(0); }
1036 printf("%3ld: proc %d (%s) ",
1037 depth
, II
, procname
[z
->_t
]);
1038 for (i
= 0; src_all
[i
].src
; i
++)
1039 if (src_all
[i
].tp
== (int) z
->_t
)
1040 { printf(" line %3d",
1041 src_all
[i
].src
[z
->_p
]);
1044 printf(" (state %2d)", z
->_p
);
1045 if (!stopstate
[z
->_t
][z
->_p
])
1046 printf(" (invalid end state)");
1048 add_src_txt(z
->_t
, z
->_p
);
1051 printf("#processes %d:\n", now
._nr_pr
);
1052 if (depth
< 0) depth
= 0;
1053 for (II
= 0; II
< now
._nr_pr
; II
++)
1054 { z
= (P0
*)pptr(II
);
1055 printf("%3ld: proc %d (%s) ",
1056 depth
, II
, procname
[z
->_t
]);
1057 for (i
= 0; src_all
[i
].src
; i
++)
1058 if (src_all
[i
].tp
== (int) z
->_t
)
1059 { printf(" line %3d",
1060 src_all
[i
].src
[z
->_p
]);
1063 printf(" (state %2d)", z
->_p
);
1064 if (!stopstate
[z
->_t
][z
->_p
])
1065 printf(" (invalid end state)");
1067 add_src_txt(z
->_t
, z
->_p
);
1070 for (II
= 0; II
< now
._nr_pr
; II
++)
1071 { z
= (P0
*)pptr(II
);
1072 c_locals(II
, z
->_t
);
1086 int candidate_files
;
1088 if (trailfilename
!= NULL
)
1089 { fd
= fopen(trailfilename
, "r");
1091 { printf("pan: cannot find %s\n", trailfilename
);
1098 candidate_files
= 0;
1100 strcpy(MyFile
, TrailFile
);
1101 do { /* see if there's more than one possible trailfile */
1103 { sprintf(fnm
, "%s%d.%s",
1104 MyFile
, whichtrail
, tprefix
);
1105 fd
= fopen(fnm
, "r");
1107 { candidate_files
++;
1109 printf("trail%d: %s\n",
1110 candidate_files
, fnm
);
1113 if ((q
= strchr(MyFile
, '.')) != NULL
)
1115 sprintf(fnm
, "%s%d.%s",
1116 MyFile
, whichtrail
, tprefix
);
1118 fd
= fopen(fnm
, "r");
1120 { candidate_files
++;
1122 printf("trail%d: %s\n",
1123 candidate_files
, fnm
);
1127 { sprintf(fnm
, "%s.%s", MyFile
, tprefix
);
1128 fd
= fopen(fnm
, "r");
1130 { candidate_files
++;
1132 printf("trail%d: %s\n",
1133 candidate_files
, fnm
);
1136 if ((q
= strchr(MyFile
, '.')) != NULL
)
1138 sprintf(fnm
, "%s.%s", MyFile
, tprefix
);
1140 fd
= fopen(fnm
, "r");
1142 { candidate_files
++;
1144 printf("trail%d: %s\n",
1145 candidate_files
, fnm
);
1149 sprintf(tprefix
, "cpu%d_trail", try_core
++);
1150 } while (try_core
<= NCORE
);
1152 if (candidate_files
!= 1)
1153 { if (verbose
!= 100)
1154 { printf("error: there are %d trail files:\n",
1159 { printf("pan: rm or mv all except one\n");
1163 strcpy(MyFile
, TrailFile
); /* restore */
1167 { sprintf(fnm
, "%s%d.%s", MyFile
, whichtrail
, tprefix
);
1168 fd
= fopen(fnm
, "r");
1169 if (fd
== NULL
&& (q
= strchr(MyFile
, '.')))
1171 sprintf(fnm
, "%s%d.%s",
1172 MyFile
, whichtrail
, tprefix
);
1174 fd
= fopen(fnm
, "r");
1177 { sprintf(fnm
, "%s.%s", MyFile
, tprefix
);
1178 fd
= fopen(fnm
, "r");
1179 if (fd
== NULL
&& (q
= strchr(MyFile
, '.')))
1181 sprintf(fnm
, "%s.%s", MyFile
, tprefix
);
1183 fd
= fopen(fnm
, "r");
1186 { if (try_core
< NCORE
)
1187 { tprefix
= MySuffix
;
1188 sprintf(tprefix
, "cpu%d_trail", try_core
++);
1191 printf("pan: cannot find trailfile %s\n", fnm
);
1195 #if NCORE>1 && defined(SEP_STATE)
1196 { void set_root(void); /* for partial traces from local root */
1203 uchar
do_transit(Trans
*, short);
1209 int i
, t_id
, lastnever
=-1; short II
;
1213 fd
= findtrail(); /* exits if unsuccessful */
1214 while (fscanf(fd
, "%ld:%d:%d\n", &depth
, &i
, &t_id
) == 3)
1216 printf("<<<<<START OF CYCLE>>>>>\n");
1220 { printf("pan: Error, proc %d invalid pid ", i
);
1221 printf("transition %d\n", t_id
);
1226 for (t
= trans
[z
->_t
][z
->_p
]; t
; t
= t
->nxt
)
1227 if (t
->t_id
== (T_ID
) t_id
)
1230 { for (i
= 0; i
< NrStates
[z
->_t
]; i
++)
1231 { t
= trans
[z
->_t
][i
];
1232 if (t
&& t
->t_id
== (T_ID
) t_id
)
1233 { printf("\tRecovered at state %d\n", i
);
1237 printf("pan: Error, proc %d type %d state %d: ",
1239 printf("transition %d not found\n", t_id
);
1240 printf("pan: list of possible transitions in this process:\n");
1241 if (z
->_t
>= 0 && z
->_t
<= _NP_
)
1242 for (t
= trans
[z
->_t
][z
->_p
]; t
; t
= t
->nxt
)
1243 printf(" t_id %d -- case %d, [%s]\n",
1244 t
->t_id
, t
->forw
, t
->tp
);
1245 break; /* pan_exit(1); */
1248 q
= transmognify(t
->tp
);
1249 if (gui
) simvals
[0] = '\0';
1252 if (!do_transit(t
, II
))
1253 { if (onlyproc
>= 0 && II
!= onlyproc
)
1255 printf("pan: error, next transition UNEXECUTABLE on replay\n");
1256 printf(" most likely causes: missing c_track statements\n");
1257 printf(" or illegal side-effects in c_expr statements\n");
1259 if (onlyproc
>= 0 && II
!= onlyproc
)
1262 { printf("%3ld: proc %2d (%s) ", depth
, II
, procname
[z
->_t
]);
1263 for (i
= 0; src_all
[i
].src
; i
++)
1264 if (src_all
[i
].tp
== (int) z
->_t
)
1265 { printf(" line %3d \"%s\" ",
1266 src_all
[i
].src
[z
->_p
], PanSource
);
1269 printf("(state %d) trans {%d,%d} [%s]\n",
1270 z
->_p
, t_id
, t
->forw
, q
?q
:"");
1272 for (i
= 0; i
< now
._nr_pr
; i
++)
1273 { c_locals(i
, ((P0
*)pptr(i
))->_t
);
1276 if (strcmp(procname
[z
->_t
], ":never:") == 0)
1277 { if (lastnever
!= (int) z
->_p
)
1278 { for (i
= 0; src_all
[i
].src
; i
++)
1279 if (src_all
[i
].tp
== (int) z
->_t
)
1280 { printf("MSC: ~G %d\n",
1281 src_all
[i
].src
[z
->_p
]);
1284 if (!src_all
[i
].src
)
1285 printf("MSC: ~R %d\n", z
->_p
);
1290 if (strcmp(procname
[z
->_t
], ":np_:") != 0)
1292 sameas
: if (no_rck
) goto moveon
;
1294 { printf("%ld: ", depth
);
1295 for (i
= 0; i
< II
; i
++)
1297 printf("%s(%d):", procname
[z
->_t
], II
);
1298 printf("[%s]\n", q
?q
:"");
1300 { if (strlen(simvals
) > 0) {
1301 printf("%3ld: proc %2d (%s)",
1302 depth
, II
, procname
[z
->_t
]);
1303 for (i
= 0; src_all
[i
].src
; i
++)
1304 if (src_all
[i
].tp
== (int) z
->_t
)
1305 { printf(" line %3d \"%s\" ",
1306 src_all
[i
].src
[z
->_p
], PanSource
);
1309 printf("(state %d) [values: %s]\n", z
->_p
, simvals
);
1311 printf("%3ld: proc %2d (%s)",
1312 depth
, II
, procname
[z
->_t
]);
1313 for (i
= 0; src_all
[i
].src
; i
++)
1314 if (src_all
[i
].tp
== (int) z
->_t
)
1315 { printf(" line %3d \"%s\" ",
1316 src_all
[i
].src
[z
->_p
], PanSource
);
1319 printf("(state %d) [%s]\n", z
->_p
, q
?q
:"");
1322 moveon
: z
->_p
= t
->st
;
1331 for (i
= 0; i
< now
._nr_pr
; i
++)
1332 { z
= (P0
*)pptr(i
);
1333 if (z
->_t
== (unsigned) pt
)
1334 return BASE
+z
->_pid
;
1339 void check_claim(int);
1342 #if !defined(HASH64) && !defined(HASH32)
1345 #if defined(HASH32) && defined(SAFETY) && !defined(SFH) && !defined(SPACE)
1348 #if defined(SFH) && (defined(BITSTATE) || defined(COLLAPSE) || defined(HC) || defined(HASH64))
1351 #if defined(SFH) && !defined(NOCOMP)
1352 #define NOCOMP /* go for speed */
1354 #if NCORE>1 && !defined(GLOB_HEAP)
1355 #define SEP_HEAP /* version 5.1.2 */
1360 bstore_mod(char *v
, int n
) /* hasharray size not a power of two */
1361 { unsigned long x
, y
;
1364 d_hash((uchar
*) v
, n
); /* sets j3, j4, K1, K2 */
1367 { if (!(SS
[x
%udmem
]&(1<<y
))) break;
1370 printf("Old bitstate\n");
1379 if (rand()%100 > RANDSTOR
) return 0;
1382 { SS
[x
%udmem
] |= (1<<y
);
1383 if (i
== hfns
) break;
1389 printf("New bitstate\n");
1397 bstore_reg(char *v
, int n
) /* extended hashing, Peter Dillinger, 2004 */
1398 { unsigned long x
, y
;
1401 d_hash((uchar
*) v
, n
); /* sets j1-j4 */
1404 { if (!(SS
[x
]&(1<<y
))) break;
1407 printf("Old bitstate\n");
1411 x
= (x
+ j1
+ i
) & nmask
;
1416 if (rand()%100 > RANDSTOR
) return 0;
1420 if (i
== hfns
) break;
1421 x
= (x
+ j1
+ i
) & nmask
;
1426 printf("New bitstate\n");
1434 unsigned long TMODE
= 0666; /* file permission bits for trail files */
1437 char snap
[64], fnm
[512];
1444 int w_flags
= O_CREAT
|O_WRONLY
|O_TRUNC
;
1446 if (exclusive
== 1 && iterative
== 0)
1447 { w_flags
|= O_EXCL
;
1450 q
= strrchr(TrailFile
, '/');
1451 if (q
== NULL
) q
= TrailFile
; else q
++;
1452 strcpy(MyFile
, q
); /* TrailFile is not a writable string */
1454 if (iterative
== 0 && Nr_Trails
++ > 0)
1455 { sprintf(fnm
, "%s%d.%s",
1456 MyFile
, Nr_Trails
-1, tprefix
);
1460 sprintf(fnm
, "%s%d.%s", MyFile
, getpid(), tprefix
);
1462 sprintf(fnm
, "%s.%s", MyFile
, tprefix
);
1465 if ((fd
= open(fnm
, w_flags
, TMODE
)) < 0)
1466 { if ((q
= strchr(MyFile
, '.')))
1468 if (iterative
== 0 && Nr_Trails
-1 > 0)
1469 sprintf(fnm
, "%s%d.%s",
1470 MyFile
, Nr_Trails
-1, tprefix
);
1472 sprintf(fnm
, "%s.%s", MyFile
, tprefix
);
1474 fd
= open(fnm
, w_flags
, TMODE
);
1477 { printf("pan: cannot create %s\n", fnm
);
1481 #if NCORE>1 && (defined(SEP_STATE) || !defined(FULL_TRAIL))
1482 void write_root(void);
1485 printf("pan: wrote %s\n", fnm
);
1492 #define FREQ (1000000)
1500 typedef struct SV_Hold
{
1503 struct SV_Hold
*nxt
;
1506 typedef struct EV_Hold
{
1514 struct EV_Hold
*nxt
;
1517 typedef struct BFS_Trail
{
1522 struct H_el
*lstate
;
1525 struct BFS_Trail
*nxt
;
1528 BFS_Trail
*bfs_trail
, *bfs_bot
, *bfs_free
;
1530 SV_Hold
*svhold
, *svfree
;
1534 #define BFS_LIMIT 100000
1536 #ifndef BFS_DSK_LIMIT
1537 #define BFS_DSK_LIMIT 1000000
1539 #if defined(WIN32) || defined(WIN64)
1540 #define RFLAGS (O_RDONLY|O_BINARY)
1541 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)
1543 #define RFLAGS (O_RDONLY)
1544 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC)
1546 long bfs_size_limit
;
1547 int bfs_dsk_write
= -1;
1548 int bfs_dsk_read
= -1;
1549 long bfs_dsk_writes
, bfs_dsk_reads
;
1550 int bfs_dsk_seqno_w
, bfs_dsk_seqno_r
;
1553 uchar
do_reverse(Trans
*, short, uchar
);
1554 void snapshot(void);
1558 { SV_Hold
*h
= (SV_Hold
*) 0, *oh
;
1561 for (h
= svfree
; h
; oh
= h
, h
= h
->nxt
)
1567 h
->nxt
= (SV_Hold
*) 0;
1571 { h
= (SV_Hold
*) 0;
1578 { h
= (SV_Hold
*) emalloc(sizeof(SV_Hold
));
1581 if (bfs_size_limit
>= BFS_LIMIT
)
1582 { h
->sv
= (State
*) 0; /* means: read disk */
1583 bfs_dsk_writes
++; /* count */
1584 if (bfs_dsk_write
< 0 /* file descriptor */
1585 || bfs_dsk_writes
%BFS_DSK_LIMIT
== 0)
1587 if (bfs_dsk_write
>= 0)
1588 { (void) close(bfs_dsk_write
);
1590 sprintf(dsk_nm
, "pan_bfs_%d.tmp", bfs_dsk_seqno_w
++);
1591 bfs_dsk_write
= open(dsk_nm
, WFLAGS
, 0644);
1592 if (bfs_dsk_write
< 0)
1593 { Uerror("could not create tmp disk file");
1595 printf("pan: created disk file %s\n", dsk_nm
);
1597 if (write(bfs_dsk_write
, (char *) &now
, n
) != n
)
1598 { Uerror("aborting -- disk write failed (disk full?)");
1600 return h
; /* no memcpy */
1604 h
->sv
= (State
*) emalloc(sizeof(State
) - VECTORSZ
+ n
);
1607 memcpy((char *)h
->sv
, (char *)&now
, n
);
1614 static EV_Hold
*kept
= (EV_Hold
*) 0;
1616 for (h
= kept
; h
; h
= h
->nxt
)
1618 && (memcmp((char *) Mask
, (char *) h
->sv
, n
) == 0)
1619 && (now
._nr_pr
== h
->nrpr
)
1620 && (now
._nr_qs
== h
->nrqs
)
1622 && (memcmp((char *) proc_offset
, (char *) h
->po
, now
._nr_pr
* sizeof(int)) == 0)
1623 && (memcmp((char *) q_offset
, (char *) h
->qo
, now
._nr_qs
* sizeof(int)) == 0)
1625 && (memcmp((char *) proc_offset
, (char *) h
->po
, now
._nr_pr
* sizeof(short)) == 0)
1626 && (memcmp((char *) q_offset
, (char *) h
->qo
, now
._nr_qs
* sizeof(short)) == 0)
1628 && (memcmp((char *) proc_skip
, (char *) h
->ps
, now
._nr_pr
* sizeof(uchar
)) == 0)
1629 && (memcmp((char *) q_skip
, (char *) h
->qs
, now
._nr_qs
* sizeof(uchar
)) == 0))
1632 { h
= (EV_Hold
*) emalloc(sizeof(EV_Hold
));
1634 h
->nrpr
= now
._nr_pr
;
1635 h
->nrqs
= now
._nr_qs
;
1637 h
->sv
= (char *) emalloc(n
* sizeof(char));
1638 memcpy((char *) h
->sv
, (char *) Mask
, n
);
1641 { h
->ps
= (char *) emalloc(now
._nr_pr
* sizeof(int));
1642 memcpy((char *) h
->ps
, (char *) proc_skip
, now
._nr_pr
* sizeof(uchar
));
1644 h
->po
= (char *) emalloc(now
._nr_pr
* sizeof(int));
1645 memcpy((char *) h
->po
, (char *) proc_offset
, now
._nr_pr
* sizeof(int));
1647 h
->po
= (char *) emalloc(now
._nr_pr
* sizeof(short));
1648 memcpy((char *) h
->po
, (char *) proc_offset
, now
._nr_pr
* sizeof(short));
1652 { h
->qs
= (char *) emalloc(now
._nr_qs
* sizeof(int));
1653 memcpy((char *) h
->qs
, (char *) q_skip
, now
._nr_qs
* sizeof(uchar
));
1655 h
->qo
= (char *) emalloc(now
._nr_qs
* sizeof(int));
1656 memcpy((char *) h
->qo
, (char *) q_offset
, now
._nr_qs
* sizeof(int));
1658 h
->qo
= (char *) emalloc(now
._nr_qs
* sizeof(short));
1659 memcpy((char *) h
->qo
, (char *) q_offset
, now
._nr_qs
* sizeof(short));
1674 for (h
= svfree
; h
; oh
= h
, h
= h
->nxt
)
1693 bfs_free
= bfs_free
->nxt
;
1694 t
->nxt
= (BFS_Trail
*) 0;
1696 { t
= (BFS_Trail
*) emalloc(sizeof(BFS_Trail
));
1698 t
->frame
= (Trail
*) emalloc(sizeof(Trail
));
1703 push_bfs(Trail
*f
, int d
)
1706 t
= get_bfs_frame();
1707 memcpy((char *)t
->frame
, (char *)f
, sizeof(Trail
));
1708 t
->frame
->o_tt
= d
; /* depth */
1711 t
->onow
= getsv(vsize
);
1712 t
->omask
= getsv_mask(vsize
);
1713 #if defined(FULLSTACK) && defined(Q_PROVISO)
1717 { bfs_bot
= bfs_trail
= t
;
1723 printf("PUSH %u (%d)\n", t
->frame
, d
);
1737 bfs_bot
= (BFS_Trail
*) 0;
1738 #if defined(Q_PROVISO) && !defined(BITSTATE) && !defined(NOREDUCE)
1739 if (t
->lstate
) t
->lstate
->tagged
= 0;
1745 vsize
= t
->onow
->sz
;
1748 if (t
->onow
->sv
== (State
*) 0)
1750 bfs_dsk_reads
++; /* count */
1751 if (bfs_dsk_read
>= 0 /* file descriptor */
1752 && bfs_dsk_reads
%BFS_DSK_LIMIT
== 0)
1753 { (void) close(bfs_dsk_read
);
1754 sprintf(dsk_nm
, "pan_bfs_%d.tmp", bfs_dsk_seqno_r
-1);
1755 (void) unlink(dsk_nm
);
1758 if (bfs_dsk_read
< 0)
1759 { sprintf(dsk_nm
, "pan_bfs_%d.tmp", bfs_dsk_seqno_r
++);
1760 bfs_dsk_read
= open(dsk_nm
, RFLAGS
);
1761 if (bfs_dsk_read
< 0)
1762 { Uerror("could not open temp disk file");
1764 if (read(bfs_dsk_read
, (char *) &now
, vsize
) != vsize
)
1765 { Uerror("bad bfs disk file read");
1768 if (now
._vsz
!= vsize
)
1769 { Uerror("disk read vsz mismatch");
1774 memcpy((uchar
*) &now
, (uchar
*) t
->onow
->sv
, vsize
);
1775 memcpy((uchar
*) Mask
, (uchar
*) t
->omask
->sv
, vsize
);
1778 { memcpy((char *)proc_offset
, (char *)t
->omask
->po
, now
._nr_pr
* sizeof(int));
1780 { memcpy((char *)proc_offset
, (char *)t
->omask
->po
, now
._nr_pr
* sizeof(short));
1782 memcpy((char *)proc_skip
, (char *)t
->omask
->ps
, now
._nr_pr
* sizeof(uchar
));
1786 { memcpy((uchar
*)q_offset
, (uchar
*)t
->omask
->qo
, now
._nr_qs
* sizeof(int));
1788 { memcpy((uchar
*)q_offset
, (uchar
*)t
->omask
->qo
, now
._nr_qs
* sizeof(short));
1790 memcpy((uchar
*)q_skip
, (uchar
*)t
->omask
->qs
, now
._nr_qs
* sizeof(uchar
));
1793 if (t
->onow
->sv
!= (State
*) 0)
1795 freesv(t
->onow
); /* omask not freed */
1797 printf("POP %u (%d)\n", t
->frame
, t
->frame
->o_tt
);
1803 store_state(Trail
*ntrpt
, int shortcut
, short oboq
)
1806 Trans
*t2
= (Trans
*) 0;
1807 uchar ot
; int tt
, E_state
;
1808 uchar o_opm
= trpt
->o_pm
, *othis
= this;
1813 printf("claim: shortcut\n");
1815 goto store_it
; /* no claim move */
1818 this = (((uchar
*)&now
)+proc_offset
[0]); /* 0 = never claim */
1821 tt
= (int) ((P0
*)this)->_p
;
1822 ot
= (uchar
) ((P0
*)this)->_t
;
1827 for (t2
= trans
[ot
][tt
]; t2
; t2
= t2
?t2
->nxt
:(Trans
*)0)
1831 && E_state
!= t2
->e_trans
)
1834 if (do_transit(t2
, 0))
1837 if (!reached
[ot
][t2
->st
])
1838 printf("depth: %d -- claim move from %d -> %d\n",
1839 trpt
->o_tt
, ((P0
*)this)->_p
, t2
->st
);
1842 E_state
= t2
->e_trans
;
1845 { ((P0
*)this)->_p
= t2
->st
;
1846 reached
[ot
][t2
->st
] = 1;
1848 check_claim(t2
->st
);
1851 if (now
._nr_pr
== 0) /* claim terminated */
1852 uerror("end state in claim reached");
1859 Uerror("atomic in claim not supported in BFS mode");
1865 if (!bstore((char *)&now
, vsize
))
1868 if (!gstore((char *)&now
, vsize
, 0))
1870 if (!hstore((char *)&now
, vsize
))
1873 { static long sdone
= (long) 0; long ndone
;
1878 ndone
= (unsigned long) (nstates
/((double) FREQ
));
1879 if (ndone
!= sdone
&& mreached
%10 != 0)
1882 #if defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(MA)
1883 if (nstates
> ((double)(1<<(ssize
+1))))
1884 { void resize_hashtable(void);
1892 else if (oboq
!= -1)
1894 x
= (Trail
*) trpt
->ostate
; /* pre-rv state */
1895 if (x
) x
->o_pm
|= 4; /* mark success */
1898 push_bfs(ntrpt
, trpt
->o_tt
+1);
1901 #if !defined(NOREDUCE) && defined(FULLSTACK) && defined(Q_PROVISO)
1902 #if !defined(BITSTATE)
1903 if (Lstate
&& Lstate
->tagged
) trpt
->tau
|= 64;
1907 for (tprov
= bfs_trail
; tprov
; tprov
= tprov
->nxt
)
1908 if (tprov
->onow
->sv
!= (State
*) 0
1909 && memcmp((uchar
*)&now
, (uchar
*)tprov
->onow
->sv
, vsize
) == 0)
1911 break; /* state is in queue */
1917 ((P0
*)this)->_p
= tt
; /* reset claim */
1919 do_reverse(t2
, 0, 0);
1932 { Trans
*t
; Trail
*otrpt
, *x
;
1933 uchar _n
, _m
, ot
, nps
= 0;
1935 short II
, From
= (short) (now
._nr_pr
-1), To
= BASE
;
1938 ntrpt
= (Trail
*) emalloc(sizeof(Trail
));
1939 trpt
->ostate
= (struct H_el
*) 0;
1943 store_state(ntrpt
, 0, oboq
); /* initial state */
1945 while ((otrpt
= pop_bfs())) /* also restores now */
1946 { memcpy((char *) trpt
, (char *) otrpt
, sizeof(Trail
));
1947 #if defined(C_States) && (HAS_TRACK==1)
1948 c_revert((uchar
*) &(now
.c_state
[0]));
1953 printf("Revisit of atomic not needed (%d)\n",
1961 if (trpt
->o_pm
== 8)
1966 printf("Break atomic (pm:%d,tau:%d)\n",
1967 trpt
->o_pm
, trpt
->tau
);
1972 else if (trpt
->tau
&32)
1975 printf("Void preselection (pm:%d,tau:%d)\n",
1976 trpt
->o_pm
, trpt
->tau
);
1979 nps
= 1; /* no preselection in repeat */
1983 trpt
->o_pm
&= ~(4|8);
1984 if (trpt
->o_tt
> mreached
)
1985 { mreached
= trpt
->o_tt
;
1986 if (mreached
%10 == 0)
1990 if (depth
>= maxdepth
)
1995 { x
= (Trail
*) trpt
->ostate
;
1996 if (x
) x
->o_pm
|= 4; /* not failing */
2002 printf("error: max search depth too small\n");
2005 uerror("depth limit reached");
2009 if (boq
== -1 && !(trpt
->tau
&8) && nps
== 0)
2010 for (II
= now
._nr_pr
-1; II
>= BASE
; II
-= 1)
2012 Pickup
: this = pptr(II
);
2013 tt
= (int) ((P0
*)this)->_p
;
2014 ot
= (uchar
) ((P0
*)this)->_t
;
2015 if (trans
[ot
][tt
]->atom
& 8)
2016 { t
= trans
[ot
][tt
];
2024 trpt
->tau
|= 32; /* preselect marker */
2026 printf("%3d: proc %d PreSelected (tau=%d)\n",
2027 depth
, II
, trpt
->tau
);
2034 if (trpt
->tau
&8) /* atomic */
2035 { From
= To
= (short ) trpt
->pr
;
2038 { From
= now
._nr_pr
-1;
2043 for (II
= From
; II
>= To
; II
-= 1)
2045 this = (((uchar
*)&now
)+proc_offset
[II
]);
2046 tt
= (int) ((P0
*)this)->_p
;
2047 ot
= (uchar
) ((P0
*)this)->_t
;
2049 /* no rendezvous with same proc */
2050 if (boq
!= -1 && trpt
->pr
== II
) continue;
2052 ntrpt
->pr
= (uchar
) II
;
2054 trpt
->o_pm
&= ~1; /* no move yet */
2056 trpt
->o_event
= now
._event
;
2059 if (!provided(II
, ot
, tt
, t
)) continue;
2064 for (t
= trans
[ot
][tt
]; t
; t
= t
->nxt
)
2068 && E_state
!= t
->e_trans
)
2075 if (!(_m
= do_transit(t
, II
)))
2078 trpt
->o_pm
|= 1; /* we moved */
2079 (trpt
+1)->o_m
= _m
; /* for unsend */
2084 printf("%3d: proc %d exec %d, ",
2085 depth
, II
, t
->forw
);
2086 printf("%d to %d, %s %s %s",
2088 (t
->atom
&2)?"atomic":"",
2089 (boq
!= -1)?"rendez-vous":"");
2092 printf(" (escapes to state %d)", t
->st
);
2094 printf(" %saccepting [tau=%d]\n",
2095 (trpt
->o_pm
&2)?"":"non-", trpt
->tau
);
2098 E_state
= t
->e_trans
;
2100 if (t
->e_trans
> 0 && (boq
!= -1 /* || oboq != -1 */))
2101 { fprintf(efd
, "error: the use of rendezvous stmnt in the escape clause\n");
2102 fprintf(efd
, " of an unless stmnt is not compatible with -DBFS\n");
2107 if (t
->st
> 0) ((P0
*)this)->_p
= t
->st
;
2109 /* ptr to pred: */ ntrpt
->ostate
= (struct H_el
*) otrpt
;
2111 if (boq
== -1 && (t
->atom
&2)) /* atomic */
2112 ntrpt
->tau
= 8; /* record for next move */
2116 store_state(ntrpt
, (boq
!= -1 || (t
->atom
&2)), oboq
);
2118 now
._event
= trpt
->o_event
;
2121 /* undo move and continue */
2122 trpt
++; /* this is where ovals and ipt are set */
2123 do_reverse(t
, II
, _m
); /* restore now. */
2127 enter_critical(GLOBAL_LOCK
); /* in verbose mode only */
2128 printf("cpu%d: ", core_id
);
2130 printf("%3d: proc %d ", depth
, II
);
2131 printf("reverses %d, %d to %d,",
2132 t
->forw
, tt
, t
->st
);
2133 printf(" %s [abit=%d,adepth=%d,",
2134 t
->tp
, now
._a_t
, A_depth
);
2135 printf("tau=%d,%d]\n",
2136 trpt
->tau
, (trpt
-1)->tau
);
2138 leave_critical(GLOBAL_LOCK
);
2141 reached
[ot
][t
->st
] = 1;
2142 reached
[ot
][tt
] = 1;
2144 ((P0
*)this)->_p
= tt
;
2148 /* preselected - no succ definitely outside stack */
2149 if ((trpt
->tau
&32) && !(trpt
->tau
&64))
2150 { From
= now
._nr_pr
-1; To
= BASE
;
2152 cpu_printf("%3d: proc %d UnSelected (_n=%d, tau=%d)\n",
2153 depth
, II
+1, (int) _n
, trpt
->tau
);
2155 _n
= 0; trpt
->tau
&= ~32;
2160 trpt
->tau
&= ~(32|64);
2165 printf("%3d: no move [II=%d, tau=%d, boq=%d, _nr_pr=%d]\n",
2166 depth
, II
, trpt
->tau
, boq
, now
._nr_pr
);
2170 x
= (Trail
*) trpt
->ostate
; /* pre-rv state */
2171 if (!x
) continue; /* root state */
2172 if ((x
->tau
&8) || (x
->tau
&32)) /* break atomic or preselect at parent */
2173 { x
->o_pm
|= 8; /* mark failure */
2174 this = (((uchar
*)&now
)+proc_offset
[otrpt
->pr
]);
2176 printf("\treset state of %d from %d to %d\n",
2177 otrpt
->pr
, ((P0
*)this)->_p
, otrpt
->st
);
2179 ((P0
*)this)->_p
= otrpt
->st
;
2180 unsend(boq
); /* retract rv offer */
2182 push_bfs(x
, x
->o_tt
);
2184 printf("failed rv, repush with %d\n", x
->o_pm
);
2188 else printf("failed rv, tau at parent: %d\n", x
->tau
);
2190 } else if (now
._nr_pr
> 0)
2192 if ((trpt
->tau
&8)) /* atomic */
2193 { trpt
->tau
&= ~(1|8); /* 1=timeout, 8=atomic */
2195 printf("%3d: atomic step proc %d blocks\n",
2201 if (!(trpt
->tau
&1)) /* didn't try timeout yet */
2204 printf("%d: timeout\n", depth
);
2209 if (!noends
&& !a_cycles
&& !endstate())
2210 uerror("invalid end state");
2216 putter(Trail
*trpt
, int fd
)
2221 if (trpt
!= (Trail
*) trpt
->ostate
)
2222 putter((Trail
*) trpt
->ostate
, fd
);
2225 { sprintf(snap
, "%d:%d:%d\n",
2226 trcnt
++, trpt
->pr
, trpt
->o_t
->t_id
);
2228 if (write(fd
, snap
, j
) != j
)
2229 { printf("pan: error writing %s\n", fnm
);
2236 { int fd
= make_trail();
2241 sprintf(snap
, "-2:%d:-2\n", VERI
);
2242 write(fd
, snap
, strlen(snap
));
2245 sprintf(snap
, "-4:-4:-4\n");
2246 write(fd
, snap
, strlen(snap
));
2251 { sprintf(snap
, "%d:%d:%d\n",
2252 trcnt
++, ntrpt
->pr
, ntrpt
->o_t
->t_id
);
2254 if (write(fd
, snap
, j
) != j
)
2255 { printf("pan: error writing %s\n", fnm
);
2259 if (errors
>= upto
&& upto
!= 0)
2265 #if defined(WIN32) || defined(WIN64)
2272 #include <windows.h>
2275 #define long long long
2278 #include <sys/ipc.h>
2279 #include <sys/sem.h>
2280 #include <sys/shm.h>
2283 /* code common to cygwin/linux and win32/win64: */
2286 #define VVERBOSE (1)
2288 #define VVERBOSE (0)
2291 /* the following values must be larger than 256 and must fit in an int */
2292 #define QUIT 1024 /* terminate now command */
2293 #define QUERY 512 /* termination status query message */
2294 #define QUERY_F 513 /* query failed, cannot quit */
2296 #define GN_FRAMES (int) (GWQ_SIZE / (double) sizeof(SM_frame))
2297 #define LN_FRAMES (int) (LWQ_SIZE / (double) sizeof(SM_frame))
2300 #define VMAX VECTORSZ
2316 /* no longer useful -- being recomputed for local heap size anyway */
2317 double SEG_SIZE
= (((double) SET_SEG_SIZE
) * 1048576.);
2319 double SEG_SIZE
= (1048576.*1024.); /* 1GB default shared memory pool segments */
2322 double LWQ_SIZE
= 0.; /* initialized in main */
2326 #warning SET_WQ_SIZE applies to global queue -- ignored
2327 double GWQ_SIZE
= 0.;
2329 double GWQ_SIZE
= (((double) SET_WQ_SIZE
) * 1048576.);
2330 /* must match the value in pan_proxy.c, if used */
2334 double GWQ_SIZE
= 0.;
2336 double GWQ_SIZE
= (128.*1048576.); /* 128 MB default queue sizes */
2340 /* Crash Detection Parameters */
2342 #define ONESECOND (1<<25)
2345 #define SHORT_T (0.1)
2348 #define LONG_T (600)
2351 double OneSecond
= (double) (ONESECOND
); /* waiting for a free slot -- checks crash */
2352 double TenSeconds
= 10. * (ONESECOND
); /* waiting for a lock -- check for a crash */
2354 /* Termination Detection Params -- waiting for new state input in Get_Full_Frame */
2355 double Delay
= ((double) SHORT_T
) * (ONESECOND
); /* termination detection trigger */
2356 double OneHour
= ((double) LONG_T
) * (ONESECOND
); /* timeout termination detection */
2358 typedef struct SM_frame SM_frame
;
2359 typedef struct SM_results SM_results
;
2360 typedef struct sh_Allocater sh_Allocater
;
2362 struct SM_frame
{ /* about 6K per slot */
2363 volatile int m_vsize
; /* 0 means free slot */
2364 volatile int m_boq
; /* >500 is a control message */
2366 volatile struct Stack_Tree
*m_stack
; /* ptr to previous state */
2368 volatile uchar m_tau
;
2369 volatile uchar m_o_pm
;
2370 volatile int nr_handoffs
; /* to compute real_depth */
2371 volatile char m_now
[VMAX
];
2372 volatile char m_Mask
[(VMAX
+ 7)/8];
2373 volatile OFFT m_p_offset
[PMAX
];
2374 volatile OFFT m_q_offset
[QMAX
];
2375 volatile uchar m_p_skip
[PMAX
];
2376 volatile uchar m_q_skip
[QMAX
];
2377 #if defined(C_States) && (HAS_TRACK==1) && (HAS_STACK==1)
2378 volatile uchar m_c_stack
[StackSize
];
2382 int proxy_pid
; /* id of proxy if nonzero -- receive half */
2383 int store_proxy_pid
;
2385 int proxy_pid_snd
; /* id of proxy if nonzero -- send half */
2386 char o_cmdline
[512]; /* to pass options to children */
2388 int iamin
[CS_NR
+NCORE
]; /* non-shared */
2390 #if defined(WIN32) || defined(WIN64)
2391 int tas(volatile LONG
*);
2393 HANDLE proxy_handle_snd
; /* for Windows Create and Terminate */
2395 struct sh_Allocater
{ /* shared memory for states */
2396 volatile char *dc_arena
; /* to allocate states from */
2397 volatile long pattern
; /* to detect overruns */
2398 volatile long dc_size
; /* nr of bytes left */
2399 volatile void *dc_start
; /* where memory segment starts */
2400 volatile void *dc_id
; /* to attach, detach, remove shared memory segments */
2401 volatile sh_Allocater
*nxt
; /* linked list of pools */
2403 DWORD worker_pids
[NCORE
]; /* root mem of pids of all workers created */
2404 HANDLE worker_handles
[NCORE
]; /* for windows Create and Terminate */
2405 void * shmid
[NR_QS
]; /* return value from CreateFileMapping */
2406 void * shmid_M
; /* shared mem for state allocation in hashtable */
2411 void *shmid_S
; /* shared bitstate arena or hashtable */
2414 int tas(volatile int *);
2416 struct sh_Allocater
{ /* shared memory for states */
2417 volatile char *dc_arena
; /* to allocate states from */
2418 volatile long pattern
; /* to detect overruns */
2419 volatile long dc_size
; /* nr of bytes left */
2420 volatile char *dc_start
; /* where memory segment starts */
2421 volatile int dc_id
; /* to attach, detach, remove shared memory segments */
2422 volatile sh_Allocater
*nxt
; /* linked list of pools */
2425 int worker_pids
[NCORE
]; /* root mem of pids of all workers created */
2426 int shmid
[NR_QS
]; /* return value from shmget */
2427 int nibis
= 0; /* set after shared mem has been released */
2428 int shmid_M
; /* shared mem for state allocation in hashtable */
2432 int shmid_S
; /* shared bitstate arena or hashtable */
2433 volatile sh_Allocater
*first_pool
; /* of shared state memory */
2434 volatile sh_Allocater
*last_pool
;
2438 struct SM_results
{ /* for shuttling back final stats */
2439 volatile int m_vsize
; /* avoid conflicts with frames */
2440 volatile int m_boq
; /* these 2 fields are not written in record_info */
2441 /* probably not all fields really need to be volatile */
2442 volatile double m_memcnt
;
2443 volatile double m_nstates
;
2444 volatile double m_truncs
;
2445 volatile double m_truncs2
;
2446 volatile double m_nShadow
;
2447 volatile double m_nlinks
;
2448 volatile double m_ngrabs
;
2449 volatile double m_nlost
;
2450 volatile double m_hcmp
;
2451 volatile double m_frame_wait
;
2452 volatile int m_hmax
;
2453 volatile int m_svmax
;
2454 volatile int m_smax
;
2455 volatile int m_mreached
;
2456 volatile int m_errors
;
2457 volatile int m_VMAX
;
2458 volatile short m_PMAX
;
2459 volatile short m_QMAX
;
2460 volatile uchar m_R
; /* reached info for all proctypes */
2463 int core_id
= 0; /* internal process nr, to know which q to use */
2464 unsigned long nstates_put
= 0; /* statistics */
2465 unsigned long nstates_get
= 0;
2466 int query_in_progress
= 0; /* termination detection */
2468 double free_wait
= 0.; /* waiting for a free frame */
2469 double frame_wait
= 0.; /* waiting for a full frame */
2470 double lock_wait
= 0.; /* waiting for access to cs */
2471 double glock_wait
[3]; /* waiting for access to global lock */
2473 char *sprefix
= "rst";
2474 uchar was_interrupted
, issued_kill
, writing_trail
;
2476 static SM_frame cur_Root
; /* current root, to be safe with error trails */
2478 SM_frame
*m_workq
[NR_QS
]; /* per cpu work queues + global q */
2479 char *shared_mem
[NR_QS
]; /* return value from shmat */
2484 volatile sh_Allocater
*dc_shared
; /* assigned at initialization */
2486 static int vmax_seen
, pmax_seen
, qmax_seen
;
2487 static double gq_tries
, gq_hasroom
, gq_hasnoroom
;
2489 volatile int *prfree
;
2490 volatile int *prfull
;
2491 volatile int *prcnt
;
2492 volatile int *prmax
;
2494 volatile int *sh_lock
; /* mutual exclusion locks - in shared memory */
2495 volatile double *is_alive
; /* to detect when processes crash */
2496 volatile int *grfree
, *grfull
, *grcnt
, *grmax
; /* access to shared global q */
2497 volatile double *gr_readmiss
, *gr_writemiss
;
2498 static int lrfree
; /* used for temporary recording of slot */
2499 static int dfs_phase2
;
2501 void mem_put(int); /* handoff state to other cpu */
2502 void mem_put_acc(void); /* liveness mode */
2503 void mem_get(void); /* get state from work queue */
2504 void sudden_stop(char *);
2506 void enter_critical(int);
2507 void leave_critical(int);
2511 record_info(SM_results
*r
)
2517 { cpu_printf("nstates %g nshadow %g -- memory %-6.3f Mb\n",
2518 nstates
, nShadow
, memcnt
/(1048576.));
2523 r
->m_memcnt
= 0; /* it's shared */
2525 r
->m_memcnt
= memcnt
;
2527 if (a_cycles
&& core_id
== 1)
2528 { r
->m_nstates
= nstates
;
2529 r
->m_nShadow
= nstates
;
2531 { r
->m_nstates
= nstates
;
2532 r
->m_nShadow
= nShadow
;
2534 r
->m_truncs
= truncs
;
2535 r
->m_truncs2
= truncs2
;
2536 r
->m_nlinks
= nlinks
;
2537 r
->m_ngrabs
= ngrabs
;
2540 r
->m_frame_wait
= frame_wait
;
2544 r
->m_mreached
= mreached
;
2545 r
->m_errors
= errors
;
2546 r
->m_VMAX
= vmax_seen
;
2547 r
->m_PMAX
= (short) pmax_seen
;
2548 r
->m_QMAX
= (short) qmax_seen
;
2549 ptr
= (uchar
*) &(r
->m_R
);
2550 for (i
= 0; i
<= _NP_
; i
++) /* all proctypes */
2551 { memcpy(ptr
, reached
[i
], NrStates
[i
]*sizeof(uchar
));
2552 ptr
+= NrStates
[i
]*sizeof(uchar
);
2555 { cpu_printf("Put Results nstates %g (sz %d)\n", nstates
, ptr
- &(r
->m_R
));
2559 void snapshot(void);
2562 retrieve_info(SM_results
*r
)
2564 volatile uchar
*ptr
;
2566 snapshot(); /* for a final report */
2568 enter_critical(GLOBAL_LOCK
);
2571 { printf("cpu%d: local heap-left %ld KB (%d MB)\n",
2572 core_id
, (int) (my_size
/1024), (int) (my_size
/1048576));
2575 if (verbose
&& core_id
== 0)
2577 for (i
= 0; i
< NCORE
; i
++)
2578 { printf("%d ", prmax
[i
]);
2581 printf("G: %d", *grmax
);
2585 leave_critical(GLOBAL_LOCK
);
2587 memcnt
+= r
->m_memcnt
;
2588 nstates
+= r
->m_nstates
;
2589 nShadow
+= r
->m_nShadow
;
2590 truncs
+= r
->m_truncs
;
2591 truncs2
+= r
->m_truncs2
;
2592 nlinks
+= r
->m_nlinks
;
2593 ngrabs
+= r
->m_ngrabs
;
2594 nlost
+= r
->m_nlost
;
2596 /* frame_wait += r->m_frame_wait; */
2597 errors
+= r
->m_errors
;
2599 if (hmax
< r
->m_hmax
) hmax
= r
->m_hmax
;
2600 if (svmax
< r
->m_svmax
) svmax
= r
->m_svmax
;
2601 if (smax
< r
->m_smax
) smax
= r
->m_smax
;
2602 if (mreached
< r
->m_mreached
) mreached
= r
->m_mreached
;
2604 if (vmax_seen
< r
->m_VMAX
) vmax_seen
= r
->m_VMAX
;
2605 if (pmax_seen
< (int) r
->m_PMAX
) pmax_seen
= (int) r
->m_PMAX
;
2606 if (qmax_seen
< (int) r
->m_QMAX
) qmax_seen
= (int) r
->m_QMAX
;
2609 for (i
= 0; i
<= _NP_
; i
++) /* all proctypes */
2610 { for (j
= 0; j
< NrStates
[i
]; j
++)
2611 { if (*(ptr
+ j
) != 0)
2612 { reached
[i
][j
] = 1;
2614 ptr
+= NrStates
[i
]*sizeof(uchar
);
2617 { cpu_printf("Got Results (%d)\n", ptr
- &(r
->m_R
));
2622 #if !defined(WIN32) && !defined(WIN64)
2624 rm_shared_segments(void)
2626 volatile sh_Allocater
*nxt_pool
;
2628 * mark all shared memory segments for removal
2629 * the actual removes won't happen until the last process dies or detaches
2630 * the shmctl calls can return -1 if not all procs have detached yet
2632 for (m
= 0; m
< NR_QS
; m
++) /* +1 for global q */
2633 { if (shmid
[m
] != -1)
2634 { (void) shmctl(shmid
[m
], IPC_RMID
, NULL
);
2638 { (void) shmctl(shmid_M
, IPC_RMID
, NULL
);
2642 { (void) shmctl(shmid_S
, IPC_RMID
, NULL
);
2644 for (last_pool
= first_pool
; last_pool
!= NULL
; last_pool
= nxt_pool
)
2645 { shmid_M
= (int) (last_pool
->dc_id
);
2646 nxt_pool
= last_pool
->nxt
; /* as a pre-caution only */
2648 { (void) shmctl(shmid_M
, IPC_RMID
, NULL
);
2655 sudden_stop(char *s
)
2659 printf("cpu%d: stop - %s\n", core_id
, s
);
2660 #if !defined(WIN32) && !defined(WIN64)
2662 { rm_shared_segments();
2665 if (search_terminated
!= NULL
)
2666 { if (*search_terminated
!= 0)
2668 { printf("cpu%d: termination initiated (%d)\n",
2669 core_id
, *search_terminated
);
2673 { printf("cpu%d: initiated termination\n", core_id
);
2675 *search_terminated
|= 8; /* sudden_stop */
2678 { if (((*search_terminated
) & 4) /* uerror in one of the cpus */
2679 && !((*search_terminated
) & (8|32|128|256))) /* abnormal stop */
2680 { if (errors
== 0) errors
++; /* we know there is at least 1 */
2682 wrapup(); /* incomplete stats, but at least something */
2685 } /* else: should rarely happen, take more drastic measures */
2687 if (core_id
== 0) /* local root process */
2688 { for (i
= 1; i
< NCORE
; i
++) /* not for 0 of course */
2690 #if defined(WIN32) || defined(WIN64)
2691 DWORD dwExitCode
= 0;
2692 GetExitCodeProcess(worker_handles
[i
], &dwExitCode
);
2693 if (dwExitCode
== STILL_ACTIVE
)
2694 { TerminateProcess(worker_handles
[i
], 0);
2696 printf("cpu0: terminate %d %d\n",
2697 worker_pids
[i
], (dwExitCode
== STILL_ACTIVE
));
2699 sprintf(b
, "kill -%d %d", SIGKILL
, worker_pids
[i
]);
2700 system(b
); /* if this is a proxy: receive half */
2701 printf("cpu0: %s\n", b
);
2706 { /* on WIN32/WIN64 -- this merely kills the root process... */
2707 if (was_interrupted
== 0)
2708 { sprintf(b
, "kill -%d %d", SIGINT
, worker_pids
[0]);
2709 system(b
); /* warn the root process */
2710 printf("cpu%d: %s\n", core_id
, b
);
2715 #define iam_alive() is_alive[core_id]++
2717 extern int crash_test(double);
2718 extern void crash_reset(void);
2721 someone_crashed(int wait_type
)
2722 { static double last_value
= 0.0;
2723 static int count
= 0;
2725 if (search_terminated
== NULL
2726 || *search_terminated
!= 0)
2728 if (!(*search_terminated
& (8|32|128|256)))
2729 { if (count
++ < 100*NCORE
)
2734 /* check left neighbor only */
2735 if (last_value
== is_alive
[(core_id
+ NCORE
- 1) % NCORE
])
2736 { if (count
++ >= 100) /* to avoid unnecessary checks */
2741 last_value
= is_alive
[(core_id
+ NCORE
- 1) % NCORE
];
2750 enter_critical(GLOBAL_LOCK
);
2754 printf("cpu%d: locks: global %g\tother %g\t",
2755 core_id
, glock_wait
[0], lock_wait
- glock_wait
[0]);
2757 printf("cpu%d: locks: GL %g, RQ %g, WQ %g, HT %g\t",
2758 core_id
, glock_wait
[0], glock_wait
[1], glock_wait
[2],
2759 lock_wait
- glock_wait
[0] - glock_wait
[1] - glock_wait
[2]);
2761 printf("waits: states %g slots %g\n", frame_wait
, free_wait
);
2763 printf("cpu%d: gq [tries %g, room %g, noroom %g]\n", core_id
, gq_tries
, gq_hasroom
, gq_hasnoroom
);
2764 if (core_id
== 0 && (*gr_readmiss
>= 1.0 || *gr_readmiss
>= 1.0 || *grcnt
!= 0))
2765 printf("cpu0: gq [readmiss: %g, writemiss: %g cnt %d]\n", *gr_readmiss
, *gr_writemiss
, *grcnt
);
2768 if (free_wait
> 1000000.)
2771 { printf("hint: this search may be faster with a larger work-queue\n");
2772 printf(" (-DSET_WQ_SIZE=N with N>%g), and/or with -DUSE_DISK\n",
2773 GWQ_SIZE
/sizeof(SM_frame
));
2774 printf(" or with a larger value for -zN (N>%d)\n", z_handoff
);
2776 { printf("hint: this search may be faster if compiled without -DNGQ, with -DUSE_DISK, ");
2777 printf("or with a larger -zN (N>%d)\n", z_handoff
);
2780 leave_critical(GLOBAL_LOCK
);
2783 #ifndef MAX_DSK_FILE
2784 #define MAX_DSK_FILE 1000000 /* default is max 1M states per file */
2788 multi_usage(FILE *fd
)
2789 { static int warned
= 0;
2790 if (warned
> 0) { return; } else { warned
++; }
2792 fprintf(fd
, "Defining multi-core mode:\n\n");
2793 fprintf(fd
, " -DDUAL_CORE --> same as -DNCORE=2\n");
2794 fprintf(fd
, " -DQUAD_CORE --> same as -DNCORE=4\n");
2795 fprintf(fd
, " -DNCORE=N --> enables multi_core verification if N>1\n");
2797 fprintf(fd
, "Additional directives supported in multi-core mode:\n\n");
2798 fprintf(fd
, " -DSEP_STATE --> forces separate statespaces instead of a single shared state space\n");
2799 fprintf(fd
, " -DNUSE_DISK --> use disk for storing states when a work queue overflows\n");
2800 fprintf(fd
, " -DMAX_DSK_FILE --> max nr of states per diskfile (%d)\n", MAX_DSK_FILE
);
2801 fprintf(fd
, " -DFULL_TRAIL --> support full error trails (increases memory use)\n");
2803 fprintf(fd
, "More advanced use (should rarely need changing):\n\n");
2804 fprintf(fd
, " To change the nr of states that can be stored in the global queue\n");
2805 fprintf(fd
, " (lower numbers allow for more states to be stored, prefer multiples of 8):\n");
2806 fprintf(fd
, " -DVMAX=N --> upperbound on statevector for handoffs (N=%d)\n", VMAX
);
2807 fprintf(fd
, " -DPMAX=N --> upperbound on nr of procs (default: N=%d)\n", PMAX
);
2808 fprintf(fd
, " -DQMAX=N --> upperbound on nr of channels (default: N=%d)\n", QMAX
);
2810 fprintf(fd
, " To set the total amount of memory reserved for the global workqueue:\n");
2811 fprintf(fd
, " -DSET_WQ_SIZE=N --> default: N=128 (defined in MBytes)\n\n");
2812 fprintf(fd
, " To force the use of a single global heap, instead of separate heaps:\n");
2813 fprintf(fd
, " -DGLOB_HEAP\n");
2815 fprintf(fd
, " To define a fct to initialize data before spawning processes (use quotes):\n");
2816 fprintf(fd
, " \"-DC_INIT=fct()\"\n");
2818 fprintf(fd
, " Timer settings for termination and crash detection:\n");
2819 fprintf(fd
, " -DSHORT_T=N --> timeout for termination detection trigger (N=%g)\n", (double) SHORT_T
);
2820 fprintf(fd
, " -DLONG_T=N --> timeout for giving up on termination detection (N=%g)\n", (double) LONG_T
);
2821 fprintf(fd
, " -DONESECOND --> (1<<29) --> timeout waiting for a free slot -- to check for crash\n");
2822 fprintf(fd
, " -DT_ALERT --> collect stats on crash alert timeouts\n\n");
2823 fprintf(fd
, "Help with Linux/Windows/Cygwin configuration for multi-core:\n");
2824 fprintf(fd
, " http://spinroot.com/spin/multicore/V5_Readme.html\n");
2827 #if NCORE>1 && defined(FULL_TRAIL)
2828 typedef struct Stack_Tree
{
2829 uchar pr
; /* process that made transition */
2830 T_ID t_id
; /* id of transition */
2831 volatile struct Stack_Tree
*prv
; /* backward link towards root */
2834 struct H_el
*grab_shared(int);
2835 volatile Stack_Tree
**stack_last
; /* in shared memory */
2836 char *stack_cache
= NULL
; /* local */
2837 int nr_cached
= 0; /* local */
2840 #define CACHE_NR 1024
2843 volatile Stack_Tree
*
2844 stack_prefetch(void)
2845 { volatile Stack_Tree
*st
;
2848 { stack_cache
= (char *) grab_shared(CACHE_NR
* sizeof(Stack_Tree
));
2849 nr_cached
= CACHE_NR
;
2851 st
= (volatile Stack_Tree
*) stack_cache
;
2852 stack_cache
+= sizeof(Stack_Tree
);
2858 Push_Stack_Tree(short II
, T_ID t_id
)
2859 { volatile Stack_Tree
*st
;
2861 st
= (volatile Stack_Tree
*) stack_prefetch();
2864 st
->prv
= (Stack_Tree
*) stack_last
[core_id
];
2865 stack_last
[core_id
] = st
;
2869 Pop_Stack_Tree(void)
2870 { volatile Stack_Tree
*cf
= stack_last
[core_id
];
2873 { stack_last
[core_id
] = cf
->prv
;
2874 } else if (nr_handoffs
* z_handoff
+ depth
> 0)
2875 { printf("cpu%d: error pop_stack_tree (depth %d)\n",
2882 e_critical(int which
)
2885 if (readtrail
|| iamin
[which
] > 0)
2886 { if (!readtrail
&& verbose
)
2887 { printf("cpu%d: Double Lock on %d (now %d)\n",
2888 core_id
, which
, iamin
[which
]+1);
2891 iamin
[which
]++; /* local variable */
2895 cnt_start
= lock_wait
;
2897 while (sh_lock
!= NULL
) /* as long as we have shared memory */
2898 { int r
= tas(&sh_lock
[which
]);
2901 return; /* locked */
2906 if (which
< 3) { glock_wait
[which
]++; }
2908 if (which
== 0) { glock_wait
[which
]++; }
2912 if (lock_wait
- cnt_start
> TenSeconds
)
2913 { printf("cpu%d: lock timeout on %d\n", core_id
, which
);
2914 cnt_start
= lock_wait
;
2915 if (someone_crashed(1))
2916 { sudden_stop("lock timeout");
2922 x_critical(int which
)
2924 if (iamin
[which
] != 1)
2925 { if (iamin
[which
] > 1)
2926 { iamin
[which
]--; /* this is thread-local - no races on this one */
2927 if (!readtrail
&& verbose
)
2928 { printf("cpu%d: Partial Unlock on %d (%d more needed)\n",
2929 core_id
, which
, iamin
[which
]);
2933 } else /* iamin[which] <= 0 */
2935 { printf("cpu%d: Invalid Unlock iamin[%d] = %d\n",
2936 core_id
, which
, iamin
[which
]);
2942 if (sh_lock
!= NULL
)
2944 sh_lock
[which
] = 0; /* unlock */
2949 #if defined(WIN32) || defined(WIN64)
2950 start_proxy(char *s
, DWORD r_pid
)
2952 start_proxy(char *s
, int r_pid
)
2954 { char Q_arg
[16], Z_arg
[16], Y_arg
[16];
2955 char *args
[32], *ptr
;
2958 sprintf(Q_arg
, "-Q%d", getpid());
2959 sprintf(Y_arg
, "-Y%d", r_pid
);
2960 sprintf(Z_arg
, "-Z%d", proxy_pid
/* core_id */);
2962 args
[argcnt
++] = "proxy";
2963 args
[argcnt
++] = s
; /* -r or -s */
2964 args
[argcnt
++] = Q_arg
;
2965 args
[argcnt
++] = Z_arg
;
2966 args
[argcnt
++] = Y_arg
;
2968 if (strlen(o_cmdline
) > 0)
2969 { ptr
= o_cmdline
; /* assume args separated by spaces */
2970 do { args
[argcnt
++] = ptr
++;
2971 if ((ptr
= strchr(ptr
, ' ')) != NULL
)
2972 { while (*ptr
== ' ')
2978 } while (argcnt
< 31);
2980 args
[argcnt
] = NULL
;
2981 #if defined(WIN32) || defined(WIN64)
2982 execvp("pan_proxy", args
); /* no return */
2984 execvp("./pan_proxy", args
); /* no return */
2986 Uerror("pan_proxy exec failed");
2988 /*** end of common code fragment ***/
2990 #if !defined(WIN32) && !defined(WIN64)
2992 init_shm(void) /* initialize shared work-queues - linux/cygwin */
2997 if (core_id
== 0 && verbose
)
2998 { printf("cpu0: step 3: allocate shared workqueues %g MB\n",
2999 ((double) NCORE
* LWQ_SIZE
+ GWQ_SIZE
) / (1048576.) );
3001 for (m
= 0; m
< NR_QS
; m
++) /* last q is the global q */
3002 { double qsize
= (m
== NCORE
) ? GWQ_SIZE
: LWQ_SIZE
;
3003 key
[m
] = ftok(PanSource
, m
+1);
3005 { perror("ftok shared queues"); must_exit
= 1; break;
3008 if (core_id
== 0) /* root creates */
3009 { /* check for stale copy */
3010 shmid
[m
] = shmget(key
[m
], (size_t) qsize
, 0600);
3011 if (shmid
[m
] != -1) /* yes there is one; remove it */
3012 { printf("cpu0: removing stale q%d, status: %d\n",
3013 m
, shmctl(shmid
[m
], IPC_RMID
, NULL
));
3015 shmid
[m
] = shmget(key
[m
], (size_t) qsize
, 0600|IPC_CREAT
|IPC_EXCL
);
3017 } else /* workers attach */
3018 { shmid
[m
] = shmget(key
[m
], (size_t) qsize
, 0600);
3019 /* never called, since we create shm *before* we fork */
3022 { perror("shmget shared queues"); must_exit
= 1; break;
3025 shared_mem
[m
] = (char *) shmat(shmid
[m
], (void *) 0, 0); /* attach */
3026 if (shared_mem
[m
] == (char *) -1)
3027 { fprintf(stderr
, "error: cannot attach shared wq %d (%d Mb)\n",
3028 m
+1, (int) (qsize
/(1048576.)));
3029 perror("shmat shared queues"); must_exit
= 1; break;
3032 m_workq
[m
] = (SM_frame
*) shared_mem
[m
];
3034 { int nframes
= (m
== NCORE
) ? GN_FRAMES
: LN_FRAMES
;
3035 for (n
= 0; n
< nframes
; n
++)
3036 { m_workq
[m
][n
].m_vsize
= 0;
3037 m_workq
[m
][n
].m_boq
= 0;
3041 { rm_shared_segments();
3042 fprintf(stderr
, "pan: check './pan --' for usage details\n");
3043 pan_exit(1); /* calls cleanup_shm */
3048 prep_shmid_S(size_t n
) /* either sets SS or H_tab, linux/cygwin */
3053 if (verbose
&& core_id
== 0)
3056 printf("cpu0: step 1: allocate shared bitstate %g Mb\n",
3057 (double) n
/ (1048576.));
3059 printf("cpu0: step 1: allocate shared hastable %g Mb\n",
3060 (double) n
/ (1048576.));
3064 if (memcnt
+ (double) n
> memlim
)
3065 { printf("cpu0: S %8g + %d Kb exceeds memory limit of %8g Mb\n",
3066 memcnt
/1024., n
/1024, memlim
/(1048576.));
3067 printf("cpu0: insufficient memory -- aborting\n");
3072 key
= ftok(PanSource
, NCORE
+2); /* different from queues */
3074 { perror("ftok shared bitstate or hashtable");
3075 fprintf(stderr
, "pan: check './pan --' for usage details\n");
3079 if (core_id
== 0) /* root */
3080 { shmid_S
= shmget(key
, n
, 0600);
3082 { printf("cpu0: removing stale segment, status: %d\n",
3083 shmctl(shmid_S
, IPC_RMID
, NULL
));
3085 shmid_S
= shmget(key
, n
, 0600 | IPC_CREAT
| IPC_EXCL
);
3086 memcnt
+= (double) n
;
3088 { shmid_S
= shmget(key
, n
, 0600);
3091 { perror("shmget shared bitstate or hashtable too large?");
3092 fprintf(stderr
, "pan: check './pan --' for usage details\n");
3096 rval
= (char *) shmat(shmid_S
, (void *) 0, 0); /* attach */
3097 if ((char *) rval
== (char *) -1)
3098 { perror("shmat shared bitstate or hashtable");
3099 fprintf(stderr
, "pan: check './pan --' for usage details\n");
3103 rval
= (char *) emalloc(n
);
3105 return (uchar
*) rval
;
3111 static char shm_prep_result
;
3114 prep_state_mem(size_t n
) /* sets memory arena for states linux/cygwin */
3117 static int cnt
= 3; /* start larger than earlier ftok calls */
3119 shm_prep_result
= NOT_AGAIN
; /* default */
3120 if (verbose
&& core_id
== 0)
3121 { printf("cpu0: step 2+: pre-allocate memory arena %d of %6.2g Mb\n",
3122 cnt
-3, (double) n
/ (1048576.));
3125 if (memcnt
+ (double) n
> memlim
)
3126 { printf("cpu0: error: M %.0f + %.0f Kb exceeds memory limit of %.0f Mb\n",
3127 memcnt
/1024.0, (double) n
/1024.0, memlim
/(1048576.));
3132 key
= ftok(PanSource
, NCORE
+cnt
); cnt
++;
3135 printf("pan: check './pan --' for usage details\n");
3140 { shmid_M
= shmget(key
, n
, 0600);
3142 { printf("cpu0: removing stale memory segment %d, status: %d\n",
3143 cnt
-3, shmctl(shmid_M
, IPC_RMID
, NULL
));
3145 shmid_M
= shmget(key
, n
, 0600 | IPC_CREAT
| IPC_EXCL
);
3146 /* memcnt += (double) n; -- only amount actually used is counted */
3148 { shmid_M
= shmget(key
, n
, 0600);
3153 { printf("error: failed to get pool of shared memory %d of %.0f Mb\n",
3154 cnt
-3, ((double)n
)/(1048576.));
3155 perror("state mem");
3156 printf("pan: check './pan --' for usage details\n");
3158 shm_prep_result
= TRY_AGAIN
;
3161 rval
= (char *) shmat(shmid_M
, (void *) 0, 0); /* attach */
3163 if ((char *) rval
== (char *) -1)
3164 { printf("cpu%d error: failed to attach pool of shared memory %d of %.0f Mb\n",
3165 core_id
, cnt
-3, ((double)n
)/(1048576.));
3166 perror("state mem");
3169 return (uchar
*) rval
;
3173 init_HT(unsigned long n
) /* cygwin/linux version */
3177 volatile char *dc_mem_start
;
3178 double need_mem
, got_mem
= 0.;
3184 { printf("cpu0: steps 0,1: no -DMEMLIM set\n");
3188 { printf("cpu0: steps 0,1: -DMEMLIM=%d Mb - (hashtable %g Mb + workqueues %g Mb)\n",
3189 MEMLIM
, ((double)n
/(1048576.)), (((double) NCORE
* LWQ_SIZE
) + GWQ_SIZE
) /(1048576.) );
3192 get_mem
= NCORE
* sizeof(double) + (1 + CS_NR
) * sizeof(void *) + 4*sizeof(void *) + 2*sizeof(double);
3193 /* NCORE * is_alive + search_terminated + CS_NR * sh_lock + 6 gr vars */
3194 get_mem
+= 4 * NCORE
* sizeof(void *); /* prfree, prfull, prcnt, prmax */
3196 get_mem
+= (NCORE
) * sizeof(Stack_Tree
*); /* NCORE * stack_last */
3198 x
= (volatile char *) prep_state_mem((size_t) get_mem
); /* work queues and basic structs */
3201 { printf("cpu0: could not allocate shared memory, see ./pan --\n");
3204 search_terminated
= (volatile unsigned int *) x
; /* comes first */
3205 x
+= sizeof(void *); /* maintain alignment */
3207 is_alive
= (volatile double *) x
;
3208 x
+= NCORE
* sizeof(double);
3210 sh_lock
= (volatile int *) x
;
3211 x
+= CS_NR
* sizeof(void *);
3213 grfree
= (volatile int *) x
;
3214 x
+= sizeof(void *);
3215 grfull
= (volatile int *) x
;
3216 x
+= sizeof(void *);
3217 grcnt
= (volatile int *) x
;
3218 x
+= sizeof(void *);
3219 grmax
= (volatile int *) x
;
3220 x
+= sizeof(void *);
3221 prfree
= (volatile int *) x
;
3222 x
+= NCORE
* sizeof(void *);
3223 prfull
= (volatile int *) x
;
3224 x
+= NCORE
* sizeof(void *);
3225 prcnt
= (volatile int *) x
;
3226 x
+= NCORE
* sizeof(void *);
3227 prmax
= (volatile int *) x
;
3228 x
+= NCORE
* sizeof(void *);
3229 gr_readmiss
= (volatile double *) x
;
3230 x
+= sizeof(double);
3231 gr_writemiss
= (volatile double *) x
;
3232 x
+= sizeof(double);
3235 stack_last
= (volatile Stack_Tree
**) x
;
3236 x
+= NCORE
* sizeof(Stack_Tree
*);
3240 H_tab
= (struct H_el
**) emalloc(n
);
3244 #warning MEMLIM not set
3245 #define MEMLIM (2048)
3248 if (core_id
== 0 && verbose
)
3249 { printf("cpu0: step 0: -DMEMLIM=%d Mb minus hashtable+workqs (%g + %g Mb) leaves %g Mb\n",
3250 MEMLIM
, ((double)n
/(1048576.)), (NCORE
* LWQ_SIZE
+ GWQ_SIZE
)/(1048576.),
3251 (memlim
- memcnt
- (double) n
- (NCORE
* LWQ_SIZE
+ GWQ_SIZE
))/(1048576.));
3254 H_tab
= (struct H_el
**) prep_shmid_S((size_t) n
); /* hash_table */
3256 need_mem
= memlim
- memcnt
- ((double) NCORE
* LWQ_SIZE
) - GWQ_SIZE
;
3258 { Uerror("internal error -- shared state memory");
3261 if (core_id
== 0 && verbose
)
3262 { printf("cpu0: step 2: pre-allocate shared state memory %g Mb\n",
3263 need_mem
/(1048576.));
3266 SEG_SIZE
= need_mem
/ NCORE
;
3267 if (verbose
&& core_id
== 0)
3268 { printf("cpu0: setting segsize to %6g MB\n",
3269 SEG_SIZE
/(1048576.));
3271 #if defined(CYGWIN) || defined(__CYGWIN__)
3272 if (SEG_SIZE
> 512.*1024.*1024.)
3273 { printf("warning: reducing SEG_SIZE of %g MB to 512MB (exceeds max for Cygwin)\n",
3274 SEG_SIZE
/(1024.*1024.));
3275 SEG_SIZE
= 512.*1024.*1024.;
3279 mem_reserved
= need_mem
;
3280 while (need_mem
> 1024.)
3281 { get_mem
= need_mem
;
3283 if (get_mem
> (double) SEG_SIZE
)
3284 { get_mem
= (double) SEG_SIZE
;
3286 if (get_mem
<= 0.0) break;
3288 /* for allocating states: */
3289 x
= dc_mem_start
= (volatile char *) prep_state_mem((size_t) get_mem
);
3291 { if (shm_prep_result
== NOT_AGAIN
3292 || first_pool
!= NULL
3293 || SEG_SIZE
< (16. * 1048576.))
3298 { printf("pan: lowered segsize to 0.000000\n", SEG_SIZE
);
3300 if (SEG_SIZE
>= 1024.)
3306 need_mem
-= get_mem
;
3308 if (first_pool
== NULL
)
3309 { search_terminated
= (volatile unsigned int *) x
; /* comes first */
3310 x
+= sizeof(void *); /* maintain alignment */
3312 is_alive
= (volatile double *) x
;
3313 x
+= NCORE
* sizeof(double);
3315 sh_lock
= (volatile int *) x
;
3316 x
+= CS_NR
* sizeof(void *);
3318 grfree
= (volatile int *) x
;
3319 x
+= sizeof(void *);
3320 grfull
= (volatile int *) x
;
3321 x
+= sizeof(void *);
3322 grcnt
= (volatile int *) x
;
3323 x
+= sizeof(void *);
3324 grmax
= (volatile int *) x
;
3325 x
+= sizeof(void *);
3326 prfree
= (volatile int *) x
;
3327 x
+= NCORE
* sizeof(void *);
3328 prfull
= (volatile int *) x
;
3329 x
+= NCORE
* sizeof(void *);
3330 prcnt
= (volatile int *) x
;
3331 x
+= NCORE
* sizeof(void *);
3332 prmax
= (volatile int *) x
;
3333 x
+= NCORE
* sizeof(void *);
3334 gr_readmiss
= (volatile double *) x
;
3335 x
+= sizeof(double);
3336 gr_writemiss
= (volatile double *) x
;
3337 x
+= sizeof(double);
3339 stack_last
= (volatile Stack_Tree
**) x
;
3340 x
+= NCORE
* sizeof(Stack_Tree
*);
3342 if (((long)x
)&(sizeof(void *)-1)) /* 64-bit word alignment */
3343 { x
+= sizeof(void *)-(((long)x
)&(sizeof(void *)-1));
3347 ncomps
= (unsigned long *) x
;
3348 x
+= (256+2) * sizeof(unsigned long);
3352 dc_shared
= (sh_Allocater
*) x
; /* must be in shared memory */
3353 x
+= sizeof(sh_Allocater
);
3355 if (core_id
== 0) /* root only */
3356 { dc_shared
->dc_id
= shmid_M
;
3357 dc_shared
->dc_start
= dc_mem_start
;
3358 dc_shared
->dc_arena
= x
;
3359 dc_shared
->pattern
= 1234567; /* protection */
3360 dc_shared
->dc_size
= (long) get_mem
- (long) (x
- dc_mem_start
);
3361 dc_shared
->nxt
= (long) 0;
3363 if (last_pool
== NULL
)
3364 { first_pool
= last_pool
= dc_shared
;
3366 { last_pool
->nxt
= dc_shared
;
3367 last_pool
= dc_shared
;
3369 } else if (first_pool
== NULL
)
3370 { first_pool
= dc_shared
;
3373 if (need_mem
> 1024.)
3374 { printf("cpu0: could allocate only %g Mb of shared memory (wanted %g more)\n",
3375 got_mem
/(1048576.), need_mem
/(1048576.));
3379 { printf("cpu0: insufficient memory -- aborting.\n");
3382 /* we are still single-threaded at this point, with core_id 0 */
3383 dc_shared
= first_pool
;
3388 /* Test and Set assembly code */
3390 #if defined(i386) || defined(__i386__) || defined(__x86_64__)
3392 tas(volatile int *s
) /* tested */
3394 __asm__
__volatile__(
3402 #elif defined(__arm__)
3404 tas(volatile int *s
) /* not tested */
3406 __asm__
__volatile__(
3407 "swpb %0, %0, [%3] \n"
3413 #elif defined(sparc) || defined(__sparc__)
3415 tas(volatile int *s
) /* not tested */
3417 __asm__
__volatile__(
3418 " ldstub [%2], %0 \n"
3424 #elif defined(ia64) || defined(__ia64__)
3427 tas(volatile int *s
) /* tested */
3429 __asm__
__volatile__(
3430 " xchg4 %0=%1,%2 \n"
3437 #error missing definition of test and set operation for this platform
/* cleanup_shm(val): detach this process from all SysV shared-memory
 * segments (work queues, optional state memory / bitstate arena,
 * hashtable, and the chain of state-memory pools).
 * NOTE(review): this extract has elided lines (guards, braces); the
 * comments below describe only the visible logic.
 */
3441 cleanup_shm(int val
)
3442 { volatile sh_Allocater
*nxt_pool
;
3443 unsigned long cnt
= 0;
3447 { printf("cpu%d: Redundant call to cleanup_shm(%d)\n", core_id
, val
);
/* flag bit 16 in the shared termination word marks cleanup_shm ran */
3452 if (search_terminated
!= NULL
)
3453 { *search_terminated
|= 16; /* cleanup_shm */
/* detach from each per-core work queue plus the global queue */
3456 for (m
= 0; m
< NR_QS
; m
++)
/* NOTE(review): shmdt() returns -1 on error, 0 on success; the
 * "> 0" test below can never be true, so this perror is dead code
 * — TODO confirm against the generator before changing */
3457 { if (shmdt((void *) shared_mem
[m
]) > 0)
3458 { perror("shmdt detaching from shared queues");
3462 if (shmdt((void *) shmid_X
) != 0)
3463 { perror("shmdt detaching from shared state memory");
3467 if (SS
> 0 && shmdt((void *) SS
) != 0)
3469 { perror("shmdt detaching from shared bitstate arena");
3473 { /* before detaching: */
/* tally unused bytes left in the pool chain, for the report below */
3474 for (nxt_pool
= dc_shared
; nxt_pool
!= NULL
; nxt_pool
= nxt_pool
->nxt
)
3475 { cnt
+= nxt_pool
->dc_size
;
3478 { printf("cpu0: done, %ld Mb of shared state memory left\n",
3479 cnt
/ (long)(1048576));
3482 if (shmdt((void *) H_tab
) != 0)
3483 { perror("shmdt detaching from shared hashtable");
/* walk and detach every pool; nxt_pool saved first because
 * last_pool becomes unusable after shmdt */
3486 for (last_pool
= first_pool
; last_pool
!= NULL
; last_pool
= nxt_pool
)
3487 { nxt_pool
= last_pool
->nxt
;
3488 if (shmdt((void *) last_pool
->dc_start
) != 0)
3489 { perror("shmdt detaching from shared state memory");
3491 first_pool
= last_pool
= NULL
; /* precaution */
3494 /* detached from shared memory - so cannot use cpu_printf */
3496 { printf("cpu%d: done -- got %d states from queue\n",
3497 core_id
, nstates_get
);
3501 extern void give_up(int);
3502 extern void Read_Queue(int);
3509 #if defined(MA) && !defined(SEP_STATE)
3510 #error MA without SEP_STATE is not supported with multi-core
3513 #error BFS is not supported with multi-core
3516 #error SC is not supported with multi-core
3518 init_shm(); /* we are single threaded when this starts */
3520 if (core_id
== 0 && verbose
)
3521 { printf("cpu0: step 4: calling fork()\n");
3525 /* if NCORE > 1 the child or the parent should fork N-1 more times
3526 * the parent is the only process with core_id == 0 and is_parent > 0
3527 * the workers have is_parent = 0 and core_id = 1..NCORE-1
3530 { worker_pids
[0] = getpid(); /* for completeness */
3531 while (++core_id
< NCORE
) /* first worker sees core_id = 1 */
3532 { is_parent
= fork();
3533 if (is_parent
== -1)
3534 { Uerror("fork failed");
3536 if (is_parent
== 0) /* this is a worker process */
3537 { if (proxy_pid
== core_id
) /* always non-zero */
3538 { start_proxy("-r", 0); /* no return */
3540 goto adapt
; /* root process continues spawning */
3542 worker_pids
[core_id
] = is_parent
;
3544 /* note that core_id is now NCORE */
3545 if (proxy_pid
> 0 && proxy_pid
< NCORE
)
3546 { proxy_pid_snd
= fork();
3547 if (proxy_pid_snd
== -1)
3548 { Uerror("proxy fork failed");
3550 if (proxy_pid_snd
== 0)
3551 { start_proxy("-s", worker_pids
[proxy_pid
]); /* no return */
3552 } } /* else continue */
3554 { core_id
= 0; /* reset core_id for root process */
3557 { static char db0
[16]; /* good for up to 10^6 cores */
3558 static char db1
[16];
3559 adapt
: tprefix
= db0
; sprefix
= db1
;
3560 sprintf(tprefix
, "cpu%d_trail", core_id
);
3561 sprintf(sprefix
, "cpu%d_rst", core_id
);
3562 memcnt
= 0; /* count only additionally allocated memory */
3564 signal(SIGINT
, give_up
);
3566 if (proxy_pid
== 0) /* not in a cluster setup, pan_proxy must attach */
3567 { rm_shared_segments(); /* mark all shared segments for removal on exit */
3570 { cpu_printf("starting core_id %d -- pid %d\n", core_id
, getpid());
3572 #if defined(SEP_HEAP) && !defined(SEP_STATE)
3574 volatile sh_Allocater
*ptr
;
3576 for (i
= 0; i
< NCORE
&& ptr
!= NULL
; i
++)
3578 { my_heap
= (char *) ptr
->dc_arena
;
3579 my_size
= (long) ptr
->dc_size
;
3581 cpu_printf("local heap %ld MB\n", my_size
/(1048576));
3584 ptr
= ptr
->nxt
; /* local */
3586 if (my_heap
== NULL
)
3587 { printf("cpu%d: no local heap\n", core_id
);
3590 #if defined(CYGWIN) || defined(__CYGWIN__)
3592 for (i
= 0; i
< NCORE
&& ptr
!= NULL
; i
++)
3593 { ptr
= ptr
->nxt
; /* local */
3595 dc_shared
= ptr
; /* any remainder */
3597 dc_shared
= NULL
; /* used all mem for local heaps */
3601 if (core_id
== 0 && !remote_party
)
3602 { new_state(); /* cpu0 explores root */
3604 cpu_printf("done with 1st dfs, nstates %g (put %d states), read q\n",
3605 nstates
, nstates_put
);
3608 Read_Queue(core_id
); /* all cores */
3611 { cpu_printf("put %6d states into queue -- got %6d\n",
3612 nstates_put
, nstates_get
);
3615 { rm_shared_segments();
3623 int unpack_state(SM_frame
*, int);
3630 char *rval
= (char *) 0;
3633 { printf("cpu%d: grab shared zero\n", core_id
); fflush(stdout
);
3634 return (struct H_el
*) rval
;
3635 } else if (n
&(sizeof(void *)-1))
3636 { n
+= sizeof(void *)-(n
&(sizeof(void *)-1)); /* alignment */
3641 if (my_heap
!= NULL
&& my_size
> n
)
3650 { sudden_stop("pan: out of memory");
3653 /* another lock is always already in effect when this is called */
3654 /* but not always the same lock -- i.e., on different parts of the hashtable */
3655 enter_critical(GLOBAL_LOCK
); /* this must be independently mutex */
3656 #if defined(SEP_HEAP) && !defined(WIN32) && !defined(WIN64)
3657 { static int noted
= 0;
3660 printf("cpu%d: global heap has %ld bytes left, needed %d\n",
3661 core_id
, dc_shared
?dc_shared
->dc_size
:0, n
);
3665 if (dc_shared
->pattern
!= 1234567)
3666 { leave_critical(GLOBAL_LOCK
);
3667 Uerror("overrun -- memory corruption");
3670 if (dc_shared
->dc_size
< n
)
3672 { printf("Next Pool %g Mb + %d\n", memcnt
/(1048576.), n
);
3674 if (dc_shared
->nxt
== NULL
3675 || dc_shared
->nxt
->dc_arena
== NULL
3676 || dc_shared
->nxt
->dc_size
< n
)
3677 { printf("cpu%d: memcnt %g Mb + wanted %d bytes more\n",
3678 core_id
, memcnt
/ (1048576.), n
);
3679 leave_critical(GLOBAL_LOCK
);
3680 sudden_stop("out of memory -- aborting");
3681 wrapup(); /* exits */
3683 { dc_shared
= (sh_Allocater
*) dc_shared
->nxt
;
3686 rval
= (char *) dc_shared
->dc_arena
;
3687 dc_shared
->dc_arena
+= n
;
3688 dc_shared
->dc_size
-= (long) n
;
3691 printf("cpu%d grab shared (%d bytes) -- %ld left\n",
3692 core_id
, n
, dc_shared
->dc_size
);
3694 leave_critical(GLOBAL_LOCK
);
3697 memcnt
+= (double) n
;
3699 return (struct H_el
*) rval
;
3701 return (struct H_el
*) emalloc(n
);
/* Get_Full_Frame(n): fetch the next filled state-frame from local work
 * queue n, falling back to the global queue when one is available;
 * spins until a frame arrives or the Delay watchdog expires, in which
 * case it returns a null SM_frame pointer.
 * NOTE(review): elided lines in this extract; comments cover only the
 * visible logic.
 */
3706 Get_Full_Frame(int n
)
3708 double cnt_start
= frame_wait
;
3710 f
= &m_workq
[n
][prfull
[n
]];
/* m_vsize == 0 marks an empty slot; writer sets it last (see mem_put) */
3711 while (f
->m_vsize
== 0) /* await full slot LOCK : full frame */
3715 if (!a_cycles
|| core_id
!= 0)
3717 if (*grcnt
> 0) /* accessed outside lock, but safe even if wrong */
3718 { enter_critical(GQ_RD
); /* gq - read access */
3719 if (*grcnt
> 0) /* could have changed */
3720 { f
= &m_workq
[NCORE
][*grfull
]; /* global q */
3721 if (f
->m_vsize
== 0)
3722 { /* writer is still filling the slot */
3724 f
= &m_workq
[n
][prfull
[n
]]; /* reset */
/* consume the global slot: advance read index, decrement count
 * under the write lock */
3726 { *grfull
= (*grfull
+1) % (GN_FRAMES
);
3727 enter_critical(GQ_WR
);
3728 *grcnt
= *grcnt
- 1;
3729 leave_critical(GQ_WR
);
3730 leave_critical(GQ_RD
);
3733 leave_critical(GQ_RD
);
/* watchdog: give up after Delay iterations of empty polling */
3736 if (frame_wait
++ - cnt_start
> Delay
)
3738 { cpu_printf("timeout on q%d -- %u -- query %d\n",
3739 n
, f
, query_in_progress
);
3741 return (SM_frame
*) 0; /* timeout */
3744 if (VVERBOSE
) cpu_printf("got frame from q%d\n", n
);
/* consume the local slot and decrement its counter under QLOCK */
3745 prfull
[n
] = (prfull
[n
] + 1) % (LN_FRAMES
);
3746 enter_critical(QLOCK(n
));
3747 prcnt
[n
]--; /* lock out increments */
3748 leave_critical(QLOCK(n
));
/* Get_Free_Frame(n): obtain an empty slot in work queue n for writing a
 * state.  For the global queue (n == NCORE) the slot index lrfree was
 * pre-reserved by GlobalQ_HasRoom; for a local queue it spins on the
 * prfree index until the reader empties a slot.
 * NOTE(review): elided lines in this extract; comments cover only the
 * visible logic.
 */
3753 Get_Free_Frame(int n
)
3755 double cnt_start
= free_wait
;
3757 if (VVERBOSE
) { cpu_printf("get free frame from q%d\n", n
); }
3759 if (n
== NCORE
) /* global q */
3760 { f
= &(m_workq
[n
][lrfree
]);
3762 { f
= &(m_workq
[n
][prfree
[n
]]);
/* a non-zero m_vsize means the reader has not drained this slot yet */
3764 while (f
->m_vsize
!= 0) /* await free slot LOCK : free slot */
3766 if (free_wait
++ - cnt_start
> OneSecond
)
3768 { cpu_printf("timeout waiting for free slot q%d\n", n
);
3770 cnt_start
= free_wait
;
/* while stalled, check whether a peer died so we can stop cleanly */
3771 if (someone_crashed(1))
3772 { printf("cpu%d: search terminated\n", core_id
);
3773 sudden_stop("get free frame");
/* local-queue case: claim the slot, bump the fill count under QLOCK
 * and track the high-water mark prmax */
3777 { prfree
[n
] = (prfree
[n
] + 1) % (LN_FRAMES
);
3778 enter_critical(QLOCK(n
));
3779 prcnt
[n
]++; /* lock out decrements */
3780 if (prmax
[n
] < prcnt
[n
])
3781 { prmax
[n
] = prcnt
[n
];
3783 leave_critical(QLOCK(n
));
/* GlobalQ_HasRoom(): try to reserve one slot in the global work queue
 * (index NCORE) and hand the current state off into it via mem_put.
 * Returns non-zero (rval = 1) on a successful handoff; leaves the
 * reservation index in lrfree for Get_Free_Frame.
 * NOTE(review): elided lines in this extract; comments cover only the
 * visible logic.
 */
3789 GlobalQ_HasRoom(void)
/* cheap unlocked pre-check, then re-check under the GQ write lock */
3793 if (*grcnt
< GN_FRAMES
) /* there seems to be room */
3794 { enter_critical(GQ_WR
); /* gq write access */
3795 if (*grcnt
< GN_FRAMES
)
3796 { if (m_workq
[NCORE
][*grfree
].m_vsize
!= 0)
3797 { /* can happen if reader is slow emptying slot */
3799 goto out
; /* dont wait: release lock and return */
3801 lrfree
= *grfree
; /* Get_Free_Frame use lrfree in this mode */
/* advance the write index and fill count while still holding GQ_WR */
3802 *grfree
= (*grfree
+ 1) % GN_FRAMES
;
3803 *grcnt
= *grcnt
+ 1; /* count nr of slots filled -- no additional lock needed */
3804 if (*grmax
< *grcnt
) *grmax
= *grcnt
;
3805 leave_critical(GQ_WR
); /* for short lock duration */
/* copy happens outside the lock; slot was already reserved above */
3807 mem_put(NCORE
); /* copy state into reserved slot */
3808 rval
= 1; /* successfull handoff */
3811 out
: leave_critical(GQ_WR
);
3818 unpack_state(SM_frame
*f
, int from_q
)
3820 static struct H_el D_State
;
3825 { cpu_printf("saw control %d, expected state\n", boq
);
3830 memcpy((uchar
*) &now
, (uchar
*) f
->m_now
, vsize
);
3831 for (i
= j
= 0; i
< VMAX
; i
++, j
= (j
+1)%8)
3832 { Mask
[i
] = (f
->m_Mask
[i
/8] & (1<<j
)) ? 1 : 0;
3835 { memcpy((uchar
*) proc_offset
, (uchar
*) f
->m_p_offset
, now
._nr_pr
* sizeof(OFFT
));
3836 memcpy((uchar
*) proc_skip
, (uchar
*) f
->m_p_skip
, now
._nr_pr
* sizeof(uchar
));
3839 { memcpy((uchar
*) q_offset
, (uchar
*) f
->m_q_offset
, now
._nr_qs
* sizeof(OFFT
));
3840 memcpy((uchar
*) q_skip
, (uchar
*) f
->m_q_skip
, now
._nr_qs
* sizeof(uchar
));
3843 if (vsize
!= now
._vsz
)
3844 { cpu_printf("vsize %d != now._vsz %d (type %d) %d\n",
3845 vsize
, now
._vsz
, f
->m_boq
, f
->m_vsize
);
3847 goto correct
; /* rare event: a race */
3850 hmax
= max(hmax
, vsize
);
3853 { memcpy((uchar
*) &cur_Root
, (uchar
*) f
, sizeof(SM_frame
));
3856 if (((now
._a_t
) & 1) == 1) /* i.e., when starting nested DFS */
3857 { A_depth
= depthfound
= 0;
3858 memcpy((uchar
*)&A_Root
, (uchar
*)&now
, vsize
);
3860 nr_handoffs
= f
->nr_handoffs
;
3862 { cpu_printf("pan: state empty\n");
3867 trpt
->tau
= f
->m_tau
;
3868 trpt
->o_pm
= f
->m_o_pm
;
3870 (trpt
-1)->ostate
= &D_State
; /* stub */
3871 trpt
->ostate
= &D_State
;
3875 { stack_last
[core_id
] = (Stack_Tree
*) f
->m_stack
;
3877 #if defined(VERBOSE)
3878 if (stack_last
[core_id
])
3879 { cpu_printf("%d: UNPACK -- SET m_stack %u (%d,%d)\n",
3880 depth
, stack_last
[core_id
], stack_last
[core_id
]->pr
,
3881 stack_last
[core_id
]->t_id
);
3887 { static Trans D_Trans
;
3888 trpt
->o_t
= &D_Trans
;
3892 if ((trpt
->tau
& 4) != 4)
3893 { trpt
->tau
|= 4; /* the claim moves first */
3894 cpu_printf("warning: trpt was not up to date\n");
3898 for (i
= 0; i
< (int) now
._nr_pr
; i
++)
3899 { P0
*ptr
= (P0
*) pptr(i
);
3901 if (accpstate
[ptr
->_t
][ptr
->_p
])
3905 if (progstate
[ptr
->_t
][ptr
->_p
])
3913 if (accpstate
[EVENT_TRACE
][now
._event
])
3917 if (progstate
[EVENT_TRACE
][now
._event
])
3923 #if defined(C_States) && (HAS_TRACK==1)
3924 /* restore state of tracked C objects */
3925 c_revert((uchar
*) &(now
.c_state
[0]));
3927 c_unstack((uchar
*) f
->m_c_stack
); /* unmatched tracked data */
/* write_root(): persist cur_Root (the SM_frame from which the current
 * error trail starts) to a ".rst" companion file of the trail, so a
 * later guided run can restore the same root state.  If cur_Root is
 * empty the file is removed instead (default initial state).
 * NOTE(review): elided lines in this extract; comments cover only the
 * visible logic.
 */
3934 write_root(void) /* for trail file */
/* numbered trail name when multiple trails exist, plain name otherwise */
3937 if (iterative
== 0 && Nr_Trails
> 1)
3938 sprintf(fnm
, "%s%d.%s", TrailFile
, Nr_Trails
-1, sprefix
);
3940 sprintf(fnm
, "%s.%s", TrailFile
, sprefix
);
3942 if (cur_Root
.m_vsize
== 0)
3943 { (void) unlink(fnm
); /* remove possible old copy */
3944 return; /* its the default initial state */
3947 if ((fd
= creat(fnm
, TMODE
)) < 0)
/* creat failed: retry with the ".pml" suffix stripped from the base name */
3949 if ((q
= strchr(TrailFile
, '.')))
3950 { *q
= '\0'; /* strip .pml */
3951 if (iterative
== 0 && Nr_Trails
-1 > 0)
3952 sprintf(fnm
, "%s%d.%s", TrailFile
, Nr_Trails
-1, sprefix
);
3954 sprintf(fnm
, "%s.%s", TrailFile
, sprefix
);
3956 fd
= creat(fnm
, TMODE
);
3959 { cpu_printf("pan: cannot create %s\n", fnm
);
/* the whole SM_frame is written raw; short write is treated as error */
3964 if (write(fd
, &cur_Root
, sizeof(SM_frame
)) != sizeof(SM_frame
))
3965 { cpu_printf("pan: error writing %s\n", fnm
);
3967 { cpu_printf("pan: wrote %s\n", fnm
);
3978 char *ssuffix
= "rst";
3981 strcpy(MyFile
, TrailFile
);
3984 { sprintf(fnm
, "%s%d.%s", MyFile
, whichtrail
, ssuffix
);
3985 fd
= open(fnm
, O_RDONLY
, 0);
3986 if (fd
< 0 && (q
= strchr(MyFile
, '.')))
3987 { *q
= '\0'; /* strip .pml */
3988 sprintf(fnm
, "%s%d.%s", MyFile
, whichtrail
, ssuffix
);
3990 fd
= open(fnm
, O_RDONLY
, 0);
3993 { sprintf(fnm
, "%s.%s", MyFile
, ssuffix
);
3994 fd
= open(fnm
, O_RDONLY
, 0);
3995 if (fd
< 0 && (q
= strchr(MyFile
, '.')))
3996 { *q
= '\0'; /* strip .pml */
3997 sprintf(fnm
, "%s.%s", MyFile
, ssuffix
);
3999 fd
= open(fnm
, O_RDONLY
, 0);
4003 { if (try_core
< NCORE
)
4004 { ssuffix
= MySuffix
;
4005 sprintf(ssuffix
, "cpu%d_rst", try_core
++);
4008 cpu_printf("no file '%s.rst' or '%s' (not an error)\n", MyFile
, fnm
);
4010 { if (read(fd
, &cur_Root
, sizeof(SM_frame
)) != sizeof(SM_frame
))
4011 { cpu_printf("read error %s\n", fnm
);
4016 (void) unpack_state(&cur_Root
, -2);
4018 cpu_printf("partial trail -- last few steps only\n");
4020 cpu_printf("restored root from '%s'\n", fnm
);
4021 printf("=====State:=====\n");
4023 for (i
= 0; i
< now
._nr_pr
; i
++)
4024 { z
= (P0
*)pptr(i
);
4025 printf("proc %2d (%s) ", i
, procname
[z
->_t
]);
4026 for (j
= 0; src_all
[j
].src
; j
++)
4027 if (src_all
[j
].tp
== (int) z
->_t
)
4028 { printf(" line %3d \"%s\" ",
4029 src_all
[j
].src
[z
->_p
], PanSource
);
4032 printf("(state %d)\n", z
->_p
);
4037 printf("================\n");
4042 unsigned long dsk_written
, dsk_drained
;
4043 void mem_drain(void);
/* m_clear_frame(f): zero the leading portion of frame f that is reused
 * as an SM_results record, including the per-proctype state-count
 * tables appended after it (NrStates[0.._NP_] bytes each).
 */
4047 m_clear_frame(SM_frame
*f
)
4048 { int i
, clr_sz
= sizeof(SM_results
);
4050 for (i
= 0; i
<= _NP_
; i
++) /* all proctypes */
4051 { clr_sz
+= NrStates
[i
]*sizeof(uchar
);
4053 memset(f
, 0, clr_sz
);
4054 /* caution if sizeof(SM_results) > sizeof(SM_frame) */
4057 #define TargetQ_Full(n) (m_workq[n][prfree[n]].m_vsize != 0)
4058 #define TargetQ_NotFull(n) (m_workq[n][prfree[n]].m_vsize == 0)
/* AllQueuesEmpty(): true only when every core's local work queue has a
 * zero fill count (prcnt[q] == 0 for all q) — used by the root cpu as
 * the trigger for starting a termination Query round.
 * NOTE(review): elided lines in this extract (return paths not shown).
 */
4061 AllQueuesEmpty(void)
4068 for (q
= 0; q
< NCORE
; q
++)
4069 { if (prcnt
[q
] != 0)
4078 int remember
, target_q
;
4080 double patience
= 0.0;
4082 target_q
= (q
+ 1) % NCORE
;
4085 { f
= Get_Full_Frame(q
);
4086 if (!f
) /* 1 second timeout -- and trigger for Query */
4087 { if (someone_crashed(2))
4088 { printf("cpu%d: search terminated [code %d]\n",
4089 core_id
, search_terminated
?*search_terminated
:-1);
4094 /* to profile with cc -pg and gprof pan.exe -- set handoff depth beyond maxdepth */
4098 if (core_id
== 0 /* root can initiate termination */
4099 && remote_party
== 0 /* and only the original root */
4100 && query_in_progress
== 0 /* unless its already in progress */
4101 && AllQueuesEmpty())
4102 { f
= Get_Free_Frame(target_q
);
4103 query_in_progress
= 1; /* only root process can do this */
4104 if (!f
) { Uerror("Fatal1: no free slot"); }
4105 f
->m_boq
= QUERY
; /* initiate Query */
4107 { cpu_printf("snd QUERY to q%d (%d) into slot %d\n",
4108 target_q
, nstates_get
+ 1, prfree
[target_q
]-1);
4110 f
->m_vsize
= remember
+ 1;
4111 /* number will not change unless we receive more states */
4112 } else if (patience
++ > OneHour
) /* one hour watchdog timer */
4113 { cpu_printf("timeout -- giving up\n");
4114 sudden_stop("queue timeout");
4117 if (0) cpu_printf("timed out -- try again\n");
4120 patience
= 0.0; /* reset watchdog */
4122 if (f
->m_boq
== QUERY
)
4124 { cpu_printf("got QUERY on q%d (%d <> %d) from slot %d\n",
4125 q
, f
->m_vsize
, nstates_put
+ 1, prfull
[q
]-1);
4128 remember
= f
->m_vsize
;
4129 f
->m_vsize
= 0; /* release slot */
4131 if (core_id
== 0 && remote_party
== 0) /* original root cpu0 */
4132 { if (query_in_progress
== 1 /* didn't send more states in the interim */
4133 && *grfree
+ 1 == remember
) /* no action on global queue meanwhile */
4134 { if (verbose
) cpu_printf("Termination detected\n");
4135 if (TargetQ_Full(target_q
))
4137 cpu_printf("warning: target q is full\n");
4139 f
= Get_Free_Frame(target_q
);
4140 if (!f
) { Uerror("Fatal2: no free slot"); }
4142 f
->m_boq
= QUIT
; /* send final Quit, collect stats */
4143 f
->m_vsize
= 111; /* anything non-zero will do */
4145 cpu_printf("put QUIT on q%d\n", target_q
);
4147 { if (verbose
) cpu_printf("Stale Query\n");
4152 query_in_progress
= 0;
4154 { if (TargetQ_Full(target_q
))
4156 cpu_printf("warning: forward query - target q full\n");
4158 f
= Get_Free_Frame(target_q
);
4160 cpu_printf("snd QUERY response to q%d (%d <> %d) in slot %d\n",
4161 target_q
, remember
, *grfree
+ 1, prfree
[target_q
]-1);
4162 if (!f
) { Uerror("Fatal4: no free slot"); }
4164 if (*grfree
+ 1 == remember
) /* no action on global queue */
4165 { f
->m_boq
= QUERY
; /* forward query, to root */
4166 f
->m_vsize
= remember
;
4168 { f
->m_boq
= QUERY_F
; /* no match -- busy */
4169 f
->m_vsize
= 112; /* anything non-zero */
4171 if (dsk_written
!= dsk_drained
)
4179 if (f
->m_boq
== QUERY_F
)
4181 { cpu_printf("got QUERY_F on q%d from slot %d\n", q
, prfull
[q
]-1);
4183 f
->m_vsize
= 0; /* release slot */
4185 if (core_id
== 0 && remote_party
== 0) /* original root cpu0 */
4186 { if (verbose
) cpu_printf("No Match on Query\n");
4187 query_in_progress
= 0;
4189 { if (TargetQ_Full(target_q
))
4190 { if (verbose
) cpu_printf("warning: forwarding query_f, target queue full\n");
4192 f
= Get_Free_Frame(target_q
);
4193 if (verbose
) cpu_printf("forward QUERY_F to q%d into slot %d\n",
4194 target_q
, prfree
[target_q
]-1);
4195 if (!f
) { Uerror("Fatal5: no free slot"); }
4196 f
->m_boq
= QUERY_F
; /* cannot terminate yet */
4197 f
->m_vsize
= 113; /* anything non-zero */
4200 if (dsk_written
!= dsk_drained
)
4207 if (f
->m_boq
== QUIT
)
4208 { if (0) cpu_printf("done -- local memcnt %g Mb\n", memcnt
/(1048576.));
4209 retrieve_info((SM_results
*) f
); /* collect and combine stats */
4211 { cpu_printf("received Quit\n");
4214 f
->m_vsize
= 0; /* release incoming slot */
4216 { f
= Get_Free_Frame(target_q
); /* new outgoing slot */
4217 if (!f
) { Uerror("Fatal6: no free slot"); }
4218 m_clear_frame(f
); /* start with zeroed stats */
4219 record_info((SM_results
*) f
);
4220 f
->m_boq
= QUIT
; /* forward combined results */
4221 f
->m_vsize
= 114; /* anything non-zero */
4223 cpu_printf("fwd Results to q%d\n", target_q
);
4225 break; /* successful termination */
4228 /* else: 0<= boq <= 255, means STATE transfer */
4229 if (unpack_state(f
, q
) != 0)
4231 f
->m_vsize
= 0; /* release slot */
4232 if (VVERBOSE
) cpu_printf("Got state\n");
4234 if (search_terminated
!= NULL
4235 && *search_terminated
== 0)
4236 { new_state(); /* explore successors */
4237 memset((uchar
*) &cur_Root
, 0, sizeof(SM_frame
)); /* avoid confusion */
4244 if (verbose
) cpu_printf("done got %d put %d\n", nstates_get
, nstates_put
);
/* give_up(unused_x): SIGINT handler — sets flag bit 32 in the shared
 * termination word so all cores stop, and records the interruption;
 * a second interrupt while already terminating only logs "SIGINT".
 * NOTE(review): elided lines in this extract; comments cover only the
 * visible logic.
 */
4249 give_up(int unused_x
)
4251 if (search_terminated
!= NULL
)
4252 { *search_terminated
|= 32; /* give_up */
4255 { was_interrupted
= 1;
4257 cpu_printf("Give Up\n");
4260 } else /* we are already terminating */
4261 { cpu_printf("SIGINT\n");
/* check_overkill(): after a run, round the observed max vector size
 * (vmax_seen) up to a multiple of 8 and, when the compiled-in VMAX is
 * more than 8 bytes larger than needed, advise recompiling with the
 * smaller -DVMAX value to shrink frame sizes.
 * NOTE(review): elided lines in this extract; comments cover only the
 * visible logic.
 */
4266 check_overkill(void)
4268 vmax_seen
= (vmax_seen
+ 7)/ 8;
4269 vmax_seen
*= 8; /* round up to a multiple of 8 */
4274 && VMAX
- vmax_seen
> 8)
4277 printf("cpu0: max VMAX value seen in this run: ");
4279 printf("cpu0: recommend recompiling with ");
4281 printf("-DVMAX=%d\n", vmax_seen
);
4286 mem_put(int q
) /* handoff state to other cpu, workq q */
4291 { vsize
= (vsize
+ 7)/8; vsize
*= 8; /* round up */
4292 printf("pan: recompile with -DVMAX=N with N >= %d\n", vsize
);
4295 if (now
._nr_pr
> PMAX
)
4296 { printf("pan: recompile with -DPMAX=N with N >= %d\n", now
._nr_pr
);
4299 if (now
._nr_qs
> QMAX
)
4300 { printf("pan: recompile with -DQMAX=N with N >= %d\n", now
._nr_qs
);
4303 if (vsize
> vmax_seen
) vmax_seen
= vsize
;
4304 if (now
._nr_pr
> pmax_seen
) pmax_seen
= now
._nr_pr
;
4305 if (now
._nr_qs
> qmax_seen
) qmax_seen
= now
._nr_qs
;
4307 f
= Get_Free_Frame(q
); /* not called in likely deadlock states */
4308 if (!f
) { Uerror("Fatal3: no free slot"); }
4310 if (VVERBOSE
) cpu_printf("putting state into q%d\n", q
);
4312 memcpy((uchar
*) f
->m_now
, (uchar
*) &now
, vsize
);
4313 memset((uchar
*) f
->m_Mask
, 0, (VMAX
+7)/8 * sizeof(char));
4314 for (i
= j
= 0; i
< VMAX
; i
++, j
= (j
+1)%8)
4316 { f
->m_Mask
[i
/8] |= (1<<j
);
4320 { memcpy((uchar
*) f
->m_p_offset
, (uchar
*) proc_offset
, now
._nr_pr
* sizeof(OFFT
));
4321 memcpy((uchar
*) f
->m_p_skip
, (uchar
*) proc_skip
, now
._nr_pr
* sizeof(uchar
));
4324 { memcpy((uchar
*) f
->m_q_offset
, (uchar
*) q_offset
, now
._nr_qs
* sizeof(OFFT
));
4325 memcpy((uchar
*) f
->m_q_skip
, (uchar
*) q_skip
, now
._nr_qs
* sizeof(uchar
));
4327 #if defined(C_States) && (HAS_TRACK==1) && (HAS_STACK==1)
4328 c_stack((uchar
*) f
->m_c_stack
); /* save unmatched tracked data */
4331 f
->m_stack
= stack_last
[core_id
];
4333 f
->nr_handoffs
= nr_handoffs
+1;
4334 f
->m_tau
= trpt
->tau
;
4335 f
->m_o_pm
= trpt
->o_pm
;
4337 f
->m_vsize
= vsize
; /* must come last - now the other cpu can see it */
4339 if (query_in_progress
== 1)
4340 query_in_progress
= 2; /* make sure we know, if a query makes the rounds */
4345 int Dsk_W_Nr
, Dsk_R_Nr
;
4346 int dsk_file
= -1, dsk_read
= -1;
4347 unsigned long dsk_written
, dsk_drained
;
4351 #if defined(WIN32) || defined(WIN64)
4352 #define RFLAGS (O_RDONLY|O_BINARY)
4353 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC|O_BINARY)
4355 #define RFLAGS (O_RDONLY)
4356 #define WFLAGS (O_CREAT|O_WRONLY|O_TRUNC)
4364 if (dsk_written
> 0)
4365 { cpu_printf("dsk_written %d states in %d files\ncpu%d: dsk_drained %6d states\n",
4366 dsk_written
, Dsk_W_Nr
, core_id
, dsk_drained
);
4369 for (i
= 0; i
< Dsk_W_Nr
; i
++)
4370 { sprintf(dsk_name
, "Q%.3d_%.3d.tmp", i
, core_id
);
4378 int q
= (core_id
+ 1) % NCORE
; /* target q */
4382 || dsk_written
<= dsk_drained
)
4386 while (dsk_written
> dsk_drained
4387 && TargetQ_NotFull(q
))
4388 { f
= Get_Free_Frame(q
);
4389 if (!f
) { Uerror("Fatal: unhandled condition"); }
4391 if ((dsk_drained
+1)%MAX_DSK_FILE
== 0) /* 100K states max per file */
4392 { (void) close(dsk_read
); /* close current read handle */
4393 sprintf(dsk_name
, "Q%.3d_%.3d.tmp", Dsk_R_Nr
++, core_id
);
4394 (void) unlink(dsk_name
); /* remove current file */
4395 sprintf(dsk_name
, "Q%.3d_%.3d.tmp", Dsk_R_Nr
, core_id
);
4396 cpu_printf("reading %s\n", dsk_name
);
4397 dsk_read
= open(dsk_name
, RFLAGS
); /* open next file */
4399 { Uerror("could not open dsk file");
4401 if (read(dsk_read
, &g
, sizeof(SM_frame
)) != sizeof(SM_frame
))
4402 { Uerror("bad dsk file read");
4406 memcpy(f
, &g
, sizeof(SM_frame
));
4407 f
->m_vsize
= sz
; /* last */
4416 int i
, j
, q
= (core_id
+ 1) % NCORE
; /* target q */
4419 { printf("pan: recompile with -DVMAX=N with N >= %d\n", vsize
);
4422 if (now
._nr_pr
> PMAX
)
4423 { printf("pan: recompile with -DPMAX=N with N >= %d\n", now
._nr_pr
);
4426 if (now
._nr_qs
> QMAX
)
4427 { printf("pan: recompile with -DQMAX=N with N >= %d\n", now
._nr_qs
);
4431 if (VVERBOSE
) cpu_printf("filing state for q%d\n", q
);
4433 memcpy((uchar
*) f
.m_now
, (uchar
*) &now
, vsize
);
4434 memset((uchar
*) f
.m_Mask
, 0, (VMAX
+7)/8 * sizeof(char));
4435 for (i
= j
= 0; i
< VMAX
; i
++, j
= (j
+1)%8)
4437 { f
.m_Mask
[i
/8] |= (1<<j
);
4441 { memcpy((uchar
*)f
.m_p_offset
, (uchar
*)proc_offset
, now
._nr_pr
*sizeof(OFFT
));
4442 memcpy((uchar
*)f
.m_p_skip
, (uchar
*)proc_skip
, now
._nr_pr
*sizeof(uchar
));
4445 { memcpy((uchar
*) f
.m_q_offset
, (uchar
*) q_offset
, now
._nr_qs
*sizeof(OFFT
));
4446 memcpy((uchar
*) f
.m_q_skip
, (uchar
*) q_skip
, now
._nr_qs
*sizeof(uchar
));
4448 #if defined(C_States) && (HAS_TRACK==1) && (HAS_STACK==1)
4449 c_stack((uchar
*) f
.m_c_stack
); /* save unmatched tracked data */
4452 f
.m_stack
= stack_last
[core_id
];
4454 f
.nr_handoffs
= nr_handoffs
+1;
4455 f
.m_tau
= trpt
->tau
;
4456 f
.m_o_pm
= trpt
->o_pm
;
4460 if (query_in_progress
== 1)
4461 { query_in_progress
= 2;
4464 { sprintf(dsk_name
, "Q%.3d_%.3d.tmp", Dsk_W_Nr
, core_id
);
4465 dsk_file
= open(dsk_name
, WFLAGS
, 0644);
4466 dsk_read
= open(dsk_name
, RFLAGS
);
4467 if (dsk_file
< 0 || dsk_read
< 0)
4468 { cpu_printf("File: <%s>\n", dsk_name
);
4469 Uerror("cannot open diskfile");
4471 Dsk_W_Nr
++; /* nr of next file to open */
4472 cpu_printf("created temporary diskfile %s\n", dsk_name
);
4473 } else if ((dsk_written
+1)%MAX_DSK_FILE
== 0)
4474 { close(dsk_file
); /* close write handle */
4475 sprintf(dsk_name
, "Q%.3d_%.3d.tmp", Dsk_W_Nr
++, core_id
);
4476 dsk_file
= open(dsk_name
, WFLAGS
, 0644);
4478 { cpu_printf("File: <%s>\n", dsk_name
);
4479 Uerror("aborting: cannot open new diskfile");
4481 cpu_printf("created temporary diskfile %s\n", dsk_name
);
4483 if (write(dsk_file
, &f
, sizeof(SM_frame
)) != sizeof(SM_frame
))
4484 { Uerror("aborting -- disk write failed (disk full?)");
4494 if (search_terminated
== NULL
4495 || *search_terminated
!= 0) /* not a full crash check */
4498 iam_alive(); /* on every transition of Down */
4500 mem_drain(); /* maybe call this also on every Up */
4502 if (depth
> z_handoff
/* above handoff limit */
4504 && !a_cycles
/* not in liveness mode */
4507 && boq
== -1 /* not mid-rv */
4510 && (trpt
->tau
&4) /* claim moves first */
4511 && !((trpt
-1)->tau
&128) /* not a stutter move */
4513 && !(trpt
->tau
&8)) /* not an atomic move */
4514 { int q
= (core_id
+ 1) % NCORE
; /* circular handoff */
4516 if (prcnt
[q
] < LN_FRAMES
)
4518 if (TargetQ_NotFull(q
)
4519 && (dfs_phase2
== 0 || prcnt
[core_id
] > 0))
4526 rval
= GlobalQ_HasRoom();
4532 { void mem_file(void);
4540 return 0; /* i.e., no handoff */
/* NOTE(review): the leading numerals (4544 etc.) are line numbers from the
 * original generated pan.c fused into the text by extraction; several
 * intervening source lines are missing from this chunk, so the fragment
 * below is not complete C.
 */
/* mem_put_acc: hand off the current state to the next core's work queue
 * while running in liveness (acceptance-cycle) mode.  Target queue is
 * chosen circularly: q = (core_id + 1) % NCORE.
 * Bails out early when search_terminated is unset or signals termination.
 * The NGQ/USE_DISK preprocessing below selects between per-core queues,
 * a global queue, and disk overflow -- exact fallback order not fully
 * visible here (lines missing); confirm against the full pan.c.
 */
4544 mem_put_acc(void) /* liveness mode */
4545 { int q
= (core_id
+ 1) % NCORE
;
/* abort the handoff if the shared termination flag is gone or raised */
4547 if (search_terminated
== NULL
4548 || *search_terminated
!= 0)
4554 /* some tortured use of preprocessing: */
4555 #if !defined(NGQ) || defined(USE_DISK)
/* local target queue full -- presumably fall through to global q / disk;
 * the branch bodies are missing from this chunk (TODO confirm) */
4556 if (TargetQ_Full(q
))
4560 if (GlobalQ_HasRoom())
4568 #if !defined(NGQ) || defined(USE_DISK)
4576 #if defined(WIN32) || defined(WIN64)
4578 init_shm(void) /* initialize shared work-queues */
4583 if (core_id
== 0 && verbose
)
4584 { printf("cpu0: step 3: allocate shared work-queues %g Mb\n",
4585 ((double) NCORE
* LWQ_SIZE
+ GWQ_SIZE
) / (1048576.));
4587 for (m
= 0; m
< NR_QS
; m
++) /* last q is global 1 */
4588 { double qsize
= (m
== NCORE
) ? GWQ_SIZE
: LWQ_SIZE
;
4589 sprintf(key
, "Global\\pan_%s_%.3d", PanSource
, m
);
4591 { shmid
[m
] = CreateFileMapping(
4592 INVALID_HANDLE_VALUE
, /* use paging file */
4593 NULL
, /* default security */
4594 PAGE_READWRITE
, /* access permissions */
4595 0, /* high-order 4 bytes */
4596 qsize
, /* low-order bytes, size in bytes */
4598 } else /* worker nodes just open these segments */
4599 { shmid
[m
] = OpenFileMapping(
4600 FILE_MAP_ALL_ACCESS
, /* read/write access */
4601 FALSE
, /* children do not inherit handle */
4604 if (shmid
[m
] == NULL
)
4605 { fprintf(stderr
, "cpu%d: could not create or open shared queues\n",
4611 shared_mem
[m
] = (char *) MapViewOfFile(shmid
[m
], FILE_MAP_ALL_ACCESS
, 0, 0, 0);
4612 if (shared_mem
[m
] == NULL
)
4613 { fprintf(stderr
, "cpu%d: cannot attach shared q%d (%d Mb)\n",
4614 core_id
, m
+1, (int) (qsize
/(1048576.)));
4621 m_workq
[m
] = (SM_frame
*) shared_mem
[m
];
4623 { int nframes
= (m
== NCORE
) ? GN_FRAMES
: LN_FRAMES
;
4624 for (n
= 0; n
< nframes
; n
++)
4625 { m_workq
[m
][n
].m_vsize
= 0;
4626 m_workq
[m
][n
].m_boq
= 0;
4630 { fprintf(stderr
, "pan: check './pan --' for usage details\n");
4631 pan_exit(1); /* calls cleanup_shm */
4636 prep_shmid_S(size_t n
) /* either sets SS or H_tab, WIN32/WIN64 */
4641 if (verbose
&& core_id
== 0)
4644 printf("cpu0: step 1: allocate shared bitstate %g Mb\n",
4645 (double) n
/ (1048576.));
4647 printf("cpu0: step 1: allocate shared hastable %g Mb\n",
4648 (double) n
/ (1048576.));
4652 if (memcnt
+ (double) n
> memlim
)
4653 { printf("cpu%d: S %8g + %d Kb exceeds memory limit of %8g Mb\n",
4654 core_id
, memcnt
/1024., n
/1024, memlim
/(1048576.));
4655 printf("cpu%d: insufficient memory -- aborting\n", core_id
);
4660 /* make key different from queues: */
4661 sprintf(key
, "Global\\pan_%s_%.3d", PanSource
, NCORE
+2); /* different from qs */
4663 if (core_id
== 0) /* root */
4664 { shmid_S
= CreateFileMapping(INVALID_HANDLE_VALUE
, NULL
,
4666 PAGE_READWRITE
, (n
>>32), (n
& 0xffffffff), key
);
4668 PAGE_READWRITE
, 0, n
, key
);
4670 memcnt
+= (double) n
;
4672 { shmid_S
= OpenFileMapping(FILE_MAP_ALL_ACCESS
, FALSE
, key
);
4674 if (shmid_S
== NULL
)
4677 fprintf(stderr
, "cpu%d: cannot %s shared bitstate",
4678 core_id
, core_id
?"open":"create");
4680 fprintf(stderr
, "cpu%d: cannot %s shared hashtable",
4681 core_id
, core_id
?"open":"create");
4683 fprintf(stderr
, "pan: check './pan --' for usage details\n");
4687 rval
= (char *) MapViewOfFile(shmid_S
, FILE_MAP_ALL_ACCESS
, 0, 0, 0); /* attach */
4688 if ((char *) rval
== NULL
)
4689 { fprintf(stderr
, "cpu%d: cannot attach shared bitstate or hashtable\n", core_id
);
4690 fprintf(stderr
, "pan: check './pan --' for usage details\n");
4694 rval
= (char *) emalloc(n
);
4696 return (uchar
*) rval
;
4700 prep_state_mem(size_t n
) /* WIN32/WIN64 sets memory arena for states */
4703 static int cnt
= 3; /* start larger than earlier ftok calls */
4705 if (verbose
&& core_id
== 0)
4706 { printf("cpu0: step 2+: pre-allocate memory arena %d of %g Mb\n",
4707 cnt
-3, (double) n
/ (1048576.));
4710 if (memcnt
+ (double) n
> memlim
)
4711 { printf("cpu%d: error: M %.0f + %.0f exceeds memory limit of %.0f Kb\n",
4712 core_id
, memcnt
/1024.0, (double) n
/1024.0, memlim
/1024.0);
4717 sprintf(key
, "Global\\pan_%s_%.3d", PanSource
, NCORE
+cnt
); cnt
++;
4720 { shmid_M
= CreateFileMapping(INVALID_HANDLE_VALUE
, NULL
,
4722 PAGE_READWRITE
, (n
>>32), (n
& 0xffffffff), key
);
4724 PAGE_READWRITE
, 0, n
, key
);
4727 { shmid_M
= OpenFileMapping(FILE_MAP_ALL_ACCESS
, FALSE
, key
);
4729 if (shmid_M
== NULL
)
4730 { printf("cpu%d: failed to get pool of shared memory nr %d of size %d\n",
4732 printf("pan: check './pan --' for usage details\n");
4735 rval
= (char *) MapViewOfFile(shmid_M
, FILE_MAP_ALL_ACCESS
, 0, 0, 0); /* attach */
4738 { printf("cpu%d: failed to attach pool of shared memory nr %d of size %d\n",
4742 return (uchar
*) rval
;
4746 init_HT(unsigned long n
) /* WIN32/WIN64 version */
4752 if (verbose
) printf("cpu%d: initialization for Windows\n", core_id
);
4757 { printf("cpu0: steps 0,1: no -DMEMLIM set\n");
4761 printf("cpu0: steps 0,1: -DMEMLIM=%d Mb - (hashtable %g Mb + workqueues %g Mb)\n",
4762 MEMLIM
, ((double)n
/(1048576.)), ((double) NCORE
* LWQ_SIZE
+ GWQ_SIZE
)/(1048576.));
4764 get_mem
= NCORE
* sizeof(double) + (1 + CS_NR
) * sizeof(void *)+ 4*sizeof(void *) + 2*sizeof(double);
4765 /* NCORE * is_alive + search_terminated + CS_NR * sh_lock + 6 gr vars */
4766 get_mem
+= 4 * NCORE
* sizeof(void *);
4768 get_mem
+= (NCORE
) * sizeof(Stack_Tree
*);
4769 /* NCORE * stack_last */
4771 x
= (volatile char *) prep_state_mem((size_t) get_mem
);
4772 shmid_X
= (void *) x
;
4774 { printf("cpu0: could not allocate shared memory, see ./pan --\n");
4777 search_terminated
= (volatile unsigned int *) x
; /* comes first */
4778 x
+= sizeof(void *); /* maintain alignment */
4780 is_alive
= (volatile double *) x
;
4781 x
+= NCORE
* sizeof(double);
4783 sh_lock
= (volatile int *) x
;
4784 x
+= CS_NR
* sizeof(void *); /* allow 1 word per entry */
4786 grfree
= (volatile int *) x
;
4787 x
+= sizeof(void *);
4788 grfull
= (volatile int *) x
;
4789 x
+= sizeof(void *);
4790 grcnt
= (volatile int *) x
;
4791 x
+= sizeof(void *);
4792 grmax
= (volatile int *) x
;
4793 x
+= sizeof(void *);
4794 prfree
= (volatile int *) x
;
4795 x
+= NCORE
* sizeof(void *);
4796 prfull
= (volatile int *) x
;
4797 x
+= NCORE
* sizeof(void *);
4798 prcnt
= (volatile int *) x
;
4799 x
+= NCORE
* sizeof(void *);
4800 prmax
= (volatile int *) x
;
4801 x
+= NCORE
* sizeof(void *);
4802 gr_readmiss
= (volatile double *) x
;
4803 x
+= sizeof(double);
4804 gr_writemiss
= (volatile double *) x
;
4805 x
+= sizeof(double);
4808 stack_last
= (volatile Stack_Tree
**) x
;
4809 x
+= NCORE
* sizeof(Stack_Tree
*);
4813 H_tab
= (struct H_el
**) emalloc(n
);
4817 #warning MEMLIM not set
4818 #define MEMLIM (2048)
4821 if (core_id
== 0 && verbose
)
4822 printf("cpu0: step 0: -DMEMLIM=%d Mb - (hashtable %g Mb + workqueues %g Mb) = %g Mb for state storage\n",
4823 MEMLIM
, ((double)n
/(1048576.)), ((double) NCORE
* LWQ_SIZE
+ GWQ_SIZE
)/(1048576.),
4824 (memlim
- memcnt
- (double) n
- ((double) NCORE
* LWQ_SIZE
+ GWQ_SIZE
))/(1048576.));
4826 H_tab
= (struct H_el
**) prep_shmid_S((size_t) n
); /* hash_table */
4828 get_mem
= memlim
- memcnt
- ((double) NCORE
) * LWQ_SIZE
- GWQ_SIZE
;
4830 { Uerror("internal error -- shared state memory");
4833 if (core_id
== 0 && verbose
)
4834 { printf("cpu0: step 2: shared state memory %g Mb\n",
4835 get_mem
/(1048576.));
4837 x
= dc_mem_start
= (char *) prep_state_mem((size_t) get_mem
); /* for states */
4839 { printf("cpu%d: insufficient memory -- aborting\n", core_id
);
4843 search_terminated
= (volatile unsigned int *) x
; /* comes first */
4844 x
+= sizeof(void *); /* maintain alignment */
4846 is_alive
= (volatile double *) x
;
4847 x
+= NCORE
* sizeof(double);
4849 sh_lock
= (volatile int *) x
;
4850 x
+= CS_NR
* sizeof(int);
4852 grfree
= (volatile int *) x
;
4853 x
+= sizeof(void *);
4854 grfull
= (volatile int *) x
;
4855 x
+= sizeof(void *);
4856 grcnt
= (volatile int *) x
;
4857 x
+= sizeof(void *);
4858 grmax
= (volatile int *) x
;
4859 x
+= sizeof(void *);
4860 prfree
= (volatile int *) x
;
4861 x
+= NCORE
* sizeof(void *);
4862 prfull
= (volatile int *) x
;
4863 x
+= NCORE
* sizeof(void *);
4864 prcnt
= (volatile int *) x
;
4865 x
+= NCORE
* sizeof(void *);
4866 prmax
= (volatile int *) x
;
4867 x
+= NCORE
* sizeof(void *);
4868 gr_readmiss
= (volatile double *) x
;
4869 x
+= sizeof(double);
4870 gr_writemiss
= (volatile double *) x
;
4871 x
+= sizeof(double);
4874 stack_last
= (volatile Stack_Tree
**) x
;
4875 x
+= NCORE
* sizeof(Stack_Tree
*);
4877 if (((long)x
)&(sizeof(void *)-1)) /* word alignment */
4878 { x
+= sizeof(void *)-(((long)x
)&(sizeof(void *)-1)); /* 64-bit align */
4882 ncomps
= (unsigned long *) x
;
4883 x
+= (256+2) * sizeof(unsigned long);
4886 dc_shared
= (sh_Allocater
*) x
; /* in shared memory */
4887 x
+= sizeof(sh_Allocater
);
4889 if (core_id
== 0) /* root only */
4890 { dc_shared
->dc_id
= shmid_M
;
4891 dc_shared
->dc_start
= (void *) dc_mem_start
;
4892 dc_shared
->dc_arena
= x
;
4893 dc_shared
->pattern
= 1234567;
4894 dc_shared
->dc_size
= (long) get_mem
- (long) (x
- dc_mem_start
);
4895 dc_shared
->nxt
= NULL
;
4900 #if defined(WIN32) || defined(WIN64) || defined(__i386__) || defined(__x86_64__)
/* NOTE(review): numerals 4901/4903/4904 are original-file line numbers
 * fused into the text; the closing brace line (orig 4905) is missing
 * from this chunk.
 */
/* tas: test-and-set primitive for the multi-core locks, Windows/x86 build.
 * Wraps the interlocked intrinsic so the portable code has one tas() API.
 * Sets bit 1 of *s atomically; per the Win32 API the intrinsic returns the
 * bit's previous value, so tas() is nonzero iff the lock was already held
 * -- TODO confirm return-type line (orig 4902) lost in extraction.
 */
4901 extern BOOLEAN
InterlockedBitTestAndSet(LONG
volatile* Base
, LONG Bit
);
4903 tas(volatile LONG
*s
)
4904 { return InterlockedBitTestAndSet(s
, 1);
4907 #error missing definition of test and set operation for this platform
4911 cleanup_shm(int val
)
4913 static int nibis
= 0;
4916 { printf("cpu%d: Redundant call to cleanup_shm(%d)\n", core_id
, val
);
4921 if (search_terminated
!= NULL
)
4922 { *search_terminated
|= 16; /* cleanup_shm */
4925 for (m
= 0; m
< NR_QS
; m
++)
4926 { if (shmid
[m
] != NULL
)
4927 { UnmapViewOfFile((char *) shared_mem
[m
]);
4928 CloseHandle(shmid
[m
]);
4931 UnmapViewOfFile((void *) shmid_X
);
4932 CloseHandle((void *) shmid_M
);
4935 if (shmid_S
!= NULL
)
4936 { UnmapViewOfFile(SS
);
4937 CloseHandle(shmid_S
);
4940 if (core_id
== 0 && verbose
)
4941 { printf("cpu0: done, %ld Mb of shared state memory left\n",
4942 dc_shared
->dc_size
/ (long)(1048576));
4944 if (shmid_S
!= NULL
)
4945 { UnmapViewOfFile(H_tab
);
4946 CloseHandle(shmid_S
);
4948 shmid_M
= (void *) (dc_shared
->dc_id
);
4949 UnmapViewOfFile((char *) dc_shared
->dc_start
);
4950 CloseHandle(shmid_M
);
4953 /* detached from shared memory - so cannot use cpu_printf */
4955 { printf("cpu%d: done -- got %d states from queue\n",
4956 core_id
, nstates_get
);
4965 #if defined(MA) && !defined(SEP_STATE)
4966 #error MA requires SEP_STATE in multi-core mode
4969 #error BFS is not supported in multi-core mode
4972 #error SC is not supported in multi-core mode
4974 init_shm(); /* we are single threaded when this starts */
4975 signal(SIGINT
, give_up
); /* windows control-c interrupt */
4977 if (core_id
== 0 && verbose
)
4978 { printf("cpu0: step 4: creating additional workers (proxy %d)\n",
4982 if NCORE
> 1 the child
or the parent should fork N
-1 more times
4983 the parent is the only process with core_id
== 0 and is_parent
> 0
4984 the
others (workers
) have is_parent
= 0 and core_id
= 1..NCORE
-1
4986 if (core_id
== 0) /* root starts up the workers */
4987 { worker_pids
[0] = (DWORD
) getpid(); /* for completeness */
4988 while (++core_id
< NCORE
) /* first worker sees core_id = 1 */
4990 STARTUPINFO si
= { sizeof(si
) };
4991 PROCESS_INFORMATION pi
;
4993 if (proxy_pid
== core_id
) /* always non-zero */
4994 { sprintf(cmdline
, "pan_proxy.exe -r %s-Q%d -Z%d",
4995 o_cmdline
, getpid(), core_id
);
4997 { sprintf(cmdline
, "pan.exe %s-Q%d -Z%d",
4998 o_cmdline
, getpid(), core_id
);
5000 if (verbose
) printf("cpu%d: spawn %s\n", core_id
, cmdline
);
5002 is_parent
= CreateProcess(0, cmdline
, 0, 0, FALSE
, 0, 0, 0, &si
, &pi
);
5004 { Uerror("fork failed");
5006 worker_pids
[core_id
] = pi
.dwProcessId
;
5007 worker_handles
[core_id
] = pi
.hProcess
;
5009 { cpu_printf("created core %d, pid %d\n",
5010 core_id
, pi
.dwProcessId
);
5012 if (proxy_pid
== core_id
) /* we just created the receive half */
5013 { /* add proxy send, store pid in proxy_pid_snd */
5014 sprintf(cmdline
, "pan_proxy.exe -s %s-Q%d -Z%d -Y%d",
5015 o_cmdline
, getpid(), core_id
, worker_pids
[proxy_pid
]);
5016 if (verbose
) printf("cpu%d: spawn %s\n", core_id
, cmdline
);
5017 is_parent
= CreateProcess(0, cmdline
, 0,0, FALSE
, 0,0,0, &si
, &pi
);
5019 { Uerror("fork failed");
5021 proxy_pid_snd
= pi
.dwProcessId
;
5022 proxy_handle_snd
= pi
.hProcess
;
5024 { cpu_printf("created core %d, pid %d (send proxy)\n",
5025 core_id
, pi
.dwProcessId
);
5027 core_id
= 0; /* reset core_id for root process */
5029 { static char db0
[16]; /* good for up to 10^6 cores */
5030 static char db1
[16];
5031 tprefix
= db0
; sprefix
= db1
;
5032 sprintf(tprefix
, "cpu%d_trail", core_id
); /* avoid conflicts on file access */
5033 sprintf(sprefix
, "cpu%d_rst", core_id
);
5034 memcnt
= 0; /* count only additionally allocated memory */
5037 { cpu_printf("starting core_id %d -- pid %d\n", core_id
, getpid());
5039 if (core_id
== 0 && !remote_party
)
5040 { new_state(); /* root starts the search */
5042 cpu_printf("done with 1st dfs, nstates %g (put %d states), start reading q\n",
5043 nstates
, nstates_put
);
5046 Read_Queue(core_id
); /* all cores */
5049 { cpu_printf("put %6d states into queue -- got %6d\n",
5050 nstates_put
, nstates_get
);
5060 init_SS(unsigned long n
)
5062 SS
= (uchar
*) prep_shmid_S((size_t) n
);
5070 clock_t crash_stamp
;
5072 #if !defined(WIN32) && !defined(WIN64)
5073 struct tms start_tm
;
5079 #if defined(WIN32) || defined(WIN64)
5080 start_time
= clock();
5082 start_time
= times(&start_tm
);
5088 { clock_t stop_time
;
5090 #if !defined(WIN32) && !defined(WIN64)
5092 stop_time
= times(&stop_tm
);
5093 delta_time
= ((double) (stop_time
- start_time
)) / ((double) sysconf(_SC_CLK_TCK
));
5095 stop_time
= clock();
5096 delta_time
= ((double) (stop_time
- start_time
)) / ((double) CLOCKS_PER_SEC
);
5098 if (readtrail
|| delta_time
< 0.00) return;
5100 if (core_id
== 0 && nstates
> (double) 0)
5101 { printf("\ncpu%d: elapsed time %.3g seconds (%g states visited)\n", core_id
, delta_time
, nstates
);
5102 if (delta_time
> 0.01)
5103 { printf("cpu%d: rate %g states/second\n", core_id
, nstates
/delta_time
);
5105 { void check_overkill(void);
5109 printf("\npan: elapsed time %.3g seconds\n", delta_time
);
5110 if (delta_time
> 0.01)
5111 { printf("pan: rate %9.8g states/second\n", nstates
/delta_time
);
5113 { printf("pan: avg transition delay %.5g usec\n",
5114 delta_time
/(nstates
+truncs
));
5121 double t_alerts
[17];
5126 printf("crash alert intervals:\n");
5127 for (i
= 0; i
< 17; i
++)
5128 { printf("%d\t%g\n", i
, t_alerts
[i
]);
5135 if (crash_stamp
!= (clock_t) 0)
5140 #if defined(WIN32) || defined(WIN64)
5141 delta_time
= ((double) (clock() - crash_stamp
)) / ((double) CLOCKS_PER_SEC
);
5143 delta_time
= ((double) (times(&start_tm
) - crash_stamp
)) / ((double) sysconf(_SC_CLK_TCK
));
5145 for (i
= 0; i
< 16; i
++)
5146 { if (delta_time
<= (i
*30))
5147 { t_alerts
[i
] = delta_time
;
5150 if (i
== 16) t_alerts
[i
] = delta_time
;
5153 printf("cpu%d: crash alert off\n", core_id
);
5155 crash_stamp
= (clock_t) 0;
/* NOTE(review): numerals (5159 etc.) are original-file line numbers fused
 * into the text; the #else/#endif lines pairing the two WIN32 blocks and
 * the closing brace are missing from this chunk.
 */
/* crash_test: watchdog check used for peer-crash detection.
 * First call: records the current wall-clock stamp in the file-scope
 * crash_stamp (clock() on Windows, times() elsewhere) and announces
 * "crash detection".
 * Later calls: returns nonzero once the elapsed time since that stamp
 * reaches maxtime seconds (converted with CLOCKS_PER_SEC resp.
 * sysconf(_SC_CLK_TCK)).  What the first call returns is not visible
 * here (line(s) missing) -- TODO confirm against full pan.c.
 */
5159 crash_test(double maxtime
)
5160 { double delta_time
;
5161 if (crash_stamp
== (clock_t) 0)
5162 { /* start timing */
5163 #if defined(WIN32) || defined(WIN64)
5164 crash_stamp
= clock();
5166 crash_stamp
= times(&start_tm
);
5169 { printf("cpu%d: crash detection\n", core_id
);
5173 #if defined(WIN32) || defined(WIN64)
5174 delta_time
= ((double) (clock() - crash_stamp
)) / ((double) CLOCKS_PER_SEC
);
5176 delta_time
= ((double) (times(&start_tm
) - crash_stamp
)) / ((double) sysconf(_SC_CLK_TCK
));
5178 return (delta_time
>= maxtime
);
5185 depth
= mreached
= 0;
5188 trpt
->tau
|= 4; /* the claim moves first */
5190 for (i
= 0; i
< (int) now
._nr_pr
; i
++)
5191 { P0
*ptr
= (P0
*) pptr(i
);
5194 && accpstate
[ptr
->_t
][ptr
->_p
])
5199 && progstate
[ptr
->_t
][ptr
->_p
])
5206 if (accpstate
[EVENT_TRACE
][now
._event
])
5210 if (progstate
[EVENT_TRACE
][now
._event
])
5216 Mask
[0] = Mask
[1] = 1; /* _nr_pr, _nr_qs */
5218 { i
= &(now
._a_t
) - (uchar
*) &now
;
5219 Mask
[i
] = 1; /* _a_t */
5224 i
= &(now
._cnt
[0]) - (uchar
*) &now
;
5226 Mask
[i
++] = 1; /* _cnt[] */
5232 && (a_cycles
&& (trpt
->o_pm
&2)))
5233 { now
._a_t
= 2; /* set the A-bit */
5234 now
._cnt
[0] = now
._nr_pr
+ 1;
5236 printf("%3d: fairness Rule 1, cnt=%d, _a_t=%d\n",
5237 depth
, now
._cnt
[now
._a_t
&1], now
._a_t
);
5241 c_stack_start
= (char *) &i
; /* meant to be read-only */
5242 #if defined(HAS_CODE) && defined (C_INIT)
5243 C_INIT
; /* initialization of data that must precede fork() */
5246 #if defined(C_States) && (HAS_TRACK==1)
5247 /* capture initial state of tracked C objects */
5248 c_update((uchar
*) &(now
.c_state
[0]));
5251 if (readtrail
) getrail(); /* no return */
5257 #if defined(C_States) && defined(HAS_STACK) && (HAS_TRACK==1)
5258 /* initial state of tracked & unmatched objects */
5259 c_stack((uchar
*) &(svtack
->c_stack
[0]));
5271 new_state(); /* start 1st DFS */
/* NOTE(review): fragment -- opening brace (orig 5278) and closing lines
 * are missing from this chunk; numerals are fused line numbers.
 */
/* do_reverse: undo one transition t for process II (M carries the move's
 * result flags).  tt is the current local state (_p) of the process that
 * `this` points at; the actual per-transition reverse code is generated
 * by Spin and textually included via REVERSE_MOVES.
 */
5277 do_reverse(Trans
*t
, short II
, uchar M
)
5279 int tt
= (int) ((P0
*)this)->_p
;
5280 #include REVERSE_MOVES
5286 static char _tp
= 'n'; static int _qid
= 0;
5289 do_transit(Trans
*t
, short II
)
5291 int tt
= (int) ((P0
*)this)->_p
;
5297 uchar ot
= (uchar
) ((P0
*)this)->_t
;
5298 if (ot
== EVENT_TRACE
) boq
= -1;
5299 #define continue { boq = oboq; return 0; }
5301 #define continue return 0
5303 uchar ot
= (uchar
) ((P0
*)this)->_t
;
5306 #include FORWARD_MOVES
5309 if (ot
== EVENT_TRACE
) boq
= oboq
;
5316 require(char tp
, int qid
)
5318 _tp
= tp
; _qid
= qid
;
5320 if (now
._event
!= endevent
)
5321 for (t
= trans
[EVENT_TRACE
][now
._event
]; t
; t
= t
->nxt
)
5322 { if (do_transit(t
, EVENT_TRACE
))
5323 { now
._event
= t
->st
;
5324 reached
[EVENT_TRACE
][t
->st
] = 1;
5326 printf(" event_trace move to -> %d\n", t
->st
);
5330 if (accpstate
[EVENT_TRACE
][now
._event
])
5331 (trpt
+1)->o_pm
|= 2;
5333 if (progstate
[EVENT_TRACE
][now
._event
])
5334 (trpt
+1)->o_pm
|= 4;
5337 #ifdef NEGATED_TRACE
5338 if (now
._event
== endevent
)
5343 uerror("event_trace error (all events matched)");
5350 for (t
= t
->nxt
; t
; t
= t
->nxt
)
5351 { if (do_transit(t
, EVENT_TRACE
))
5352 Uerror("non-determinism in event-trace");
5358 printf(" event_trace miss '%c' -- %d, %d, %d\n",
5359 tp
, qid
, now
._event
, t
->forw
);
5362 #ifdef NEGATED_TRACE
5363 now
._event
= endevent
; /* only 1st try will count -- fixed 4.2.6 */
5368 uerror("event_trace error (no matching event)");
5376 enabled(int iam
, int pid
)
5377 { Trans
*t
; uchar
*othis
= this;
5378 int res
= 0; int tt
; uchar ot
;
5380 /* if (pid > 0) */ pid
++;
5383 Uerror("used: enabled(pid=thisproc)");
5384 if (pid
< 0 || pid
>= (int) now
._nr_pr
)
5388 tt
= (int) ((P0
*)this)->_p
;
5389 ot
= (uchar
) ((P0
*)this)->_t
;
5390 for (t
= trans
[ot
][tt
]; t
; t
= t
->nxt
)
5391 if (do_transit(t
, (short) pid
))
5402 { clock_t stop_time
;
5404 #if !defined(WIN32) && !defined(WIN64)
5406 stop_time
= times(&stop_tm
);
5407 delta_time
= ((double) (stop_time
- start_time
)) / ((double) sysconf(_SC_CLK_TCK
));
5409 stop_time
= clock();
5410 delta_time
= ((double) (stop_time
- start_time
)) / ((double) CLOCKS_PER_SEC
);
5412 if (delta_time
> 0.01)
5413 { printf("t= %6.3g ", delta_time
);
5414 printf("R= %7.0g", nstates
/delta_time
);
5417 if (quota
> 0.1 && delta_time
> quota
)
5418 { printf("Time limit of %6.3g minutes exceeded\n", quota
/60.0);
5421 leave_critical(GLOBAL_LOCK
);
5422 sudden_stop("time-limit");
5432 enter_critical(GLOBAL_LOCK
); /* snapshot */
5433 printf("cpu%d: ", core_id
);
5435 printf("Depth= %7ld States= %8.3g ",
5437 (long) (nr_handoffs
* z_handoff
) +
5440 printf("Transitions= %8.3g ", nstates
+truncs
);
5442 printf("Nodes= %7d ", nr_states
);
5444 printf("Memory= %9.3f\t", memcnt
/1048576.);
5448 leave_critical(GLOBAL_LOCK
);
5456 && (stackwrite
= creat(stackfile
, TMODE
)) < 0)
5457 Uerror("cannot create stackfile");
5459 if (write(stackwrite
, trail
, DDD
*sizeof(Trail
))
5460 != DDD
*sizeof(Trail
))
5461 Uerror("stackfile write error -- disk is full?");
5463 memmove(trail
, &trail
[DDD
], (HHH
-DDD
+2)*sizeof(Trail
));
5464 memset(&trail
[HHH
-DDD
+2], 0, (omaxdepth
- HHH
+ DDD
- 2)*sizeof(Trail
));
5472 memmove(&trail
[DDD
], trail
, (HHH
-DDD
+2)*sizeof(Trail
));
5475 || lseek(stackwrite
, -DDD
* (off_t
) sizeof(Trail
), SEEK_CUR
) == -1)
5476 Uerror("disk2stack lseek error");
5479 && (stackread
= open(stackfile
, 0)) < 0)
5480 Uerror("cannot open stackfile");
5482 if (lseek(stackread
, (CNT1
-CNT2
)*DDD
* (off_t
) sizeof(Trail
), SEEK_SET
) == -1)
5483 Uerror("disk2stack lseek error");
5485 have
= read(stackread
, trail
, DDD
*sizeof(Trail
));
5486 if (have
!= DDD
*sizeof(Trail
))
5487 Uerror("stackfile read error");
5492 { if (x
< 0 || x
>= MAXPROC
|| !proc_offset
[x
])
5495 return (uchar
*) pptr(x
);
5499 * new_state() is the main DFS search routine in the verifier
5500 * it has a lot of code ifdef-ed together to support
5501 * different search modes, which makes it quite unreadable.
5502 * if you are studying the code, first use the C preprocessor
5503 * to generate a specific version from the pan.c source,
5505 * gcc -E -DNOREDUCE -DBITSTATE pan.c > ppan.c
5506 * and then study the resulting file, rather than this one
5508 #if !defined(BFS) && (!defined(BITSTATE) || !defined(MA))
5514 { if (cnt
< 512) N_succ
[cnt
]++;
5515 else printf("tally_succ: cnt %d exceeds range\n", cnt
);
5520 { int i
; double sum
= 0.0;
5522 printf("Successor counts:\n");
5523 for (i
= 0; i
< 512; i
++)
5524 { sum
+= (double) N_succ
[i
];
5526 for (i
= 0; i
< 512; i
++)
5527 { if (N_succ
[i
] > 0)
5528 { printf("%3d %10d (%.4g %% of total)\n",
5529 i
, N_succ
[i
], (100.0 * (double) N_succ
[i
])/sum
);
5530 w_avg
+= (double) i
* (double) N_succ
[i
];
5532 if (sum
> N_succ
[0])
5533 printf("mean %.4g (without 0: %.4g)\n", w_avg
/ sum
, w_avg
/ (sum
- (double) N_succ
[0]));
5547 short II
, JJ
= 0, kk
;
5550 short From
= BASE
, To
= now
._nr_pr
-1;
5552 short From
= now
._nr_pr
-1, To
= BASE
;
5556 cpu_printf("%d: Down - %s %saccepting [pids %d-%d]\n",
5557 depth
, (trpt
->tau
&4)?"claim":"program",
5558 (trpt
->o_pm
&2)?"":"non-", From
, To
);
5562 { trpt
->sched_limit
= (trpt
-1)->sched_limit
;
5564 { trpt
->sched_limit
= 0;
5568 if (depth
> hiwater
)
5574 printf("zap %d: %d (maxdepth now %d)\n",
5575 CNT1
, hiwater
, maxdepth
);
5578 trpt
->tau
&= ~(16|32|64); /* make sure these are off */
5579 #if defined(FULLSTACK) && defined(MA)
5589 (trpt
+1)->o_n
= 1; /* not a deadlock: as below */
5592 (trpt
-1)->tau
|= 16; /* worstcase guess: as below */
5594 #if NCORE>1 && defined(FULL_TRAIL)
5602 if (depth
>= maxdepth
)
5605 printf("error: max search depth too small\n");
5608 { uerror("depth limit reached");
5612 (trpt
+1)->o_n
= 1; /* not a deadlock */
5615 (trpt
-1)->tau
|= 16; /* worstcase guess */
5617 #if NCORE>1 && defined(FULL_TRAIL)
5625 #if (defined(FULLSTACK) && !defined(MA)) || NCORE>1
5626 /* if atomic or rv move, carry forward previous state */
5627 trpt
->ostate
= (trpt
-1)->ostate
;
5630 if ((trpt
->tau
&4) || ((trpt
-1)->tau
&128))
5632 if (boq
== -1) { /* if not mid-rv */
5634 /* this check should now be redundant
5635 * because the seed state also appears
5636 * on the 1st dfs stack and would be
5637 * matched in hstore below
5639 if ((now
._a_t
&1) && depth
> A_depth
)
5640 { if (!memcmp((char *)&A_Root
,
5641 (char *)&now
, vsize
))
5643 depthfound
= A_depth
;
5645 printf("matches seed\n");
5648 uerror("non-progress cycle");
5650 uerror("acceptance cycle");
5652 #if NCORE>1 && defined(FULL_TRAIL)
5660 printf("not seed\n");
5664 if (!(trpt
->tau
&8)) /* if no atomic move */
5668 II
= bstore((char *)&now
, vsize
);
5669 trpt
->j6
= j1
; trpt
->j7
= j2
;
5670 JJ
= LL
[j1
] && LL
[j2
];
5676 JJ
= II
; /* worstcase guess for p.o. */
5679 II
= bstore((char *)&now
, vsize
);
5683 II
= gstore((char *)&now
, vsize
, 0);
5690 II
= hstore((char *)&now
, vsize
);
5696 kk
= (II
== 1 || II
== 2);
5698 #if NCORE==1 || defined (SEP_STATE)
5699 if (II
== 2 && ((trpt
->o_pm
&2) || ((trpt
-1)->o_pm
&2)))
5702 if (!fairness
|| ((now
._a_t
&1) && now
._cnt
[1] == 1)) /* 5.1.4 */
5704 if (a_cycles
&& !fairness
) /* 5.1.6 -- example by Hirofumi Watanabe */
5708 II
= 3; /* Schwoon & Esparza 2005, Gastin&Moro 2004 */
5710 printf("state match on dfs stack\n");
5715 #if defined(FULLSTACK) && defined(BITSTATE)
5716 if (!JJ
&& (now
._a_t
&1) && depth
> A_depth
)
5718 uchar o_a_t
= now
._a_t
;
5719 now
._a_t
&= ~(1|16|32);
5723 printf("state match on 1st dfs stack\n");
5730 if (II
== 3 && a_cycles
&& (now
._a_t
&1))
5733 if (fairness
&& now
._cnt
[1] > 1) /* was != 0 */
5736 printf(" fairness count non-zero\n");
5745 same_case
: if (Lstate
) depthfound
= Lstate
->D
;
5747 uerror("non-progress cycle");
5749 uerror("acceptance cycle");
5751 #if NCORE>1 && defined(FULL_TRAIL)
5762 #if NCORE>1 && !defined(SEP_STATE) && defined(V_PROVISO)
5763 if (II
!= 0 && (!Lstate
|| Lstate
->cpu_id
< core_id
))
5764 { (trpt
-1)->tau
|= 16;
5767 if ((II
&& JJ
) || (II
== 3))
5768 { /* marker for liveness proviso */
5770 (trpt
-1)->tau
|= 16;
5775 #if NCORE>1 && !defined(SEP_STATE) && defined(V_PROVISO)
5776 if (!(II
!= 0 && (!Lstate
|| Lstate
->cpu_id
< core_id
)))
5777 { /* treat as stack state */
5778 (trpt
-1)->tau
|= 16;
5780 { /* treat as non-stack state */
5781 (trpt
-1)->tau
|= 64;
5785 { /* successor outside stack */
5786 (trpt
-1)->tau
|= 64;
5792 #if NCORE>1 && defined(FULL_TRAIL)
5802 { static long sdone
= (long) 0; long ndone
;
5804 #if defined(ZAPH) && defined(BITSTATE)
5805 zstates
+= (double) hfns
;
5807 ndone
= (unsigned long) (nstates
/((double) FREQ
));
5811 #if defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(MA)
5812 if (nstates
> ((double)(ONE_L
<<(ssize
+1))))
5813 { void resize_hashtable(void);
5817 #if defined(ZAPH) && defined(BITSTATE)
5818 if (zstates
> ((double)(ONE_L
<<(ssize
-2))))
5819 { /* more than half the bits set */
5820 void zap_hashtable(void);
5828 if (write(svfd
, (uchar
*) &now
, vprefix
) != vprefix
)
5829 { fprintf(efd
, "writing %s.svd failed\n", PanSource
);
5833 #if defined(MA) && defined(W_XPT)
5834 if ((unsigned long) nstates
%W_XPT
== 0)
5835 { void w_xpoint(void);
5840 #if defined(FULLSTACK) || defined(CNTRSTACK)
5843 #if defined(FULLSTACK) && !defined(MA)
5844 printf("%d: putting %u (%d)\n", depth
,
5846 (trpt
->ostate
)?trpt
->ostate
->tagged
:0);
5848 printf("%d: putting\n", depth
);
5853 trpt
->ostate
= Lstate
;
5857 if (depth
> mreached
)
5862 trpt
->tau
&= ~(1|2); /* timeout and -request off */
5868 if (now
._nr_pr
== 0) /* claim terminated */
5869 uerror("end state in claim reached");
5870 check_claim(((P0
*)pptr(0))->_p
);
5872 if (trpt
->tau
&4) /* must make a claimmove */
5875 if ((now
._a_t
&2) /* A-bit set */
5876 && now
._cnt
[now
._a_t
&1] == 1)
5878 now
._cnt
[now
._a_t
&1] = 0;
5881 printf("%3d: fairness Rule 3.: _a_t = %d\n",
5891 /* Look for a process with only safe transitions */
5892 /* (special rules apply in the 2nd dfs) */
5893 if (boq
== -1 && From
!= To
5897 && (depth
< z_handoff
)
5902 && ((a_cycles
) || (!a_cycles
&& depth
< z_handoff
))
5909 !((trpt
-1)->proviso
))
5916 !(((char *)&((trpt
-1)->ostate
->state
))[0] & 128))
5918 !(((char *)&(trpt
->ostate
->state
))[0] & 128))
5924 (trpt
-1)->ostate
->proviso
== 0)
5926 trpt
->ostate
->proviso
== 0)
5933 for (II
= From
; II
<= To
; II
++)
5935 for (II
= From
; II
>= To
; II
--)
5938 Resume
: /* pick up here if preselect fails */
5940 tt
= (int) ((P0
*)this)->_p
;
5941 ot
= (uchar
) ((P0
*)this)->_t
;
5942 if (trans
[ot
][tt
]->atom
& 8)
5943 { t
= trans
[ot
][tt
];
5950 From
= To
= II
; /* the process preselected */
5954 trpt
->tau
|= 32; /* preselect marker */
5957 printf("%3d: proc %d Pre", depth
, II
);
5958 printf("Selected (om=%d, tau=%d)\n",
5961 printf("%3d: proc %d PreSelected (tau=%d)\n",
5962 depth
, II
, trpt
->tau
);
5970 #if !defined(NOREDUCE) || (defined(ETIM) && !defined(VERI))
5973 /* The Main Expansion Loop over Processes */
5974 trpt
->o_pm
&= ~(8|16|32|64); /* fairness-marks */
5976 if (fairness
&& boq
== -1
5978 && (!(trpt
->tau
&4) && !((trpt
-1)->tau
&128))
5981 { /* A_bit = 1; Cnt = N in acc states with A_bit 0 */
5984 if (a_cycles
&& (trpt
->o_pm
&2))
5985 { /* Accepting state */
5987 now
._cnt
[now
._a_t
&1] = now
._nr_pr
+ 1;
5990 printf("%3d: fairness Rule 1: cnt=%d, _a_t=%d\n",
5991 depth
, now
._cnt
[now
._a_t
&1], now
._a_t
);
5995 { /* A_bit = 0 when Cnt 0 */
5996 if (now
._cnt
[now
._a_t
&1] == 1)
5998 now
._cnt
[now
._a_t
&1] = 0;
6001 printf("%3d: fairness Rule 3: _a_t = %d\n",
6008 for (II
= From
; II
<= To
; II
++)
6010 for (II
= From
; II
>= To
; II
--)
6014 /* no rendezvous with same proc */
6015 if (boq
!= -1 && trpt
->pr
== II
) continue;
6018 /* limit max nr of interleavings */
6024 && (trpt
-1)->pr
!= II
6025 && trpt
->sched_limit
>= sched_max
)
6033 tt
= (int) ((P0
*)this)->_p
;
6034 ot
= (uchar
) ((P0
*)this)->_t
;
6036 /* don't repeat a previous preselected expansion */
6037 /* could hit this if reduction proviso was false */
6048 if (_m
>_n
||(_n
>3&&_m
!=0)) _n
=_m
;
6049 continue; /* did it before */
6052 trpt
->o_pm
&= ~1; /* no move in this pid yet */
6054 (trpt
+1)->o_event
= now
._event
;
6056 /* Fairness: Cnt++ when Cnt == II */
6058 trpt
->o_pm
&= ~64; /* didn't apply rule 2 */
6063 && now
._cnt
[now
._a_t
&1] == II
+2)
6064 { now
._cnt
[now
._a_t
&1] -= 1;
6066 /* claim need not participate */
6068 now
._cnt
[now
._a_t
&1] = 1;
6071 printf("%3d: proc %d fairness ", depth
, II
);
6072 printf("Rule 2: --cnt to %d (%d)\n",
6073 now
._cnt
[now
._a_t
&1], now
._a_t
);
6075 trpt
->o_pm
|= (32|64);
6079 if (!provided(II
, ot
, tt
, t
)) continue;
6081 /* check all trans of proc II - escapes first */
6085 (trpt
+1)->pr
= (uchar
) II
;
6088 for (ooi
= eoi
= 0, t
= trans
[ot
][tt
]; t
; t
= t
->nxt
, ooi
++)
6089 { if (strcmp(t
->tp
, "else") == 0)
6094 { t
= trans
[ot
][tt
];
6096 printf("randomizer: suppressed, saw else\n");
6101 printf("randomizer: skip %d in %d\n", eoi
, ooi
);
6103 for (t
= trans
[ot
][tt
]; t
; t
= t
->nxt
)
6104 if (eoi
-- <= 0) break;
6107 for ( ; t
&& ooi
> 0; t
= t
->nxt
, ooi
--)
6109 for (t
= trans
[ot
][tt
]; t
; t
= t
->nxt
)
6113 /* exploring all transitions from
6114 * a single escape state suffices
6116 if (trpt
->e_state
> 0
6117 && trpt
->e_state
!= t
->e_trans
)
6120 printf("skip 2nd escape %d (did %d before)\n",
6121 t
->e_trans
, trpt
->e_state
);
6128 #include FORWARD_MOVES
6129 P999
: /* jumps here when move succeeds */
6131 if (!(_m
= do_transit(t
, II
))) continue;
6138 && (trpt
-1)->pr
!= II
)
6139 { trpt
->sched_limit
= 1 + (trpt
-1)->sched_limit
;
6144 /* for branching-time, can accept reduction only if */
6145 /* the persistent set contains just 1 transition */
6146 { if ((trpt
->tau
&32) && (trpt
->o_pm
&1))
6148 trpt
->o_pm
|= 1; /* we moved */
6151 trpt
->o_pm
|= 1; /* we moved */
6154 if (loopstate
[ot
][tt
])
6157 printf("exiting from loopstate:\n");
6166 #if defined(VERBOSE) || defined(CHECK)
6168 cpu_printf("%3d: proc %d exec %d \n", depth
, II
, t
->t_id
);
6170 cpu_printf("%3d: proc %d exec %d, %d to %d, %s %s %s %saccepting [tau=%d]\n",
6171 depth
, II
, t
->forw
, tt
, t
->st
, t
->tp
,
6172 (t
->atom
&2)?"atomic":"",
6173 (boq
!= -1)?"rendez-vous":"",
6174 (trpt
->o_pm
&2)?"":"non-", trpt
->tau
);
6177 cpu_printf("\t(escape to state %d)\n", t
->st
);
6181 cpu_printf("\t(randomizer %d)\n", ooi
);
6188 now
._last
= II
- BASE
;
6191 trpt
->e_state
= t
->e_trans
;
6194 trpt
->pr
= (uchar
) II
;
6196 trpt
->o_pm
&= ~(2|4);
6198 { ((P0
*)this)->_p
= t
->st
;
6199 /* moved down reached[ot][t->st] = 1; */
6204 #if (ACCEPT_LAB>0 && !defined(NP)) || (PROG_LAB>0 && defined(HAS_NP))
6207 #define P__Q ((P0 *)pptr(ii))
6210 /* state 1 of np_ claim is accepting */
6211 if (((P0
*)pptr(0))->_p
== 1)
6214 for (ii
= 0; ii
< (int) now
._nr_pr
; ii
++)
6215 { if (accpstate
[P__Q
->_t
][P__Q
->_p
])
6221 #if defined(HAS_NP) && PROG_LAB>0
6222 for (ii
= 0; ii
< (int) now
._nr_pr
; ii
++)
6223 { if (progstate
[P__Q
->_t
][P__Q
->_p
])
6231 trpt
->o_t
= t
; trpt
->o_n
= _n
;
6232 trpt
->o_ot
= ot
; trpt
->o_tt
= tt
;
6233 trpt
->o_To
= To
; trpt
->o_m
= _m
;
6238 if (boq
!= -1 || (t
->atom
&2))
6241 /* atomic sequence in claim */
6247 { if ((trpt
-1)->tau
&4)
6252 /* if claim allowed timeout, so */
6253 /* does the next program-step: */
6254 if (((trpt
-1)->tau
&1) && !(trpt
->tau
&4))
6260 if (boq
== -1 && (t
->atom
&2))
6261 { From
= To
= II
; nlinks
++;
6264 { From
= BASE
; To
= now
._nr_pr
-1;
6266 { From
= now
._nr_pr
-1; To
= BASE
;
6269 #if NCORE>1 && defined(FULL_TRAIL)
6271 { Push_Stack_Tree(II
, t
->t_id
);
6274 goto Down
; /* pseudo-recursion */
6277 cpu_printf("%d: Up - %s\n", depth
,
6278 (trpt
->tau
&4)?"claim":"program");
6286 #if defined(MA) || NCORE>1
6287 if (depth
<= 0) return;
6288 /* e.g., if first state is old, after a restart */
6292 && depth
< hiwater
- (HHH
-DDD
) + 2)
6299 printf("unzap %d: %d\n", CNT2
, hiwater
);
6303 if (trpt
->o_pm
&128) /* fairness alg */
6304 { now
._cnt
[now
._a_t
&1] = trpt
->bup
.oval
;
6305 _n
= 1; trpt
->o_pm
&= ~128;
6307 #if defined(VERBOSE) || defined(CHECK)
6308 printf("%3d: reversed fairness default move\n", depth
);
6315 { int d
; Trail
*trl
;
6317 for (d
= 1; d
< depth
; d
++)
6318 { trl
= getframe(depth
-d
); /* was (trpt-d) */
6320 { now
._last
= trl
->pr
- BASE
;
6324 now
._last
= (depth
<1)?0:(trpt
-1)->pr
;
6328 now
._event
= trpt
->o_event
;
6331 if ((now
._a_t
&1) && depth
<= A_depth
)
6332 return; /* to checkcycles() */
6334 t
= trpt
->o_t
; _n
= trpt
->o_n
;
6335 ot
= trpt
->o_ot
; II
= trpt
->pr
;
6336 tt
= trpt
->o_tt
; this = pptr(II
);
6337 To
= trpt
->o_To
; _m
= trpt
->o_m
;
6342 _m
= do_reverse(t
, II
, _m
);
6344 #include REVERSE_MOVES
6345 R999
: /* jumps here when done */
6348 cpu_printf("%3d: proc %d reverses %d, %d to %d\n",
6349 depth
, II
, t
->forw
, tt
, t
->st
);
6350 cpu_printf("\t%s [abit=%d,adepth=%d,tau=%d,%d]\n",
6351 t
->tp
, now
._a_t
, A_depth
, trpt
->tau
, (trpt
-1)->tau
);
6354 /* pass the proviso tags */
6355 if ((trpt
->tau
&8) /* rv or atomic */
6357 (trpt
-1)->tau
|= 16;
6359 if ((trpt
->tau
&8) /* rv or atomic */
6361 (trpt
-1)->tau
|= 64;
6370 (trans
[ot
][tt
])->om
= _m
; /* head of list */
6372 /* i.e., not set if rv fails */
6375 #if defined(VERI) && !defined(NP)
6376 if (II
== 0 && verbose
&& !reached
[ot
][t
->st
])
6378 printf("depth %d: Claim reached state %d (line %d)\n",
6379 depth
, t
->st
, src_claim
[t
->st
]);
6383 reached
[ot
][t
->st
] = 1;
6384 reached
[ot
][tt
] = 1;
6387 else trpt
->e_state
= 0; /* undo */
6389 if (_m
>_n
||(_n
>3&&_m
!=0)) _n
=_m
;
6390 ((P0
*)this)->_p
= tt
;
6394 { t
= trans
[ot
][tt
];
6396 printf("randomizer: continue for %d more\n", ooi
);
6402 printf("randomizer: done\n");
6406 /* Fairness: undo Rule 2 */
6412 if (now
._cnt
[now
._a_t
&1] == 1)
6413 now
._cnt
[now
._a_t
&1] = 2;
6415 now
._cnt
[now
._a_t
&1] += 1;
6417 printf("%3d: proc %d fairness ", depth
, II
);
6418 printf("undo Rule 2, cnt=%d, _a_t=%d\n",
6419 now
._cnt
[now
._a_t
&1], now
._a_t
);
6421 trpt
->o_pm
&= ~(32|64);
6434 if (II
== 0) break; /* never claim */
6436 } /* all processes */
6438 tally_succ(trpt
->n_succ
);
6441 if (_n
== 0 /* no process could move */
6446 && trpt
->sched_limit
>= sched_max
)
6447 { _n
= 1; /* not a deadlock */
6451 /* Fairness: undo Rule 2 */
6452 if (trpt
->o_pm
&32) /* remains if proc blocked */
6455 if (now
._cnt
[now
._a_t
&1] == 1)
6456 now
._cnt
[now
._a_t
&1] = 2;
6458 now
._cnt
[now
._a_t
&1] += 1;
6460 printf("%3d: proc -- fairness ", depth
);
6461 printf("undo Rule 2, cnt=%d, _a_t=%d\n",
6462 now
._cnt
[now
._a_t
&1], now
._a_t
);
6468 && _n
== 0 /* nobody moved */
6470 && !(trpt
->tau
&4) /* in program move */
6472 && !(trpt
->tau
&8) /* not an atomic one */
6474 && ((trpt
->tau
&1) || endstate())
6477 && (trpt
->tau
&1) /* already tried timeout */
6482 && !((trpt
->tau
&32) && (_n
== 0 || (trpt
->tau
&16)))
6484 && now
._cnt
[now
._a_t
&1] > 0) /* needed more procs */
6486 trpt
->o_pm
|= 128 | ((trpt
-1)->o_pm
&(2|4));
6487 trpt
->bup
.oval
= now
._cnt
[now
._a_t
&1];
6488 now
._cnt
[now
._a_t
&1] = 1;
6495 From
= BASE
; To
= now
._nr_pr
-1;
6497 From
= now
._nr_pr
-1; To
= BASE
;
6499 #if defined(VERBOSE) || defined(CHECK)
6500 printf("%3d: fairness default move ", depth
);
6501 printf("(all procs block)\n");
6506 Q999
: /* returns here with _n>0 when done */;
6509 now
._cnt
[now
._a_t
&1] = 0;
6512 printf("%3d: fairness undo Rule 1, _a_t=%d\n",
6518 now
._cnt
[now
._a_t
&1] = 1;
6521 printf("%3d: fairness undo Rule 3, _a_t=%d\n",
6529 /* at least one move that was preselected at this */
6530 /* level, blocked or was a loop control flow point */
6531 if ((trpt
->tau
&32) && (_n
== 0 || (trpt
->tau
&16)))
6533 /* preselected move - no successors outside stack */
6534 if ((trpt
->tau
&32) && !(trpt
->tau
&64))
6537 { From
= BASE
; To
= now
._nr_pr
-1;
6539 { From
= now
._nr_pr
-1; To
= BASE
;
6542 printf("%3d: proc %d UnSelected (_n=%d, tau=%d)\n",
6543 depth
, II
+1, _n
, trpt
->tau
);
6545 _n
= 0; trpt
->tau
&= ~(16|32|64);
6547 if (II
<= To
) /* II already decremented */
6549 if (II
>= BASE
) /* II already decremented */
6556 /* at least one move that was preselected at this */
6557 /* level, blocked or truncated at the next level */
6558 /* implied: #ifdef FULLSTACK */
6559 if ((trpt
->tau
&32) && (_n
== 0 || (trpt
->tau
&16)))
6562 printf("%3d: proc %d UnSelected (_n=%d, tau=%d)\n",
6563 depth
, II
+1, (int) _n
, trpt
->tau
);
6565 if (a_cycles
&& (trpt
->tau
&16))
6566 { if (!(now
._a_t
&1))
6569 printf("%3d: setting proviso bit\n", depth
);
6574 (trpt
-1)->proviso
= 1;
6580 if ((trpt
-1)->ostate
)
6581 ((char *)&((trpt
-1)->ostate
->state
))[0] |= 128;
6583 ((char *)&(trpt
->ostate
->state
))[0] |= 128;
6588 if ((trpt
-1)->ostate
)
6589 (trpt
-1)->ostate
->proviso
= 1;
6591 trpt
->ostate
->proviso
= 1;
6595 From
= BASE
; To
= now
._nr_pr
-1;
6597 From
= now
._nr_pr
-1; To
= BASE
;
6599 _n
= 0; trpt
->tau
&= ~(16|32|64);
6600 goto Again
; /* do full search */
6601 } /* else accept reduction */
6604 { From
= BASE
; To
= now
._nr_pr
-1;
6606 { From
= now
._nr_pr
-1; To
= BASE
;
6608 _n
= 0; trpt
->tau
&= ~(16|32|64);
6610 if (II
<= To
) /* already decremented */
6612 if (II
>= BASE
) /* already decremented */
6621 if (_n
== 0 || ((trpt
->tau
&4) && (trpt
->tau
&2)))
6624 cpu_printf("%3d: no move [II=%d, tau=%d, boq=%d]\n",
6625 depth
, II
, trpt
->tau
, boq
);
6628 /* ok if a rendez-vous fails: */
6629 if (boq
!= -1) goto Done
;
6631 /* ok if no procs or we're at maxdepth */
6632 if ((now
._nr_pr
== 0 && (!strict
|| qs_empty()))
6636 || depth
>= maxdepth
-1) goto Done
;
6637 if ((trpt
->tau
&8) && !(trpt
->tau
&4))
6638 { trpt
->tau
&= ~(1|8);
6639 /* 1=timeout, 8=atomic */
6641 From
= BASE
; To
= now
._nr_pr
-1;
6643 From
= now
._nr_pr
-1; To
= BASE
;
6646 cpu_printf("%3d: atomic step proc %d unexecutable\n", depth
, II
+1);
6649 trpt
->tau
|= 4; /* switch to claim */
6654 if (!(trpt
->tau
&1)) /* didn't try timeout yet */
6660 if (trpt
->tau
&2) /* requested */
6665 cpu_printf("%d: timeout\n", depth
);
6670 { /* only claim can enable timeout */
6672 && !((trpt
-1)->tau
&4))
6673 /* blocks inside an atomic */ goto BreakOut
;
6675 cpu_printf("%d: req timeout\n",
6678 (trpt
-1)->tau
|= 2; /* request */
6679 #if NCORE>1 && defined(FULL_TRAIL)
6688 cpu_printf("%d: timeout\n", depth
);
6699 { trpt
->tau
|= 4; /* claim stuttering */
6700 trpt
->tau
|= 128; /* stutter mark */
6702 cpu_printf("%d: claim stutter\n", depth
);
6710 if (!noends
&& !a_cycles
&& !endstate())
6711 { depth
--; trpt
--; /* new 4.2.3 */
6712 uerror("invalid end state");
6716 else if (a_cycles
&& (trpt
->o_pm
&2)) /* new 4.2.4 */
6718 uerror("accept stutter");
6725 if (!(trpt
->tau
&8)) /* not in atomic seqs */
6730 /* --after-- a program-step, i.e., */
6731 /* after backtracking a claim-step */
6733 /* with at least one running process */
6734 /* unless in a stuttered accept state */
6735 && ((now
._nr_pr
> 1) || (trpt
->o_pm
&2))
6743 cpu_printf("Consider check %d %d...\n",
6744 now
._a_t
, now
._cnt
[0]);
6746 if ((now
._a_t
&2) /* A-bit */
6747 && (now
._cnt
[0] == 1))
6751 if (a_cycles
&& (trpt
->o_pm
&2))
6756 #if defined(FULLSTACK) || defined(CNTRSTACK)
6759 && (((trpt
->tau
&4) && !(trpt
->tau
&128))
6760 || ( (trpt
-1)->tau
&128)))
6766 #if defined(FULLSTACK)
6767 printf("%d: zapping %u (%d)\n",
6768 depth
, trpt
->ostate
,
6769 (trpt
->ostate
)?trpt
->ostate
->tagged
:0);
6778 && (((trpt
->tau
&4) && !(trpt
->tau
&128))
6779 || ( (trpt
-1)->tau
&128)))
6785 printf("%d: zapping\n", depth
);
6790 gstore((char *) &now
, vsize
, 1);
6797 #if NCORE>1 && defined(FULL_TRAIL)
6807 void new_state(void) { /* place holder */ }
6811 assert(int a
, char *s
, int ii
, int tt
, Trans
*t
)
6813 if (!a
&& !noasserts
)
6815 strcpy(bad
, "assertion violated ");
6816 if (strlen(s
) > 1000)
6817 { strncpy(&bad
[19], (const char *) s
, 1000);
6820 strcpy(&bad
[19], s
);
6824 #ifndef NOBOUNDCHECK
6826 Boundcheck(int x
, int y
, int a1
, int a2
, Trans
*a3
)
6828 assert((x
>= 0 && x
< y
), "- invalid array index",
6837 printf("%9.8g states, stored (%g visited)\n",
6838 nstates
- nShadow
, nstates
);
6840 printf("%9.8g states, stored\n", nstates
);
6843 printf(" %8g nominal states (- rv and atomic)\n", nstates
-midrv
-nlinks
+revrv
);
6844 printf(" %8g rvs succeeded\n", midrv
-failedrv
);
6846 printf(" %8g nominal states (stored-atomic)\n", nstates
-nlinks
);
6849 printf(" %8g midrv\n", midrv
);
6850 printf(" %8g failedrv\n", failedrv
);
6851 printf(" %8g revrv\n", revrv
);
6854 printf("%9.8g states, matched\n", truncs
);
6856 printf("%9.8g matches within stack\n",truncs2
);
6859 printf("%9.8g transitions (= visited+matched)\n",
6862 printf("%9.8g transitions (= stored+matched)\n",
6864 printf("%9.8g atomic steps\n", nlinks
);
6865 if (nlost
) printf("%g lost messages\n", (double) nlost
);
6868 printf("hash conflicts: %9.8g (resolved)\n", hcmp
);
6870 if (hcmp
> (double) (1<<ssize
))
6871 { printf("hint: increase hashtable-size (-w) to reduce runtime\n");
6872 } /* in multi-core: also reduces lock delays on access to hashtable */
6876 printf("%8g states allocated for dfs stack\n", ngrabs
);
6879 printf("\nhash factor: %4g (best if > 100.)\n\n",
6880 (double)(((double) udmem
) * 8.0) / (double) nstates
);
6882 printf("\nhash factor: %4g (best if > 100.)\n\n",
6883 (double)(1<<(ssize
-8)) / (double) nstates
* 256.0);
6884 printf("bits set per state: %u (-k%u)\n", hfns
, hfns
);
6887 { printf("total bits available: %8g (-M%ld)\n",
6888 ((double) udmem
) * 8.0, udmem
/(1024L*1024L));
6890 printf("total bits available: %8g (-w%d)\n",
6891 ((double) (ONE_L
<< (ssize
-4)) * 16.0), ssize
);
6895 printf("bfs disk reads: %ld writes %ld -- diff %ld\n",
6896 bfs_dsk_reads
, bfs_dsk_writes
, bfs_dsk_writes
-bfs_dsk_reads
);
6897 if (bfs_dsk_read
>= 0) (void) close(bfs_dsk_read
);
6898 if (bfs_dsk_write
>= 0) (void) close(bfs_dsk_write
);
6899 (void) unlink("pan_bfs_dsk.tmp");
6906 #if defined(BITSTATE) || !defined(NOCOMP)
6907 double nr1
, nr2
, nr3
= 0.0, nr4
, nr5
= 0.0;
6908 #if !defined(MA) && (defined(MEMCNT) || defined(MEMLIM))
6911 int mverbose
= verbose
;
6915 if (verbose
) cpu_printf("wrapup -- %d error(s)\n", errors
);
6919 void dsk_stats(void);
6922 if (search_terminated
!= NULL
)
6923 { *search_terminated
|= 2; /* wrapup */
6925 exit(0); /* normal termination, not an error */
6928 #if !defined(WIN32) && !defined(WIN64)
6929 signal(SIGINT
, SIG_DFL
);
6931 printf("\n(%s)\n", SpinVersion
);
6932 if (!done
) printf("Warning: Search not completed\n");
6934 (void) unlink((const char *)stackfile
);
6938 { printf(" + Multi-Core (NCORE=%d)\n", NCORE
);
6940 { printf(" + Multi-Core (NCORE=%d -z%d)\n", NCORE
, z_handoff
);
6944 printf(" + Using Breadth-First Search\n");
6947 printf(" + Partial Order Reduction\n");
6950 printf(" + Reverse Depth-First Search Order\n");
6953 printf(" + Reverse Transition Ordering\n");
6956 printf(" + Randomized Transition Ordering\n");
6959 printf(" + Scheduling Restriction (-DSCHED=%d)\n", sched_max
);
6962 printf(" + Compression\n");
6965 printf(" + Graph Encoding (-DMA=%d)\n", MA
);
6967 printf(" Restarted from checkpoint %s.xpt\n", PanSource
);
6972 printf(" + FullStack Matching\n");
6975 printf(" + CntrStack Matching\n");
6979 printf("\nBit statespace search for:\n");
6982 printf("\nHash-Compact %d search for:\n", HC
);
6984 printf("\nFull statespace search for:\n");
6988 #ifdef NEGATED_TRACE
6989 printf(" notrace assertion +\n");
6991 printf(" trace assertion +\n");
6995 printf(" never claim +\n");
6996 printf(" assertion violations ");
6998 printf("- (disabled by -A flag)\n");
7000 printf("+ (if within scope of claim)\n");
7003 printf(" never claim - (not selected)\n");
7005 printf(" never claim - (none specified)\n");
7007 printf(" assertion violations ");
7009 printf("- (disabled by -A flag)\n");
7015 printf(" non-progress cycles ");
7017 printf(" acceptance cycles ");
7020 printf("+ (fairness %sabled)\n",
7021 fairness
?"en":"dis");
7022 else printf("- (not selected)\n");
7024 printf(" cycle checks - (disabled by -DSAFETY)\n");
7027 printf(" invalid end states - ");
7028 printf("(disabled by ");
7030 printf("-E flag)\n\n");
7032 printf("never claim)\n\n");
7034 printf(" invalid end states ");
7036 printf("- (disabled by -E flag)\n\n");
7040 printf("State-vector %d byte, depth reached %ld", hmax
,
7042 (nr_handoffs
* z_handoff
) +
7045 printf(", errors: %d\n", errors
);
7049 { extern void dfa_stats(void);
7050 if (maxgs
+a_cycles
+2 < MA
)
7051 printf("MA stats: -DMA=%d is sufficient\n",
7058 printf("stackframes: %d/%d\n\n", smax
, svmax
);
7059 printf("stats: fa %d, fh %d, zh %d, zn %d - ",
7061 printf("check %d holds %d\n", Ccheck
, Cholds
);
7062 printf("stack stats: puts %d, probes %d, zaps %d\n",
7068 #if defined(BITSTATE) || !defined(NOCOMP)
7069 nr1
= (nstates
-nShadow
)*
7070 (double)(hmax
+sizeof(struct H_el
)-sizeof(unsigned));
7074 nr2
= (double) ((maxdepth
+3)*sizeof(Trail
));
7077 #if !defined(MA) || defined(COLLAPSE)
7078 nr3
= (double) (ONE_L
<<ssize
)*sizeof(struct H_el
*);
7082 nr3
= (double) (udmem
);
7084 nr3
= (double) (ONE_L
<<(ssize
-3));
7086 nr5
= (double) (ONE_L
<<(ssize
-3));
7089 nr5
= (double) (maxdepth
*sizeof(struct H_el
*));
7092 nr4
= (double) (svmax
* (sizeof(Svtack
) + hmax
))
7093 + (double) (smax
* (sizeof(Stack
) + Maxbody
));
7095 if (mverbose
|| memcnt
< nr1
+nr2
+nr3
+nr4
+nr5
)
7097 { double remainder
= memcnt
;
7098 double tmp_nr
= memcnt
-nr3
-nr4
-(nr2
-fragment
)-nr5
;
7099 #if NCORE>1 && !defined(SEP_STATE)
7100 tmp_nr
-= ((double) NCORE
* LWQ_SIZE
) + GWQ_SIZE
;
7102 if (tmp_nr
< 0.0) tmp_nr
= 0.;
7103 printf("Stats on memory usage (in Megabytes):\n");
7104 printf("%9.3f equivalent memory usage for states",
7105 nr1
/1048576.); /* 1024*1024=1048576 */
7106 printf(" (stored*(State-vector + overhead))\n");
7107 #if NCORE>1 && !defined(WIN32) && !defined(WIN64)
7108 printf("%9.3f shared memory reserved for state storage\n",
7109 mem_reserved
/1048576.);
7111 printf(" in %d local heaps of %7.3f MB each\n",
7112 NCORE
, mem_reserved
/(NCORE
*1048576.));
7118 printf("%9.3f memory used for hash array (-M%ld)\n",
7119 nr3
/1048576., udmem
/(1024L*1024L));
7121 printf("%9.3f memory used for hash array (-w%d)\n",
7122 nr3
/1048576., ssize
);
7124 printf("%9.3f memory used for bit stack\n",
7126 remainder
= remainder
- nr3
- nr5
;
7128 printf("%9.3f actual memory usage for states",
7130 remainder
-= tmp_nr
;
7133 { if (tmp_nr
> nr1
) printf("unsuccessful ");
7134 printf("compression: %.2f%%)\n",
7135 (100.0*tmp_nr
)/nr1
);
7137 printf("less than 1k)\n");
7140 { printf(" state-vector as stored = %.0f byte",
7141 (tmp_nr
)/(nstates
-nShadow
) -
7142 (double) (sizeof(struct H_el
) - sizeof(unsigned)));
7143 printf(" + %ld byte overhead\n",
7144 (long int) sizeof(struct H_el
)-sizeof(unsigned));
7147 #if !defined(MA) || defined(COLLAPSE)
7148 printf("%9.3f memory used for hash table (-w%d)\n",
7149 nr3
/1048576., ssize
);
7154 printf("%9.3f memory used for DFS stack (-m%ld)\n",
7155 nr2
/1048576., maxdepth
);
7159 remainder
-= ((double) NCORE
* LWQ_SIZE
) + GWQ_SIZE
;
7160 printf("%9.3f shared memory used for work-queues\n",
7161 (GWQ_SIZE
+ (double) NCORE
* LWQ_SIZE
) /1048576.);
7162 printf(" in %d queues of %7.3f MB each",
7163 NCORE
, (double) LWQ_SIZE
/1048576.);
7165 printf(" + a global q of %7.3f MB\n",
7166 (double) GWQ_SIZE
/ 1048576.);
7171 if (remainder
- fragment
> 1048576.)
7172 printf("%9.3f other (proc and chan stacks)\n",
7173 (remainder
-fragment
)/1048576.);
7174 if (fragment
> 1048576.)
7175 printf("%9.3f memory lost to fragmentation\n",
7177 printf("%9.3f total actual memory usage\n\n",
7185 printf("%9.3f memory usage (Mbyte)\n\n",
7189 printf("nr of templates: [ globals chans procs ]\n");
7190 printf("collapse counts: [ ");
7191 { int i
; for (i
= 0; i
< 256+2; i
++)
7193 printf("%d ", ncomps
[i
]);
7197 if ((done
|| verbose
) && !no_rck
) do_reach();
7200 printf("\nPeg Counts (transitions executed):\n");
7201 for (i
= 1; i
< NTRANS
; i
++)
7202 { if (peg
[i
]) putpeg(i
, peg
[i
]);
7209 if (vprefix
> 0) close(svfd
);
7212 printf("%g loopstates hit\n", cnt_loops
);
7217 #if NCORE>1 && defined(T_ALERT)
7225 { printf("Interrupted\n");
7227 was_interrupted
= 1;
7235 * super fast hash, based on Paul Hsieh's function
7236 * http://www.azillionmonkeys.com/qed/hash.html
7240 #if (defined(__GNUC__) && defined(__i386__)) || defined(__WATCOMC__) \
7241 || defined(_MSC_VER) || defined (__BORLANDC__) || defined (__TURBOC__)
7242 #define get16bits(d) (*((const uint16_t *) (d)))
7246 #define get16bits(d) ((((uint32_t)(((const uint8_t *)(d))[1])) << 8)\
7247 +(uint32_t)(((const uint8_t *)(d))[0]) )
7251 d_sfh(const char *s
, int len
)
7252 { uint32_t h
= len
, tmp
;
7258 for ( ; len
> 0; len
--)
7259 { h
+= get16bits(s
);
7260 tmp
= (get16bits(s
+2) << 11) ^ h
;
7261 h
= (h
<< 16) ^ tmp
;
7262 s
+= 2*sizeof(uint16_t);
7266 case 3: h
+= get16bits(s
);
7268 h
^= s
[sizeof(uint16_t)] << 18;
7271 case 2: h
+= get16bits(s
);
7292 #if defined(HASH64) || defined(WIN64)
7293 /* 64-bit Jenkins hash, 1997
7294 * http://burtleburtle.net/bob/c/lookup8.c
7296 #define mix(a,b,c) \
7297 { a -= b; a -= c; a ^= (c>>43); \
7298 b -= c; b -= a; b ^= (a<<9); \
7299 c -= a; c -= b; c ^= (b>>8); \
7300 a -= b; a -= c; a ^= (c>>38); \
7301 b -= c; b -= a; b ^= (a<<23); \
7302 c -= a; c -= b; c ^= (b>>5); \
7303 a -= b; a -= c; a ^= (c>>35); \
7304 b -= c; b -= a; b ^= (a<<49); \
7305 c -= a; c -= b; c ^= (b>>11); \
7306 a -= b; a -= c; a ^= (c>>12); \
7307 b -= c; b -= a; b ^= (a<<18); \
7308 c -= a; c -= b; c ^= (b>>22); \
7311 /* 32-bit Jenkins hash, 2006
7312 * http://burtleburtle.net/bob/c/lookup3.c
7314 #define rot(x,k) (((x)<<(k))|((x)>>(32-(k))))
7316 #define mix(a,b,c) \
7317 { a -= c; a ^= rot(c, 4); c += b; \
7318 b -= a; b ^= rot(a, 6); a += c; \
7319 c -= b; c ^= rot(b, 8); b += a; \
7320 a -= c; a ^= rot(c,16); c += b; \
7321 b -= a; b ^= rot(a,19); a += c; \
7322 c -= b; c ^= rot(b, 4); b += a; \
7325 #define final(a,b,c) \
7326 { c ^= b; c -= rot(b,14); \
7327 a ^= c; a -= rot(c,11); \
7328 b ^= a; b -= rot(a,25); \
7329 c ^= b; c -= rot(b,16); \
7330 a ^= c; a -= rot(c,4); \
7331 b ^= a; b -= rot(a,14); \
7332 c ^= b; c -= rot(b,24); \
7337 d_hash(uchar
*kb
, int nbytes
)
7339 #if defined(HASH64) || defined(WIN64)
7340 uint64_t a
= 0, b
, c
, n
;
7341 uint64_t *k
= (uint64_t *) kb
;
7343 uint32_t a
, b
, c
, n
;
7344 uint32_t *k
= (uint32_t *) kb
;
7346 /* extend to multiple of words, if needed */
7347 n
= nbytes
/WS
; /* nr of words */
7348 a
= nbytes
- (n
*WS
);
7353 case 3: *bp
++ = 0; /* fall thru */
7354 case 2: *bp
++ = 0; /* fall thru */
7358 #if defined(HASH64) || defined(WIN64)
7359 b
= HASH_CONST
[HASH_NR
];
7360 c
= 0x9e3779b97f4a7c13LL
; /* arbitrary value */
7369 c
+= (((uint64_t) nbytes
)<<3);
7377 a
= c
= 0xdeadbeef + (n
<<2);
7378 b
= HASH_CONST
[HASH_NR
];
7395 j1
= c
&nmask
; j3
= a
&7; /* 1st bit */
7396 j2
= b
&nmask
; j4
= (a
>>3)&7; /* 2nd bit */
7401 s_hash(uchar
*cp
, int om
)
7404 d_sfh((const char *) cp
, om
); /* sets K1 */
7406 d_hash(cp
, om
); /* sets K1 etc */
7410 j1
= K1
% omaxdepth
;
7423 srand(123); /* fixed startpoint */
7424 prerand
= (int *) emalloc((omaxdepth
+3)*sizeof(int));
7425 for (i
= 0; i
< omaxdepth
+3; i
++)
7426 prerand
[i
] = rand();
7430 { if (!prerand
) inirand();
7431 return prerand
[depth
];
7436 set_masks(void) /* 4.2.5 */
7438 if (WS
== 4 && ssize
>= 32)
7439 { mask
= 0xffffffff;
7442 case 34: nmask
= (mask
>>1); break;
7443 case 33: nmask
= (mask
>>2); break;
7444 default: nmask
= (mask
>>3); break;
7450 { mask
= ((ONE_L
<<ssize
)-1); /* hash init */
7457 { fprintf(stderr
, "pan: wordsize %ld not supported\n", (long int) WS
);
7459 } else /* WS == 4 and ssize < 32 */
7460 { mask
= ((ONE_L
<<ssize
)-1); /* hash init */
7465 static long reclaim_size
;
7466 static char *reclaim_mem
;
7467 #if defined(AUTO_RESIZE) && !defined(BITSTATE) && !defined(MA)
7469 #error cannot combine AUTO_RESIZE with NCORE>1 yet
7471 static struct H_el
**N_tab
;
7473 reverse_capture(struct H_el
*p
)
7475 reverse_capture(p
->nxt
);
7476 /* last element of list moves first */
7477 /* to preserve list-order */
7479 if (ssize
< 8*WS
) /* probably always true */
7486 resize_hashtable(void)
7488 if (WS
== 4 && ssize
>= 27 - 1)
7489 { return; /* canot increase further */
7492 ssize
+= 2; /* 4x size */
7494 printf("pan: resizing hashtable to -w%d.. ", ssize
);
7496 N_tab
= (struct H_el
**)
7497 emalloc((ONE_L
<<ssize
)*sizeof(struct H_el
*));
7499 set_masks(); /* they changed */
7501 for (j1
= 0; j1
< (ONE_L
<< (ssize
- 2)); j1
++)
7502 { reverse_capture(H_tab
[j1
]);
7504 reclaim_mem
= (char *) H_tab
;
7505 reclaim_size
= (ONE_L
<< (ssize
- 2));
7511 #if defined(ZAPH) && defined(BITSTATE)
7514 { cpu_printf("pan: resetting hashtable\n");
7516 { memset(SS
, 0, udmem
);
7518 { memset(SS
, 0, ONE_L
<<(ssize
-3));
7524 main(int argc
, char *argv
[])
7525 { void to_compile(void);
7527 efd
= stderr
; /* default */
7529 bstore
= bstore_reg
; /* default */
7533 strcpy(o_cmdline
, "");
7534 for (j
= 1; j
< argc
; j
++)
7535 { strcat(o_cmdline
, argv
[j
]);
7536 strcat(o_cmdline
, " ");
7538 /* printf("Command Line: %s\n", o_cmdline); */
7539 if (strlen(o_cmdline
) >= sizeof(o_cmdline
))
7540 { Uerror("option list too long");
7543 while (argc
> 1 && argv
[1][0] == '-')
7544 { switch (argv
[1][1]) {
7547 case 'a': fprintf(efd
, "error: -a disabled");
7550 case 'a': a_cycles
= 1; break;
7553 case 'A': noasserts
= 1; break;
7554 case 'b': bounded
= 1; break;
7556 case 'C': coltrace
= 1; goto samething
;
7558 case 'c': upto
= atoi(&argv
[1][2]); break;
7559 case 'd': state_tables
++; break;
7560 case 'e': every_error
= 1; Nr_Trails
= 1; break;
7561 case 'E': noends
= 1; break;
7563 case 'F': if (strlen(argv
[1]) > 2)
7564 stackfile
= &argv
[1][2];
7567 #if !defined(SAFETY) && !defined(NOFAIR)
7568 case 'f': fairness
= 1; break;
7571 case 'g': gui
= 1; goto samething
;
7573 case 'h': if (!argv
[1][2]) usage(efd
); else
7574 HASH_NR
= atoi(&argv
[1][2])%33; break;
7575 case 'I': iterative
= 2; every_error
= 1; break;
7576 case 'i': iterative
= 1; every_error
= 1; break;
7577 case 'J': like_java
= 1; break; /* Klaus Havelund */
7579 case 'k': hfns
= atoi(&argv
[1][2]); break;
7582 case 'L': sched_max
= atoi(&argv
[1][2]); break;
7586 case 'l': a_cycles
= 1; break;
7588 case 'l': fprintf(efd
, "error: -l disabled");
7593 case 'M': udmem
= atoi(&argv
[1][2]); break;
7594 case 'G': udmem
= atoi(&argv
[1][2]); udmem
*= 1024; break;
7597 fprintf(stderr
, "-M and -G affect only -DBITSTATE\n");
7600 case 'm': maxdepth
= atoi(&argv
[1][2]); break;
7601 case 'n': no_rck
= 1; break;
7602 case 'P': readtrail
= 1; onlyproc
= atoi(&argv
[1][2]);
7603 if (argv
[2][0] != '-') /* check next arg */
7604 { trailfilename
= argv
[2];
7605 argc
--; argv
++; /* skip next arg */
7609 case 'p': vprefix
= atoi(&argv
[1][2]); break;
7612 case 'Q': quota
= (double) 60.0 * (double) atoi(&argv
[1][2]); break;
7614 case 'q': strict
= 1; break;
7615 case 'R': Nrun
= atoi(&argv
[1][2]); break;
7618 samething
: readtrail
= 1;
7619 if (isdigit(argv
[1][2]))
7620 whichtrail
= atoi(&argv
[1][2]);
7621 else if (argc
> 2 && argv
[2][0] != '-') /* check next arg */
7622 { trailfilename
= argv
[2];
7623 argc
--; argv
++; /* skip next arg */
7626 case 'S': silent
= 1; goto samething
;
7629 case 's': hfns
= 1; break;
7631 case 'T': TMODE
= 0444; break;
7632 case 't': if (argv
[1][2]) tprefix
= &argv
[1][2]; break;
7633 case 'V': start_timer(); printf("Generated by %s\n", SpinVersion
);
7634 to_compile(); pan_exit(2); break;
7635 case 'v': verbose
++; break;
7636 case 'w': ssize
= atoi(&argv
[1][2]); break;
7637 case 'Y': signoff
= 1; break;
7638 case 'X': efd
= stdout
; break;
7639 case 'x': exclusive
= 1; break;
7641 /* -B ip is passthru to proxy of remote ip address: */
7642 case 'B': argc
--; argv
++; break;
7643 case 'Q': worker_pids
[0] = atoi(&argv
[1][2]); break;
7644 /* -Un means that the nth worker should be instantiated as a proxy */
7645 case 'U': proxy_pid
= atoi(&argv
[1][2]); break;
7646 /* -W means that this copy is started by a cluster-server as a remote */
7647 /* this flag is passed to ./pan_proxy, which interprets it */
7648 case 'W': remote_party
++; break;
7649 case 'Z': core_id
= atoi(&argv
[1][2]);
7651 { printf("cpu%d: pid %d parent %d\n",
7652 core_id
, getpid(), worker_pids
[0]);
7655 case 'z': z_handoff
= atoi(&argv
[1][2]); break;
7657 case 'z': break; /* ignored for single-core */
7659 default : fprintf(efd
, "saw option -%c\n", argv
[1][1]); usage(efd
); break;
7663 if (iterative
&& TMODE
!= 0666)
7665 fprintf(efd
, "warning: -T ignored when -i or -I is used\n");
7667 #if defined(HASH32) && !defined(SFH)
7669 { fprintf(efd
, "strong warning: compiling -DHASH32 on a 64-bit machine\n");
7670 fprintf(efd
, " without -DSFH can slow down performance a lot\n");
7673 #if defined(WIN32) || defined(WIN64)
7675 TMODE
= _S_IWRITE
| _S_IREAD
;
7680 store_proxy_pid
= proxy_pid
; /* for checks in mem_file() and someone_crashed() */
7681 if (core_id
!= 0) { proxy_pid
= 0; }
7683 if (core_id
== 0 && a_cycles
)
7684 { fprintf(efd
, "hint: this search may be more efficient ");
7685 fprintf(efd
, "if pan.c is compiled -DSEP_STATE\n");
7689 { z_handoff
= 20; /* conservative default - for non-liveness checks */
7691 #if defined(NGQ) || defined(LWQ_FIXED)
7692 LWQ_SIZE
= (double) (128.*1048576.);
7694 LWQ_SIZE
= (double) ( z_handoff
+ 2.) * (double) sizeof(SM_frame
);
7698 { fprintf(efd
, "warning: the intended nr of cores to be used in liveness mode is 2\n");
7700 fprintf(efd
, "warning: without -DSEP_STATE there is no guarantee that all liveness violations are found\n");
7705 #error cannot use hidden variables when compiling multi-core
7711 fprintf(efd
, "warning: using -k%d as minimal usable value\n", hfns
);
7714 omaxdepth
= maxdepth
;
7716 if (WS
== 4 && ssize
> 34)
7718 fprintf(efd
, "warning: using -w%d as max usable value\n", ssize
);
7720 * -w35 would not work: 35-3 = 32 but 1^31 is the largest
7721 * power of 2 that can be represented in an unsigned long
7725 if (WS
== 4 && ssize
> 27)
7727 fprintf(efd
, "warning: using -w%d as max usable value\n", ssize
);
7729 * for emalloc, the lookup table size multiplies by 4 for the pointers
7730 * the largest power of 2 that can be represented in a ulong is 1^31
7731 * hence the largest number of lookup table slots is 31-4 = 27
7736 hiwater
= HHH
= maxdepth
-10;
7739 { stackfile
= (char *) emalloc(strlen(PanSource
)+4+1);
7740 sprintf(stackfile
, "%s._s_", PanSource
);
7743 { fprintf(efd
, "error: cannot use -i or -I with -DSC\n");
7747 #if (defined(R_XPT) || defined(W_XPT)) && !defined(MA)
7748 #warning -DR_XPT and -DW_XPT assume -DMA (ignored)
7750 if (iterative
&& a_cycles
)
7751 fprintf(efd
, "warning: -i or -I work for safety properties only\n");
7754 #error -DBFS not compatible with -DSC
7757 #error -DBFS not compatible with _last
7760 #error cannot use c_track UnMatched with BFS
7763 #warning -DREACH is redundant when -DBFS is used
7766 #if defined(MERGED) && defined(PEG)
7767 #error to use -DPEG use: spin -o3 -a
7771 #error cannot combine -DHC and -DSFH
7772 /* use of NOCOMP is the real reason */
7775 #error cannot combine -DHC and -DNOCOMP
7779 #error cannot combine -DHC and -DBITSTATE
7782 #if defined(SAFETY) && defined(NP)
7783 #error cannot combine -DNP and -DBFS or -DSAFETY
7787 #error cannot combine -DMA and -DBITSTATE
7790 #error usage: -DMA=N with N > 0 and N < VECTORSZ
7795 #error cannot combine -DBITSTATE and -DCOLLAPSE
7798 #error cannot combine -DCOLLAPSE and -DSFH
7799 /* use of NOCOMP is the real reason */
7802 #error cannot combine -DCOLLAPSE and -DNOCOMP
7806 if (maxdepth
<= 0 || ssize
<= 1) usage(efd
);
7807 #if SYNC>0 && !defined(NOREDUCE)
7808 if (a_cycles
&& fairness
)
7809 { fprintf(efd
, "error: p.o. reduction not compatible with ");
7810 fprintf(efd
, "fairness (-f) in models\n");
7811 fprintf(efd
, " with rendezvous operations: ");
7812 fprintf(efd
, "recompile with -DNOREDUCE\n");
7816 #if defined(REM_VARS) && !defined(NOREDUCE)
7817 #warning p.o. reduction not compatible with remote varrefs (use -DNOREDUCE)
7819 #if defined(NOCOMP) && !defined(BITSTATE)
7821 { fprintf(efd
, "error: use of -DNOCOMP voids -l and -a\n");
7826 memlim
= ((double) MEMLIM
) * (double) (1<<20); /* size in Mbyte */
7829 if (Nrun
> 1) HASH_NR
= Nrun
- 1;
7831 if (Nrun
< 1 || Nrun
> 32)
7832 { fprintf(efd
, "error: invalid arg for -R\n");
7836 if (fairness
&& !a_cycles
)
7837 { fprintf(efd
, "error: -f requires -a or -l\n");
7842 { fprintf(efd
, "error: no accept labels defined ");
7843 fprintf(efd
, "in model (for option -a)\n");
7850 #error use of enabled() requires -DNOREDUCE
7853 #error use of pcvalue() requires -DNOREDUCE
7856 #error use of 'else' combined with i/o stmnts requires -DNOREDUCE
7859 #error use of _last requires -DNOREDUCE
7862 #if SYNC>0 && !defined(NOREDUCE)
7864 fprintf(efd
, "warning: use of a rendezvous stmnts in the escape\n");
7865 fprintf(efd
, " of an unless clause, if present, could make p.o. reduction\n");
7866 fprintf(efd
, " invalid (use -DNOREDUCE to avoid this)\n");
7868 fprintf(efd
, " (this type of rv is also not compatible with -DBFS)\n");
7872 #if SYNC>0 && defined(BFS)
7873 #warning use of rendezvous with BFS does not preserve all invalid endstates
7875 #if !defined(REACH) && !defined(BITSTATE)
7876 if (iterative
!= 0 && a_cycles
== 0)
7877 { fprintf(efd
, "warning: -i and -I need -DREACH to work accurately\n");
7880 #if defined(BITSTATE) && defined(REACH)
7881 #warning -DREACH is voided by -DBITSTATE
7883 #if defined(MA) && defined(REACH)
7884 #warning -DREACH is voided by -DMA
7886 #if defined(FULLSTACK) && defined(CNTRSTACK)
7887 #error cannot combine -DFULLSTACK and -DCNTRSTACK
7900 { fprintf(efd
, "warning: never claim + accept labels ");
7901 fprintf(efd
, "requires -a flag to fully verify\n");
7909 { fprintf(efd
, "warning: verification in BFS mode ");
7910 fprintf(efd
, "is restricted to safety properties\n");
7924 { fprintf(efd
, "hint: this search is more efficient ");
7925 fprintf(efd
, "if pan.c is compiled -DSAFETY\n");
7932 { S_A
= 1; /* _a_t */
7934 } else /* _a_t and _cnt[NFAIR] */
7935 { S_A
= (&(now
._cnt
[0]) - (uchar
*) &now
) + NFAIR
- 2;
7936 /* -2 because first two uchars in now are masked */
7941 signal(SIGINT
, stopped
);
7944 trail
= (Trail
*) emalloc(6*sizeof(Trail
));
7947 trail
= (Trail
*) emalloc((maxdepth
+3)*sizeof(Trail
));
7948 trail
++; /* protect trpt-1 refs at depth 0 */
7953 sprintf(nm
, "%s.svd", PanSource
);
7954 if ((svfd
= creat(nm
, TMODE
)) < 0)
7955 { fprintf(efd
, "couldn't create %s\n", nm
);
7962 #if SYNC>0 && ASYNC==0
7974 fprintf(fd
, "%s\n", SpinVersion
);
7975 fprintf(fd
, "Valid Options are:\n");
7978 fprintf(fd
, " -a -> is disabled by -DNP ");
7979 fprintf(fd
, "(-DNP compiles for -l only)\n");
7981 fprintf(fd
, " -a find acceptance cycles\n");
7984 fprintf(fd
, " -a,-l,-f -> are disabled by -DSAFETY\n");
7986 fprintf(fd
, " -A ignore assert() violations\n");
7987 fprintf(fd
, " -b consider it an error to exceed the depth-limit\n");
7988 fprintf(fd
, " -cN stop at Nth error ");
7989 fprintf(fd
, "(defaults to -c1)\n");
7990 fprintf(fd
, " -d print state tables and stop\n");
7991 fprintf(fd
, " -e create trails for all errors\n");
7992 fprintf(fd
, " -E ignore invalid end states\n");
7994 fprintf(fd
, " -Ffile use 'file' to store disk-stack\n");
7997 fprintf(fd
, " -f add weak fairness (to -a or -l)\n");
7999 fprintf(fd
, " -hN use different hash-seed N:1..32\n");
8000 fprintf(fd
, " -i search for shortest path to error\n");
8001 fprintf(fd
, " -I like -i, but approximate and faster\n");
8002 fprintf(fd
, " -J reverse eval order of nested unlesses\n");
8004 fprintf(fd
, " -kN set N bits per state (defaults to 3)\n");
8007 fprintf(fd
, " -LN set scheduling restriction to N (default 10)\n");
8011 fprintf(fd
, " -l find non-progress cycles\n");
8013 fprintf(fd
, " -l find non-progress cycles -> ");
8014 fprintf(fd
, "disabled, requires ");
8015 fprintf(fd
, "compilation with -DNP\n");
8019 fprintf(fd
, " -MN use N Megabytes for bitstate hash array\n");
8020 fprintf(fd
, " -GN use N Gigabytes for bitstate hash array\n");
8022 fprintf(fd
, " -mN max depth N steps (default=10k)\n");
8023 fprintf(fd
, " -n no listing of unreached states\n");
8025 fprintf(fd
, " -pN create svfile (save N bytes per state)\n");
8027 fprintf(fd
, " -QN set time-limit on execution of N minutes\n");
8028 fprintf(fd
, " -q require empty chans in valid end states\n");
8030 fprintf(fd
, " -r read and execute trail - can add -v,-n,-PN,-g,-C\n");
8031 fprintf(fd
, " -rN read and execute N-th error trail\n");
8032 fprintf(fd
, " -C read and execute trail - columnated output (can add -v,-n)\n");
8033 fprintf(fd
, " -PN read and execute trail - restrict trail output to proc N\n");
8034 fprintf(fd
, " -g read and execute trail + msc gui support\n");
8035 fprintf(fd
, " -S silent replay: only user defined printfs show\n");
8038 fprintf(fd
, " -RN repeat run Nx with N ");
8039 fprintf(fd
, "[1..32] independent hash functions\n");
8040 fprintf(fd
, " -s same as -k1 (single bit per state)\n");
8042 fprintf(fd
, " -T create trail files in read-only mode\n");
8043 fprintf(fd
, " -tsuf replace .trail with .suf on trailfiles\n");
8044 fprintf(fd
, " -V print SPIN version number\n");
8045 fprintf(fd
, " -v verbose -- filenames in unreached state listing\n");
8046 fprintf(fd
, " -wN hashtable of 2^N entries ");
8047 fprintf(fd
, "(defaults to -w%d)\n", ssize
);
8048 fprintf(fd
, " -x do not overwrite an existing trail file\n");
8050 fprintf(fd
, " -zN handoff states below depth N to 2nd cpu (multi_core)\n");
8053 fprintf(fd
, "\n options -r, -C, -PN, -g, and -S can optionally be followed by\n");
8054 fprintf(fd
, " a filename argument, as in '-r filename', naming the trailfile\n");
8063 Malloc(unsigned long n
)
8066 if (memcnt
+ (double) n
> memlim
) goto err
;
8069 tmp
= (char *) malloc(n
);
8072 tmp
= (char *) sbrk(n
);
8073 if (tmp
== (char *) -ONE_L
)
8079 printf("pan: out of memory\n");
8081 printf(" %g bytes used\n", memcnt
);
8082 printf(" %g bytes more needed\n", (double) n
);
8083 printf(" %g bytes limit\n",
8087 printf("hint: to reduce memory, recompile with\n");
8089 printf(" -DMA=%d # better/slower compression, or\n", hmax
);
8091 printf(" -DBITSTATE # supertrace, approximation\n");
8094 printf("hint: to reduce memory, recompile with\n");
8096 printf(" -DCOLLAPSE # good, fast compression, or\n");
8098 printf(" -DMA=%d # better/slower compression, or\n", hmax
);
8100 printf(" -DHC # hash-compaction, approximation\n");
8102 printf(" -DBITSTATE # supertrace, approximation\n");
8107 printf(" omit -DFULL_TRAIL or use pan -c0 to reduce memory\n");
8110 printf("hint: to reduce memory, recompile without\n");
8111 printf(" -DSEP_STATE # may be faster, but uses more memory\n");
8116 memcnt
+= (double) n
;
8120 #define CHUNK (100*VECTORSZ)
8123 emalloc(unsigned long n
) /* never released or reallocated */
8126 return (char *) NULL
;
8127 if (n
&(sizeof(void *)-1)) /* for proper alignment */
8128 n
+= sizeof(void *)-(n
&(sizeof(void *)-1));
8129 if ((unsigned long) left
< n
)
8130 { grow
= (n
< CHUNK
) ? CHUNK
: n
;
8131 have
= Malloc(grow
);
8132 fragment
+= (double) left
;
8143 { /* always fatal */
8146 sudden_stop("Uerror");
8151 #if defined(MA) && !defined(SAFETY)
8154 { Trans
*t
; uchar ot
, _m
; int tt
; short II
;
8158 uchar oat
= now
._a_t
;
8159 now
._a_t
&= ~(1|16|32);
8160 memcpy((char *) &comp_now
, (char *) &now
, vsize
);
8164 trpt
= getframe(depth
);
8167 printf("%d State: ", depth
);
8168 for (i
= 0; i
< vsize
; i
++) printf("%d%s,",
8169 ((char *)&now
)[i
], Mask
[i
]?"*":"");
8173 if (trpt
->o_pm
&128) /* fairness alg */
8174 { now
._cnt
[now
._a_t
&1] = trpt
->bup
.oval
;
8177 trpt
= getframe(depth
);
8186 { int d
; Trail
*trl
;
8188 for (d
= 1; d
< depth
; d
++)
8189 { trl
= getframe(depth
-d
); /* was trl = (trpt-d); */
8191 { now
._last
= trl
->pr
- BASE
;
8195 now
._last
= (depth
<1)?0:(trpt
-1)->pr
;
8199 now
._event
= trpt
->o_event
;
8201 if ((now
._a_t
&1) && depth
<= A_depth
)
8202 { now
._a_t
&= ~(1|16|32);
8203 if (fairness
) now
._a_t
|= 2; /* ? */
8205 goto CameFromHere
; /* checkcycles() */
8208 ot
= trpt
->o_ot
; II
= trpt
->pr
;
8209 tt
= trpt
->o_tt
; this = pptr(II
);
8210 _m
= do_reverse(t
, II
, trpt
->o_m
);
8212 printf("%3d: proc %d ", depth
, II
);
8213 printf("reverses %d, %d to %d,",
8214 t
->forw
, tt
, t
->st
);
8215 printf(" %s [abit=%d,adepth=%d,",
8216 t
->tp
, now
._a_t
, A_depth
);
8217 printf("tau=%d,%d] <unwind>\n",
8218 trpt
->tau
, (trpt
-1)->tau
);
8222 trpt
= getframe(depth
);
8226 /* reached[ot][t->st] = 1; 3.4.13 */
8227 ((P0
*)this)->_p
= tt
;
8229 if ((trpt
->o_pm
&32))
8232 if (now
._cnt
[now
._a_t
&1] == 0)
8233 now
._cnt
[now
._a_t
&1] = 1;
8235 now
._cnt
[now
._a_t
&1] += 1;
8240 now
._cnt
[now
._a_t
&1] = 0;
8246 if (memcmp((char *) &now
, (char *) &comp_now
, vsize
) == 0)
8248 if (depth
> 0) goto Up
;
8252 static char unwinding
;
8255 { static char laststr
[256];
8258 if (unwinding
) return; /* 1.4.2 */
8259 if (strncmp(str
, laststr
, 254))
8261 cpu_printf("pan: %s (at depth %ld)\n", str
,
8263 printf("pan: %s (at depth %ld)\n", str
,
8266 (nr_handoffs
* z_handoff
) +
8268 ((depthfound
==-1)?depth
:depthfound
));
8269 strncpy(laststr
, str
, 254);
8272 if (readtrail
) { wrap_trail(); return; }
8274 is_cycle
= (strstr(str
, " cycle") != (char *) 0);
8278 if ((every_error
!= 0)
8281 #if defined(MA) && !defined(SAFETY)
8285 depthfound
= Unwind();
8294 if (depth
> 1) trpt
--;
8296 if (depth
> 1) trpt
++;
8300 #if defined(MA) && !defined(SAFETY)
8301 if (strstr(str
, " cycle"))
8303 printf("sorry: MA writes 1 trail max\n");
8304 wrapup(); /* no recovery from unwind */
8308 if (search_terminated
!= NULL
)
8309 { *search_terminated
|= 4; /* uerror */
8315 { depth
--; trpt
--; /* undo */
8318 if (iterative
!= 0 && maxdepth
> 0)
8319 { maxdepth
= (iterative
== 1)?(depth
-1):(depth
/2);
8321 printf("pan: reducing search depth to %ld\n",
8325 if (errors
>= upto
&& upto
!= 0)
8328 sudden_stop("uerror");
8336 xrefsrc(int lno
, S_F_MAP
*mp
, int M
, int i
)
8337 { Trans
*T
; int j
, retval
=1;
8338 for (T
= trans
[M
][i
]; T
; T
= T
->nxt
)
8340 { if (strcmp(T
->tp
, ".(goto)") == 0
8341 || strncmp(T
->tp
, "goto :", 6) == 0)
8342 return 1; /* not reported */
8344 printf("\tline %d", lno
);
8346 for (j
= 0; j
< sizeof(mp
); j
++)
8347 if (i
>= mp
[j
].from
&& i
<= mp
[j
].upto
)
8348 { printf(", \"%s\"", mp
[j
].fnm
);
8351 printf(", state %d", i
);
8352 if (strcmp(T
->tp
, "") != 0)
8354 q
= transmognify(T
->tp
);
8355 printf(", \"%s\"", q
?q
:"");
8356 } else if (stopstate
[M
][i
])
8357 printf(", -end state-");
8359 retval
= 0; /* reported */
8365 r_ck(uchar
*which
, int N
, int M
, short *src
, S_F_MAP
*mp
)
8369 if (M
== VERI
&& !verbose
) return;
8371 printf("unreached in proctype %s\n", procname
[M
]);
8372 for (i
= 1; i
< N
; i
++)
8374 && (mapstate
[M
][i
] == 0
8375 || which
[mapstate
[M
][i
]] == 0))
8376 m
+= xrefsrc((int) src
[i
], mp
, M
, i
);
8379 printf(" (%d of %d states)\n", N
-1-m
, N
-1);
8381 #if NCORE>1 && !defined(SEP_STATE)
8382 static long rev_trail_cnt
;
8386 rev_trail(int fd
, volatile Stack_Tree
*st_tr
)
8387 { long j
; char snap
[64];
8392 rev_trail(fd
, st_tr
->prv
);
8394 printf("%d (%d) LRT [%d,%d] -- %9u (root %9u)\n",
8395 depth
, rev_trail_cnt
, st_tr
->pr
, st_tr
->t_id
, st_tr
, stack_last
[core_id
]);
8397 if (st_tr
->pr
!= 255)
8398 { sprintf(snap
, "%ld:%d:%d\n",
8399 rev_trail_cnt
++, st_tr
->pr
, st_tr
->t_id
);
8401 if (write(fd
, snap
, j
) != j
)
8402 { printf("pan: error writing trailfile\n");
8407 } else /* handoff point */
8409 { write(fd
, "-1:-1:-1\n", 9);
8418 #if defined VERI || defined(MERGED)
8421 #if NCORE==1 || defined(SEP_STATE) || !defined(FULL_TRAIL)
8428 sprintf(snap
, "-2:%d:-2\n", VERI
);
8429 write(fd
, snap
, strlen(snap
));
8432 sprintf(snap
, "-4:-4:-4\n");
8433 write(fd
, snap
, strlen(snap
));
8435 #if NCORE>1 && !defined(SEP_STATE) && defined(FULL_TRAIL)
8437 enter_critical(GLOBAL_LOCK
);
8438 rev_trail(fd
, stack_last
[core_id
]);
8439 leave_critical(GLOBAL_LOCK
);
8441 i
= 1; /* trail starts at position 1 */
8442 #if NCORE>1 && defined(SEP_STATE)
8443 if (cur_Root
.m_vsize
> 0) { i
++; depth
++; }
8445 for ( ; i
<= depth
; i
++)
8446 { if (i
== depthfound
+1)
8447 write(fd
, "-1:-1:-1\n", 9);
8449 if (!trl
->o_t
) continue;
8450 if (trl
->o_pm
&128) continue;
8451 sprintf(snap
, "%ld:%d:%d\n",
8452 i
, trl
->pr
, trl
->o_t
->t_id
);
8454 if (write(fd
, snap
, j
) != j
)
8455 { printf("pan: error writing trailfile\n");
8462 cpu_printf("pan: wrote trailfile\n");
8467 sv_save(void) /* push state vector onto save stack */
8469 { svtack
->nxt
= (Svtack
*) emalloc(sizeof(Svtack
));
8470 svtack
->nxt
->body
= emalloc(vsize
*sizeof(char));
8471 svtack
->nxt
->lst
= svtack
;
8472 svtack
->nxt
->m_delta
= vsize
;
8474 } else if (vsize
> svtack
->nxt
->m_delta
)
8475 { svtack
->nxt
->body
= emalloc(vsize
*sizeof(char));
8476 svtack
->nxt
->lst
= svtack
;
8477 svtack
->nxt
->m_delta
= vsize
;
8480 svtack
= svtack
->nxt
;
8482 svtack
->o_boq
= boq
;
8484 svtack
->o_delta
= vsize
; /* don't compress */
8485 memcpy((char *)(svtack
->body
), (char *) &now
, vsize
);
8486 #if defined(C_States) && defined(HAS_STACK) && (HAS_TRACK==1)
8487 c_stack((uchar
*) &(svtack
->c_stack
[0]));
8490 cpu_printf("%d: sv_save\n", depth
);
8495 sv_restor(void) /* pop state vector from save stack */
8497 memcpy((char *)&now
, svtack
->body
, svtack
->o_delta
);
8499 boq
= svtack
->o_boq
;
8501 #if defined(C_States) && (HAS_TRACK==1)
8503 c_unstack((uchar
*) &(svtack
->c_stack
[0]));
8505 c_revert((uchar
*) &(now
.c_state
[0]));
8507 if (vsize
!= svtack
->o_delta
)
8508 Uerror("sv_restor");
8510 Uerror("error: v_restor");
8511 svtack
= svtack
->lst
;
8513 cpu_printf(" sv_restor\n");
8519 { int i
; char *z
= (char *) &now
;
8521 proc_offset
[h
] = stack
->o_offset
;
8522 proc_skip
[h
] = (uchar
) stack
->o_skip
;
8524 p_name
[h
] = stack
->o_name
;
8527 for (i
= vsize
+ stack
->o_skip
; i
> vsize
; i
--)
8528 Mask
[i
-1] = 1; /* align */
8530 vsize
+= stack
->o_skip
;
8531 memcpy(z
+vsize
, stack
->body
, stack
->o_delta
);
8532 vsize
+= stack
->o_delta
;
8537 for (i
= 1; i
<= Air
[((P0
*)pptr(h
))->_t
]; i
++)
8538 Mask
[vsize
- i
] = 1; /* pad */
8539 Mask
[proc_offset
[h
]] = 1; /* _pid */
8541 if (BASE
> 0 && h
> 0)
8542 ((P0
*)pptr(h
))->_pid
= h
-BASE
;
8544 ((P0
*)pptr(h
))->_pid
= h
;
8547 if (!stack
->lst
) /* debugging */
8548 Uerror("error: p_restor");
8557 { char *z
= (char *) &now
;
8561 q_offset
[now
._nr_qs
] = stack
->o_offset
;
8562 q_skip
[now
._nr_qs
] = (uchar
) stack
->o_skip
;
8564 q_name
[now
._nr_qs
] = stack
->o_name
;
8566 vsize
+= stack
->o_skip
;
8567 memcpy(z
+vsize
, stack
->body
, stack
->o_delta
);
8568 vsize
+= stack
->o_delta
;
8574 k_end
= stack
->o_offset
;
8575 k
= k_end
- stack
->o_skip
;
8578 if (q_zero(now
._nr_qs
)) k_end
+= stack
->o_delta
;
8581 for ( ; k
< k_end
; k
++)
8584 if (!stack
->lst
) /* debugging */
8585 Uerror("error: q_restor");
8588 typedef struct IntChunks
{
8590 struct IntChunks
*nxt
;
8592 IntChunks
*filled_chunks
[512];
8593 IntChunks
*empty_chunks
[512];
8597 if (nr
>= 512) Uerror("cannot happen grab_int");
8598 if (filled_chunks
[nr
])
8599 { z
= filled_chunks
[nr
];
8600 filled_chunks
[nr
] = filled_chunks
[nr
]->nxt
;
8602 { z
= (IntChunks
*) emalloc(sizeof(IntChunks
));
8603 z
->ptr
= (int *) emalloc(nr
* sizeof(int));
8605 z
->nxt
= empty_chunks
[nr
];
8606 empty_chunks
[nr
] = z
;
8610 ungrab_ints(int *p
, int nr
)
8612 if (!empty_chunks
[nr
]) Uerror("cannot happen ungrab_int");
8613 z
= empty_chunks
[nr
];
8614 empty_chunks
[nr
] = empty_chunks
[nr
]->nxt
;
8616 z
->nxt
= filled_chunks
[nr
];
8617 filled_chunks
[nr
] = z
;
8620 delproc(int sav
, int h
)
8623 int o_vsize
= vsize
;
8625 if (h
+1 != (int) now
._nr_pr
) return 0;
8628 && q_offset
[now
._nr_qs
-1] > proc_offset
[h
])
8632 d
= vsize
- proc_offset
[h
];
8635 { stack
->nxt
= (Stack
*)
8636 emalloc(sizeof(Stack
));
8638 emalloc(Maxbody
*sizeof(char));
8639 stack
->nxt
->lst
= stack
;
8643 stack
->o_offset
= proc_offset
[h
];
8645 stack
->o_skip
= (int) proc_skip
[h
];
8647 stack
->o_skip
= (short) proc_skip
[h
];
8650 stack
->o_name
= p_name
[h
];
8654 memcpy(stack
->body
, (char *)pptr(h
), d
);
8656 vsize
= proc_offset
[h
];
8657 now
._nr_pr
= now
._nr_pr
- 1;
8658 memset((char *)pptr(h
), 0, d
);
8659 vsize
-= (int) proc_skip
[h
];
8664 for (i
= vsize
; i
< o_vsize
; i
++)
8665 Mask
[i
] = 0; /* reset */
8672 { int h
= now
._nr_qs
- 1;
8673 int d
= vsize
- q_offset
[now
._nr_qs
- 1];
8675 int k
, o_vsize
= vsize
;
8679 { stack
->nxt
= (Stack
*)
8680 emalloc(sizeof(Stack
));
8682 emalloc(Maxbody
*sizeof(char));
8683 stack
->nxt
->lst
= stack
;
8687 stack
->o_offset
= q_offset
[h
];
8689 stack
->o_skip
= (int) q_skip
[h
];
8691 stack
->o_skip
= (short) q_skip
[h
];
8694 stack
->o_name
= q_name
[h
];
8697 memcpy(stack
->body
, (char *)qptr(h
), d
);
8699 vsize
= q_offset
[h
];
8700 now
._nr_qs
= now
._nr_qs
- 1;
8701 memset((char *)qptr(h
), 0, d
);
8702 vsize
-= (int) q_skip
[h
];
8707 for (k
= vsize
; k
< o_vsize
; k
++)
8708 Mask
[k
] = 0; /* reset */
8715 for (i
= 0; i
< (int) now
._nr_qs
; i
++)
8725 for (i
= BASE
; i
< (int) now
._nr_pr
; i
++)
8726 { ptr
= (P0
*) pptr(i
);
8727 if (!stopstate
[ptr
->_t
][ptr
->_p
])
8730 if (strict
) return qs_empty();
8731 #if defined(EVENT_TRACE) && !defined(OTIM)
8732 if (!stopstate
[EVENT_TRACE
][now
._event
] && !a_cycles
)
8733 { printf("pan: event_trace not completed\n");
8743 { uchar o_a_t
= now
._a_t
;
8748 uchar o_cnt
= now
._cnt
[1];
8752 struct H_el
*sv
= trpt
->ostate
; /* save */
8754 uchar prov
= trpt
->proviso
; /* save */
8758 { int i
; uchar
*v
= (uchar
*) &now
;
8759 printf(" set Seed state ");
8761 if (fairness
) printf("(cnt = %d:%d, nrpr=%d) ",
8762 now
._cnt
[0], now
._cnt
[1], now
._nr_pr
);
8764 /* for (i = 0; i < n; i++) printf("%d,", v[i]); */
8767 printf("%d: cycle check starts\n", depth
);
8769 now
._a_t
|= (1|16|32);
8770 /* 1 = 2nd DFS; (16|32) to help hasher */
8772 now
._cnt
[1] = now
._cnt
[0];
8774 memcpy((char *)&A_Root
, (char *)&now
, vsize
);
8775 A_depth
= depthfound
= depth
;
8780 o_limit
= trpt
->sched_limit
;
8781 trpt
->sched_limit
= 0;
8783 new_state(); /* start 2nd DFS */
8785 trpt
->sched_limit
= o_limit
;
8790 now
._cnt
[1] = o_cnt
;
8792 A_depth
= 0; depthfound
= -1;
8794 printf("%d: cycle check returns\n", depth
);
8798 trpt
->ostate
= sv
; /* restore */
8800 trpt
->proviso
= prov
;
8806 #if defined(FULLSTACK) && defined(BITSTATE)
8807 struct H_el
*Free_list
= (struct H_el
*) 0;
8809 onstack_init(void) /* to store stack states in a bitstate search */
8810 { S_Tab
= (struct H_el
**) emalloc(maxdepth
*sizeof(struct H_el
*));
8814 { struct H_el
*v
, *last
= 0;
8816 { for (v
= Free_list
; v
&& ((int) v
->tagged
>= n
); v
=v
->nxt
)
8817 { if ((int) v
->tagged
== n
)
8821 gotcha
: Free_list
= v
->nxt
;
8831 /* new: second try */
8833 if (v
&& ((int) v
->tagged
>= n
))
8837 return (struct H_el
*)
8838 emalloc(sizeof(struct H_el
)+n
-sizeof(unsigned));
8845 { struct H_el
*grab_shared(int);
8846 return grab_shared(sizeof(struct H_el
)+n
-sizeof(unsigned));
8850 #define grab_state(n) (struct H_el *) \
8851 emalloc(sizeof(struct H_el)+n-sizeof(unsigned long));
8856 int cnt
= sizeof(struct H_el
)+n
-sizeof(unsigned long);
8858 if (reclaim_size
>= cnt
+WS
)
8859 { if ((cnt
& (WS
-1)) != 0) /* alignment */
8860 { cnt
+= WS
- (cnt
& (WS
-1));
8862 p
= (struct H_el
*) reclaim_mem
;
8864 reclaim_size
-= cnt
;
8867 { p
= (struct H_el
*) emalloc(cnt
);
8876 ordinal(char *v
, long n
, short tp
)
8877 { struct H_el
*tmp
, *ntmp
; long m
;
8878 struct H_el
*olst
= (struct H_el
*) 0;
8879 s_hash((uchar
*)v
, n
);
8880 #if NCORE>1 && !defined(SEP_STATE)
8881 enter_critical(CS_ID
); /* uses spinlock - 1..128 */
8885 { tmp
= grab_state(n
);
8888 for ( ;; olst
= tmp
, tmp
= tmp
->nxt
)
8889 { m
= memcmp(((char *)&(tmp
->state
)), v
, n
);
8896 Insert
: ntmp
= grab_state(n
);
8904 } else if (!tmp
->nxt
)
8906 Append
: tmp
->nxt
= grab_state(n
);
8923 #if defined(AUTO_RESIZE) && !defined(BITSTATE)
8926 memcpy(((char *)&(tmp
->state
)), v
, n
);
8929 #if NCORE>1 && !defined(SEP_STATE)
8930 leave_critical(CS_ID
); /* uses spinlock */
8940 compress(char *vin
, int nin
) /* collapse compression */
8941 { char *w
, *v
= (char *) &comp_now
;
8945 static uchar nbytes
[513]; /* 1 + 256 + 256 */
8946 static unsigned short nbytelen
;
8947 long col_q(int, char *);
8948 long col_p(int, char *);
8954 for (i
= 0; i
< NFAIR
; i
++)
8960 for (i
= 0; i
< (int) now
._nr_pr
; i
++)
8961 { n
= col_p(i
, (char *) 0);
8963 nbytes
[nbytelen
] = 0;
8965 nbytes
[nbytelen
] = 1;
8966 *v
++ = ((P0
*) pptr(i
))->_t
;
8970 { nbytes
[nbytelen
]++;
8974 { nbytes
[nbytelen
]++;
8978 { nbytes
[nbytelen
]++;
8985 for (i
= 0; i
< (int) now
._nr_pr
; i
++)
8987 n
= ordinal(scratch
, x
-scratch
, 2); /* procs */
8989 nbytes
[nbytelen
] = 0;
8991 { nbytes
[nbytelen
]++;
8995 { nbytes
[nbytelen
]++;
8999 { nbytes
[nbytelen
]++;
9005 for (i
= 0; i
< (int) now
._nr_qs
; i
++)
9006 { n
= col_q(i
, (char *) 0);
9007 nbytes
[nbytelen
] = 0;
9010 { nbytes
[nbytelen
]++;
9014 { nbytes
[nbytelen
]++;
9018 { nbytes
[nbytelen
]++;
9025 /* 3 = _a_t, _nr_pr, _nr_qs */
9026 w
= (char *) &now
+ 3 * sizeof(uchar
);
9032 w
= (char *) &(now
._vsz
) + sizeof(unsigned short);
9034 w
= (char *) &(now
._vsz
) + sizeof(unsigned long);
9040 if (now
._nr_qs
> 0 && qptr(0) < pptr(0))
9041 n
= qptr(0) - (uchar
*) w
;
9043 n
= pptr(0) - (uchar
*) w
;
9044 j
= w
- (char *) &now
;
9045 for (i
= 0; i
< (int) n
; i
++, w
++)
9046 if (!Mask
[j
++]) *x
++ = *w
;
9048 for (i
= 0; i
< (int) now
._nr_qs
; i
++)
9052 for (i
= 0, j
= 6; i
< nbytelen
; i
++)
9058 *x
|= (nbytes
[i
] << j
);
9061 for (j
= 0; j
< WS
-1; j
++)
9064 n
= ordinal(scratch
, x
-scratch
, 0); /* globals */
9066 if (n
>= (1<< 8)) { *v
++ = (n
>> 8)&255; j
++; }
9067 if (n
>= (1<<16)) { *v
++ = (n
>>16)&255; j
++; }
9068 if (n
>= (1<<24)) { *v
++ = (n
>>24)&255; j
++; }
9069 *v
++ = j
; /* add last count as a byte */
9070 for (i
= 0; i
< WS
-1; i
++)
9074 printf("collapse %d -> %d\n",
9075 vsize
, v
- (char *)&comp_now
);
9077 return v
- (char *)&comp_now
;
9080 #if !defined(NOCOMP)
9082 compress(char *vin
, int n
) /* default compression */
9086 s_hash((uchar
*)vin
, n
); /* sets K1 and K2 */
9089 { delta
++; /* _a_t */
9092 delta
+= NFAIR
; /* _cnt[] */
9096 memcpy((char *) &comp_now
+ delta
, (char *) &K1
, WS
);
9099 memcpy((char *) &comp_now
+ delta
, (char *) &K2
, HC
);
9105 char *v
= (char *) &comp_now
;
9108 int r
= 0, unroll
= n
/8;
9111 while (r
++ < unroll
)
9112 { /* unroll 8 times, avoid ifs */
9130 r
= n
- i
; /* the rest, at most 7 */
9132 case 7: *v
= *vv
++; v
+= 1 - Mask
[i
++];
9133 case 6: *v
= *vv
++; v
+= 1 - Mask
[i
++];
9134 case 5: *v
= *vv
++; v
+= 1 - Mask
[i
++];
9135 case 4: *v
= *vv
++; v
+= 1 - Mask
[i
++];
9136 case 3: *v
= *vv
++; v
+= 1 - Mask
[i
++];
9137 case 2: *v
= *vv
++; v
+= 1 - Mask
[i
++];
9138 case 1: *v
= *vv
++; v
+= 1 - Mask
[i
++];
9141 r
= (n
+WS
-1)/WS
; /* words rounded up */
9142 r
*= WS
; /* bytes */
9143 i
= r
- i
; /* remainder */
9145 case 7: *v
++ = 0; /* fall thru */
9153 default: Uerror("unexpected wordsize");
9158 { for (i
= 0; i
< n
; i
++, vv
++)
9159 if (!Mask
[i
]) *v
++ = *vv
;
9160 for (i
= 0; i
< WS
-1; i
++)
9165 printf("compress %d -> %d\n",
9166 n
, v
- (char *)&comp_now
);
9168 return v
- (char *)&comp_now
;
9173 #if defined(FULLSTACK) && defined(BITSTATE)
9175 #if !defined(onstack_now)
9176 int onstack_now(void) {}
9178 #if !defined(onstack_put)
9179 void onstack_put(void) {}
9181 #if !defined(onstack_zap)
9182 void onstack_zap(void) {}
9187 { struct H_el
*v
, *w
, *last
= 0;
9188 struct H_el
**tmp
= H_tab
;
9191 static char warned
= 0;
9195 nv
= (char *) &comp_now
;
9196 n
= compress((char *)&now
, vsize
);
9198 #if defined(BITSTATE) && defined(LC)
9199 nv
= (char *) &comp_now
;
9200 n
= compact_stack((char *)&now
, vsize
);
9206 #if !defined(HC) && !(defined(BITSTATE) && defined(LC))
9207 s_hash((uchar
*)nv
, n
);
9210 for (v
= S_Tab
[j1
]; v
; Zh
++, last
=v
, v
=v
->nxt
)
9211 { m
= memcmp(&(v
->state
), nv
, n
);
9219 #if defined(BITSTATE) && NCORE>1
9220 /* seen this happen, likely harmless, but not yet understood */
9223 { /* Uerror("stack out of wack - zap"); */
9224 cpu_printf("pan: warning, stack incomplete\n");
9235 v
->tagged
= (unsigned) n
;
9236 #if !defined(NOREDUCE) && !defined(SAFETY)
9239 v
->nxt
= last
= (struct H_el
*) 0;
9240 for (w
= Free_list
; w
; Fa
++, last
=w
, w
= w
->nxt
)
9241 { if ((int) w
->tagged
<= n
)
9246 { v
->nxt
= Free_list
;
9259 { struct H_el
**tmp
= H_tab
;
9261 if (hstore((char *)&now
, vsize
) != 0)
9262 #if defined(BITSTATE) && defined(LC)
9263 printf("pan: warning, double stack entry\n");
9266 Uerror("cannot happen - unstack_put");
9270 trpt
->ostate
= Lstate
;
9276 struct H_el
**tmp2
= H_tab
;
9277 char *v
; int n
, m
= 1;
9281 #if defined(BITSTATE) && defined(LC)
9282 v
= (char *) &comp_now
;
9283 n
= compact_stack((char *)&now
, vsize
);
9289 v
= (char *) &comp_now
;
9290 n
= compress((char *)&now
, vsize
);
9292 #if !defined(HC) && !(defined(BITSTATE) && defined(LC))
9293 s_hash((uchar
*)v
, n
);
9296 for (tmp
= S_Tab
[j1
]; tmp
; Zn
++, tmp
= tmp
->nxt
)
9297 { m
= memcmp(((char *)&(tmp
->state
)),v
,n
);
9299 { Lstate
= (struct H_el
*) tmp
;
9313 { void r_xpoint(void);
9317 dfa_init((unsigned short) (MA
+a_cycles
));
9318 #if NCORE>1 && !defined(COLLAPSE)
9320 { void init_HT(unsigned long);
9326 #if !defined(MA) || defined(COLLAPSE)
9329 { void init_HT(unsigned long);
9330 init_HT((unsigned long) (ONE_L
<<ssize
)*sizeof(struct H_el
*));
9333 H_tab
= (struct H_el
**)
9334 emalloc((ONE_L
<<ssize
)*sizeof(struct H_el
*));
9339 #if !defined(BITSTATE) || defined(FULLSTACK)
9342 dumpstate(int wasnew
, char *v
, int n
, int tag
)
9346 { printf(" state tags %d (%d::%d): ",
9349 printf(" %d ", tag
);
9357 for (i
= 0; i
< vsize
; i
++) printf("%d%s,",
9358 ((char *)&now
)[i
], Mask
[i
]?"*":"");
9360 printf("\n Vector: ");
9361 for (i
= 0; i
< n
; i
++) printf("%d,", v
[i
]);
9368 gstore(char *vin
, int nin
, uchar pbit
)
9372 static uchar Info
[MA
+1];
9374 n
= compress(vin
, nin
);
9375 v
= (uchar
*) &comp_now
;
9381 { printf("pan: error, MA too small, recompile pan.c");
9382 printf(" with -DMA=N with N>%d\n", n
);
9385 if (n
> (int) maxgs
)
9386 { maxgs
= (unsigned int) n
;
9388 for (i
= 0; i
< n
; i
++)
9391 for ( ; i
< MA
-1; i
++)
9395 if (a_cycles
) /* place _a_t at the end */
9396 { Info
[MA
] = Info
[0];
9400 #if NCORE>1 && !defined(SEP_STATE)
9401 enter_critical(GLOBAL_LOCK
); /* crude, but necessary */
9402 /* to make this mode work, also replace emalloc with grab_shared inside store MA routines */
9405 if (!dfa_store(Info
))
9409 { Info
[MA
] &= ~(1|16|32); /* _a_t */
9411 { Info
[MA
-1] = 4; /* off-stack bit */
9413 if (!dfa_member(MA
-1))
9416 printf("intersected 1st dfs stack\n");
9422 printf("new state\n");
9428 { Info
[MA
-1] = 1; /* proviso bit */
9430 trpt
->proviso
= dfa_member(MA
-1);
9432 Info
[MA
-1] = 4; /* off-stack bit */
9433 if (dfa_member(MA
-1))
9434 { ret_val
= 1; /* off-stack */
9436 printf("old state\n");
9439 { ret_val
= 2; /* on-stack */
9441 printf("on-stack\n");
9449 printf("old state\n");
9452 #if NCORE>1 && !defined(SEP_STATE)
9453 leave_critical(GLOBAL_LOCK
);
9455 return ret_val
; /* old state */
9458 #if defined(BITSTATE) && defined(LC)
9460 compact_stack(char *vin
, int n
)
9462 s_hash((uchar
*)vin
, n
); /* sets K1 and K2 */
9464 delta
++; /* room for state[0] |= 128 */
9466 memcpy((char *) &comp_now
+ delta
, (char *) &K1
, WS
);
9468 memcpy((char *) &comp_now
+ delta
, (char *) &K2
, WS
);
9469 delta
+= WS
; /* use all available bits */
9474 hstore(char *vin
, int nin
) /* hash table storage */
9475 { struct H_el
*ntmp
;
9476 struct H_el
*tmp
, *olst
= (struct H_el
*) 0;
9477 char *v
; int n
, m
=0;
9482 #if defined(BITSTATE) && defined(LC)
9484 { v
= (char *) &comp_now
;
9485 n
= compact_stack(vin
, nin
);
9493 v
= (char *) &comp_now
;
9498 n
= compress(vin
, nin
);
9504 { v
[0] = 0; /* _a_t */
9507 for (m
= 0; m
< NFAIR
; m
++)
9508 v
[m
+1] = 0; /* _cnt[] */
9514 #if !defined(HC) && !(defined(BITSTATE) && defined(LC))
9515 s_hash((uchar
*)v
, n
);
9517 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9518 enter_critical(CS_ID
); /* uses spinlock */
9522 { tmp
= grab_state(n
);
9525 { /* if we get here -- we've already issued a warning */
9526 /* but we want to allow the normal distributed termination */
9527 /* to collect the stats on all cpus in the wrapup */
9528 #if !defined(SEP_STATE) && !defined(BITSTATE)
9529 leave_critical(CS_ID
);
9531 return 1; /* allow normal termination */
9536 { for (;; hcmp
++, olst
= tmp
, tmp
= tmp
->nxt
)
9537 { /* skip the _a_t and the _cnt bytes */
9540 { if (!tmp
->nxt
) goto Append
;
9544 m
= memcmp(((char *)&(tmp
->state
)) + S_A
,
9555 { if ((((char *)&(tmp
->state
))[0] & V_A
) != V_A
)
9556 { wasnew
= 1; nShadow
++;
9557 ((char *)&(tmp
->state
))[0] |= V_A
;
9561 { /* 0 <= now._cnt[now._a_t&1] < MAXPROC */
9562 unsigned ci
, bp
; /* index, bit pos */
9563 ci
= (now
._cnt
[now
._a_t
&1] / 8);
9564 bp
= (now
._cnt
[now
._a_t
&1] - 8*ci
);
9565 if (now
._a_t
&1) /* use tail-bits in _cnt */
9566 { ci
= (NFAIR
- 1) - ci
;
9567 bp
= 7 - bp
; /* bp = 0..7 */
9569 ci
++; /* skip over _a_t */
9570 bp
= 1 << bp
; /* the bit mask */
9571 if ((((char *)&(tmp
->state
))[ci
] & bp
)==0)
9576 ((char *)&(tmp
->state
))[ci
] |= bp
;
9579 /* else: wasnew == 0, i.e., old state */
9585 Lstate
= (struct H_el
*) tmp
;
9590 { Lstate
= (struct H_el
*) tmp
;
9593 && (tmp
->tagged
&A_V
)
9599 printf("cpu%d: ", core_id
);
9601 printf("1st dfs-stack intersected on state %d+\n",
9604 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9605 leave_critical(CS_ID
);
9611 printf("cpu%d: ", core_id
);
9613 printf(" New state %d+\n", (int) tmp
->st_id
);
9616 dumpstate(1, (char *)&(tmp
->state
),n
,tmp
->tagged
);
9618 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9619 leave_critical(CS_ID
);
9624 if ((S_A
)?(tmp
->tagged
&V_A
):tmp
->tagged
)
9625 { Lstate
= (struct H_el
*) tmp
;
9627 /* already on current dfs stack */
9628 /* but may also be on 1st dfs stack */
9630 && (tmp
->tagged
&A_V
)
9633 && (!fairness
|| now
._cnt
[1] <= 1)
9640 printf("cpu%d: ", core_id
);
9642 printf(" Stack state %d\n", (int) tmp
->st_id
);
9645 dumpstate(0, (char *)&(tmp
->state
),n
,tmp
->tagged
);
9647 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9648 leave_critical(CS_ID
);
9650 return 2; /* match on stack */
9657 printf("cpu%d: ", core_id
);
9659 printf(" New state %d+\n", (int) tmp
->st_id
);
9662 dumpstate(1, (char *)&(tmp
->state
), n
, 0);
9664 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9665 leave_critical(CS_ID
);
9672 printf("cpu%d: ", core_id
);
9674 printf(" Old state %d\n", (int) tmp
->st_id
);
9677 dumpstate(0, (char *)&(tmp
->state
), n
, 0);
9684 printf("cpu%d: ", core_id
);
9686 printf(" ReVisiting (from smaller depth)\n");
9689 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9690 leave_critical(CS_ID
);
9695 #if (defined(BFS) && defined(Q_PROVISO)) || NCORE>1
9696 Lstate
= (struct H_el
*) tmp
;
9698 #if NCORE>1 && !defined(SEP_STATE) && !defined(BITSTATE)
9699 leave_critical(CS_ID
);
9701 return 1; /* match outside stack */
9703 { /* insert state before tmp */
9704 ntmp
= grab_state(n
);
9708 #if !defined(SEP_STATE) && !defined(BITSTATE)
9709 leave_critical(CS_ID
);
9711 return 1; /* allow normal termination */
9721 } else if (!tmp
->nxt
)
9722 { /* append after tmp */
9726 tmp
->nxt
= grab_state(n
);
9730 #if !defined(SEP_STATE) && !defined(BITSTATE)
9731 leave_critical(CS_ID
);
9733 return 1; /* allow normal termination */
9741 tmp
->st_id
= (unsigned) nstates
;
9743 printf("cpu%d: ", core_id
);
9746 printf(" Push state %d\n", ((int) nstates
) - 1);
9748 printf(" New state %d\n", (int) nstates
);
9751 #if !defined(SAFETY) || defined(REACH)
9760 { unsigned ci
, bp
; /* as above */
9761 ci
= (now
._cnt
[now
._a_t
&1] / 8);
9762 bp
= (now
._cnt
[now
._a_t
&1] - 8*ci
);
9764 { ci
= (NFAIR
- 1) - ci
;
9765 bp
= 7 - bp
; /* bp = 0..7 */
9773 #if defined(AUTO_RESIZE) && !defined(BITSTATE)
9776 memcpy(((char *)&(tmp
->state
)), v
, n
);
9778 tmp
->tagged
= (S_A
)?V_A
:(depth
+1);
9780 dumpstate(-1, v
, n
, tmp
->tagged
);
9782 Lstate
= (struct H_el
*) tmp
;
9785 dumpstate(-1, v
, n
, 0);
9788 Lstate
= (struct H_el
*) tmp
;
9791 /* #if NCORE>1 && !defined(SEP_STATE) */
9794 tmp
->cpu_id
= core_id
;
9796 #if !defined(SEP_STATE) && !defined(BITSTATE)
9797 leave_critical(CS_ID
);
9803 #include TRANSITIONS
9807 r_ck(reached0
, nstates0
, 0, src_ln0
, src_file0
);
9808 r_ck(reached1
, nstates1
, 1, src_ln1
, src_file1
);
9809 r_ck(reached2
, nstates2
, 2, src_ln2
, src_file2
);
9810 r_ck(reached3
, nstates3
, 3, src_ln3
, src_file3
);
9811 r_ck(reached4
, nstates4
, 4, src_ln4
, src_file4
);
9819 for (l_in
= 0; l_in
< 4; l_in
++)
9821 now
.buffer_use
[l_in
] = 0;
9826 for (l_in
= 0; l_in
< 2; l_in
++)
9828 now
.commit_count
[l_in
] = 0;
9832 now
.events_lost
= 0;
9836 for (l_in
= 0; l_in
< 4; l_in
++)
9838 logval("buffer_use[l_in]", now
.buffer_use
[l_in
]);
9841 logval("write_off", now
.write_off
);
9843 for (l_in
= 0; l_in
< 2; l_in
++)
9845 logval("commit_count[l_in]", now
.commit_count
[l_in
]);
9848 logval("read_off", now
.read_off
);
9849 logval("events_lost", now
.events_lost
);
9850 logval("refcount", now
.refcount
);
9852 Maxbody
= max(Maxbody
, sizeof(State
)-VECTORSZ
);
9856 addqueue(int n
, int is_rv
)
9857 { int j
=0, i
= now
._nr_qs
;
9862 Uerror("too many queues");
9864 default: Uerror("bad queue - addqueue");
9867 q_skip
[i
] = WS
-(vsize
%WS
);
9875 for (k
+= (int) q_skip
[i
]; k
> vsize
; k
--)
9878 vsize
+= (int) q_skip
[i
];
9879 q_offset
[i
] = vsize
;
9885 hmax
= max(hmax
, vsize
);
9886 if (vsize
>= VECTORSZ
)
9887 Uerror("VECTORSZ is too small, edit pan.h");
9888 memset((char *)qptr(i
), 0, j
);
9889 ((Q0
*)qptr(i
))->_t
= n
;
9895 qsend(int into
, int sorted
, int args_given
)
9902 uerror("ref to uninitialized chan name (sending)");
9903 if (into
>= (int) now
._nr_qs
|| into
< 0)
9904 Uerror("qsend bad queue#");
9906 j
= ((Q0
*)qptr(into
))->Qlen
;
9907 switch (((Q0
*)qptr(into
))->_t
) {
9908 case 0: printf("queue %d was deleted\n", into
+1);
9909 default: Uerror("bad queue - qsend");
9912 if (in_s_scope(into
+1))
9922 { uerror("ref to uninitialized chan name (q_zero)");
9925 switch(((Q0
*)qptr(from
))->_t
) {
9926 case 0: printf("queue %d was deleted\n", from
+1);
9928 Uerror("bad queue q-zero");
9934 { printf("==>> a test of the contents of a rv ");
9935 printf("channel always returns FALSE\n");
9936 uerror("error to poll rendezvous channel");
9943 setq_claim(int x
, int m
, char *s
, int y
, char *p
)
9945 uerror("x[rs] claim on uninitialized channel");
9946 if (x
< 0 || x
> MAXQ
)
9947 Uerror("cannot happen setq_claim");
9951 if (m
&2) q_S_check(x
, y
);
9952 if (m
&1) q_R_check(x
, y
);
9954 short q_sender
[MAXQ
+1];
9956 q_S_check(int x
, int who
)
9958 { q_sender
[x
] = who
+1;
9961 { printf("chan %s (%d), ",
9963 printf("sndr proc %s (%d)\n",
9965 uerror("xs chans cannot be used for rv");
9969 if (q_sender
[x
] != who
+1)
9970 { printf("pan: xs assertion violated: ");
9971 printf("access to chan <%s> (%d)\npan: by ",
9973 if (q_sender
[x
] > 0 && p_name
[q_sender
[x
]-1])
9974 printf("%s (proc %d) and by ",
9975 p_name
[q_sender
[x
]-1], q_sender
[x
]-1);
9976 printf("%s (proc %d)\n",
9978 uerror("error, partial order reduction invalid");
9982 short q_recver
[MAXQ
+1];
9984 q_R_check(int x
, int who
)
9986 { q_recver
[x
] = who
+1;
9989 { printf("chan %s (%d), ",
9991 printf("recv proc %s (%d)\n",
9993 uerror("xr chans cannot be used for rv");
9997 if (q_recver
[x
] != who
+1)
9998 { printf("pan: xr assertion violated: ");
9999 printf("access to chan %s (%d)\npan: ",
10001 if (q_recver
[x
] > 0 && p_name
[q_recver
[x
]-1])
10002 printf("by %s (proc %d) and ",
10003 p_name
[q_recver
[x
]-1], q_recver
[x
]-1);
10004 printf("by %s (proc %d)\n",
10006 uerror("error, partial order reduction invalid");
10014 uerror("ref to uninitialized chan name (len)");
10015 return ((Q0
*)qptr(x
))->Qlen
;
10021 uerror("ref to uninitialized chan name (qfull)");
10022 switch(((Q0
*)qptr(from
))->_t
) {
10023 case 0: printf("queue %d was deleted\n", from
+1);
10025 Uerror("bad queue - q_full");
10032 { /* empty or full */
10033 return !q_len(from
) || q_full(from
);
10038 qrecv(int from
, int slot
, int fld
, int done
)
10043 uerror("ref to uninitialized chan name (receiving)");
10044 if (from
>= (int) now
._nr_qs
|| from
< 0)
10045 Uerror("qrecv bad queue#");
10048 if (done
&& (in_r_scope(from
+1)))
10049 require('r', from
);
10051 switch (((Q0
*)qptr(from
))->_t
) {
10052 case 0: printf("queue %d was deleted\n", from
+1);
10053 default: Uerror("bad queue - qrecv");
10062 col_q(int i
, char *z
)
10065 Q0
*ptr
= (Q0
*) qptr(i
);
10067 default: Uerror("bad qtype - collapse");
10069 if (z
) x
= z
; else x
= scratch
;
10070 y
= (char *) ptr
; k
= q_offset
[i
];
10071 /* no need to store the empty slots at the end */
10072 j
-= (q_max
[ptr
->_t
] - ptr
->Qlen
) * ((j
- 2)/q_max
[ptr
->_t
]);
10073 for ( ; j
> 0; j
--, y
++)
10074 if (!Mask
[k
++]) *x
++ = *y
;
10075 for (j
= 0; j
< WS
-1; j
++)
10078 if (z
) return (long) (x
- z
);
10079 return ordinal(scratch
, x
-scratch
, 1); /* chan */
10085 { int _m
=0, j
; uchar
*z
;
10091 uerror("ref to uninitialized chan (unsend)");
10093 j
= ((Q0
*)z
)->Qlen
;
10094 ((Q0
*)z
)->Qlen
= --j
;
10095 switch (((Q0
*)qptr(into
))->_t
) {
10096 default: Uerror("bad queue - unsend");
10102 unrecv(int from
, int slot
, int fld
, int fldvar
, int strt
)
10106 uerror("ref to uninitialized chan (unrecv)");
10108 j
= ((Q0
*)z
)->Qlen
;
10109 if (strt
) ((Q0
*)z
)->Qlen
= j
+1;
10110 switch (((Q0
*)qptr(from
))->_t
) {
10111 default: Uerror("bad queue - qrecv");
10115 q_cond(short II
, Trans
*t
)
10117 for (i
= 0; i
< 6; i
++)
10118 { if (t
->ty
[i
] == TIMEOUT_F
) return 1;
10119 if (t
->ty
[i
] == ALPHA_F
)
10123 return (II
+1 == (short) now
._nr_pr
&& II
+1 < MAXPROC
);
10125 switch (t
->qu
[i
]) {
10127 default: Uerror("unknown qid - q_cond");
10135 { char ctd
[1024], carg
[64];
10137 strcpy(ctd
, "-DBITSTATE ");
10142 strcat(ctd
, "-DNOVSZ ");
10145 strcat(ctd
, "-DREVERSE ");
10148 strcat(ctd
, "-DT_REVERSE ");
10152 sprintf(carg
, "-DRANDOMIZE=%d ", RANDOMIZE
);
10155 strcat(ctd
, "-DRANDOMIZE ");
10159 sprintf(carg
, "-DSCHED=%d ", SCHED
);
10163 strcat(ctd
, "-DBFS ");
10166 sprintf(carg
, "-DMEMLIM=%d ", MEMLIM
);
10170 sprintf(carg
, "-DMEMCNT=%d ", MEMCNT
);
10175 strcat(ctd
, "-DNOCLAIM ");
10178 strcat(ctd
, "-DSAFETY ");
10181 strcat(ctd
, "-DNOFAIR ");
10185 { sprintf(carg
, "-DNFAIR=%d ", NFAIR
);
10192 strcat(ctd
, "-DNOREDUCE ");
10195 strcat(ctd
, "-DXUSAFE ");
10199 strcat(ctd
, "-DNP ");
10202 strcat(ctd
, "-DPEG ");
10205 strcat(ctd
, "-DVAR_RANGES ");
10208 strcat(ctd
, "-DHC0 ");
10211 strcat(ctd
, "-DHC1 ");
10214 strcat(ctd
, "-DHC2 ");
10217 strcat(ctd
, "-DHC3 ");
10220 strcat(ctd
, "-DHC4 ");
10223 strcat(ctd
, "-DCHECK ");
10226 strcat(ctd
, "-DCTL ");
10229 strcat(ctd
, "-DNIBIS ");
10231 #ifdef NOBOUNDCHECK
10232 strcat(ctd
, "-DNOBOUNDCHECK ");
10235 strcat(ctd
, "-DNOSTUTTER ");
10238 strcat(ctd
, "-DREACH ");
10241 strcat(ctd
, "-DPRINTF ");
10244 strcat(ctd
, "-DOTIM ");
10247 strcat(ctd
, "-DCOLLAPSE ");
10250 sprintf(carg
, "-DMA=%d ", MA
);
10254 strcat(ctd
, "-DSVDUMP ");
10257 if (VECTORSZ
!= 1024)
10258 { sprintf(carg
, "-DVECTORSZ=%d ", VECTORSZ
);
10263 strcat(ctd
, "-DVERBOSE ");
10266 strcat(ctd
, "-DCHECK ");
10269 strcat(ctd
, "-DSDUMP ");
10272 sprintf(carg
, "-DNCORE=%d ", NCORE
);
10276 sprintf(carg
, "-DSFH ");
10281 { sprintf(carg
, "-DVMAX=%d ", VMAX
);
10287 { sprintf(carg
, "-DPMAX=%d ", PMAX
);
10293 { sprintf(carg
, "-DQMAX=%d ", QMAX
);
10298 sprintf(carg
, "-DSET_WQ_SIZE=%d ", SET_WQ_SIZE
);
10301 printf("Compiled as: cc -o pan %span.c\n", ctd
);
10315 #define uchar unsigned char
10317 #define ulong unsigned long
10318 #define ushort unsigned short
10321 #define HASH(y,n) (n)*(((long)y))
10322 #define INRANGE(e,h) ((h>=e->From && h<=e->To)||(e->s==1 && e->S==h))
10324 extern char *emalloc(unsigned long); /* imported routine */
10325 extern void dfa_init(ushort
); /* 4 exported routines */
10326 extern int dfa_member(ulong
);
10327 extern int dfa_store(uchar
*);
10328 extern void dfa_stats(void);
10330 typedef struct Edge
{
10331 uchar From
, To
; /* max range 0..255 */
10332 uchar s
, S
; /* if s=1, S is singleton */
10333 struct Vertex
*Dst
;
10337 typedef struct Vertex
{
10338 ulong key
, num
; /* key for splay tree, nr incoming edges */
10339 uchar from
[2], to
[2]; /* in-node predefined edge info */
10340 struct Vertex
*dst
[2];/* most nodes have 2 or more edges */
10341 struct Edge
*Succ
; /* in case there are more edges */
10342 struct Vertex
*lnk
, *left
, *right
; /* splay tree plumbing */
10345 static Edge
*free_edges
;
10346 static Vertex
*free_vertices
;
10347 static Vertex
**layers
; /* one splay tree of nodes per layer */
10348 static Vertex
**path
; /* run of word in the DFA */
10349 static Vertex
*R
, *F
, *NF
; /* Root, Final, Not-Final */
10350 static uchar
*word
, *lastword
;/* string, and last string inserted */
10351 static int dfa_depth
, iv
=0, nv
=0, pfrst
=0, Tally
;
10353 static void insert_it(Vertex
*, int); /* splay-tree code */
10354 static void delete_it(Vertex
*, int);
10355 static Vertex
*find_it(Vertex
*, Vertex
*, uchar
, int);
10358 recyc_edges(Edge
*e
)
10361 recyc_edges(e
->Nxt
);
10362 e
->Nxt
= free_edges
;
10367 new_edge(Vertex
*dst
)
10372 free_edges
= e
->Nxt
;
10373 e
->From
= e
->To
= e
->s
= e
->S
= 0;
10374 e
->Nxt
= (Edge
*) 0;
10376 e
= (Edge
*) emalloc(sizeof(Edge
));
10383 recyc_vertex(Vertex
*v
)
10385 recyc_edges(v
->Succ
);
10386 v
->Succ
= (Edge
*) free_vertices
;
10396 { v
= free_vertices
;
10397 free_vertices
= (Vertex
*) v
->Succ
;
10398 v
->Succ
= (Edge
*) 0;
10401 v
= (Vertex
*) emalloc(sizeof(Vertex
));
10408 allDelta(Vertex
*v
, int n
)
10409 { Vertex
*dst
= new_vertex();
10420 insert_edge(Vertex
*v
, Edge
*e
)
10421 { /* put new edge first */
10423 { v
->dst
[0] = e
->Dst
;
10424 v
->from
[0] = e
->From
;
10430 { v
->from
[1] = v
->from
[0]; v
->from
[0] = e
->From
;
10431 v
->to
[1] = v
->to
[0]; v
->to
[0] = e
->To
;
10432 v
->dst
[1] = v
->dst
[0]; v
->dst
[0] = e
->Dst
;
10436 { int f
= v
->from
[1];
10438 Vertex
*d
= v
->dst
[1];
10439 v
->from
[1] = v
->from
[0]; v
->from
[0] = e
->From
;
10440 v
->to
[1] = v
->to
[0]; v
->to
[0] = e
->To
;
10441 v
->dst
[1] = v
->dst
[0]; v
->dst
[0] = e
->Dst
;
10451 copyRecursive(Vertex
*v
, Edge
*e
)
10453 if (e
->Nxt
) copyRecursive(v
, e
->Nxt
);
10454 f
= new_edge(e
->Dst
);
10464 copyEdges(Vertex
*to
, Vertex
*from
)
10466 for (i
= 0; i
< 2; i
++)
10467 { to
->from
[i
] = from
->from
[i
];
10468 to
->to
[i
] = from
->to
[i
];
10469 to
->dst
[i
] = from
->dst
[i
];
10471 if (from
->Succ
) copyRecursive(to
, from
->Succ
);
10475 cacheDelta(Vertex
*v
, int h
, int first
)
10476 { static Edge
*ov
, tmp
; int i
;
10478 if (!first
&& INRANGE(ov
,h
))
10479 return ov
; /* intercepts about 10% */
10480 for (i
= 0; i
< 2; i
++)
10481 if (v
->dst
[i
] && h
>= v
->from
[i
] && h
<= v
->to
[i
])
10482 { tmp
.From
= v
->from
[i
];
10484 tmp
.Dst
= v
->dst
[i
];
10489 for (ov
= v
->Succ
; ov
; ov
= ov
->Nxt
)
10490 if (INRANGE(ov
,h
)) return ov
;
10492 Uerror("cannot get here, cacheDelta");
10497 Delta(Vertex
*v
, int h
) /* v->delta[h] */
10500 if (v
->dst
[0] && h
>= v
->from
[0] && h
<= v
->to
[0])
10501 return v
->dst
[0]; /* oldest edge */
10502 if (v
->dst
[1] && h
>= v
->from
[1] && h
<= v
->to
[1])
10504 for (e
= v
->Succ
; e
; e
= e
->Nxt
)
10507 Uerror("cannot happen Delta");
10508 return (Vertex
*) 0;
10512 numDelta(Vertex
*v
, int d
)
10517 for (i
= 0; i
< 2; i
++)
10519 { cnt
= v
->dst
[i
]->num
+ d
*(1 + v
->to
[i
] - v
->from
[i
]);
10520 if (d
== 1 && cnt
< v
->dst
[i
]->num
) goto bad
;
10521 v
->dst
[i
]->num
= cnt
;
10523 for (e
= v
->Succ
; e
; e
= e
->Nxt
)
10524 { cnt
= e
->Dst
->num
+ d
*(1 + e
->To
- e
->From
+ e
->s
);
10525 if (d
== 1 && cnt
< e
->Dst
->num
)
10526 bad
: Uerror("too many incoming edges");
10532 setDelta(Vertex
*v
, int h
, Vertex
*newdst
) /* v->delta[h] = newdst; */
10533 { Edge
*e
, *f
= (Edge
*) 0, *g
;
10536 /* remove the old entry, if there */
10537 for (i
= 0; i
< 2; i
++)
10538 if (v
->dst
[i
] && h
>= v
->from
[i
] && h
<= v
->to
[i
])
10539 { if (h
== v
->from
[i
])
10540 { if (h
== v
->to
[i
])
10541 { v
->dst
[i
] = (Vertex
*) 0;
10542 v
->from
[i
] = v
->to
[i
] = 0;
10545 } else if (h
== v
->to
[i
])
10548 { g
= new_edge(v
->dst
[i
]);/* same dst */
10549 g
->From
= v
->from
[i
];
10550 g
->To
= h
-1; /* left half */
10551 v
->from
[i
] = h
+1; /* right half */
10556 for (e
= v
->Succ
; e
; f
= e
, e
= e
->Nxt
)
10557 { if (e
->s
== 1 && e
->S
== h
)
10561 if (h
>= e
->From
&& h
<= e
->To
)
10562 { if (h
== e
->From
)
10565 { e
->From
= e
->To
= e
->S
;
10572 } else if (h
== e
->To
)
10575 { g
= new_edge(e
->Dst
); /* same dst */
10577 g
->To
= h
-1; /* g=left half */
10578 e
->From
= h
+1; /* e=right half */
10579 g
->Nxt
= e
->Nxt
; /* insert g */
10580 e
->Nxt
= g
; /* behind e */
10584 rem_tst
: if (e
->From
> e
->To
)
10590 e
->Nxt
= (Edge
*) 0;
10593 { e
->From
= e
->To
= e
->S
;
10599 /* check if newdst is already there */
10600 for (i
= 0; i
< 2; i
++)
10601 if (v
->dst
[i
] == newdst
)
10602 { if (h
+1 == (int) v
->from
[i
])
10606 if (h
== (int) v
->to
[i
]+1)
10610 for (e
= v
->Succ
; e
; e
= e
->Nxt
)
10611 { if (e
->Dst
== newdst
)
10612 { if (h
+1 == (int) e
->From
)
10614 if (e
->s
== 1 && e
->S
+1 == e
->From
)
10620 if (h
== (int) e
->To
+1)
10622 if (e
->s
== 1 && e
->S
== e
->To
+1)
10633 /* add as a new edge */
10634 e
= new_edge(newdst
);
10635 e
->From
= e
->To
= h
;
10640 cheap_key(Vertex
*v
)
10644 { vk2
= (ulong
) v
->dst
[0];
10645 if ((ulong
) v
->dst
[1] > vk2
)
10646 vk2
= (ulong
) v
->dst
[1];
10647 } else if (v
->dst
[1])
10648 vk2
= (ulong
) v
->dst
[1];
10651 for (e
= v
->Succ
; e
; e
= e
->Nxt
)
10652 if ((ulong
) e
->Dst
> vk2
)
10653 vk2
= (ulong
) e
->Dst
;
10655 Tally
= (vk2
>>2)&(TWIDTH
-1);
10660 mk_key(Vertex
*v
) /* not sensitive to order */
10661 { ulong m
= 0, vk2
= 0;
10665 { m
+= HASH(v
->dst
[0], v
->to
[0] - v
->from
[0] + 1);
10666 vk2
= (ulong
) v
->dst
[0];
10669 { m
+= HASH(v
->dst
[1], v
->to
[1] - v
->from
[1] + 1);
10670 if ((ulong
) v
->dst
[1] > vk2
) vk2
= (ulong
) v
->dst
[1];
10672 for (e
= v
->Succ
; e
; e
= e
->Nxt
)
10673 { m
+= HASH(e
->Dst
, e
->To
- e
->From
+ 1 + e
->s
);
10674 if ((ulong
) e
->Dst
> vk2
) vk2
= (ulong
) e
->Dst
;
10676 Tally
= (vk2
>>2)&(TWIDTH
-1);
10681 mk_special(int sigma
, Vertex
*n
, Vertex
*v
)
10682 { ulong m
= 0, vk2
= 0;
10686 for (i
= 0; i
< 2; i
++)
10688 { if (sigma
>= v
->from
[i
] && sigma
<= v
->to
[i
])
10689 { m
+= HASH(v
->dst
[i
], v
->to
[i
]-v
->from
[i
]);
10690 if ((ulong
) v
->dst
[i
] > vk2
10691 && v
->to
[i
] > v
->from
[i
])
10692 vk2
= (ulong
) v
->dst
[i
];
10694 { m
+= HASH(v
->dst
[i
], v
->to
[i
]-v
->from
[i
]+1);
10695 if ((ulong
) v
->dst
[i
] > vk2
)
10696 vk2
= (ulong
) v
->dst
[i
];
10698 for (f
= v
->Succ
; f
; f
= f
->Nxt
)
10699 { if (sigma
>= f
->From
&& sigma
<= f
->To
)
10700 { m
+= HASH(f
->Dst
, f
->To
- f
->From
+ f
->s
);
10701 if ((ulong
) f
->Dst
> vk2
10702 && f
->To
- f
->From
+ f
->s
> 0)
10703 vk2
= (ulong
) f
->Dst
;
10704 } else if (f
->s
== 1 && sigma
== f
->S
)
10705 { m
+= HASH(f
->Dst
, f
->To
- f
->From
+ 1);
10706 if ((ulong
) f
->Dst
> vk2
) vk2
= (ulong
) f
->Dst
;
10708 { m
+= HASH(f
->Dst
, f
->To
- f
->From
+ 1 + f
->s
);
10709 if ((ulong
) f
->Dst
> vk2
) vk2
= (ulong
) f
->Dst
;
10712 if ((ulong
) n
> vk2
) vk2
= (ulong
) n
;
10713 Tally
= (vk2
>>2)&(TWIDTH
-1);
10719 dfa_init(ushort nr_layers
)
10720 { int i
; Vertex
*r
, *t
;
10722 dfa_depth
= nr_layers
; /* one byte per layer */
10723 path
= (Vertex
**) emalloc((dfa_depth
+1)*sizeof(Vertex
*));
10724 layers
= (Vertex
**) emalloc(TWIDTH
*(dfa_depth
+1)*sizeof(Vertex
*));
10725 lastword
= (uchar
*) emalloc((dfa_depth
+1)*sizeof(uchar
));
10726 lastword
[dfa_depth
] = lastword
[0] = 255;
10727 path
[0] = R
= new_vertex(); F
= new_vertex();
10729 for (i
= 1, r
= R
; i
< dfa_depth
; i
++, r
= t
)
10730 t
= allDelta(r
, i
-1);
10731 NF
= allDelta(r
, i
-1);
10735 static void complement_dfa(void) { Vertex
*tmp
= F
; F
= NF
; NF
= tmp
; }
10739 tree_stats(Vertex
*t
)
10740 { Edge
*e
; double cnt
=0.0;
10742 if (!t
->key
) return 0;
10743 t
->key
= 0; /* precaution */
10744 if (t
->dst
[0]) cnt
++;
10745 if (t
->dst
[1]) cnt
++;
10746 for (e
= t
->Succ
; e
; e
= e
->Nxt
)
10748 cnt
+= tree_stats(t
->lnk
);
10749 cnt
+= tree_stats(t
->left
);
10750 cnt
+= tree_stats(t
->right
);
10756 { int i
, j
; double cnt
= 0.0;
10757 for (j
= 0; j
< TWIDTH
; j
++)
10758 for (i
= 0; i
< dfa_depth
+1; i
++)
10759 cnt
+= tree_stats(layers
[i
*TWIDTH
+j
]);
10760 printf("Minimized Automaton: %6d nodes and %6g edges\n",
10765 dfa_member(ulong n
)
10767 uchar
*w
= &word
[n
];
10770 p
= &path
[n
]; q
= (p
+1);
10771 for (i
= n
; i
< dfa_depth
; i
++)
10772 *q
++ = Delta(*p
++, *w
++);
10777 dfa_store(uchar
*sv
)
10778 { Vertex
**p
, **q
, *s
, *y
, *old
, *new = F
;
10779 uchar
*w
, *u
= lastword
;
10783 while (*w
++ == *u
++) /* find first byte that differs */
10785 pfrst
= (int) (u
- lastword
) - 1;
10786 memcpy(&lastword
[pfrst
], &sv
[pfrst
], dfa_depth
-pfrst
);
10787 if (pfrst
> iv
) pfrst
= iv
;
10788 if (pfrst
> nv
) pfrst
= nv
;
10790 p
= &path
[pfrst
]; q
= (p
+1); w
= &word
[pfrst
];
10791 for (i
= pfrst
; i
< dfa_depth
; i
++)
10792 *q
++ = Delta(*p
++, *w
++); /* (*p)->delta[*w++]; */
10794 if (*p
== F
) return 1; /* it's already there */
10799 new = find_it(path
[iv
], old
, word
[iv
], iv
);
10800 } while (new && iv
> 0);
10803 nv
= k
= 0; s
= path
[0];
10804 for (j
= 1; j
<= iv
; ++j
)
10805 if (path
[j
]->num
> 1)
10806 { y
= new_vertex();
10807 copyEdges(y
, path
[j
]);
10811 setDelta(s
, word
[j
-1], y
);
10813 y
->num
= 1; /* initial value 1 */
10815 path
[j
]->num
--; /* only 1 moved from j to y */
10821 y
= Delta(s
, word
[iv
]);
10824 setDelta(s
, word
[iv
], old
);
10828 for (j
= iv
+1; j
< dfa_depth
; j
++)
10829 if (path
[j
]->num
== 0)
10830 { numDelta(path
[j
], -1);
10831 delete_it(path
[j
], j
);
10832 recyc_vertex(path
[j
]);
10839 splay(ulong i
, Vertex
*t
)
10840 { Vertex N
, *l
, *r
, *y
;
10843 N
.left
= N
.right
= (Vertex
*) 0;
10847 { if (!t
->left
) break;
10848 if (i
< t
->left
->key
)
10850 t
->left
= y
->right
;
10853 if (!t
->left
) break;
10858 } else if (i
> t
->key
)
10859 { if (!t
->right
) break;
10860 if (i
> t
->right
->key
)
10862 t
->right
= y
->left
;
10865 if (!t
->right
) break;
10873 l
->right
= t
->left
;
10874 r
->left
= t
->right
;
10881 insert_it(Vertex
*v
, int L
)
10886 nr
= ((L
*TWIDTH
)+Tally
);
10897 new->left
= t
->left
;
10899 t
->left
= (Vertex
*) 0;
10900 } else if (i
> t
->key
)
10902 new->right
= t
->right
;
10904 t
->right
= (Vertex
*) 0;
10905 } else /* it's already there */
10906 { v
->lnk
= t
->lnk
; /* put in linked list off v */
10914 checkit(Vertex
*h
, Vertex
*v
, Vertex
*n
, uchar sigma
)
10918 for (k
= 0; k
< 2; k
++)
10920 { if (sigma
>= h
->from
[k
] && sigma
<= h
->to
[k
])
10921 { if (h
->dst
[k
] != n
) goto no_match
;
10923 for (i
= h
->from
[k
]; i
<= h
->to
[k
]; i
++)
10924 { if (i
== sigma
) continue;
10925 g
= cacheDelta(v
, i
, j
); j
= 0;
10926 if (h
->dst
[k
] != g
->Dst
)
10928 if (g
->s
== 0 || g
->S
!= i
)
10931 for (f
= h
->Succ
; f
; f
= f
->Nxt
)
10932 { if (INRANGE(f
,sigma
))
10933 { if (f
->Dst
!= n
) goto no_match
;
10935 for (i
= f
->From
; i
<= f
->To
; i
++)
10936 { if (i
== sigma
) continue;
10937 g
= cacheDelta(v
, i
, j
); j
= 0;
10938 if (f
->Dst
!= g
->Dst
)
10940 if (g
->s
== 1 && i
== g
->S
)
10944 if (f
->s
&& f
->S
!= sigma
)
10945 { g
= cacheDelta(v
, f
->S
, 1);
10946 if (f
->Dst
!= g
->Dst
)
10950 if (h
->Succ
|| h
->dst
[0] || h
->dst
[1]) return 1;
10956 find_it(Vertex
*v
, Vertex
*n
, uchar sigma
, int L
)
10960 i
= mk_special(sigma
,n
,v
);
10961 nr
= ((L
*TWIDTH
)+Tally
);
10964 if (!t
) return (Vertex
*) 0;
10965 layers
[nr
] = t
= splay(i
, t
);
10967 for (z
= t
; z
; z
= z
->lnk
)
10968 if (checkit(z
, v
, n
, sigma
))
10971 return (Vertex
*) 0;
10975 delete_it(Vertex
*v
, int L
)
10980 nr
= ((L
*TWIDTH
)+Tally
);
10986 { Vertex
*z
, *y
= (Vertex
*) 0;
10987 for (z
= t
; z
&& z
!= v
; y
= z
, z
= z
->lnk
)
10989 if (z
!= v
) goto bad
;
10992 z
->lnk
= (Vertex
*) 0;
10995 } else if (z
->lnk
) /* z == t == v */
10998 y
->right
= t
->right
;
10999 t
->left
= t
->right
= t
->lnk
= (Vertex
*) 0;
11003 /* delete the node itself */
11007 { x
= splay(i
, t
->left
);
11008 x
->right
= t
->right
;
11010 t
->left
= t
->right
= t
->lnk
= (Vertex
*) 0;
11014 bad
: Uerror("cannot happen delete");
11017 #if defined(MA) && (defined(W_XPT) || defined(R_XPT))
11018 static Vertex
**temptree
;
11019 static char wbuf
[4096];
11020 static int WCNT
= 4096, wcnt
=0;
11021 static uchar stacker
[MA
+1];
11022 static ulong stackcnt
= 0;
11023 extern double nstates
, nlinks
, truncs
, truncs2
;
11026 xwrite(int fd
, char *b
, int n
)
11028 if (wcnt
+n
>= 4096)
11029 { write(fd
, wbuf
, wcnt
);
11032 memcpy(&wbuf
[wcnt
], b
, n
);
11040 write(fd
, wbuf
, wcnt
);
11046 w_vertex(int fd
, Vertex
*v
)
11047 { char t
[3]; int i
; Edge
*e
;
11049 xwrite(fd
, (char *) &v
, sizeof(Vertex
*));
11051 for (i
= 0; i
< 2; i
++)
11053 { t
[1] = v
->from
[i
], t
[2] = v
->to
[i
];
11055 xwrite(fd
, (char *) &(v
->dst
[i
]), sizeof(Vertex
*));
11057 for (e
= v
->Succ
; e
; e
= e
->Nxt
)
11058 { t
[1] = e
->From
, t
[2] = e
->To
;
11060 xwrite(fd
, (char *) &(e
->Dst
), sizeof(Vertex
*));
11063 { t
[1] = t
[2] = e
->S
;
11065 xwrite(fd
, (char *) &(e
->Dst
), sizeof(Vertex
*));
11070 w_layer(int fd
, Vertex
*v
)
11074 xwrite(fd
, (char *) &c
, 1);
11076 w_layer(fd
, v
->lnk
);
11077 w_layer(fd
, v
->left
);
11078 w_layer(fd
, v
->right
);
11083 { int fd
; char nm
[64];
11085 static uchar xwarned
= 0;
11087 sprintf(nm
, "%s.xpt", PanSource
);
11088 if ((fd
= creat(nm
, 0666)) <= 0)
11091 printf("cannot creat checkpoint file\n");
11094 xwrite(fd
, (char *) &nstates
, sizeof(double));
11095 xwrite(fd
, (char *) &truncs
, sizeof(double));
11096 xwrite(fd
, (char *) &truncs2
, sizeof(double));
11097 xwrite(fd
, (char *) &nlinks
, sizeof(double));
11098 xwrite(fd
, (char *) &dfa_depth
, sizeof(int));
11099 xwrite(fd
, (char *) &R
, sizeof(Vertex
*));
11100 xwrite(fd
, (char *) &F
, sizeof(Vertex
*));
11101 xwrite(fd
, (char *) &NF
, sizeof(Vertex
*));
11103 for (j
= 0; j
< TWIDTH
; j
++)
11104 for (i
= 0; i
< dfa_depth
+1; i
++)
11105 { w_layer(fd
, layers
[i
*TWIDTH
+j
]);
11106 c
= 2; xwrite(fd
, (char *) &c
, 1);
11112 xread(int fd
, char *b
, int n
)
11113 { int m
= wcnt
; int delta
= 0;
11115 { if (m
> 0) memcpy(b
, &wbuf
[WCNT
-m
], m
);
11117 WCNT
= wcnt
= read(fd
, wbuf
, 4096);
11119 Uerror("xread failed -- insufficient data");
11122 memcpy(&b
[delta
], &wbuf
[WCNT
-wcnt
], n
);
11127 x_cleanup(Vertex
*c
)
11128 { Edge
*e
; /* remove the tree and edges from c */
11130 for (e
= c
->Succ
; e
; e
= e
->Nxt
)
11137 { Vertex
*tmp
; int i
, s
;
11139 /* double-check: */
11140 stacker
[dfa_depth
-1] = 0; r
= dfa_store(stacker
);
11141 stacker
[dfa_depth
-1] = 4; j
= dfa_member(dfa_depth
-1);
11142 if (r
!= 1 || j
!= 0)
11143 { printf("%d: ", stackcnt
);
11144 for (i
= 0; i
< dfa_depth
; i
++)
11145 printf("%d,", stacker
[i
]);
11146 printf(" -- not a stackstate <o:%d,4:%d>\n", r
, j
);
11149 stacker
[dfa_depth
-1] = 1;
11150 s
= dfa_member(dfa_depth
-1);
11152 { tmp
= F
; F
= NF
; NF
= tmp
; } /* complement */
11153 if (s
) dfa_store(stacker
);
11154 stacker
[dfa_depth
-1] = 0;
11155 dfa_store(stacker
);
11157 { tmp
= F
; F
= NF
; NF
= tmp
; }
11161 x_rm_stack(Vertex
*t
, int k
)
11169 for (e
= t
->Succ
; e
; e
= e
->Nxt
)
11170 { for (j
= e
->From
; j
<= (int) e
->To
; j
++)
11171 { stacker
[k
] = (uchar
) j
;
11172 x_rm_stack(e
->Dst
, k
-1);
11175 { stacker
[k
] = e
->S
;
11176 x_rm_stack(e
->Dst
, k
-1);
11181 insert_withkey(Vertex
*v
, int L
)
11182 { Vertex
*new, *t
= temptree
[L
];
11184 if (!t
) { temptree
[L
] = v
; return v
; }
11185 t
= splay(v
->key
, t
);
11186 if (v
->key
< t
->key
)
11188 new->left
= t
->left
;
11190 t
->left
= (Vertex
*) 0;
11191 } else if (v
->key
> t
->key
)
11193 new->right
= t
->right
;
11195 t
->right
= (Vertex
*) 0;
11197 { if (t
!= R
&& t
!= F
&& t
!= NF
)
11198 Uerror("double insert, bad checkpoint data");
11209 find_withkey(Vertex
*v
, int L
)
11210 { Vertex
*t
= temptree
[L
];
11212 { temptree
[L
] = t
= splay((ulong
) v
, t
);
11213 if (t
->key
== (ulong
) v
)
11216 Uerror("not found error, bad checkpoint data");
11217 return (Vertex
*) 0;
11221 r_layer(int fd
, int n
)
11227 { xread(fd
, &c
, 1);
11230 { v
= new_vertex();
11231 xread(fd
, (char *) &(v
->key
), sizeof(Vertex
*));
11232 v
= insert_withkey(v
, n
);
11233 } else /* c == 0 */
11234 { e
= new_edge((Vertex
*) 0);
11238 xread(fd
, (char *) &(e
->Dst
), sizeof(Vertex
*));
11244 v_fix(Vertex
*t
, int nr
)
11249 for (i
= 0; i
< 2; i
++)
11251 t
->dst
[i
] = find_withkey(t
->dst
[i
], nr
);
11253 for (e
= t
->Succ
; e
; e
= e
->Nxt
)
11254 e
->Dst
= find_withkey(e
->Dst
, nr
);
11256 v_fix(t
->left
, nr
);
11257 v_fix(t
->right
, nr
);
11261 v_insert(Vertex
*t
, int nr
)
11265 v_insert(t
->left
, nr
);
11266 v_insert(t
->right
, nr
);
11268 /* remove only leafs from temptree */
11269 t
->left
= t
->right
= t
->lnk
= (Vertex
*) 0;
11270 insert_it(t
, nr
); /* into layers */
11271 for (i
= 0; i
< 2; i
++)
11273 t
->dst
[i
]->num
+= (t
->to
[i
] - t
->from
[i
] + 1);
11274 for (e
= t
->Succ
; e
; e
= e
->Nxt
)
11275 e
->Dst
->num
+= (e
->To
- e
->From
+ 1 + e
->s
);
11282 for (i
= 0; i
< dfa_depth
; i
++)
11283 v_fix(temptree
[i
], (i
+1));
11285 for (i
= dfa_depth
; i
>= 0; i
--)
11286 v_insert(temptree
[i
], i
);
11290 x_tail(Vertex
*t
, ulong want
)
11291 { int i
, yes
, no
; Edge
*e
; Vertex
*v
= (Vertex
*) 0;
11296 for (i
= 0; i
< 2; i
++)
11297 if ((ulong
) t
->dst
[i
] == want
)
11298 { /* was t->from[i] <= 0 && t->to[i] >= 0 */
11299 /* but from and to are uchar */
11300 if (t
->from
[i
] == 0)
11303 if (t
->from
[i
] <= 4 && t
->to
[i
] >= 4)
11307 for (e
= t
->Succ
; e
; e
= e
->Nxt
)
11308 if ((ulong
) e
->Dst
== want
)
11309 { /* was INRANGE(e,0) but From and To are uchar */
11310 if ((e
->From
== 0) || (e
->s
==1 && e
->S
==0))
11312 else if (INRANGE(e
, 4))
11315 if (yes
&& !no
) return t
;
11316 v
= x_tail(t
->left
, want
); if (v
) return v
;
11317 v
= x_tail(t
->right
, want
); if (v
) return v
;
11318 return (Vertex
*) 0;
11322 x_anytail(Vertex
*t
, Vertex
*c
, int nr
)
11323 { int i
; Edge
*e
, *f
; Vertex
*v
;
11327 for (i
= 0; i
< 2; i
++)
11328 if ((ulong
) t
->dst
[i
] == c
->key
)
11329 { v
= new_vertex(); v
->key
= t
->key
;
11331 f
->From
= t
->from
[i
];
11336 x_anytail(temptree
[nr
-1], v
, nr
-1);
11339 for (e
= t
->Succ
; e
; e
= e
->Nxt
)
11340 if ((ulong
) e
->Dst
== c
->key
)
11341 { v
= new_vertex(); v
->key
= t
->key
;
11349 x_anytail(temptree
[nr
-1], v
, nr
-1);
11352 x_anytail(t
->left
, c
, nr
);
11353 x_anytail(t
->right
, c
, nr
);
11358 { Vertex
*c
, *v
; /* find 0 and !4 predecessor of F */
11360 v
= x_tail(temptree
[dfa_depth
-1], F
->key
);
11361 if (!v
) return (Vertex
*) 0;
11363 c
= new_vertex(); c
->key
= v
->key
;
11365 /* every node on dfa_depth-2 that has v->key as succ */
11366 /* make copy and let c point to these (reversing ptrs) */
11368 x_anytail(temptree
[dfa_depth
-2], c
, dfa_depth
-2);
11375 { int fd
; char nm
[64]; Vertex
*d
;
11379 sprintf(nm
, "%s.xpt", PanSource
);
11380 if ((fd
= open(nm
, 0)) < 0) /* O_RDONLY */
11381 Uerror("cannot open checkpoint file");
11383 xread(fd
, (char *) &nstates
, sizeof(double));
11384 xread(fd
, (char *) &truncs
, sizeof(double));
11385 xread(fd
, (char *) &truncs2
, sizeof(double));
11386 xread(fd
, (char *) &nlinks
, sizeof(double));
11387 xread(fd
, (char *) &dfa_depth
, sizeof(int));
11389 if (dfa_depth
!= MA
+a_cycles
)
11390 Uerror("bad dfa_depth in checkpoint file");
11392 path
= (Vertex
**) emalloc((dfa_depth
+1)*sizeof(Vertex
*));
11393 layers
= (Vertex
**) emalloc(TWIDTH
*(dfa_depth
+1)*sizeof(Vertex
*));
11394 temptree
= (Vertex
**) emalloc((dfa_depth
+2)*sizeof(Vertex
*));
11395 lastword
= (uchar
*) emalloc((dfa_depth
+1)*sizeof(uchar
));
11396 lastword
[dfa_depth
] = lastword
[0] = 255;
11398 path
[0] = R
= new_vertex();
11399 xread(fd
, (char *) &R
->key
, sizeof(Vertex
*));
11400 R
= insert_withkey(R
, 0);
11403 xread(fd
, (char *) &F
->key
, sizeof(Vertex
*));
11404 F
= insert_withkey(F
, dfa_depth
);
11407 xread(fd
, (char *) &NF
->key
, sizeof(Vertex
*));
11408 NF
= insert_withkey(NF
, dfa_depth
);
11410 for (j
= 0; j
< TWIDTH
; j
++)
11411 for (i
= 0; i
< dfa_depth
+1; i
++)
11414 if (wcnt
!= 0) Uerror("bad count in checkpoint file");
11418 stacker
[dfa_depth
-1] = 0;
11419 x_rm_stack(d
, dfa_depth
-2);
11423 printf("pan: removed %d stackstates\n", stackcnt
);
11424 nstates
-= (double) stackcnt
;
11429 check_claim(int st
)
11431 if (st
== endclaim
)
11432 uerror("claim violated!");
11433 if (stopstate
[VERI
][st
])
11434 uerror("end state in claim reached");
11440 printf("global vars:\n");
11441 printf(" byte write_off: %d\n", now
.write_off
);
11443 for (l_in
= 0; l_in
< 2; l_in
++)
11445 printf(" byte commit_count[%d]: %d\n", l_in
, now
.commit_count
[l_in
]);
11448 printf(" byte read_off: %d\n", now
.read_off
);
11449 printf(" byte events_lost: %d\n", now
.events_lost
);
11450 printf(" byte refcount: %d\n", now
.refcount
);
11452 for (l_in
= 0; l_in
< 4; l_in
++)
11454 printf(" bit buffer_use[%d]: %d\n", l_in
, now
.buffer_use
[l_in
]);
11459 c_locals(int pid
, int tp
)
11463 printf("local vars proc %d (:init:):\n", pid
);
11464 printf(" byte i: %d\n", ((P4
*)pptr(pid
))->i
);
11465 printf(" byte j: %d\n", ((P4
*)pptr(pid
))->j
);
11466 printf(" byte sum: %d\n", ((P4
*)pptr(pid
))->sum
);
11467 printf(" byte commit_sum: %d\n", ((P4
*)pptr(pid
))->commit_sum
);
11473 printf("local vars proc %d (reader):\n", pid
);
11474 printf(" byte i: %d\n", ((P2
*)pptr(pid
))->i
);
11475 printf(" byte j: %d\n", ((P2
*)pptr(pid
))->j
);
11478 printf("local vars proc %d (tracer):\n", pid
);
11479 printf(" byte size: %d\n", ((P1
*)pptr(pid
))->size
);
11480 printf(" byte prev_off: %d\n", ((P1
*)pptr(pid
))->prev_off
);
11481 printf(" byte new_off: %d\n", ((P1
*)pptr(pid
))->new_off
);
11482 printf(" byte tmp_commit: %d\n", ((P1
*)pptr(pid
))->tmp_commit
);
11483 printf(" byte i: %d\n", ((P1
*)pptr(pid
))->i
);
11484 printf(" byte j: %d\n", ((P1
*)pptr(pid
))->j
);
11487 printf("local vars proc %d (switcher):\n", pid
);
11488 printf(" byte prev_off: %d\n", ((P0
*)pptr(pid
))->prev_off
);
11489 printf(" byte new_off: %d\n", ((P0
*)pptr(pid
))->new_off
);
11490 printf(" byte tmp_commit: %d\n", ((P0
*)pptr(pid
))->tmp_commit
);
11491 printf(" byte size: %d\n", ((P0
*)pptr(pid
))->size
);
11499 default: Printf("%d", x
);
11503 c_chandump(int unused
) { unused
++; /* avoid complaints */ }