convert from svn repository: remove tags directory
[lttv.git] / trunk / verif / lttng / buffer.spin.bkp3
1
// LTTng ltt-tracer.c atomic lockless buffering scheme Promela model v1
// Created for the Spin validator.
// Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
// June 2008

// TODO : create test cases that will generate an overflow on the offset and
// counter type. Counter types smaller than a byte should be used.

// Promela only has unsigned char, no signed char.
// Because detection of difference < 0 depends on a signed type, but we want
// compactness, check also for the values being higher than half of the unsigned
// char range (and consider them negative). The model, by design, does not use
// offsets or counts higher than 127 because we would then have to use a larger
// type (short or int).
#define HALF_UCHAR (255/2)

// NUMPROCS 4 : causes event loss with some reader timings.
// e.g. 3 events, 1 switch, 1 event (lost, buffer full), read 1 subbuffer
#define NUMPROCS 4

// NUMPROCS 3 : does not cause event loss because buffers are big enough.
//#define NUMPROCS 3
// e.g. 3 events, 1 switch, read 1 subbuffer

#define NUMSWITCH 1
#define BUFSIZE 4
#define NR_SUBBUFS 2
#define SUBBUF_SIZE (BUFSIZE / NR_SUBBUFS)

// Writer counters
byte write_off = 0;            // Free-running write head; slot index is write_off % BUFSIZE.
byte commit_count[NR_SUBBUFS]; // Bytes committed (reserved AND fully written) per sub-buffer.

// Reader counters
byte read_off = 0;               // Free-running read head; slot index is read_off % BUFSIZE.
byte retrieve_count[NR_SUBBUFS]; // Bytes consumed by the reader per sub-buffer.

byte events_lost = 0; // Events dropped by tracer() because the buffer was full.
byte refcount = 0;    // Active tracer/switcher references; cleaner() waits for it to reach 0.

// Set when a sub-buffer's commit count reaches a sub-buffer boundary,
// signalling that it is complete and deliverable to the reader.
bool deliver = 0;

// buffer slot in-use bit. Detects racy use (more than a single process
// accessing a slot at any given step).
bool buffer_use[BUFSIZE];
47
// Proceed to a sub-buffer switch if needed.
// Used in a periodical timer interrupt to fill and ship the current subbuffer
// to the reader so we can guarantee a steady flow. If a subbuffer is
// completely empty, don't switch.
// Also used as "finalize" operation to complete the last subbuffer after
// all writers have finished so the last subbuffer can be read by the reader.
proctype switcher()
{
	byte prev_off, new_off, tmp_commit;
	byte size;

cmpxchg_loop:
	// Try to reserve the remainder of the current sub-buffer as padding.
	// Bail out if the buffer is full (would overwrite unread data) or if
	// the current sub-buffer is completely empty (size == SUBBUF_SIZE:
	// nothing to ship, no switch needed).
	atomic {
		prev_off = write_off;
		size = SUBBUF_SIZE - (prev_off % SUBBUF_SIZE);
		new_off = prev_off + size;
		if
		:: (new_off - read_off > BUFSIZE && new_off - read_off < HALF_UCHAR)
			|| size == SUBBUF_SIZE ->
			// Differences >= HALF_UCHAR stand in for negative values;
			// see the HALF_UCHAR comment at the top of the file.
			refcount = refcount - 1;
			goto not_needed;
		:: else -> skip
		fi;
	}
	// Emulates cmpxchg(&write_off, prev_off, new_off): retry from the top
	// if a concurrent writer moved write_off since we sampled it.
	atomic {
		if
		:: prev_off != write_off -> goto cmpxchg_loop
		:: else -> write_off = new_off;
		fi;
	}

	// Commit the padding bytes; reaching a sub-buffer boundary makes the
	// sub-buffer deliverable to the reader.
	atomic {
		tmp_commit = commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] + size;
		commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] = tmp_commit;
		if
		:: tmp_commit % SUBBUF_SIZE == 0 -> deliver = 1
		:: else -> skip
		fi;
		refcount = refcount - 1;
	}
not_needed:
	skip;
}
91
// tracer
// Writes 1 byte of information in the buffer at the current
// "write_off" position and then increment the commit_count of the sub-buffer
// the information has been written to.
proctype tracer()
{
	byte size = 1;
	byte prev_off, new_off, tmp_commit;
	byte i; // loop counter (unused local "j" removed)

cmpxchg_loop:
	// Speculatively reserve "size" bytes starting at the write head.
	atomic {
		prev_off = write_off;
		new_off = prev_off + size;
	}
	// Drop the event when the reservation would overwrite data the reader
	// has not consumed yet (buffer full). The HALF_UCHAR bound treats large
	// unsigned differences as negative (see top-of-file comment).
	atomic {
		if
		:: new_off - read_off > BUFSIZE && new_off - read_off < HALF_UCHAR ->
			goto lost
		:: else -> skip
		fi;
	}
	// Emulates cmpxchg(&write_off, prev_off, new_off): retry on concurrent
	// update. On success, mark the reserved slots in-use so the model can
	// detect two processes touching the same slot at the same step.
	atomic {
		if
		:: prev_off != write_off -> goto cmpxchg_loop
		:: else -> write_off = new_off;
		fi;
		i = 0;
		do
		:: i < size ->
			assert(buffer_use[(prev_off + i) % BUFSIZE] == 0);
			buffer_use[(prev_off + i) % BUFSIZE] = 1;
			i++
		:: i >= size -> break
		od;
	}

	// writing to buffer...

	// Release the slots and commit the write; reaching a sub-buffer
	// boundary makes the sub-buffer deliverable to the reader.
	atomic {
		i = 0;
		do
		:: i < size ->
			buffer_use[(prev_off + i) % BUFSIZE] = 0;
			i++
		:: i >= size -> break
		od;
		tmp_commit = commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] + size;
		commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] = tmp_commit;
		if
		:: tmp_commit % SUBBUF_SIZE == 0 -> deliver = 1;
		:: else -> skip
		fi;
	}
	// Single exit point: drop our reference whether the event was written
	// or lost.
	atomic {
		goto end;
lost:
		events_lost++;
end:
		refcount = refcount - 1;
	}
}
154
// reader
// Read the information sub-buffer per sub-buffer when available.
//
// Reads the information as soon as it is ready, or may be delayed by
// an asynchronous delivery. Being modeled as a process insures all cases
// (scheduled very quickly or very late, causing event loss) are covered.
// Only one reader per buffer (normally ensured by a mutex). This is modeled
// by using a single reader process.
proctype reader()
{
	byte i;            // loop counter
	byte tmp_retrieve; // staged new retrieve_count value
	// Unused locals "j", "lwrite_off" and "lcommit_count" removed.

	do
	// A sub-buffer is readable when the writer head is at least one full
	// sub-buffer ahead of the reader head AND that sub-buffer is entirely
	// committed (commit_count - retrieve_count == SUBBUF_SIZE).
	:: (write_off / SUBBUF_SIZE) - (read_off / SUBBUF_SIZE) > 0
		&& (write_off / SUBBUF_SIZE) - (read_off / SUBBUF_SIZE) < HALF_UCHAR
		&& commit_count[(read_off % BUFSIZE) / SUBBUF_SIZE]
			- retrieve_count[(read_off % BUFSIZE) / SUBBUF_SIZE]
			== SUBBUF_SIZE ->
		// Mark the sub-buffer's slots in-use to detect racy concurrent
		// access by a writer.
		atomic {
			i = 0;
			do
			:: i < SUBBUF_SIZE ->
				assert(buffer_use[(read_off + i) % BUFSIZE] == 0);
				buffer_use[(read_off + i) % BUFSIZE] = 1;
				i++
			:: i >= SUBBUF_SIZE -> break
			od;
		}
		// reading from buffer...

		// Since there is only one reader per buffer at any given time,
		// we don't care about retrieve_count vs read_off ordering :
		// combined use of retrieve_count and read_off are made atomic by a
		// mutex.
		atomic {
			i = 0;
			do
			:: i < SUBBUF_SIZE ->
				buffer_use[(read_off + i) % BUFSIZE] = 0;
				i++
			:: i >= SUBBUF_SIZE -> break
			od;
			tmp_retrieve = retrieve_count[(read_off % BUFSIZE) / SUBBUF_SIZE]
				+ SUBBUF_SIZE;
			retrieve_count[(read_off % BUFSIZE) / SUBBUF_SIZE] = tmp_retrieve;
			read_off = read_off + SUBBUF_SIZE;
		}
	// Terminate once everything that could have been produced (writes that
	// were not lost) has been consumed.
	:: read_off >= (NUMPROCS - events_lost) -> break;
	od;
}
207
// Waits for all tracer and switcher processes to finish before finalizing
// the buffer. Only after that will the reader be allowed to read the
// last subbuffer.
proctype cleaner()
{
	atomic {
		// Block until every tracer/switcher reference has been dropped,
		// then take a reference on behalf of the finalizing switcher.
		refcount == 0;
		refcount++;
		run switcher(); // Finalize the last sub-buffer so it can be read.
	}
}
222
// Initializes all shared counters, launches the reader, cleaner, tracer and
// switcher processes, then checks the model's global invariants at one
// arbitrary point of the interleaving.
init {
	byte i = 0;
	byte j = 0;
	byte commit_sum = 0;
	// Unused local "sum" removed.

	atomic {
		// Zero per-sub-buffer counters and the slot in-use bits.
		i = 0;
		do
		:: i < NR_SUBBUFS ->
			commit_count[i] = 0;
			retrieve_count[i] = 0;
			i++
		:: i >= NR_SUBBUFS -> break
		od;
		i = 0;
		do
		:: i < BUFSIZE ->
			buffer_use[i] = 0;
			i++
		:: i >= BUFSIZE -> break
		od;
		run reader();
		run cleaner();
		// Each writer/switcher holds one reference until it terminates;
		// cleaner() waits for refcount to drop back to 0.
		i = 0;
		do
		:: i < NUMPROCS ->
			refcount = refcount + 1;
			run tracer();
			i++
		:: i >= NUMPROCS -> break
		od;
		i = 0;
		do
		:: i < NUMSWITCH ->
			refcount = refcount + 1;
			run switcher();
			i++
		:: i >= NUMSWITCH -> break
		od;
	}
	// Assertions.
	atomic {
		// The writer head must always be superior or equal to the reader head.
		assert(write_off - read_off >= 0 && write_off - read_off < HALF_UCHAR);
		j = 0;
		commit_sum = 0;
		do
		:: j < NR_SUBBUFS ->
			commit_sum = commit_sum + commit_count[j];
			// The commit count of a particular subbuffer must always be higher
			// or equal to the retrieve_count of this subbuffer.
			assert(commit_count[j] - retrieve_count[j] >= 0 &&
				commit_count[j] - retrieve_count[j] < HALF_UCHAR);
			j++
		:: j >= NR_SUBBUFS -> break
		od;
		// The sum of all subbuffer commit counts must always be lower or equal
		// to the writer head, because space must be reserved before it is
		// written to and then committed.
		assert(write_off - commit_sum >= 0 && write_off - commit_sum < HALF_UCHAR);

		// If we have less writers than the buffer space available, we should
		// not lose events
		assert(NUMPROCS + NUMSWITCH > BUFSIZE || events_lost == 0);
	}
}
This page took 0.03602 seconds and 4 git commands to generate.