
// does not cause event loss:
// e.g. 3 events, 1 switch, read 1 subbuffer
//#define NUMPROCS 3

// causes event loss with some reader timings:
// e.g. 3 events, 1 switch, 1 event (lost, buffer full), read 1 subbuffer
#define NUMPROCS 4

#define NUMSWITCH 1
#define BUFSIZE 4
#define NR_SUBBUFS 2
#define SUBBUF_SIZE (BUFSIZE / NR_SUBBUFS)
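
// With BUFSIZE 4 and NR_SUBBUFS 2, SUBBUF_SIZE evaluates to 2. A worked
// loss scenario matching the comment above: 3 events of size 1 occupy
// offsets 0-2, the switch pads offset 3 (completing the second sub-buffer),
// and a 4th event would move write_off from 4 to 5 while read_off may
// still be 0; since 5 - 0 > BUFSIZE, that event is lost.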

// Writer counters
byte write_off = 0;
byte commit_count[NR_SUBBUFS];

// Reader counters
byte read_off = 0;
byte retrieve_count[NR_SUBBUFS];

byte events_lost = 0;
byte refcount = 0;

bool deliver = 0;

// Buffer slot in-use bits. Used to detect racy use (more than a single
// process accessing a slot at any given step).
bool buffer_use[BUFSIZE];

// Proceed to a sub-buffer switch if needed.
// Used in a periodic timer interrupt to fill and ship the current sub-buffer
// to the reader so we can guarantee a steady flow.
// Also used as a "finalize" operation to complete the last sub-buffer after
// all writers have finished, so the last sub-buffer can be read by the reader.
proctype switcher()
{
	byte prev_off, new_off, tmp_commit;
	byte size;

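	// The pair of atomic blocks below models a cmpxchg-style reservation:
	// the first snapshots write_off and computes the padding needed to
	// reach the next sub-buffer boundary (bailing out if that would
	// overflow the buffer past the reader); the second atomically
	// re-checks write_off and publishes new_off, retrying on a
	// concurrent update.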
cmpxchg_loop:
	atomic {
		prev_off = write_off;
		size = SUBBUF_SIZE - (prev_off % SUBBUF_SIZE);
		new_off = prev_off + size;
		if
		:: new_off - read_off > BUFSIZE ->
			refcount = refcount - 1;
			goto not_needed;
		:: else -> skip
		fi;
	}
	atomic {
		if
		:: prev_off != write_off -> goto cmpxchg_loop
		:: else -> write_off = new_off;
		fi;
	}

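	// Commit the padded bytes. When the running commit count for the
	// sub-buffer reaches a multiple of SUBBUF_SIZE, the sub-buffer is
	// complete and is flagged for delivery to the reader.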
	atomic {
		tmp_commit = commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] + size;
		commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] = tmp_commit;
		if
		:: tmp_commit % SUBBUF_SIZE == 0 -> deliver = 1
		:: else -> skip
		fi;
		refcount = refcount - 1;
	}
not_needed:
	skip;
}

// tracer
// Writes "size" bytes of information in the buffer at the current
// "write_off" position and then increments the commit_count of the
// sub-buffer the information has been written to.
proctype tracer(byte size)
{
	byte prev_off, new_off, tmp_commit;
	byte i;

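	// Same cmpxchg-style reservation as in switcher(), except that the
	// fullness check happens in its own atomic step, so it can be stale
	// by the time the cmpxchg runs; a concurrent update of write_off
	// forces a retry from cmpxchg_loop. On success, the reserved slots
	// are flagged in buffer_use so any overlapping access trips the
	// assertion.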
cmpxchg_loop:
	atomic {
		prev_off = write_off;
		new_off = prev_off + size;
	}
	atomic {
		if
		:: new_off - read_off > BUFSIZE -> goto lost
		:: else -> skip
		fi;
	}
	atomic {
		if
		:: prev_off != write_off -> goto cmpxchg_loop
		:: else -> write_off = new_off;
		fi;
		i = 0;
		do
		:: i < size ->
			assert(buffer_use[(prev_off + i) % BUFSIZE] == 0);
			buffer_use[(prev_off + i) % BUFSIZE] = 1;
			i++
		:: i >= size -> break
		od;
	}

	// writing to buffer...
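	// The non-atomic gap here is deliberate: every other process may
	// interleave while this writer fills its reserved slots, modeling
	// the unlocked time a real tracer spends writing event data.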

	atomic {
		i = 0;
		do
		:: i < size ->
			buffer_use[(prev_off + i) % BUFSIZE] = 0;
			i++
		:: i >= size -> break
		od;
		tmp_commit = commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] + size;
		commit_count[(prev_off % BUFSIZE) / SUBBUF_SIZE] = tmp_commit;
		if
		:: tmp_commit % SUBBUF_SIZE == 0 -> deliver = 1;
		:: else -> skip
		fi;
	}
	atomic {
		goto end;
lost:
		events_lost++;
end:
		refcount = refcount - 1;
	}
}

// reader
// Reads the information sub-buffer per sub-buffer when available.
//
// Reads the information as soon as it is ready, or may be delayed by
// an asynchronous delivery. Being modeled as a process ensures all cases
// (scheduled very quickly or very late, causing event loss) are covered.
// Only one reader per buffer (normally ensured by a mutex). This is modeled
// by using a single reader process.
proctype reader()
{
	byte i;
	byte tmp_retrieve;

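	// A sub-buffer may be consumed only when the write head is at least
	// one full sub-buffer ahead of the read head and that sub-buffer is
	// fully committed (commit count minus retrieve count == SUBBUF_SIZE).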
	do
	:: (write_off / SUBBUF_SIZE) - (read_off / SUBBUF_SIZE) > 0
	   && commit_count[(read_off % BUFSIZE) / SUBBUF_SIZE]
	      - retrieve_count[(read_off % BUFSIZE) / SUBBUF_SIZE]
	      == SUBBUF_SIZE ->
		atomic {
			i = 0;
			do
			:: i < SUBBUF_SIZE ->
				assert(buffer_use[(read_off + i) % BUFSIZE] == 0);
				buffer_use[(read_off + i) % BUFSIZE] = 1;
				i++
			:: i >= SUBBUF_SIZE -> break
			od;
		}
		// reading from buffer...

		// Since there is only one reader per buffer at any given time,
		// we don't care about retrieve_count vs read_off ordering:
		// the combined use of retrieve_count and read_off is made atomic
		// by a mutex.
		atomic {
			i = 0;
			do
			:: i < SUBBUF_SIZE ->
				buffer_use[(read_off + i) % BUFSIZE] = 0;
				i++
			:: i >= SUBBUF_SIZE -> break
			od;
			tmp_retrieve = retrieve_count[(read_off % BUFSIZE) / SUBBUF_SIZE]
				+ SUBBUF_SIZE;
			retrieve_count[(read_off % BUFSIZE) / SUBBUF_SIZE] = tmp_retrieve;
			read_off = read_off + SUBBUF_SIZE;
		}
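	// Stop once read_off has advanced past the number of bytes the
	// tracers successfully wrote (each tracer writes one byte in this
	// configuration); this bounds the model's execution.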
	:: read_off >= (NUMPROCS - events_lost) -> break;
	od;
}

// Waits for all tracer and switcher processes to finish before finalizing
// the buffer. Only after that will the reader be allowed to read the
// last sub-buffer.
proctype cleaner()
{
	atomic {
		do
		:: refcount == 0 ->
			refcount = refcount + 1;
			run switcher(); // Finalize the last sub-buffer so it can be read.
			break;
		od;
	}
}

init {
	byte i = 0;
	byte j = 0;
	byte commit_sum = 0;

	atomic {
		i = 0;
		do
		:: i < NR_SUBBUFS ->
			commit_count[i] = 0;
			retrieve_count[i] = 0;
			i++
		:: i >= NR_SUBBUFS -> break
		od;
		i = 0;
		do
		:: i < BUFSIZE ->
			buffer_use[i] = 0;
			i++
		:: i >= BUFSIZE -> break
		od;
		run reader();
		run cleaner();
		i = 0;
		do
		:: i < NUMPROCS ->
			refcount = refcount + 1;
			run tracer(1);
			i++
		:: i >= NUMPROCS -> break
		od;
		i = 0;
		do
		:: i < NUMSWITCH ->
			refcount = refcount + 1;
			run switcher();
			i++
		:: i >= NUMSWITCH -> break
		od;
	}
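	// Note: init runs concurrently with the processes spawned above, so
	// Spin explores executions where the assertion block below fires at
	// every reachable interleaving point.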
	// Assertions.
	atomic {
		// The writer head must always be at or ahead of the reader head.
		assert(write_off - read_off >= 0);
		j = 0;
		commit_sum = 0;
		do
		:: j < NR_SUBBUFS ->
			commit_sum = commit_sum + commit_count[j];
			// The commit count of a sub-buffer must always be greater
			// than or equal to the retrieve_count of that sub-buffer.
			assert(commit_count[j] >= retrieve_count[j]);
			j++
		:: j >= NR_SUBBUFS -> break
		od;
		// The sum of all sub-buffer commit counts must always be less
		// than or equal to the writer head, because space must be
		// reserved before it is written to and then committed.
		assert(commit_sum <= write_off);

		// If we have fewer writers than the available buffer space, we
		// should not lose events.
		assert(NUMPROCS + NUMSWITCH > BUFSIZE || events_lost == 0);
	}
}