/*
 * test_qsbr_gc.c
 *
 * Userspace RCU library - test program (with batch reclamation)
 *
 * Copyright February 2009 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _GNU_SOURCE
#include "../config.h"
#include <stdio.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <assert.h>
#include <sys/syscall.h>
#include <sched.h>
#include <errno.h>

#include <urcu/arch.h>

/* hardcoded number of CPUs */
#define NR_CPUS 16384

/* Use the gettid syscall where available; otherwise fall back to getpid(). */
#if defined(_syscall0)
_syscall0(pid_t, gettid)
#elif defined(__NR_gettid)
static inline pid_t gettid(void)
{
	return syscall(__NR_gettid);
}
#else
#warning "use pid as tid"
static inline pid_t gettid(void)
{
	return getpid();
}
#endif

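/*
 * Defining _LGPL_SOURCE before including urcu-qsbr.h exposes the
 * LGPL-inlined fast paths (_rcu_read_lock(), _rcu_dereference(), ...)
 * instead of the function-call wrappers, avoiding per-iteration call
 * overhead in the benchmark loops below.
 */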
#define _LGPL_SOURCE
#include <urcu-qsbr.h>

struct test_array {
	int a;
};

static volatile int test_go, test_stop;

static unsigned long wdelay;

static struct test_array *test_rcu_pointer;

static unsigned long duration;

/* read-side C.S. duration, in loops */
static unsigned long rduration;
static unsigned int reclaim_batch = 1;

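/*
 * Each writer thread owns one reclaim_queue: pointers removed from the
 * data structure are queued until reclaim_batch entries accumulate, then
 * a single grace period covers the whole batch (see rcu_gc_clear_queue()).
 */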
struct reclaim_queue {
	void **queue;	/* Beginning of queue */
	void **head;	/* Insert position */
};

static struct reclaim_queue *pending_reclaims;

/* write-side C.S. duration, in loops */
static unsigned long wduration;

/* Busy-wait for approximately l cpu-relax iterations. */
static inline void loop_sleep(unsigned long l)
{
	while (l-- != 0)
		caa_cpu_relax();
}

static int verbose_mode;

#define printf_verbose(fmt, args...) \
	do { \
		if (verbose_mode) \
			printf(fmt, args); \
	} while (0)

static unsigned int cpu_affinities[NR_CPUS];
static unsigned int next_aff = 0;
static int use_affinity = 0;

pthread_mutex_t affinity_mutex = PTHREAD_MUTEX_INITIALIZER;

#ifndef HAVE_CPU_SET_T
/* Minimal cpu_set_t substitute for systems lacking the real type. */
typedef unsigned long cpu_set_t;
# define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while (0)
# define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while (0)
#endif

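/*
 * Pin the calling thread to the next CPU from the -a list, round-robin.
 * next_aff is protected by affinity_mutex since every test thread calls
 * this at startup.
 */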
static void set_affinity(void)
{
	cpu_set_t mask;
	int cpu;
	int ret;

	if (!use_affinity)
		return;

#if HAVE_SCHED_SETAFFINITY
	ret = pthread_mutex_lock(&affinity_mutex);
	if (ret) {
		errno = ret;	/* pthread calls return the error code */
		perror("Error in pthread mutex lock");
		exit(-1);
	}
	cpu = cpu_affinities[next_aff++];
	ret = pthread_mutex_unlock(&affinity_mutex);
	if (ret) {
		errno = ret;
		perror("Error in pthread mutex unlock");
		exit(-1);
	}

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	sched_setaffinity(0, &mask);
#else
	sched_setaffinity(0, sizeof(mask), &mask);
#endif
#endif /* HAVE_SCHED_SETAFFINITY */
}

/*
 * Returns 0 when the test should end.
 */
static int test_duration_write(void)
{
	return !test_stop;
}

static int test_duration_read(void)
{
	return !test_stop;
}

static unsigned long long __thread nr_writes;
static unsigned long long __thread nr_reads;

static unsigned int nr_readers;
static unsigned int nr_writers;

pthread_mutex_t rcu_copy_mutex = PTHREAD_MUTEX_INITIALIZER;
static
unsigned long long __attribute__((aligned(CAA_CACHE_LINE_SIZE))) *tot_nr_writes;

void rcu_copy_mutex_lock(void)
{
	int ret;

	ret = pthread_mutex_lock(&rcu_copy_mutex);
	if (ret) {
		errno = ret;
		perror("Error in pthread mutex lock");
		exit(-1);
	}
}

void rcu_copy_mutex_unlock(void)
{
	int ret;

	ret = pthread_mutex_unlock(&rcu_copy_mutex);
	if (ret) {
		errno = ret;
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

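/*
 * Reader thread: in the QSBR flavour, rcu_read_lock()/rcu_read_unlock()
 * compile to (nearly) nothing; in exchange, each registered reader must
 * periodically announce a quiescent state, done here once every 1024
 * reads via _rcu_quiescent_state().
 */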
void *thr_reader(void *_count)
{
	unsigned long long *count = _count;
	struct test_array *local_ptr;

	printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
			"reader", pthread_self(), (unsigned long)gettid());

	set_affinity();

	rcu_register_thread();

	while (!test_go)
	{
	}
	cmm_smp_mb();

	for (;;) {
		_rcu_read_lock();
		local_ptr = _rcu_dereference(test_rcu_pointer);
		debug_yield_read();
		if (local_ptr)
			assert(local_ptr->a == 8);
		if (unlikely(rduration))
			loop_sleep(rduration);
		_rcu_read_unlock();
		nr_reads++;
		/* QS each 1024 reads */
		if (unlikely((nr_reads & ((1 << 10) - 1)) == 0))
			_rcu_quiescent_state();
		if (unlikely(!test_duration_read()))
			break;
	}

	rcu_unregister_thread();

	*count = nr_reads;
	printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
			"reader", pthread_self(), (unsigned long)gettid());
	return ((void*)1);
}

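/*
 * Flush one writer's pending-reclaim queue: a single synchronize_rcu()
 * waits until every reader has passed through a quiescent state, after
 * which all queued pointers are safe to free.  Entries are poisoned
 * (a = 0) before free so that a reader still holding a stale pointer
 * trips the assert(a == 8) above.
 */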
static void rcu_gc_clear_queue(unsigned long wtidx)
{
	void **p;

	/* Wait for Q.S and empty queue */
	synchronize_rcu();

	for (p = pending_reclaims[wtidx].queue;
			p < pending_reclaims[wtidx].head; p++) {
		/* poison */
		if (*p)
			((struct test_array *)*p)->a = 0;
		free(*p);
	}
	pending_reclaims[wtidx].head = pending_reclaims[wtidx].queue;
}

/* Using per-thread queue */
static void rcu_gc_reclaim(unsigned long wtidx, void *old)
{
	/* Queue pointer */
	*pending_reclaims[wtidx].head = old;
	pending_reclaims[wtidx].head++;

	if (likely(pending_reclaims[wtidx].head - pending_reclaims[wtidx].queue
			< reclaim_batch))
		return;

	rcu_gc_clear_queue(wtidx);
}
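
/*
 * The grace-period cost is thus amortized: one synchronize_rcu() call
 * covers reclaim_batch pointer updates, which is what the -b option
 * below lets the benchmark vary.
 */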

void *thr_writer(void *data)
{
	unsigned long wtidx = (unsigned long)data;
#ifdef TEST_LOCAL_GC
	struct test_array *old = NULL;
#else
	struct test_array *new, *old;
#endif

	printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
			"writer", pthread_self(), (unsigned long)gettid());

	set_affinity();

	while (!test_go)
	{
	}
	cmm_smp_mb();

	for (;;) {
#ifndef TEST_LOCAL_GC
		new = malloc(sizeof(*new));
		new->a = 8;
		old = _rcu_xchg_pointer(&test_rcu_pointer, new);
#endif
		if (unlikely(wduration))
			loop_sleep(wduration);
		rcu_gc_reclaim(wtidx, old);
		nr_writes++;
		if (unlikely(!test_duration_write()))
			break;
		if (unlikely(wdelay))
			loop_sleep(wdelay);
	}

	printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
			"writer", pthread_self(), (unsigned long)gettid());
	tot_nr_writes[wtidx] = nr_writes;
	return ((void*)2);
}
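
/*
 * Note: with TEST_LOCAL_GC defined, no pointer is ever published; old
 * stays NULL and the queue only accumulates NULL entries (free(NULL) is
 * a no-op), which appears intended to isolate the cost of the reclaim
 * machinery itself.
 */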

void show_usage(int argc, char **argv)
{
	printf("Usage : %s nr_readers nr_writers duration (s)", argv[0]);
#ifdef DEBUG_YIELD
	printf(" [-r] [-w] (yield reader and/or writer)");
#endif
	printf(" [-b batch] (batch reclaim)");
	printf(" [-d delay] (writer period (us))");
	printf(" [-c duration] (reader C.S. duration (in loops))");
	printf(" [-e duration] (writer C.S. duration (in loops))");
	printf(" [-v] (verbose output)");
	printf(" [-a cpu#] [-a cpu#]... (affinity)");
	printf("\n");
}
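
/*
 * Example invocation (illustrative): 4 readers, 2 writers, 10 seconds,
 * freeing in batches of 32, threads pinned round-robin to CPUs 0-3:
 *
 *	./test_qsbr_gc 4 2 10 -b 32 -a 0 -a 1 -a 2 -a 3 -v
 */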

int main(int argc, char **argv)
{
	int err;
	pthread_t *tid_reader, *tid_writer;
	void *tret;
	unsigned long long *count_reader;
	unsigned long long tot_reads = 0, tot_writes = 0;
	int i, a;

	if (argc < 4) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[1], "%u", &nr_readers);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[2], "%u", &nr_writers);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[3], "%lu", &duration);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	for (i = 4; i < argc; i++) {
		if (argv[i][0] != '-')
			continue;
		switch (argv[i][1]) {
#ifdef DEBUG_YIELD
		case 'r':
			yield_active |= YIELD_READ;
			break;
		case 'w':
			yield_active |= YIELD_WRITE;
			break;
#endif
		case 'a':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			a = atoi(argv[++i]);
			cpu_affinities[next_aff++] = a;
			use_affinity = 1;
			printf_verbose("Adding CPU %d affinity\n", a);
			break;
		case 'b':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			reclaim_batch = atol(argv[++i]);
			break;
		case 'c':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			rduration = atol(argv[++i]);
			break;
		case 'd':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			wdelay = atol(argv[++i]);
			break;
		case 'e':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			wduration = atol(argv[++i]);
			break;
		case 'v':
			verbose_mode = 1;
			break;
		}
	}

	printf_verbose("running test for %lu seconds, %u readers, %u writers.\n",
		duration, nr_readers, nr_writers);
	printf_verbose("Writer delay : %lu loops.\n", wdelay);
	printf_verbose("Reader duration : %lu loops.\n", rduration);
	printf_verbose("thread %-6s, thread id : %lx, tid %lu\n",
			"main", pthread_self(), (unsigned long)gettid());

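	/*
	 * Per-writer reclaim queues: small batches still get a full cache
	 * line each, presumably so writer threads never share a line
	 * (avoiding false sharing); larger batches are sized exactly.
	 */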
	tid_reader = malloc(sizeof(*tid_reader) * nr_readers);
	tid_writer = malloc(sizeof(*tid_writer) * nr_writers);
	count_reader = malloc(sizeof(*count_reader) * nr_readers);
	tot_nr_writes = malloc(sizeof(*tot_nr_writes) * nr_writers);
	pending_reclaims = malloc(sizeof(*pending_reclaims) * nr_writers);
	if (reclaim_batch * sizeof(*pending_reclaims[i].queue)
			< CAA_CACHE_LINE_SIZE)
		for (i = 0; i < nr_writers; i++)
			pending_reclaims[i].queue = calloc(1, CAA_CACHE_LINE_SIZE);
	else
		for (i = 0; i < nr_writers; i++)
			pending_reclaims[i].queue = calloc(reclaim_batch,
					sizeof(*pending_reclaims[i].queue));
	for (i = 0; i < nr_writers; i++)
		pending_reclaims[i].head = pending_reclaims[i].queue;

	next_aff = 0;

	for (i = 0; i < nr_readers; i++) {
		err = pthread_create(&tid_reader[i], NULL, thr_reader,
				     &count_reader[i]);
		if (err != 0)
			exit(1);
	}
	for (i = 0; i < nr_writers; i++) {
		err = pthread_create(&tid_writer[i], NULL, thr_writer,
				     (void *)(long)i);
		if (err != 0)
			exit(1);
	}

	/* Make sure all setup stores are visible before releasing threads. */
	cmm_smp_mb();

	test_go = 1;

	sleep(duration);

	test_stop = 1;

	for (i = 0; i < nr_readers; i++) {
		err = pthread_join(tid_reader[i], &tret);
		if (err != 0)
			exit(1);
		tot_reads += count_reader[i];
	}
	for (i = 0; i < nr_writers; i++) {
		err = pthread_join(tid_writer[i], &tret);
		if (err != 0)
			exit(1);
		tot_writes += tot_nr_writes[i];
		/* Drain any entries still pending in this writer's queue. */
		rcu_gc_clear_queue(i);
	}

	printf_verbose("total number of reads : %llu, writes %llu\n", tot_reads,
		       tot_writes);
	printf("SUMMARY %-25s testdur %4lu nr_readers %3u rdur %6lu wdur %6lu "
		"nr_writers %3u "
		"wdelay %6lu nr_reads %12llu nr_writes %12llu nr_ops %12llu "
		"batch %u\n",
		argv[0], duration, nr_readers, rduration, wduration,
		nr_writers, wdelay, tot_reads, tot_writes,
		tot_reads + tot_writes, reclaim_batch);
	free(tid_reader);
	free(tid_writer);
	free(count_reader);
	free(tot_nr_writes);
	for (i = 0; i < nr_writers; i++)
		free(pending_reclaims[i].queue);
	free(pending_reclaims);

	return 0;
}