Merge branch 'master' into rbtree2
[userspace-rcu.git] / tests / test_urcu_rbtree.c
1 /*
2 * test_urcu_rbtree.c
3 *
4 * Userspace RCU library - test program for RB tree
5 *
6 * Copyright February 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 */
22
23 #define _GNU_SOURCE
24 #ifndef DYNAMIC_LINK_TEST
25 #define _LGPL_SOURCE
26 #else
27 #define debug_yield_read()
28 #endif
29 #include "../config.h"
30 #include <stdio.h>
31 #include <pthread.h>
32 #include <stdlib.h>
33 #include <string.h>
34 #include <sys/types.h>
35 #include <sys/wait.h>
36 #include <unistd.h>
37 #include <stdio.h>
38 #include <assert.h>
39 #include <sys/syscall.h>
40 #include <sched.h>
41 #include <errno.h>
42 #include <time.h>
43
44 #include <urcu/arch.h>
45
46 extern int __thread disable_debug;
47
48 /* hardcoded number of CPUs */
49 #define NR_CPUS 16384
50
51 /* number of insert/delete */
52 #define NR_RAND 6
53 //#define NR_RAND 7
54
/*
 * Portable gettid(): prefer the _syscall0 stub when available, else
 * invoke the raw syscall number, else fall back to the process id.
 */
#if defined(_syscall0)
_syscall0(pid_t, gettid)
#elif defined(__NR_gettid)
/* glibc historically did not wrap gettid(); call the syscall directly. */
static inline pid_t gettid(void)
{
	return syscall(__NR_gettid);
}
#else
#warning "use pid as tid"
/* No gettid syscall number known on this platform: use the pid. */
static inline pid_t gettid(void)
{
	return getpid();
}
#endif
69
70 #include <urcu.h>
71 #include <urcu/rcurbtree.h>
72 #include <urcu-defer.h>
73
/*
 * Three-way key comparator for the RB tree: keys are opaque pointers
 * compared as unsigned long values. Returns -1, 0 or 1 for a < b,
 * a == b, a > b respectively.
 */
int tree_comp(void *a, void *b)
{
	unsigned long ka = (unsigned long)a;
	unsigned long kb = (unsigned long)b;

	if (ka == kb)
		return 0;
	return (ka < kb) ? -1 : 1;
}
83
84 static DEFINE_RCU_RBTREE(rbtree, tree_comp, malloc, free, call_rcu);
85
86 static volatile int test_go, test_stop;
87
88 static unsigned long wdelay;
89
90 static unsigned long duration;
91
92 /* read-side C.S. duration, in loops */
93 static unsigned long rduration;
94
95 /* write-side C.S. duration, in loops */
96 static unsigned long wduration;
97
/*
 * Busy-wait for l iterations, issuing a CPU relax hint each pass.
 * Used to simulate configurable read-side/write-side C.S. durations.
 */
static inline void loop_sleep(unsigned long l)
{
	for (; l != 0; l--)
		caa_cpu_relax();
}
103
static int verbose_mode;

/*
 * Print only when verbose mode (-v) is enabled. The GNU "##" paste
 * swallows the preceding comma when no variadic arguments are given,
 * so printf_verbose("msg\n") is valid too (the original form required
 * at least one argument to compile).
 */
#define printf_verbose(fmt, args...)	\
	do {				\
		if (verbose_mode)	\
			printf(fmt, ##args);	\
	} while (0)
111
112 static unsigned int cpu_affinities[NR_CPUS];
113 static unsigned int next_aff = 0;
114 static int use_affinity = 0;
115
116 pthread_mutex_t affinity_mutex = PTHREAD_MUTEX_INITIALIZER;
117
118 #ifndef HAVE_CPU_SET_T
119 typedef unsigned long cpu_set_t;
120 # define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while(0)
121 # define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while(0)
122 #endif
123
124 static void set_affinity(void)
125 {
126 cpu_set_t mask;
127 int cpu;
128 int ret;
129
130 if (!use_affinity)
131 return;
132
133 #if HAVE_SCHED_SETAFFINITY
134 ret = pthread_mutex_lock(&affinity_mutex);
135 if (ret) {
136 perror("Error in pthread mutex lock");
137 exit(-1);
138 }
139 cpu = cpu_affinities[next_aff++];
140 ret = pthread_mutex_unlock(&affinity_mutex);
141 if (ret) {
142 perror("Error in pthread mutex unlock");
143 exit(-1);
144 }
145
146 CPU_ZERO(&mask);
147 CPU_SET(cpu, &mask);
148 #if SCHED_SETAFFINITY_ARGS == 2
149 sched_setaffinity(0, &mask);
150 #else
151 sched_setaffinity(0, sizeof(mask), &mask);
152 #endif
153 #endif /* HAVE_SCHED_SETAFFINITY */
154 }
155
156 /*
157 * returns 0 if test should end.
158 */
159 static int test_duration_write(void)
160 {
161 return !test_stop;
162 }
163
164 static int test_duration_read(void)
165 {
166 return !test_stop;
167 }
168
169 static unsigned long long __thread nr_writes;
170 static unsigned long long __thread nr_reads;
171
172 static unsigned int nr_readers;
173 static unsigned int nr_writers;
174
175 static unsigned long global_items;
176 static void **global_key = NULL;
177
/* Serializes the writers' insert and remove phases. */
pthread_mutex_t rcu_copy_mutex = PTHREAD_MUTEX_INITIALIZER;

/*
 * Take rcu_copy_mutex, aborting the process on failure.
 * pthread_mutex_lock() returns the error code directly (it does not
 * set errno), so failures are reported with strerror() — perror()
 * would print a stale or "Success" message.
 */
void rcu_copy_mutex_lock(void)
{
	int ret;

	ret = pthread_mutex_lock(&rcu_copy_mutex);
	if (ret) {
		fprintf(stderr, "Error in pthread mutex lock: %s\n",
			strerror(ret));
		exit(-1);
	}
}

/* Release rcu_copy_mutex, aborting the process on failure. */
void rcu_copy_mutex_unlock(void)
{
	int ret;

	ret = pthread_mutex_unlock(&rcu_copy_mutex);
	if (ret) {
		fprintf(stderr, "Error in pthread mutex unlock: %s\n",
			strerror(ret));
		exit(-1);
	}
}
200
/*
 * Mark in lookup_hit[] the index of the first not-yet-marked global
 * key whose value equals this node's begin key. Readers use this
 * during full min/next (and max/prev) traversals to verify that every
 * inserted global item is encountered, counting duplicate keys only
 * once per matching slot.
 */
static
void set_lookup_index(struct rcu_rbtree_node *node,
		char *lookup_hit)
{
	int i;

	for (i = 0; i < global_items; i++) {
		if (node->begin == global_key[i]
				&& !lookup_hit[i]) {
			lookup_hit[i] = 1;
			break;
		}
	}
}
215
216 void *thr_reader(void *_count)
217 {
218 unsigned long long *count = _count;
219 struct rcu_rbtree_node *node;
220 int i, index;
221 char *lookup_hit;
222
223 printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
224 "reader", pthread_self(), (unsigned long)gettid());
225
226 set_affinity();
227
228 rcu_register_thread();
229
230 lookup_hit = malloc(sizeof(*lookup_hit) * global_items);
231
232 while (!test_go)
233 {
234 }
235 cmm_smp_mb();
236
237 for (;;) {
238 /* search bottom of range */
239 for (i = 0; i < global_items; i++) {
240 rcu_read_lock();
241 node = rcu_rbtree_search(&rbtree,
242 rcu_dereference(rbtree.root),
243 global_key[i]);
244 assert(!rcu_rbtree_is_nil(&rbtree, node));
245 rcu_read_unlock();
246 }
247
248 /* search end of range */
249 for (i = 0; i < global_items; i++) {
250 rcu_read_lock();
251 node = rcu_rbtree_search(&rbtree,
252 rcu_dereference(rbtree.root),
253 (void*) ((unsigned long) global_key[i] + 3));
254 assert(!rcu_rbtree_is_nil(&rbtree, node));
255 rcu_read_unlock();
256 }
257
258 /* search range (middle) */
259 for (i = 0; i < global_items; i++) {
260 rcu_read_lock();
261 node = rcu_rbtree_search_range(&rbtree,
262 rcu_dereference(rbtree.root),
263 (void*) ((unsigned long) global_key[i] + 1),
264 (void*) ((unsigned long) global_key[i] + 2));
265 assert(!rcu_rbtree_is_nil(&rbtree, node));
266 rcu_read_unlock();
267 }
268
269 /* search begin key */
270 for (i = 0; i < global_items; i++) {
271 rcu_read_lock();
272 node = rcu_rbtree_search_begin_key(&rbtree,
273 rcu_dereference(rbtree.root),
274 global_key[i]);
275 assert(!rcu_rbtree_is_nil(&rbtree, node));
276 rcu_read_unlock();
277 }
278
279 /* min + next */
280 memset(lookup_hit, 0, sizeof(*lookup_hit) * global_items);
281
282 rcu_read_lock();
283 node = rcu_rbtree_min(&rbtree,
284 rcu_dereference(rbtree.root));
285 while (!rcu_rbtree_is_nil(&rbtree, node)) {
286 set_lookup_index(node, lookup_hit);
287 node = rcu_rbtree_next(&rbtree, node);
288 }
289 rcu_read_unlock();
290
291 for (i = 0; i < global_items; i++)
292 assert(lookup_hit[i]);
293
294 /* max + prev */
295 memset(lookup_hit, 0, sizeof(*lookup_hit) * global_items);
296
297 rcu_read_lock();
298 node = rcu_rbtree_max(&rbtree,
299 rcu_dereference(rbtree.root));
300 while (!rcu_rbtree_is_nil(&rbtree, node)) {
301 set_lookup_index(node, lookup_hit);
302 node = rcu_rbtree_prev(&rbtree, node);
303 }
304 rcu_read_unlock();
305
306 for (i = 0; i < global_items; i++)
307 assert(lookup_hit[i]);
308
309 debug_yield_read();
310 if (unlikely(rduration))
311 loop_sleep(rduration);
312 nr_reads++;
313 if (unlikely(!test_duration_read()))
314 break;
315 }
316
317 rcu_unregister_thread();
318
319 /* test extra thread registration */
320 rcu_register_thread();
321 rcu_unregister_thread();
322
323 free(lookup_hit);
324
325 *count = nr_reads;
326 printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
327 "reader", pthread_self(), (unsigned long)gettid());
328 return ((void*)1);
329
330 }
331
332 void *thr_writer(void *_count)
333 {
334 unsigned long long *count = _count;
335 struct rcu_rbtree_node *node;
336 void *key[NR_RAND];
337 int i;
338
339 printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
340 "writer", pthread_self(), (unsigned long)gettid());
341
342 set_affinity();
343
344 //disable_debug = 1;
345
346 rcu_register_thread();
347
348 while (!test_go)
349 {
350 }
351 cmm_smp_mb();
352
353 for (;;) {
354 rcu_copy_mutex_lock();
355
356 for (i = 0; i < NR_RAND; i++) {
357 //key[i] = (void *)(unsigned long)(rand() % 2048);
358 key[i] = (void *)(unsigned long)(((unsigned long) rand() * 4) % 2048);
359 //For more collisions
360 //key[i] = (void *)(unsigned long)(rand() % 6);
361 //node->begin = key[i];
362 //node->end = (void *)((unsigned long) key[i] + 1);
363 //node->end = (void *)((unsigned long) key[i] + 4);
364 rcu_read_lock();
365 rcu_rbtree_insert(&rbtree, key[i],
366 (void *)((unsigned long) key[i] + 4));
367 rcu_read_unlock();
368 }
369 rcu_copy_mutex_unlock();
370
371 if (unlikely(wduration))
372 loop_sleep(wduration);
373
374 rcu_copy_mutex_lock();
375 for (i = 0; i < NR_RAND; i++) {
376 #if 0
377 node = rcu_rbtree_min(rbtree, rbtree->root);
378 while (!rcu_rbtree_is_nil(&rbtree, node)) {
379 printf("{ 0x%lX p:%lX r:%lX l:%lX %s %s %s} ",
380 (unsigned long)node->key,
381 node->p->key,
382 node->right->key,
383 node->left->key,
384 node->color ? "red" : "black",
385 node->pos ? "right" : "left",
386 node->nil ? "nil" : "");
387 node = rcu_rbtree_next(rbtree, node);
388 }
389 printf("\n");
390 #endif
391 rcu_read_lock();
392 node = rcu_rbtree_search(&rbtree, rbtree.root, key[i]);
393 assert(!rcu_rbtree_is_nil(&rbtree, node));
394 rcu_rbtree_remove(&rbtree, node);
395 rcu_read_unlock();
396 }
397
398 rcu_copy_mutex_unlock();
399 nr_writes++;
400 if (unlikely(!test_duration_write()))
401 break;
402 if (unlikely(wdelay))
403 loop_sleep(wdelay);
404 }
405
406 rcu_unregister_thread();
407
408 printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
409 "writer", pthread_self(), (unsigned long)gettid());
410 *count = nr_writes;
411 return ((void*)2);
412 }
413
/*
 * Print the command-line usage string. Keep this in sync with the
 * option parser in main(); the -g option was parsed but previously
 * undocumented here.
 */
void show_usage(int argc, char **argv)
{
	printf("Usage : %s nr_readers nr_writers duration (s)", argv[0]);
#ifdef DEBUG_YIELD
	printf(" [-r] [-w] (yield reader and/or writer)");
#endif
	printf(" [-d delay] (writer period (us))");
	printf(" [-c duration] (reader C.S. duration (in loops))");
	printf(" [-e duration] (writer C.S. duration (in loops))");
	printf(" [-v] (verbose output)");
	printf(" [-a cpu#] [-a cpu#]... (affinity)");
	printf(" [-g nr_items] (number of global items inserted for readers)");
	printf("\n");
}
427
/*
 * Entry point.
 *
 * Usage: nr_readers nr_writers duration [-r] [-w] [-d delay]
 *        [-c rduration] [-e wduration] [-v] [-a cpu#]... [-g nr_items]
 *
 * Parses the options, spawns the reader and writer threads,
 * pre-populates the tree with global_items keys for the readers to
 * look up, lets the test run for "duration" seconds, then joins all
 * threads, removes the global keys and prints a summary line.
 * Note: with no -g option, global_items is 0 and readers have
 * nothing to look up.
 */
int main(int argc, char **argv)
{
	int err;
	pthread_t *tid_reader, *tid_writer;
	void *tret;
	unsigned long long *count_reader, *count_writer;
	unsigned long long tot_reads = 0, tot_writes = 0;
	int i, a;
	struct rcu_rbtree_node *node;

	if (argc < 4) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[1], "%u", &nr_readers);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[2], "%u", &nr_writers);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[3], "%lu", &duration);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	/* optional flags start after the three positional arguments */
	for (i = 4; i < argc; i++) {
		if (argv[i][0] != '-')
			continue;
		switch (argv[i][1]) {
#ifdef DEBUG_YIELD
		case 'r':
			yield_active |= YIELD_READ;
			break;
		case 'w':
			yield_active |= YIELD_WRITE;
			break;
#endif
		case 'a':	/* pin threads round-robin on the listed CPUs */
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			a = atoi(argv[++i]);
			cpu_affinities[next_aff++] = a;
			use_affinity = 1;
			printf_verbose("Adding CPU %d affinity\n", a);
			break;
		case 'c':	/* reader C.S. duration, in loops */
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			rduration = atol(argv[++i]);
			break;
		case 'd':	/* delay between writer iterations, in loops */
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			wdelay = atol(argv[++i]);
			break;
		case 'e':	/* writer C.S. duration, in loops */
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			wduration = atol(argv[++i]);
			break;
		case 'v':
			verbose_mode = 1;
			break;
		case 'g':	/* number of items pre-inserted for readers */
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			global_items = atol(argv[++i]);
			break;
		}
	}

	printf_verbose("running test for %lu seconds, %u readers, %u writers.\n",
		duration, nr_readers, nr_writers);
	printf_verbose("Writer delay : %lu loops.\n", wdelay);
	printf_verbose("Reader duration : %lu loops.\n", rduration);
	printf_verbose("thread %-6s, thread id : %lx, tid %lu\n",
			"main", pthread_self(), (unsigned long)gettid());

	tid_reader = malloc(sizeof(*tid_reader) * nr_readers);
	tid_writer = malloc(sizeof(*tid_writer) * nr_writers);
	count_reader = malloc(sizeof(*count_reader) * nr_readers);
	count_writer = malloc(sizeof(*count_writer) * nr_writers);
	global_key = malloc(sizeof(*global_key) * global_items);

	srand(time(NULL));

	/* one call_rcu worker thread per CPU for deferred reclamation */
	err = create_all_cpu_call_rcu_data(0);
	assert(!err);

	next_aff = 0;

	for (i = 0; i < nr_readers; i++) {
		err = pthread_create(&tid_reader[i], NULL, thr_reader,
				     &count_reader[i]);
		if (err != 0)
			exit(1);
	}
	for (i = 0; i < nr_writers; i++) {
		err = pthread_create(&tid_writer[i], NULL, thr_writer,
				     &count_writer[i]);
		if (err != 0)
			exit(1);
	}

	rcu_register_thread();
	rcu_read_lock();
	/* Insert items looked up by readers */
	for (i = 0; i < global_items; i++) {
		/*
		 * Keys are multiples of 4 in [0, 2048) so each item's
		 * [key, key+4) range matches what readers search for.
		 */
		global_key[i] = (void *)(unsigned long)(((unsigned long) rand() * 4) % 2048);
		rcu_rbtree_insert(&rbtree, global_key[i],
				  (void *)((unsigned long) global_key[i] + 4));
	}
	rcu_read_unlock();

	cmm_smp_mb();

	/* release all reader/writer threads spinning on test_go */
	test_go = 1;

	sleep(duration);

	test_stop = 1;

	for (i = 0; i < nr_readers; i++) {
		err = pthread_join(tid_reader[i], &tret);
		if (err != 0)
			exit(1);
		tot_reads += count_reader[i];
	}
	for (i = 0; i < nr_writers; i++) {
		err = pthread_join(tid_writer[i], &tret);
		if (err != 0)
			exit(1);
		tot_writes += count_writer[i];
	}

	/* remove the pre-inserted global items before tearing down */
	rcu_read_lock();
	for (i = 0; i < global_items; i++) {
		node = rcu_rbtree_search(&rbtree, rbtree.root, global_key[i]);
		assert(!rcu_rbtree_is_nil(&rbtree, node));
		rcu_rbtree_remove(&rbtree, node);
	}
	rcu_read_unlock();
	rcu_unregister_thread();

	printf_verbose("total number of reads : %llu, writes %llu\n", tot_reads,
		       tot_writes);
	printf("SUMMARY %-25s testdur %4lu nr_readers %3u rdur %6lu wdur %6lu "
		"nr_writers %3u "
		"wdelay %6lu nr_reads %12llu nr_writes %12llu nr_ops %12llu "
		"global_items %6lu\n",
		argv[0], duration, nr_readers, rduration, wduration,
		nr_writers, wdelay, tot_reads, tot_writes,
		tot_reads + tot_writes, global_items);
	free(tid_reader);
	free(tid_writer);
	free(count_reader);
	free(count_writer);
	free(global_key);
	return 0;
}
This page took 0.041204 seconds and 5 git commands to generate.