rbtree API change: handle node memory allocation internally
userspace-rcu.git / tests/test_urcu_rbtree.c
/*
 * test_urcu_rbtree.c
 *
 * Userspace RCU library - test program for RB tree
 *
 * Copyright February 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#define _GNU_SOURCE
#ifndef DYNAMIC_LINK_TEST
#define _LGPL_SOURCE
#else
#define debug_yield_read()
#endif
#include "../config.h"
#include <stdio.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <assert.h>
#include <sys/syscall.h>
#include <sched.h>
#include <errno.h>
#include <time.h>

#include <urcu/arch.h>

extern int __thread disable_debug;

/* hardcoded number of CPUs */
#define NR_CPUS 16384

/* number of insert/delete */
#define NR_RAND 6
//#define NR_RAND 7

#if defined(_syscall0)
_syscall0(pid_t, gettid)
#elif defined(__NR_gettid)
static inline pid_t gettid(void)
{
	return syscall(__NR_gettid);
}
#else
#warning "use pid as tid"
static inline pid_t gettid(void)
{
	return getpid();
}
#endif

#include <urcu.h>
#include <urcu/rcurbtree.h>
#include <urcu-defer.h>

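/*
 * Allocation callbacks handed to DEFINE_RCU_RBTREE below.  With this API
 * the RB tree allocates and frees its nodes internally through these
 * hooks, so the test only supplies keys and ranges on insert.
 */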
/* TODO: error handling testing for -ENOMEM */
void *rbtree_alloc(size_t size)
{
	return malloc(size);
}

void rbtree_free(void *ptr)
{
	free(ptr);
}

int tree_comp(void *a, void *b)
{
	if ((unsigned long)a < (unsigned long)b)
		return -1;
	else if ((unsigned long)a > (unsigned long)b)
		return 1;
	else
		return 0;
}

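/*
 * Statically define and initialize the test tree: keys are compared as
 * unsigned long values by tree_comp, and node memory is managed through
 * the rbtree_alloc/rbtree_free callbacks above.
 */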
static DEFINE_RCU_RBTREE(rbtree, tree_comp, rbtree_alloc, rbtree_free);

static volatile int test_go, test_stop;

static unsigned long wdelay;

static unsigned long duration;

/* read-side C.S. duration, in loops */
static unsigned long rduration;

/* write-side C.S. duration, in loops */
static unsigned long wduration;

static inline void loop_sleep(unsigned long l)
{
	while (l-- != 0)
		caa_cpu_relax();
}

static int verbose_mode;

#define printf_verbose(fmt, args...)		\
	do {					\
		if (verbose_mode)		\
			printf(fmt, args);	\
	} while (0)

static unsigned int cpu_affinities[NR_CPUS];
static unsigned int next_aff = 0;
static int use_affinity = 0;

pthread_mutex_t affinity_mutex = PTHREAD_MUTEX_INITIALIZER;

#ifndef HAVE_CPU_SET_T
typedef unsigned long cpu_set_t;
# define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while(0)
# define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while(0)
#endif

static void set_affinity(void)
{
	cpu_set_t mask;
	int cpu;
	int ret;

	if (!use_affinity)
		return;

#if HAVE_SCHED_SETAFFINITY
	ret = pthread_mutex_lock(&affinity_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
	cpu = cpu_affinities[next_aff++];
	ret = pthread_mutex_unlock(&affinity_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	sched_setaffinity(0, &mask);
#else
	sched_setaffinity(0, sizeof(mask), &mask);
#endif
#endif /* HAVE_SCHED_SETAFFINITY */
}

/*
 * returns 0 if test should end.
 */
static int test_duration_write(void)
{
	return !test_stop;
}

static int test_duration_read(void)
{
	return !test_stop;
}

static unsigned long long __thread nr_writes;
static unsigned long long __thread nr_reads;

static unsigned int nr_readers;
static unsigned int nr_writers;

static unsigned long global_items;
static void **global_key = NULL;

pthread_mutex_t rcu_copy_mutex = PTHREAD_MUTEX_INITIALIZER;

void rcu_copy_mutex_lock(void)
{
	int ret;
	ret = pthread_mutex_lock(&rcu_copy_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
}

void rcu_copy_mutex_unlock(void)
{
	int ret;

	ret = pthread_mutex_unlock(&rcu_copy_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}
}

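/*
 * Mark, in lookup_hit, the first not-yet-seen global key matching this
 * node's begin key.  The reader threads use this to check that a full
 * min/next (or max/prev) traversal visits every pre-inserted item.
 */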
static
void set_lookup_index(struct rcu_rbtree_node *node,
		char *lookup_hit)
{
	int i;

	for (i = 0; i < global_items; i++) {
		if (node->begin == global_key[i]
		    && !lookup_hit[i]) {
			lookup_hit[i] = 1;
			break;
		}
	}
}

void *thr_reader(void *_count)
{
	unsigned long long *count = _count;
	struct rcu_rbtree_node *node;
	int i;
	char *lookup_hit;

	printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
			"reader", pthread_self(), (unsigned long)gettid());

	set_affinity();

	rcu_register_thread();

	lookup_hit = malloc(sizeof(*lookup_hit) * global_items);

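	/*
	 * Busy-wait until the main thread releases all test threads by
	 * setting test_go, then issue a full memory barrier before entering
	 * the test loop.
	 */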
	while (!test_go)
	{
	}
	cmm_smp_mb();

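	/*
	 * Each reader iteration looks up every pre-inserted global key at the
	 * begin, inside and end of its key range, then walks the whole tree
	 * forward (min/next) and backward (max/prev) to check that all global
	 * items are reachable.
	 */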
	for (;;) {
		/* search bottom of range */
		for (i = 0; i < global_items; i++) {
			rcu_read_lock();
			node = rcu_rbtree_search(&rbtree,
						 rcu_dereference(rbtree.root),
						 global_key[i]);
			assert(!rcu_rbtree_is_nil(&rbtree, node));
			rcu_read_unlock();
		}

		/* search end of range */
		for (i = 0; i < global_items; i++) {
			rcu_read_lock();
			node = rcu_rbtree_search(&rbtree,
						 rcu_dereference(rbtree.root),
						 (void*) ((unsigned long) global_key[i] + 3));
			assert(!rcu_rbtree_is_nil(&rbtree, node));
			rcu_read_unlock();
		}

		/* search range (middle) */
		for (i = 0; i < global_items; i++) {
			rcu_read_lock();
			node = rcu_rbtree_search_range(&rbtree,
						       rcu_dereference(rbtree.root),
						       (void*) ((unsigned long) global_key[i] + 1),
						       (void*) ((unsigned long) global_key[i] + 2));
			assert(!rcu_rbtree_is_nil(&rbtree, node));
			rcu_read_unlock();
		}

		/* search begin key */
		for (i = 0; i < global_items; i++) {
			rcu_read_lock();
			node = rcu_rbtree_search_begin_key(&rbtree,
							   rcu_dereference(rbtree.root),
							   global_key[i]);
			assert(!rcu_rbtree_is_nil(&rbtree, node));
			rcu_read_unlock();
		}

		/* min + next */
		memset(lookup_hit, 0, sizeof(*lookup_hit) * global_items);

		rcu_read_lock();
		node = rcu_rbtree_min(&rbtree,
				      rcu_dereference(rbtree.root));
		while (!rcu_rbtree_is_nil(&rbtree, node)) {
			set_lookup_index(node, lookup_hit);
			node = rcu_rbtree_next(&rbtree, node);
		}
		rcu_read_unlock();

		for (i = 0; i < global_items; i++)
			assert(lookup_hit[i]);

		/* max + prev */
		memset(lookup_hit, 0, sizeof(*lookup_hit) * global_items);

		rcu_read_lock();
		node = rcu_rbtree_max(&rbtree,
				      rcu_dereference(rbtree.root));
		while (!rcu_rbtree_is_nil(&rbtree, node)) {
			set_lookup_index(node, lookup_hit);
			node = rcu_rbtree_prev(&rbtree, node);
		}
		rcu_read_unlock();

		for (i = 0; i < global_items; i++)
			assert(lookup_hit[i]);

		debug_yield_read();
		if (unlikely(rduration))
			loop_sleep(rduration);
		nr_reads++;
		if (unlikely(!test_duration_read()))
			break;
	}

	rcu_unregister_thread();

	/* test extra thread registration */
	rcu_register_thread();
	rcu_unregister_thread();

	free(lookup_hit);

	*count = nr_reads;
	printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
			"reader", pthread_self(), (unsigned long)gettid());
	return ((void*)1);
}

void *thr_writer(void *_count)
{
	unsigned long long *count = _count;
	struct rcu_rbtree_node *node;
	void *key[NR_RAND];
	int i;

	printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
			"writer", pthread_self(), (unsigned long)gettid());

	set_affinity();

	//disable_debug = 1;

	rcu_register_thread();

	while (!test_go)
	{
	}
	cmm_smp_mb();

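	/*
	 * Each writer iteration inserts NR_RAND pseudo-random keys and then
	 * removes them again.  rcu_copy_mutex serializes the writer threads;
	 * readers run concurrently under rcu_read_lock.
	 */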
	for (;;) {
		rcu_copy_mutex_lock();

		for (i = 0; i < NR_RAND; i++) {
			//key[i] = (void *)(unsigned long)(rand() % 2048);
			key[i] = (void *)(unsigned long)(((unsigned long) rand() * 4) % 2048);
			//For more collisions
			//key[i] = (void *)(unsigned long)(rand() % 6);
			//node->begin = key[i];
			//node->end = (void *)((unsigned long) key[i] + 1);
			//node->end = (void *)((unsigned long) key[i] + 4);
			rcu_read_lock();
			rcu_rbtree_insert(&rbtree, key[i],
					  (void *)((unsigned long) key[i] + 4));
			rcu_read_unlock();
		}
		rcu_copy_mutex_unlock();

		if (unlikely(wduration))
			loop_sleep(wduration);

		rcu_copy_mutex_lock();
		for (i = 0; i < NR_RAND; i++) {
#if 0
			node = rcu_rbtree_min(rbtree, rbtree->root);
			while (!rcu_rbtree_is_nil(&rbtree, node)) {
				printf("{ 0x%lX p:%lX r:%lX l:%lX %s %s %s} ",
					(unsigned long)node->key,
					node->p->key,
					node->right->key,
					node->left->key,
					node->color ? "red" : "black",
					node->pos ? "right" : "left",
					node->nil ? "nil" : "");
				node = rcu_rbtree_next(rbtree, node);
			}
			printf("\n");
#endif
			rcu_read_lock();
			node = rcu_rbtree_search(&rbtree, rbtree.root, key[i]);
			assert(!rcu_rbtree_is_nil(&rbtree, node));
			rcu_rbtree_remove(&rbtree, node);
			rcu_read_unlock();
		}

		rcu_copy_mutex_unlock();
		nr_writes++;
		if (unlikely(!test_duration_write()))
			break;
		if (unlikely(wdelay))
			loop_sleep(wdelay);
	}

	rcu_unregister_thread();

	printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
			"writer", pthread_self(), (unsigned long)gettid());
	*count = nr_writes;
	return ((void*)2);
}

void show_usage(int argc, char **argv)
{
	printf("Usage : %s nr_readers nr_writers duration (s)", argv[0]);
#ifdef DEBUG_YIELD
	printf(" [-r] [-w] (yield reader and/or writer)");
#endif
	printf(" [-d delay] (writer period (us))");
	printf(" [-c duration] (reader C.S. duration (in loops))");
	printf(" [-e duration] (writer C.S. duration (in loops))");
	printf(" [-v] (verbose output)");
	printf(" [-a cpu#] [-a cpu#]... (affinity)");
	printf(" [-g items] (number of global items pre-inserted in the tree)");
	printf("\n");
}

int main(int argc, char **argv)
{
	int err;
	pthread_t *tid_reader, *tid_writer;
	void *tret;
	unsigned long long *count_reader, *count_writer;
	unsigned long long tot_reads = 0, tot_writes = 0;
	int i, a;
	struct rcu_rbtree_node *node;

	if (argc < 4) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[1], "%u", &nr_readers);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[2], "%u", &nr_writers);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[3], "%lu", &duration);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	for (i = 4; i < argc; i++) {
		if (argv[i][0] != '-')
			continue;
		switch (argv[i][1]) {
#ifdef DEBUG_YIELD
		case 'r':
			yield_active |= YIELD_READ;
			break;
		case 'w':
			yield_active |= YIELD_WRITE;
			break;
#endif
		case 'a':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			a = atoi(argv[++i]);
			cpu_affinities[next_aff++] = a;
			use_affinity = 1;
			printf_verbose("Adding CPU %d affinity\n", a);
			break;
		case 'c':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			rduration = atol(argv[++i]);
			break;
		case 'd':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			wdelay = atol(argv[++i]);
			break;
		case 'e':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			wduration = atol(argv[++i]);
			break;
		case 'v':
			verbose_mode = 1;
			break;
		case 'g':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			global_items = atol(argv[++i]);
			break;
		}
	}

	printf_verbose("running test for %lu seconds, %u readers, %u writers.\n",
		duration, nr_readers, nr_writers);
	printf_verbose("Writer delay : %lu loops.\n", wdelay);
	printf_verbose("Reader duration : %lu loops.\n", rduration);
	printf_verbose("thread %-6s, thread id : %lx, tid %lu\n",
			"main", pthread_self(), (unsigned long)gettid());

	tid_reader = malloc(sizeof(*tid_reader) * nr_readers);
	tid_writer = malloc(sizeof(*tid_writer) * nr_writers);
	count_reader = malloc(sizeof(*count_reader) * nr_readers);
	count_writer = malloc(sizeof(*count_writer) * nr_writers);
	global_key = malloc(sizeof(*global_key) * global_items);

	srand(time(NULL));

	next_aff = 0;

	for (i = 0; i < nr_readers; i++) {
		err = pthread_create(&tid_reader[i], NULL, thr_reader,
				     &count_reader[i]);
		if (err != 0)
			exit(1);
	}
	for (i = 0; i < nr_writers; i++) {
		err = pthread_create(&tid_writer[i], NULL, thr_writer,
				     &count_writer[i]);
		if (err != 0)
			exit(1);
	}

	rcu_register_thread();
	rcu_read_lock();
	/* Insert items looked up by readers */
	for (i = 0; i < global_items; i++) {
		global_key[i] = (void *)(unsigned long)(((unsigned long) rand() * 4) % 2048);
		//global_key[i] = (void *)(unsigned long)(rand() % 2048);
		//For more collisions
		//global_key[i] = (void *)(unsigned long)(rand() % 6);
		//node->begin = global_key[i];
		//node->end = (void *)((unsigned long) global_key[i] + 1);
		//node->end = (void *)((unsigned long) global_key[i] + 4);
		rcu_rbtree_insert(&rbtree, global_key[i],
				  (void *)((unsigned long) global_key[i] + 4));
	}
	rcu_read_unlock();

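	/*
	 * Make sure the pre-inserted items are globally visible before
	 * releasing the reader and writer threads spinning on test_go.
	 */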
	cmm_smp_mb();

	test_go = 1;

	sleep(duration);

	test_stop = 1;

	for (i = 0; i < nr_readers; i++) {
		err = pthread_join(tid_reader[i], &tret);
		if (err != 0)
			exit(1);
		tot_reads += count_reader[i];
	}
	for (i = 0; i < nr_writers; i++) {
		err = pthread_join(tid_writer[i], &tret);
		if (err != 0)
			exit(1);
		tot_writes += count_writer[i];
	}

	rcu_read_lock();
	for (i = 0; i < global_items; i++) {
		node = rcu_rbtree_search(&rbtree, rbtree.root, global_key[i]);
		assert(!rcu_rbtree_is_nil(&rbtree, node));
		rcu_rbtree_remove(&rbtree, node);
	}
	rcu_read_unlock();
	rcu_unregister_thread();

	printf_verbose("total number of reads : %llu, writes %llu\n", tot_reads,
		       tot_writes);
	printf("SUMMARY %-25s testdur %4lu nr_readers %3u rdur %6lu wdur %6lu "
		"nr_writers %3u "
		"wdelay %6lu nr_reads %12llu nr_writes %12llu nr_ops %12llu "
		"global_items %6lu\n",
		argv[0], duration, nr_readers, rduration, wduration,
		nr_writers, wdelay, tot_reads, tot_writes,
		tot_reads + tot_writes, global_items);
	free(tid_reader);
	free(tid_writer);
	free(count_reader);
	free(count_writer);
	free(global_key);
	return 0;
}