/*
 * gitweb extraction header — repository: userspace-rcu.git,
 * path: tests/test_urcu_rbtree.c,
 * commit subject: "Temporarily disable range tests".
 */
1 /*
2 * test_urcu_rbtree.c
3 *
4 * Userspace RCU library - test program for RB tree
5 *
6 * Copyright February 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 */
22
23 #define _GNU_SOURCE
24 #ifndef DYNAMIC_LINK_TEST
25 #define _LGPL_SOURCE
26 #else
27 #define debug_yield_read()
28 #endif
29 #include "../config.h"
30 #include <stdio.h>
31 #include <pthread.h>
32 #include <stdlib.h>
33 #include <string.h>
34 #include <sys/types.h>
35 #include <sys/wait.h>
36 #include <unistd.h>
37 #include <stdio.h>
38 #include <assert.h>
39 #include <sys/syscall.h>
40 #include <sched.h>
41 #include <errno.h>
42 #include <time.h>
43
44 #include <urcu/arch.h>
45
/* Debug-disable flag provided by the RCU rbtree implementation (per thread). */
extern int __thread disable_debug;

/* hardcoded number of CPUs */
#define NR_CPUS 16384

/* number of insert/delete performed per writer iteration */
#define NR_RAND 6
//#define NR_RAND 7
54
/*
 * gettid() portability ladder: glibc historically did not expose a
 * gettid() wrapper, so fall back to the raw syscall, then to getpid().
 */
#if defined(_syscall0)
_syscall0(pid_t, gettid)
#elif defined(__NR_gettid)
/* Invoke the kernel's gettid syscall directly. */
static inline pid_t gettid(void)
{
	return syscall(__NR_gettid);
}
#else
#warning "use pid as tid"
/* Last resort: the process id stands in for the thread id. */
static inline pid_t gettid(void)
{
	return getpid();
}
#endif
69
70 #include <urcu.h>
71 #include <urcu/rcurbtree.h>
72 #include <urcu-defer.h>
73
74 /* TODO: error handling testing for -ENOMEM */
75 struct rcu_rbtree_node *rbtree_alloc(void)
76 {
77 return calloc(1, sizeof(struct rcu_rbtree_node));
78 }
79
80 void rbtree_free(struct rcu_head *head)
81 {
82 struct rcu_rbtree_node *node =
83 caa_container_of(head, struct rcu_rbtree_node, head);
84 free(node);
85 }
86
/*
 * Three-way comparison of keys stored as pointer-sized integers.
 * Returns -1, 0 or 1 as a is less than, equal to, or greater than b.
 */
int tree_comp(void *a, void *b)
{
	unsigned long ka = (unsigned long) a;
	unsigned long kb = (unsigned long) b;

	return (ka > kb) - (ka < kb);
}
96
/* The shared RB tree under test, keyed by integer-valued pointers. */
static DEFINE_RCU_RBTREE(rbtree, tree_comp, rbtree_alloc, rbtree_free);

/* Start/stop flags polled by reader and writer threads. */
static volatile int test_go, test_stop;

/* Writer delay between iterations, in loops (-d option). */
static unsigned long wdelay;

/* Total test duration in seconds (positional argument 3). */
static unsigned long duration;

/* read-side C.S. duration, in loops */
static unsigned long rduration;

/* write-side C.S. duration, in loops */
static unsigned long wduration;
110
/* Busy-wait for l iterations, hinting the CPU each time round. */
static inline void loop_sleep(unsigned long l)
{
	for (; l != 0; l--)
		caa_cpu_relax();
}
116
/* Set by -v; gates all printf_verbose() output. */
static int verbose_mode;

/* printf that only produces output in verbose mode. */
#define printf_verbose(fmt, args...)	\
	do {				\
		if (verbose_mode)	\
			printf(fmt, args);	\
	} while (0)

/* CPUs collected from -a options, handed out round-robin to threads. */
static unsigned int cpu_affinities[NR_CPUS];
static unsigned int next_aff = 0;
static int use_affinity = 0;

/* Protects next_aff while threads pick their CPU in set_affinity(). */
pthread_mutex_t affinity_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Minimal cpu mask fallback for systems lacking cpu_set_t. */
#ifndef HAVE_CPU_SET_T
typedef unsigned long cpu_set_t;
# define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while(0)
# define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while(0)
#endif
136
/*
 * Pin the calling thread to the next CPU from cpu_affinities[].
 * No-op unless -a options were given or sched_setaffinity is missing.
 */
static void set_affinity(void)
{
	cpu_set_t mask;
	int cpu;
	int ret;

	if (!use_affinity)
		return;

#if HAVE_SCHED_SETAFFINITY
	/* Serialize next_aff so each thread gets a distinct entry. */
	ret = pthread_mutex_lock(&affinity_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
	cpu = cpu_affinities[next_aff++];
	ret = pthread_mutex_unlock(&affinity_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	/* Older glibc sched_setaffinity takes 2 arguments, newer takes 3. */
#if SCHED_SETAFFINITY_ARGS == 2
	sched_setaffinity(0, &mask);
#else
	sched_setaffinity(0, sizeof(mask), &mask);
#endif
#endif /* HAVE_SCHED_SETAFFINITY */
}
168
169 /*
170 * returns 0 if test should end.
171 */
172 static int test_duration_write(void)
173 {
174 return !test_stop;
175 }
176
177 static int test_duration_read(void)
178 {
179 return !test_stop;
180 }
181
/* Per-thread operation counters, copied out via the *_count argument. */
static unsigned long long __thread nr_writes;
static unsigned long long __thread nr_reads;

/* Thread counts from positional arguments 1 and 2. */
static unsigned int nr_readers;
static unsigned int nr_writers;

/* Items pre-inserted by main and verified by readers (-g option). */
static unsigned long global_items;
static void **global_key = NULL;

/* Serializes writer insert/remove phases against each other. */
pthread_mutex_t rcu_copy_mutex = PTHREAD_MUTEX_INITIALIZER;
192
193 void rcu_copy_mutex_lock(void)
194 {
195 int ret;
196 ret = pthread_mutex_lock(&rcu_copy_mutex);
197 if (ret) {
198 perror("Error in pthread mutex lock");
199 exit(-1);
200 }
201 }
202
203 void rcu_copy_mutex_unlock(void)
204 {
205 int ret;
206
207 ret = pthread_mutex_unlock(&rcu_copy_mutex);
208 if (ret) {
209 perror("Error in pthread mutex unlock");
210 exit(-1);
211 }
212 }
213
214 static
215 void set_lookup_index(struct rcu_rbtree_node *node,
216 char *lookup_hit)
217 {
218 int i;
219
220 for (i = 0; i < global_items; i++) {
221 if (node->begin == global_key[i]
222 && !lookup_hit[i]) {
223 lookup_hit[i] = 1;
224 break;
225 }
226 }
227 }
228
/*
 * Reader thread: repeatedly looks up every pre-inserted global key and
 * walks the whole tree forward (min/next) and backward (max/prev),
 * asserting every global item is found each pass.  Writes its iteration
 * count to *_count before returning.
 */
void *thr_reader(void *_count)
{
	unsigned long long *count = _count;
	struct rcu_rbtree_node *node;
	int i, index;	/* NOTE(review): index appears unused -- confirm */
	char *lookup_hit;

	printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
			"reader", pthread_self(), (unsigned long)gettid());

	set_affinity();

	rcu_register_thread();

	/* One seen-flag per global item, reset before each traversal. */
	lookup_hit = malloc(sizeof(*lookup_hit) * global_items);

	/* Busy-wait for main() to release all threads at once. */
	while (!test_go)
	{
	}
	cmm_smp_mb();

	for (;;) {
		/* search bottom of range */
		for (i = 0; i < global_items; i++) {
			rcu_read_lock();
			node = rcu_rbtree_search(&rbtree,
					rcu_dereference(rbtree.root),
					global_key[i]);
			assert(!rcu_rbtree_is_nil(&rbtree, node));
			rcu_read_unlock();
		}
		/* Range lookups temporarily disabled (see commit subject). */
#if 0
		/* search end of range */
		for (i = 0; i < global_items; i++) {
			rcu_read_lock();
			node = rcu_rbtree_search(&rbtree,
					rcu_dereference(rbtree.root),
					(void*) ((unsigned long) global_key[i] + 2));
			assert(!rcu_rbtree_is_nil(&rbtree, node));
			rcu_read_unlock();
		}

		/* search range (middle) */
		for (i = 0; i < global_items; i++) {
			rcu_read_lock();
			node = rcu_rbtree_search_range(&rbtree,
					rcu_dereference(rbtree.root),
					(void*) ((unsigned long) global_key[i] + 1),
					(void*) ((unsigned long) global_key[i] + 2));
			assert(!rcu_rbtree_is_nil(&rbtree, node));
			rcu_read_unlock();
		}
#endif //0

		/* search begin key */
		for (i = 0; i < global_items; i++) {
			rcu_read_lock();
			node = rcu_rbtree_search_begin_key(&rbtree,
					rcu_dereference(rbtree.root),
					global_key[i]);
			assert(!rcu_rbtree_is_nil(&rbtree, node));
			rcu_read_unlock();
		}

		/* min + next: forward traversal must visit every global item */
		memset(lookup_hit, 0, sizeof(*lookup_hit) * global_items);

		rcu_read_lock();
		node = rcu_rbtree_min(&rbtree,
				rcu_dereference(rbtree.root));
		while (!rcu_rbtree_is_nil(&rbtree, node)) {
			set_lookup_index(node, lookup_hit);
			node = rcu_rbtree_next(&rbtree, node);
		}
		rcu_read_unlock();

		for (i = 0; i < global_items; i++)
			assert(lookup_hit[i]);

		/* max + prev: backward traversal must visit every global item */
		memset(lookup_hit, 0, sizeof(*lookup_hit) * global_items);

		rcu_read_lock();
		node = rcu_rbtree_max(&rbtree,
				rcu_dereference(rbtree.root));
		while (!rcu_rbtree_is_nil(&rbtree, node)) {
			set_lookup_index(node, lookup_hit);
			node = rcu_rbtree_prev(&rbtree, node);
		}
		rcu_read_unlock();

		for (i = 0; i < global_items; i++)
			assert(lookup_hit[i]);

		debug_yield_read();
		if (unlikely(rduration))
			loop_sleep(rduration);
		nr_reads++;
		if (unlikely(!test_duration_read()))
			break;
	}

	rcu_unregister_thread();

	/* test extra thread registration */
	rcu_register_thread();
	rcu_unregister_thread();

	free(lookup_hit);

	*count = nr_reads;
	printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
			"reader", pthread_self(), (unsigned long)gettid());
	return ((void*)1);

}
345
/*
 * Writer thread: each iteration inserts NR_RAND nodes with random keys
 * into the tree, then removes them again, both phases serialized by
 * rcu_copy_mutex.  Removed nodes are freed via call_rcu() after a grace
 * period.  Writes its iteration count to *_count before returning.
 */
void *thr_writer(void *_count)
{
	unsigned long long *count = _count;
	struct rcu_rbtree_node *node;
	void *key[NR_RAND];
	int i;

	printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
			"writer", pthread_self(), (unsigned long)gettid());

	set_affinity();

	//disable_debug = 1;

	rcu_register_thread();

	/* Busy-wait for main() to release all threads at once. */
	while (!test_go)
	{
	}
	cmm_smp_mb();

	for (;;) {
		rcu_copy_mutex_lock();

		/* Insert NR_RAND nodes keyed in [0, 2048). */
		for (i = 0; i < NR_RAND; i++) {
			node = rbtree_alloc();
			key[i] = (void *)(unsigned long)(rand() % 2048);
			//key[i] = (void *)(unsigned long)((rand() * 4) % 2048);
			//For more collisions
			//key[i] = (void *)(unsigned long)(rand() % 6);
			node->begin = key[i];
			node->end = (void *)((unsigned long) key[i] + 1);
			//node->end = (void *)((unsigned long) key[i] + 4);
			rcu_read_lock();
			rcu_rbtree_insert(&rbtree, node);
			rcu_read_unlock();
		}
		rcu_copy_mutex_unlock();

		if (unlikely(wduration))
			loop_sleep(wduration);

		/* Remove the nodes just inserted; defer freeing via RCU. */
		rcu_copy_mutex_lock();
		for (i = 0; i < NR_RAND; i++) {
/* Disabled debug dump of the whole tree before each removal. */
#if 0
			node = rcu_rbtree_min(rbtree, rbtree->root);
			while (!rcu_rbtree_is_nil(&rbtree, node)) {
				printf("{ 0x%lX p:%lX r:%lX l:%lX %s %s %s} ",
					(unsigned long) node->key,
					node->p->key,
					node->right->key,
					node->left->key,
					node->color ? "red" : "black",
					node->pos ? "right" : "left",
					node->nil ? "nil" : "");
				node = rcu_rbtree_next(rbtree, node);
			}
			printf("\n");
#endif
			rcu_read_lock();
			node = rcu_rbtree_search(&rbtree, rbtree.root, key[i]);
			assert(!rcu_rbtree_is_nil(&rbtree, node));
			rcu_rbtree_remove(&rbtree, node);
			rcu_read_unlock();
			call_rcu(&node->head, rbtree_free);
		}

		rcu_copy_mutex_unlock();
		nr_writes++;
		if (unlikely(!test_duration_write()))
			break;
		if (unlikely(wdelay))
			loop_sleep(wdelay);
	}

	rcu_unregister_thread();

	printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
			"writer", pthread_self(), (unsigned long)gettid());
	*count = nr_writes;
	return ((void*)2);
}
428
/*
 * Print command line usage to stdout.
 * Fix: document the -g option, which main() parses (number of global
 * items pre-inserted for readers) but was missing from the usage text.
 * argc is unused but kept for interface compatibility with callers.
 */
void show_usage(int argc, char **argv)
{
	printf("Usage : %s nr_readers nr_writers duration (s)", argv[0]);
#ifdef DEBUG_YIELD
	printf(" [-r] [-w] (yield reader and/or writer)");
#endif
	printf(" [-d delay] (writer period (us))");
	printf(" [-c duration] (reader C.S. duration (in loops))");
	printf(" [-e duration] (writer C.S. duration (in loops))");
	printf(" [-v] (verbose output)");
	printf(" [-g items] (number of global items)");
	printf(" [-a cpu#] [-a cpu#]... (affinity)");
	printf("\n");
}
442
/*
 * Entry point.  Parses arguments, spawns reader and writer threads,
 * pre-populates the tree with global_items random keys for readers to
 * verify, runs the test for 'duration' seconds, then joins all threads,
 * tears the tree down and prints a summary line.
 */
int main(int argc, char **argv)
{
	int err;
	pthread_t *tid_reader, *tid_writer;
	void *tret;
	unsigned long long *count_reader, *count_writer;
	unsigned long long tot_reads = 0, tot_writes = 0;
	int i, a;
	struct rcu_rbtree_node *node;

	if (argc < 4) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[1], "%u", &nr_readers);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[2], "%u", &nr_writers);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[3], "%lu", &duration);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	/* Optional flags; see show_usage(). */
	for (i = 4; i < argc; i++) {
		if (argv[i][0] != '-')
			continue;
		switch (argv[i][1]) {
#ifdef DEBUG_YIELD
		case 'r':
			yield_active |= YIELD_READ;
			break;
		case 'w':
			yield_active |= YIELD_WRITE;
			break;
#endif
		case 'a':	/* pin threads to the listed CPUs */
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			a = atoi(argv[++i]);
			cpu_affinities[next_aff++] = a;
			use_affinity = 1;
			printf_verbose("Adding CPU %d affinity\n", a);
			break;
		case 'c':	/* reader C.S. duration, in loops */
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			rduration = atol(argv[++i]);
			break;
		case 'd':	/* writer delay between iterations */
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			wdelay = atol(argv[++i]);
			break;
		case 'e':	/* writer C.S. duration, in loops */
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			wduration = atol(argv[++i]);
			break;
		case 'v':
			verbose_mode = 1;
			break;
		case 'g':	/* number of pre-inserted global items */
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			global_items = atol(argv[++i]);
			break;
		}
	}

	printf_verbose("running test for %lu seconds, %u readers, %u writers.\n",
		duration, nr_readers, nr_writers);
	printf_verbose("Writer delay : %lu loops.\n", wdelay);
	printf_verbose("Reader duration : %lu loops.\n", rduration);
	printf_verbose("thread %-6s, thread id : %lx, tid %lu\n",
			"main", pthread_self(), (unsigned long)gettid());

	/*
	 * NOTE(review): malloc results are unchecked, and without -g
	 * global_items stays 0 so readers have nothing to verify --
	 * confirm this is the intended default.
	 */
	tid_reader = malloc(sizeof(*tid_reader) * nr_readers);
	tid_writer = malloc(sizeof(*tid_writer) * nr_writers);
	count_reader = malloc(sizeof(*count_reader) * nr_readers);
	count_writer = malloc(sizeof(*count_writer) * nr_writers);
	global_key = malloc(sizeof(*global_key) * global_items);

	srand(time(NULL));

	next_aff = 0;

	for (i = 0; i < nr_readers; i++) {
		err = pthread_create(&tid_reader[i], NULL, thr_reader,
				     &count_reader[i]);
		if (err != 0)
			exit(1);
	}
	for (i = 0; i < nr_writers; i++) {
		err = pthread_create(&tid_writer[i], NULL, thr_writer,
				     &count_writer[i]);
		if (err != 0)
			exit(1);
	}

	rcu_register_thread();
	rcu_read_lock();
	/* Insert items looked up by readers */
	for (i = 0; i < global_items; i++) {
		node = rbtree_alloc();
		//global_key[i] = (void *)(unsigned long)((rand() * 4) % 2048);
		global_key[i] = (void *)(unsigned long)(rand() % 2048);
		//For more collisions
		//global_key[i] = (void *)(unsigned long)(rand() % 6);
		node->begin = global_key[i];
		node->end = (void *)((unsigned long) global_key[i] + 1);
		//node->end = (void *)((unsigned long) global_key[i] + 4);
		rcu_rbtree_insert(&rbtree, node);
	}
	rcu_read_unlock();

	/* Make the pre-inserted items visible before releasing the threads. */
	cmm_smp_mb();

	test_go = 1;

	sleep(duration);

	test_stop = 1;

	for (i = 0; i < nr_readers; i++) {
		err = pthread_join(tid_reader[i], &tret);
		if (err != 0)
			exit(1);
		tot_reads += count_reader[i];
	}
	for (i = 0; i < nr_writers; i++) {
		err = pthread_join(tid_writer[i], &tret);
		if (err != 0)
			exit(1);
		tot_writes += count_writer[i];
	}

	/* Tear down the pre-inserted items; defer freeing via RCU. */
	rcu_read_lock();
	for (i = 0; i < global_items; i++) {
		node = rcu_rbtree_search(&rbtree, rbtree.root, global_key[i]);
		assert(!rcu_rbtree_is_nil(&rbtree, node));
		rcu_rbtree_remove(&rbtree, node);
		call_rcu(&node->head, rbtree_free);
	}
	rcu_read_unlock();
	rcu_unregister_thread();

	printf_verbose("total number of reads : %llu, writes %llu\n", tot_reads,
		       tot_writes);
	printf("SUMMARY %-25s testdur %4lu nr_readers %3u rdur %6lu wdur %6lu "
		"nr_writers %3u "
		"wdelay %6lu nr_reads %12llu nr_writes %12llu nr_ops %12llu "
		"global_items %6lu\n",
		argv[0], duration, nr_readers, rduration, wduration,
		nr_writers, wdelay, tot_reads, tot_writes,
		tot_reads + tot_writes, global_items);
	free(tid_reader);
	free(tid_writer);
	free(count_reader);
	free(count_writer);
	free(global_key);
	return 0;
}
/* gitweb footer: page generated in 0.051186 seconds using 5 git commands. */