Add range test case, fix range search
[userspace-rcu.git] / tests / test_urcu_rbtree.c
1 /*
2 * test_urcu_rbtree.c
3 *
4 * Userspace RCU library - test program for RB tree
5 *
6 * Copyright February 2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, write to the Free Software Foundation, Inc.,
20 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 */
22
23 #define _GNU_SOURCE
24 #ifndef DYNAMIC_LINK_TEST
25 #define _LGPL_SOURCE
26 #else
27 #define debug_yield_read()
28 #endif
29 #include "../config.h"
30 #include <stdio.h>
31 #include <pthread.h>
32 #include <stdlib.h>
33 #include <string.h>
34 #include <sys/types.h>
35 #include <sys/wait.h>
36 #include <unistd.h>
37 #include <stdio.h>
38 #include <assert.h>
39 #include <sys/syscall.h>
40 #include <sched.h>
41 #include <errno.h>
42 #include <time.h>
43
44 #include <urcu/arch.h>
45
46 /* hardcoded number of CPUs */
47 #define NR_CPUS 16384
48
49 /* number of insert/delete */
50 #define NR_RAND 6
51 //#define NR_RAND 7
52
/*
 * gettid() compatibility shim: glibc historically provided no wrapper
 * for the gettid system call. Prefer the legacy _syscall0 stub when
 * available, fall back to syscall(__NR_gettid), and finally to
 * getpid() on platforms without the syscall number (tid == pid there;
 * only used for log messages, so that approximation is harmless).
 */
#if defined(_syscall0)
_syscall0(pid_t, gettid)
#elif defined(__NR_gettid)
static inline pid_t gettid(void)
{
	return syscall(__NR_gettid);
}
#else
#warning "use pid as tid"
static inline pid_t gettid(void)
{
	return getpid();
}
#endif
67
68 #include <urcu.h>
69 #include <urcu/rcurbtree.h>
70 #include <urcu-defer.h>
71
72 /* TODO: error handling testing for -ENOMEM */
73 struct rcu_rbtree_node *rbtree_alloc(void)
74 {
75 return calloc(1, sizeof(struct rcu_rbtree_node));
76 }
77
78 void rbtree_free(struct rcu_head *head)
79 {
80 struct rcu_rbtree_node *node =
81 caa_container_of(head, struct rcu_rbtree_node, head);
82 free(node);
83 }
84
/*
 * Three-way key comparator for the RB tree. Keys are opaque pointers
 * whose integer values are compared directly.
 * Returns -1, 0 or 1 for a < b, a == b, a > b respectively.
 */
int tree_comp(void *a, void *b)
{
	unsigned long ka = (unsigned long)a;
	unsigned long kb = (unsigned long)b;

	if (ka == kb)
		return 0;
	return (ka < kb) ? -1 : 1;
}
94
95 static DEFINE_RCU_RBTREE(rbtree, tree_comp, rbtree_alloc, rbtree_free);
96
97 static volatile int test_go, test_stop;
98
99 static unsigned long wdelay;
100
101 static unsigned long duration;
102
103 /* read-side C.S. duration, in loops */
104 static unsigned long rduration;
105
106 /* write-side C.S. duration, in loops */
107 static unsigned long wduration;
108
/*
 * Busy-wait for approximately @l loop iterations, issuing a CPU relax
 * hint each pass. Does not sleep or yield to the scheduler.
 */
static inline void loop_sleep(unsigned long l)
{
	for (; l != 0; l--)
		caa_cpu_relax();
}
114
/* Set by the -v command line flag; gates all printf_verbose() output. */
static int verbose_mode;

/*
 * printf() that only emits output in verbose mode. The do/while(0)
 * wrapper makes the macro behave as a single statement (safe inside
 * unbraced if/else bodies).
 */
#define printf_verbose(fmt, args...)		\
	do {					\
		if (verbose_mode)		\
			printf(fmt, args);	\
	} while (0)
122
123 static unsigned int cpu_affinities[NR_CPUS];
124 static unsigned int next_aff = 0;
125 static int use_affinity = 0;
126
127 pthread_mutex_t affinity_mutex = PTHREAD_MUTEX_INITIALIZER;
128
129 #ifndef HAVE_CPU_SET_T
130 typedef unsigned long cpu_set_t;
131 # define CPU_ZERO(cpuset) do { *(cpuset) = 0; } while(0)
132 # define CPU_SET(cpu, cpuset) do { *(cpuset) |= (1UL << (cpu)); } while(0)
133 #endif
134
/*
 * Pin the calling thread to the next CPU taken round-robin from the
 * cpu_affinities[] list filled by the -a command line options.
 * No-op when -a was not given, or when sched_setaffinity support was
 * not detected at configure time.
 */
static void set_affinity(void)
{
	cpu_set_t mask;
	int cpu;
	int ret;

	if (!use_affinity)
		return;

#if HAVE_SCHED_SETAFFINITY
	/* next_aff is shared by all starting threads: serialize the pick. */
	ret = pthread_mutex_lock(&affinity_mutex);
	if (ret) {
		perror("Error in pthread mutex lock");
		exit(-1);
	}
	cpu = cpu_affinities[next_aff++];
	ret = pthread_mutex_unlock(&affinity_mutex);
	if (ret) {
		perror("Error in pthread mutex unlock");
		exit(-1);
	}

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	/*
	 * NOTE(review): the sched_setaffinity() return value is ignored;
	 * a failed pinning silently leaves the thread unpinned.
	 */
#if SCHED_SETAFFINITY_ARGS == 2
	sched_setaffinity(0, &mask);
#else
	sched_setaffinity(0, sizeof(mask), &mask);
#endif
#endif /* HAVE_SCHED_SETAFFINITY */
}
166
167 /*
168 * returns 0 if test should end.
169 */
170 static int test_duration_write(void)
171 {
172 return !test_stop;
173 }
174
175 static int test_duration_read(void)
176 {
177 return !test_stop;
178 }
179
180 static unsigned long long __thread nr_writes;
181 static unsigned long long __thread nr_reads;
182
183 static unsigned int nr_readers;
184 static unsigned int nr_writers;
185
186 static unsigned long global_items;
187 static void **global_key = NULL;
188
189 pthread_mutex_t rcu_copy_mutex = PTHREAD_MUTEX_INITIALIZER;
190
191 void rcu_copy_mutex_lock(void)
192 {
193 int ret;
194 ret = pthread_mutex_lock(&rcu_copy_mutex);
195 if (ret) {
196 perror("Error in pthread mutex lock");
197 exit(-1);
198 }
199 }
200
201 void rcu_copy_mutex_unlock(void)
202 {
203 int ret;
204
205 ret = pthread_mutex_unlock(&rcu_copy_mutex);
206 if (ret) {
207 perror("Error in pthread mutex unlock");
208 exit(-1);
209 }
210 }
211
212 static
213 void set_lookup_index(struct rcu_rbtree_node *node,
214 char *lookup_hit)
215 {
216 int i;
217
218 for (i = 0; i < global_items; i++) {
219 if (node->key == global_key[i]
220 && !lookup_hit[i]) {
221 lookup_hit[i] = 1;
222 break;
223 }
224 }
225 }
226
227 void *thr_reader(void *_count)
228 {
229 unsigned long long *count = _count;
230 struct rcu_rbtree_node *node;
231 int i, index;
232 char *lookup_hit;
233
234 printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
235 "reader", pthread_self(), (unsigned long)gettid());
236
237 set_affinity();
238
239 rcu_register_thread();
240
241 lookup_hit = malloc(sizeof(*lookup_hit) * global_items);
242
243 while (!test_go)
244 {
245 }
246 cmm_smp_mb();
247
248 for (;;) {
249
250 /* search */
251 for (i = 0; i < global_items; i++) {
252 rcu_read_lock();
253 node = rcu_rbtree_search(&rbtree,
254 rcu_dereference(rbtree.root),
255 global_key[i]);
256 assert(!rcu_rbtree_is_nil(node));
257 rcu_read_unlock();
258 }
259
260 /* search range min */
261 for (i = 0; i < global_items; i++) {
262 rcu_read_lock();
263 node = rcu_rbtree_search_min(&rbtree,
264 rcu_dereference(rbtree.root),
265 global_key[i], global_key[i]);
266 assert(!rcu_rbtree_is_nil(node));
267 rcu_read_unlock();
268 }
269
270 /* search range max */
271 for (i = 0; i < global_items; i++) {
272 rcu_read_lock();
273 node = rcu_rbtree_search_max(&rbtree,
274 rcu_dereference(rbtree.root),
275 global_key[i], global_key[i]);
276 assert(!rcu_rbtree_is_nil(node));
277 rcu_read_unlock();
278 }
279
280 /* min + next */
281 memset(lookup_hit, 0, sizeof(*lookup_hit) * global_items);
282
283 rcu_read_lock();
284 node = rcu_rbtree_min(&rbtree,
285 rcu_dereference(rbtree.root));
286 while (!rcu_rbtree_is_nil(node)) {
287 set_lookup_index(node, lookup_hit);
288 node = rcu_rbtree_next(&rbtree, node);
289 }
290 rcu_read_unlock();
291
292 for (i = 0; i < global_items; i++)
293 assert(lookup_hit[i]);
294
295 /* max + prev */
296 memset(lookup_hit, 0, sizeof(*lookup_hit) * global_items);
297
298 rcu_read_lock();
299 node = rcu_rbtree_max(&rbtree,
300 rcu_dereference(rbtree.root));
301 while (!rcu_rbtree_is_nil(node)) {
302 set_lookup_index(node, lookup_hit);
303 node = rcu_rbtree_prev(&rbtree, node);
304 }
305 rcu_read_unlock();
306
307 for (i = 0; i < global_items; i++)
308 assert(lookup_hit[i]);
309
310 debug_yield_read();
311 if (unlikely(rduration))
312 loop_sleep(rduration);
313 nr_reads++;
314 if (unlikely(!test_duration_read()))
315 break;
316 }
317
318 rcu_unregister_thread();
319
320 /* test extra thread registration */
321 rcu_register_thread();
322 rcu_unregister_thread();
323
324 free(lookup_hit);
325
326 *count = nr_reads;
327 printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
328 "reader", pthread_self(), (unsigned long)gettid());
329 return ((void*)1);
330
331 }
332
333 void *thr_writer(void *_count)
334 {
335 unsigned long long *count = _count;
336 struct rcu_rbtree_node *node;
337 void *key[NR_RAND];
338 int i;
339
340 printf_verbose("thread_begin %s, thread id : %lx, tid %lu\n",
341 "writer", pthread_self(), (unsigned long)gettid());
342
343 set_affinity();
344
345 rcu_register_thread();
346
347 while (!test_go)
348 {
349 }
350 cmm_smp_mb();
351
352 for (;;) {
353 rcu_copy_mutex_lock();
354
355 for (i = 0; i < NR_RAND; i++) {
356 node = rbtree_alloc();
357 key[i] = (void *)(unsigned long)(rand() % 2048);
358 node->key = key[i];
359 rcu_read_lock();
360 rcu_rbtree_insert(&rbtree, node);
361 rcu_read_unlock();
362 }
363 rcu_copy_mutex_unlock();
364
365 if (unlikely(wduration))
366 loop_sleep(wduration);
367
368 rcu_copy_mutex_lock();
369 for (i = 0; i < NR_RAND; i++) {
370 #if 0
371 node = rcu_rbtree_min(rbtree, rbtree->root);
372 while (!rcu_rbtree_is_nil(node)) {
373 printf("{ 0x%lX p:%lX r:%lX l:%lX %s %s %s} ",
374 (unsigned long)node->key,
375 node->p->key,
376 node->right->key,
377 node->left->key,
378 node->color ? "red" : "black",
379 node->pos ? "right" : "left",
380 node->nil ? "nil" : "");
381 node = rcu_rbtree_next(rbtree, node);
382 }
383 printf("\n");
384 #endif
385 rcu_read_lock();
386 node = rcu_rbtree_search(&rbtree, rbtree.root, key[i]);
387 assert(!rcu_rbtree_is_nil(node));
388 rcu_rbtree_remove(&rbtree, node);
389 rcu_read_unlock();
390 call_rcu(&node->head, rbtree_free);
391 }
392
393 rcu_copy_mutex_unlock();
394 nr_writes++;
395 if (unlikely(!test_duration_write()))
396 break;
397 if (unlikely(wdelay))
398 loop_sleep(wdelay);
399 }
400
401 rcu_unregister_thread();
402
403 printf_verbose("thread_end %s, thread id : %lx, tid %lu\n",
404 "writer", pthread_self(), (unsigned long)gettid());
405 *count = nr_writes;
406 return ((void*)2);
407 }
408
/*
 * Print the command line usage summary to stdout.
 *
 * Fix: the -g option (number of global items) is parsed by main() but
 * was missing from the usage text.
 */
void show_usage(int argc, char **argv)
{
	printf("Usage : %s nr_readers nr_writers duration (s)", argv[0]);
#ifdef DEBUG_YIELD
	printf(" [-r] [-w] (yield reader and/or writer)");
#endif
	printf(" [-d delay] (writer period (us))");
	printf(" [-c duration] (reader C.S. duration (in loops))");
	printf(" [-e duration] (writer C.S. duration (in loops))");
	printf(" [-v] (verbose output)");
	printf(" [-a cpu#] [-a cpu#]... (affinity)");
	printf(" [-g items] (number of global items inserted for readers)");
	printf("\n");
}
422
/*
 * Entry point. Parses the command line, spawns the reader and writer
 * threads, pre-populates the tree with global_items random keys for
 * the readers to look up, runs the test for `duration` seconds, then
 * joins all threads, removes the global keys from the tree and prints
 * the aggregated read/write operation counts.
 */
int main(int argc, char **argv)
{
	int err;
	pthread_t *tid_reader, *tid_writer;
	void *tret;
	unsigned long long *count_reader, *count_writer;
	unsigned long long tot_reads = 0, tot_writes = 0;
	int i, a;
	struct rcu_rbtree_node *node;

	if (argc < 4) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[1], "%u", &nr_readers);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[2], "%u", &nr_writers);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	err = sscanf(argv[3], "%lu", &duration);
	if (err != 1) {
		show_usage(argc, argv);
		return -1;
	}

	/* Optional flags follow the three positional arguments. */
	for (i = 4; i < argc; i++) {
		if (argv[i][0] != '-')
			continue;
		switch (argv[i][1]) {
#ifdef DEBUG_YIELD
		case 'r':
			yield_active |= YIELD_READ;
			break;
		case 'w':
			yield_active |= YIELD_WRITE;
			break;
#endif
		case 'a':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			a = atoi(argv[++i]);
			cpu_affinities[next_aff++] = a;
			use_affinity = 1;
			printf_verbose("Adding CPU %d affinity\n", a);
			break;
		case 'c':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			rduration = atol(argv[++i]);
			break;
		case 'd':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			wdelay = atol(argv[++i]);
			break;
		case 'e':
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			wduration = atol(argv[++i]);
			break;
		case 'v':
			verbose_mode = 1;
			break;
		case 'g':
			/* Number of keys pre-inserted for reader lookups. */
			if (argc < i + 2) {
				show_usage(argc, argv);
				return -1;
			}
			global_items = atol(argv[++i]);
			break;
		}
	}

	printf_verbose("running test for %lu seconds, %u readers, %u writers.\n",
		duration, nr_readers, nr_writers);
	printf_verbose("Writer delay : %lu loops.\n", wdelay);
	printf_verbose("Reader duration : %lu loops.\n", rduration);
	printf_verbose("thread %-6s, thread id : %lx, tid %lu\n",
			"main", pthread_self(), (unsigned long)gettid());

	/*
	 * NOTE(review): these malloc results are unchecked; also, when -g
	 * is not given, global_items stays 0 and global_key is a
	 * zero-size allocation (readers then validate an empty key set).
	 */
	tid_reader = malloc(sizeof(*tid_reader) * nr_readers);
	tid_writer = malloc(sizeof(*tid_writer) * nr_writers);
	count_reader = malloc(sizeof(*count_reader) * nr_readers);
	count_writer = malloc(sizeof(*count_writer) * nr_writers);
	global_key = malloc(sizeof(*global_key) * global_items);

	srand(time(NULL));

	next_aff = 0;

	for (i = 0; i < nr_readers; i++) {
		err = pthread_create(&tid_reader[i], NULL, thr_reader,
				     &count_reader[i]);
		if (err != 0)
			exit(1);
	}
	for (i = 0; i < nr_writers; i++) {
		err = pthread_create(&tid_writer[i], NULL, thr_writer,
				     &count_writer[i]);
		if (err != 0)
			exit(1);
	}

	rcu_register_thread();
	rcu_read_lock();
	/*
	 * Insert items looked up by readers. rand() % 2048 may produce
	 * duplicate keys; readers account for this via lookup_hit[].
	 */
	for (i = 0; i < global_items; i++) {
		node = rbtree_alloc();
		global_key[i] = (void *)(unsigned long)(rand() % 2048);
		node->key = global_key[i];
		rcu_rbtree_insert(&rbtree, node);
	}
	rcu_read_unlock();

	/* Ensure all setup is globally visible before releasing threads. */
	cmm_smp_mb();

	test_go = 1;

	sleep(duration);

	test_stop = 1;

	for (i = 0; i < nr_readers; i++) {
		err = pthread_join(tid_reader[i], &tret);
		if (err != 0)
			exit(1);
		tot_reads += count_reader[i];
	}
	for (i = 0; i < nr_writers; i++) {
		err = pthread_join(tid_writer[i], &tret);
		if (err != 0)
			exit(1);
		tot_writes += count_writer[i];
	}

	/* Remove the pre-inserted global keys and reclaim their nodes. */
	rcu_read_lock();
	for (i = 0; i < global_items; i++) {
		node = rcu_rbtree_search(&rbtree, rbtree.root, global_key[i]);
		assert(!rcu_rbtree_is_nil(node));
		rcu_rbtree_remove(&rbtree, node);
		call_rcu(&node->head, rbtree_free);
	}
	rcu_read_unlock();
	rcu_unregister_thread();

	printf_verbose("total number of reads : %llu, writes %llu\n", tot_reads,
	       tot_writes);
	printf("SUMMARY %-25s testdur %4lu nr_readers %3u rdur %6lu wdur %6lu "
		"nr_writers %3u "
		"wdelay %6lu nr_reads %12llu nr_writes %12llu nr_ops %12llu "
		"global_items %6lu\n",
		argv[0], duration, nr_readers, rduration, wduration,
		nr_writers, wdelay, tot_reads, tot_writes,
		tot_reads + tot_writes, global_items);
	free(tid_reader);
	free(tid_writer);
	free(count_reader);
	free(count_writer);
	free(global_key);
	return 0;
}
This page took 0.045385 seconds and 5 git commands to generate.