rculfhash test: add nr_leaked count
[urcu.git] / rculfhash.c
/*
 * rculfhash.c
 *
 * Userspace RCU library - Lock-Free Expandable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
#include <pthread.h>

#define DEBUG		/* Test */

#ifdef DEBUG
#define dbg_printf(args...)	printf(args)
#else
#define dbg_printf(args...)
#endif

#define CHAIN_LEN_TARGET		1
#define CHAIN_LEN_RESIZE_THRESHOLD	2

#ifndef max
#define max(a, b)	((a) > (b) ? (a) : (b))
#endif

struct rcu_table {
	unsigned long size;	/* always a power of 2 */
	unsigned long resize_target;
	int resize_initiated;
	struct rcu_head head;
	struct rcu_ht_node *tbl[0];
};

struct rcu_ht {
	struct rcu_table *t;		/* shared */
	ht_hash_fct hash_fct;
	ht_compare_fct compare_fct;
	unsigned long hash_seed;
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	void (*ht_call_rcu)(struct rcu_head *head,
		      void (*func)(struct rcu_head *head));
};

struct rcu_resize_work {
	struct rcu_head head;
	struct rcu_ht *ht;
};

/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */

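/*
 * Nodes are kept in a single linked list ordered by their bit-reversed
 * hash value (reverse_hash), with one dummy node per bucket acting as
 * the bucket head. Ordering by reversed hash lets the table grow by
 * adding bucket heads without ever moving existing nodes, in the spirit
 * of split-ordered lists (Shalev and Shavit).
 */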
static const uint8_t BitReverseTable256[256] =
{
#define R2(n) (n), (n) + 2*64, (n) + 1*64, (n) + 3*64
#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
#undef R2
#undef R4
#undef R6

static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}

static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}

static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}

/*
 * Algorithm to find the log2 of a 32-bit unsigned integer.
 * source: http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup
 * Originally from Public Domain.
 */
static const char LogTable256[256] =
{
#define LT(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n
	-1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
	LT(4), LT(5), LT(5), LT(6), LT(6), LT(6), LT(6),
	LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7)
};

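/*
 * Returns the integer part of log2(v), e.g. log2_u32(1) = 0,
 * log2_u32(5) = 2 and log2_u32(1024) = 10. The result for v == 0 is
 * not meaningful (the table maps it to -1).
 */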
uint32_t log2_u32(uint32_t v)
{
	uint32_t t, tt;

	if ((tt = (v >> 16)))
		return (t = (tt >> 8))
				? 24 + LogTable256[t]
				: 16 + LogTable256[tt];
	else
		return (t = (v >> 8))
				? 8 + LogTable256[t]
				: LogTable256[v];
}

static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth);

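/*
 * Request a (lazy) table resize once a hash chain reaches
 * CHAIN_LEN_RESIZE_THRESHOLD nodes. The requested growth order scales
 * with how far the chain exceeds CHAIN_LEN_TARGET: e.g. a chain of 4
 * nodes requests the table size to be left-shifted by
 * log2_u32(4 - 1 - 1) = 1, i.e. doubled.
 */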
static
void check_resize(struct rcu_ht *ht, struct rcu_table *t,
		  uint32_t chain_len)
{
	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
		ht_resize_lazy(ht, t,
			log2_u32(chain_len - CHAIN_LEN_TARGET - 1));
}

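/*
 * The low-order bit of a node's next pointer is used as the "logically
 * removed" flag. Node pointers are at least word-aligned, so this bit
 * is otherwise always zero; the helpers below clear, test and set it.
 */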
static
struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) & ~0x1);
}

static
int is_removed(struct rcu_ht_node *node)
{
	return ((unsigned long) node) & 0x1;
}

static
struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) | 0x1);
}

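/*
 * Atomically update *ptr to max(*ptr, v) and return the resulting
 * maximum. Used to raise the resize target monotonically.
 */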
static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return old2;
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return v;
}

/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 */
static
void _ht_gc_bucket(struct rcu_ht_node *dummy, struct rcu_ht_node *node)
{
	struct rcu_ht_node *iter_prev, *iter, *next;

	for (;;) {
		iter_prev = dummy;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->next);
		assert(iter_prev->reverse_hash <= node->reverse_hash);
		if (unlikely(!iter))
			return;
		for (;;) {
			if (clear_flag(iter)->reverse_hash > node->reverse_hash)
				return;
			next = rcu_dereference(clear_flag(iter)->next);
			if (is_removed(next))
				break;
			if (unlikely(!next))
				return;
			iter_prev = iter;
			iter = next;
		}
		assert(!is_removed(iter));
		(void) uatomic_cmpxchg(&iter_prev->next, iter, clear_flag(next));
	}
}

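/*
 * Insert a node at its position in the reverse-hash-ordered chain,
 * starting from the bucket's dummy node. The predecessor's next pointer
 * is published with a cmpxchg; on failure the whole walk is retried.
 * Note that the "unique" flag is accepted but not yet acted upon in
 * this version.
 */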
static
int _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node,
	    int unique)
{
	struct rcu_ht_node *iter_prev, *dummy, *iter, *next;

	if (!t->size)
		return 0;
	for (;;) {
		uint32_t chain_len = 0;

		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
		iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->next);
		assert(iter_prev->reverse_hash <= node->reverse_hash);
		for (;;) {
			if (unlikely(!iter))
				goto insert;
			if (clear_flag(iter)->reverse_hash > node->reverse_hash)
				goto insert;
			next = rcu_dereference(clear_flag(iter)->next);
			if (is_removed(next))
				goto gc;
			/* Only account for identical reverse hash once */
			if (iter_prev->reverse_hash != clear_flag(iter)->reverse_hash)
				check_resize(ht, t, ++chain_len);
			iter_prev = clear_flag(iter);
			iter = next;
		}
	insert:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(iter_prev != node);
		node->next = iter;
		if (uatomic_cmpxchg(&iter_prev->next, iter,
				    node) != iter)
			continue;	/* retry */
		else
			goto gc_end;
	gc:
		/* Garbage collect logically removed nodes in the bucket */
		dummy = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
		_ht_gc_bucket(dummy, node);
		/* retry */
	}
gc_end:
	/* Garbage collect logically removed nodes in the bucket */
	dummy = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
	_ht_gc_bucket(dummy, node);
	return 0;
}

static
int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
	struct rcu_ht_node *dummy, *next, *old;
	int flagged = 0;

	/* logically delete the node */
	old = rcu_dereference(node->next);
	do {
		next = old;
		if (is_removed(next))
			goto end;
		assert(!node->dummy);
		old = uatomic_cmpxchg(&node->next, next,
				      flag_removed(next));
	} while (old != next);

	/* We performed the (logical) deletion. */
	flagged = 1;

	/*
	 * Ensure that the node is not visible to readers anymore: lookup for
	 * the node, and remove it (along with any other logically removed node)
	 * if found.
	 */
	dummy = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
	_ht_gc_bucket(dummy, node);
end:
	/*
	 * Only the flagging action indicated that we (and no other)
	 * removed the node from the hash.
	 */
	if (flagged) {
		assert(is_removed(rcu_dereference(node->next)));
		return 0;
	} else
		return -ENOENT;
}

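/*
 * Allocate and link the dummy (bucket head) nodes for indexes
 * [first, first + len). t->size is bumped at each power-of-two boundary
 * so that every new dummy node is inserted through buckets that have
 * already been initialized.
 */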
static
void init_table(struct rcu_ht *ht, struct rcu_table *t,
		unsigned long first, unsigned long len)
{
	unsigned long i, end;

	end = first + len;
	for (i = first; i < end; i++) {
		/* Update table size when power of two */
		if (i != 0 && !(i & (i - 1)))
			t->size = i;
		t->tbl[i] = calloc(1, sizeof(struct rcu_ht_node));
		t->tbl[i]->dummy = 1;
		t->tbl[i]->hash = i;
		t->tbl[i]->reverse_hash = bit_reverse_ulong(i);
		(void) _ht_add(ht, t, t->tbl[i], 0);
	}
	t->resize_target = t->size = end;
	t->resize_initiated = 0;
}

struct rcu_ht *ht_new(ht_hash_fct hash_fct,
		      ht_compare_fct compare_fct,
		      unsigned long hash_seed,
		      unsigned long init_size,
		      void (*ht_call_rcu)(struct rcu_head *head,
				void (*func)(struct rcu_head *head)))
{
	struct rcu_ht *ht;

	ht = calloc(1, sizeof(struct rcu_ht));
	ht->hash_fct = hash_fct;
	ht->compare_fct = compare_fct;
	ht->hash_seed = hash_seed;
	ht->ht_call_rcu = ht_call_rcu;
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	ht->t = calloc(1, sizeof(struct rcu_table)
		       + (max(init_size, 1) * sizeof(struct rcu_ht_node *)));
	ht->t->size = 0;
	pthread_mutex_lock(&ht->resize_mutex);
	init_table(ht, ht->t, 0, max(init_size, 1));
	pthread_mutex_unlock(&ht->resize_mutex);
	return ht;
}
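
/*
 * Example usage sketch (illustrative only: test_hash, test_compare and
 * the key handling below are hypothetical caller-side code, not part of
 * this file). Lookups and updates dereference the table under RCU, so
 * they are expected to run inside read-side critical sections, and
 * call_rcu() from urcu-call-rcu is a suitable ht_call_rcu callback:
 *
 *	struct rcu_ht *ht;
 *	struct rcu_ht_node *node;
 *
 *	rcu_register_thread();
 *	ht = ht_new(test_hash, test_compare, 0x42UL, 1024, call_rcu);
 *
 *	node = calloc(1, sizeof(*node));
 *	node->key = key;
 *	node->key_len = key_len;
 *	rcu_read_lock();
 *	ht_add(ht, node);
 *	node = ht_lookup(ht, key, key_len);
 *	rcu_read_unlock();
 */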

struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	unsigned long hash, reverse_hash;

	hash = ht->hash_fct(key, key_len, ht->hash_seed);
	reverse_hash = bit_reverse_ulong(hash);

	t = rcu_dereference(ht->t);
	node = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	for (;;) {
		if (unlikely(!node))
			break;
		if (unlikely(node->reverse_hash > reverse_hash)) {
			node = NULL;
			break;
		}
		if (!ht->compare_fct(node->key, node->key_len, key, key_len)) {
			if (likely(!is_removed(rcu_dereference(node->next)))
			    && likely(!node->dummy))
				break;
		}
		node = clear_flag(rcu_dereference(node->next));
	}
	assert(!node || !node->dummy);
	return node;
}

void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;

	node->hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->reverse_hash = bit_reverse_ulong((unsigned long) node->hash);

	t = rcu_dereference(ht->t);
	(void) _ht_add(ht, t, node, 0);
}

int ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;

	node->hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->reverse_hash = bit_reverse_ulong((unsigned long) node->hash);

	t = rcu_dereference(ht->t);
	return _ht_add(ht, t, node, 1);
}

int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;

	t = rcu_dereference(ht->t);
	return _ht_remove(ht, t, node);
}

static
int ht_delete_dummy(struct rcu_ht *ht)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	unsigned long i;

	t = ht->t;
	/* Check that the table is empty */
	node = t->tbl[0];
	do {
		if (!node->dummy)
			return -EPERM;
		node = node->next;
		assert(!is_removed(node));
	} while (node);
	/* Internal sanity check: all nodes left should be dummy */
	for (i = 0; i < t->size; i++) {
		assert(t->tbl[i]->dummy);
		free(t->tbl[i]);
	}
	return 0;
}

/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int ht_destroy(struct rcu_ht *ht)
{
	int ret;

	ret = ht_delete_dummy(ht);
	if (ret)
		return ret;
	free(ht->t);
	free(ht);
	return ret;
}

void ht_count_nodes(struct rcu_ht *ht,
		    unsigned long *count,
		    unsigned long *removed)
{
	struct rcu_table *t;
	struct rcu_ht_node *node, *next;

	*count = 0;
	*removed = 0;

	t = rcu_dereference(ht->t);
	/* Count non-dummy nodes and logically removed nodes in the table */
	node = rcu_dereference(t->tbl[0]);
	do {
		next = rcu_dereference(node->next);
		if (is_removed(next)) {
			assert(!node->dummy);
			(*removed)++;
		} else if (!node->dummy)
			(*count)++;
		node = clear_flag(next);
	} while (node);
}

static
void ht_free_table_cb(struct rcu_head *head)
{
	struct rcu_table *t =
		caa_container_of(head, struct rcu_table, head);
	free(t);
}

/* called with resize mutex held */
static
void _do_ht_resize(struct rcu_ht *ht)
{
	unsigned long new_size, old_size;
	struct rcu_table *new_t, *old_t;

	old_t = ht->t;
	old_size = old_t->size;

	new_size = CMM_LOAD_SHARED(old_t->resize_target);
	dbg_printf("rculfhash: resize from %lu to %lu buckets\n",
		   old_size, new_size);
	if (old_size == new_size)
		return;
	new_t = malloc(sizeof(struct rcu_table)
			+ (new_size * sizeof(struct rcu_ht_node *)));
	assert(new_size > old_size);
	memcpy(&new_t->tbl, &old_t->tbl,
	       old_size * sizeof(struct rcu_ht_node *));
	init_table(ht, new_t, old_size, new_size - old_size);
	/* Changing table and size atomically wrt lookups */
	rcu_assign_pointer(ht->t, new_t);
	ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
}

static
unsigned long resize_target_update(struct rcu_table *t,
				   int growth_order)
{
	return _uatomic_max(&t->resize_target,
			    t->size << growth_order);
}

void ht_resize(struct rcu_ht *ht, int growth)
{
	struct rcu_table *t = rcu_dereference(ht->t);
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (t->size < target_size) {
		CMM_STORE_SHARED(t->resize_initiated, 1);
		pthread_mutex_lock(&ht->resize_mutex);
		_do_ht_resize(ht);
		pthread_mutex_unlock(&ht->resize_mutex);
	}
}

static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct rcu_ht *ht = work->ht;

	pthread_mutex_lock(&ht->resize_mutex);
	_do_ht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	free(work);
}

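/*
 * Defer the resize to the call_rcu worker thread through do_resize_cb():
 * the caller may be running inside a read-side critical section, where
 * the resize mutex must not be taken (see ht_new()).
 */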
static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
{
	struct rcu_resize_work *work;
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->ht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(t->resize_initiated, 1);
	}
}