rculfhash: merge duplicated code for bucket lookup
1 /*
2 * rculfhash.c
3 *
4 * Userspace RCU library - Lock-Free Resizable RCU Hash Table
5 *
6 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 /*
24 * Based on the following articles:
25 * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
26 * extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
27 * - Michael, M. M. High performance dynamic lock-free hash tables
28 * and list-based sets. In Proceedings of the fourteenth annual ACM
29 * symposium on Parallel algorithms and architectures, ACM Press,
30 * (2002), 73-82.
31 *
32 * Some specificities of this Lock-Free Resizable RCU Hash Table
33 * implementation:
34 *
35 * - RCU read-side critical section allows readers to perform hash
36 * table lookups and use the returned objects safely by delaying
 37  * memory reclaim until after a grace period.
38 * - Add and remove operations are lock-free, and do not need to
39 * allocate memory. They need to be executed within RCU read-side
40 * critical section to ensure the objects they read are valid and to
41 * deal with the cmpxchg ABA problem.
42 * - add and add_unique operations are supported. add_unique checks if
 43  *   the node key already exists in the hash table. It ensures that no
 44  *   duplicate key exists.
45 * - The resize operation executes concurrently with add/remove/lookup.
46 * - Hash table nodes are contained within a split-ordered list. This
 47  *   list is ordered by increasing reversed-bit hash value.
48 * - An index of dummy nodes is kept. These dummy nodes are the hash
49 * table "buckets", and they are also chained together in the
50 * split-ordered list, which allows recursive expansion.
51 * - The resize operation for small tables only allows expanding the hash table.
52 * It is triggered automatically by detecting long chains in the add
53 * operation.
54 * - The resize operation for larger tables (and available through an
55 * API) allows both expanding and shrinking the hash table.
 56  * - Per-CPU split-counters are used to keep track of the number of
57 * nodes within the hash table for automatic resize triggering.
58 * - Resize operation initiated by long chain detection is executed by a
59 * call_rcu thread, which keeps lock-freedom of add and remove.
60 * - Resize operations are protected by a mutex.
 61  * - The removal operation is split into two parts: first, a "removed"
62 * flag is set in the next pointer within the node to remove. Then,
63 * a "garbage collection" is performed in the bucket containing the
64 * removed node (from the start of the bucket up to the removed node).
65 * All encountered nodes with "removed" flag set in their next
66 * pointers are removed from the linked-list. If the cmpxchg used for
67 * removal fails (due to concurrent garbage-collection or concurrent
68 * add), we retry from the beginning of the bucket. This ensures that
69 * the node with "removed" flag set is removed from the hash table
70 * (not visible to lookups anymore) before the RCU read-side critical
71 * section held across removal ends. Furthermore, this ensures that
72 * the node with "removed" flag set is removed from the linked-list
 73  *   before its memory is reclaimed. Only the thread that
 74  *   successfully set the "removed" flag (with a cmpxchg) into a node's
75 * next pointer is considered to have succeeded its removal (and thus
76 * owns the node to reclaim). Because we garbage-collect starting from
77 * an invariant node (the start-of-bucket dummy node) up to the
78 * "removed" node (or find a reverse-hash that is higher), we are sure
79 * that a successful traversal of the chain leads to a chain that is
80 * present in the linked-list (the start node is never removed) and
 81  *   that it does not contain the "removed" node anymore, even if
82 * concurrent delete/add operations are changing the structure of the
83 * list concurrently.
 84  * - The add operation performs garbage collection of buckets if it
85 * encounters nodes with removed flag set in the bucket where it wants
86 * to add its new node. This ensures lock-freedom of add operation by
 87  *   helping the remover unlink nodes from the list rather than
 88  *   waiting for it to do so.
89 * - A RCU "order table" indexed by log2(hash index) is copied and
90 * expanded by the resize operation. This order table allows finding
91 * the "dummy node" tables.
92 * - There is one dummy node table per hash index order. The size of
93 * each dummy node table is half the number of hashes contained in
94 * this order.
95 * - call_rcu is used to garbage-collect the old order table.
96 * - The per-order dummy node tables contain a compact version of the
97 * hash table nodes. These tables are invariant after they are
98 * populated into the hash table.
99 *
100  * A bit of ASCII art explanation:
101 *
102  * The order index is off by one compared to the actual power of 2 because
103  * we use index 0 to deal with the 0 special case.
104 *
105 * This shows the nodes for a small table ordered by reversed bits:
106 *
107 * bits reverse
108 * 0 000 000
109 * 4 100 001
110 * 2 010 010
111 * 6 110 011
112 * 1 001 100
113 * 5 101 101
114 * 3 011 110
115 * 7 111 111
116 *
117 * This shows the nodes in order of non-reversed bits, linked by
118 * reversed-bit order.
119 *
120 * order bits reverse
121 * 0 0 000 000
122 * |
123 * 1 | 1 001 100 <- <-
124 * | | | |
125 * 2 | | 2 010 010 | |
126 * | | | 3 011 110 | <- |
127 * | | | | | | |
128 * 3 -> | | | 4 100 001 | |
129 * -> | | 5 101 101 |
130 * -> | 6 110 011
131 * -> 7 111 111
132 */
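
/*
 * Concrete example of the per-order dummy node table layout described
 * above, for a table of size 8 (see init_table(): the table of order i
 * holds 1 node for i == 0, else 1UL << (i - 1) nodes):
 *
 *   order 0: tbl[0] holds 1 dummy node,  bucket index 0
 *   order 1: tbl[1] holds 1 dummy node,  bucket index 1
 *   order 2: tbl[2] holds 2 dummy nodes, bucket indexes 2-3
 *   order 3: tbl[3] holds 4 dummy nodes, bucket indexes 4-7
 *
 * Growing from size 4 to size 8 therefore only allocates and links the
 * order-3 table; dummy nodes of lower orders are never moved.
 */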
133
134 #define _LGPL_SOURCE
135 #include <stdlib.h>
136 #include <errno.h>
137 #include <assert.h>
138 #include <stdio.h>
139 #include <stdint.h>
140 #include <string.h>
141
142 #include "config.h"
143 #include <urcu.h>
144 #include <urcu-call-rcu.h>
145 #include <urcu/arch.h>
146 #include <urcu/uatomic.h>
147 #include <urcu/compiler.h>
148 #include <urcu/rculfhash.h>
149 #include <stdio.h>
150 #include <pthread.h>
151
152 #ifdef DEBUG
153 #define dbg_printf(fmt, args...) printf("[debug rculfhash] " fmt, ## args)
154 #else
155 #define dbg_printf(fmt, args...)
156 #endif
157
158 /*
159  * Per-CPU split-counters lazily update the global counter every 1024
160  * additions/removals. They automatically keep track of when a resize is
161  * required. We use the bucket length as an expansion indicator for small
162  * tables and for machines lacking per-cpu data support.
163 */
164 #define COUNT_COMMIT_ORDER 10
165 #define CHAIN_LEN_TARGET 1
166 #define CHAIN_LEN_RESIZE_THRESHOLD 3
167
168 /*
169 * Define the minimum table size.
170 */
171 #define MIN_TABLE_SIZE 1
172
173 #if (CAA_BITS_PER_LONG == 32)
174 #define MAX_TABLE_ORDER 32
175 #else
176 #define MAX_TABLE_ORDER 64
177 #endif
178
179 /*
180 * Minimum number of dummy nodes to touch per thread to parallelize grow/shrink.
181 */
182 #define MIN_PARTITION_PER_THREAD_ORDER 12
183 #define MIN_PARTITION_PER_THREAD (1UL << MIN_PARTITION_PER_THREAD_ORDER)
184
185 #ifndef min
186 #define min(a, b) ((a) < (b) ? (a) : (b))
187 #endif
188
189 #ifndef max
190 #define max(a, b) ((a) > (b) ? (a) : (b))
191 #endif
192
193 /*
194 * The removed flag needs to be updated atomically with the pointer.
195 * It indicates that no node must attach to the node scheduled for
196 * removal, and that node garbage collection must be performed.
197  * The dummy flag does not need to be updated atomically with the
198 * pointer, but it is added as a pointer low bit flag to save space.
199 */
200 #define REMOVED_FLAG (1UL << 0)
201 #define DUMMY_FLAG (1UL << 1)
202 #define FLAGS_MASK ((1UL << 2) - 1)
203
204 /* Value of the end pointer. Should not interact with flags. */
205 #define END_VALUE NULL
206
207 struct ht_items_count {
208 unsigned long add, del;
209 } __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
210
211 struct rcu_level {
212 /* Note: manually update allocation length when adding a field */
213 struct _cds_lfht_node nodes[0];
214 };
215
216 struct rcu_table {
217 unsigned long size; /* always a power of 2, shared (RCU) */
218 unsigned long resize_target;
219 int resize_initiated;
220 struct rcu_level *tbl[MAX_TABLE_ORDER];
221 };
222
223 struct cds_lfht {
224 struct rcu_table t;
225 cds_lfht_hash_fct hash_fct;
226 cds_lfht_compare_fct compare_fct;
227 unsigned long hash_seed;
228 int flags;
229 /*
230 * We need to put the work threads offline (QSBR) when taking this
231 * mutex, because we use synchronize_rcu within this mutex critical
232 * section, which waits on read-side critical sections, and could
233 * therefore cause grace-period deadlock if we hold off RCU G.P.
234 * completion.
235 */
236 pthread_mutex_t resize_mutex; /* resize mutex: add/del mutex */
237 unsigned int in_progress_resize, in_progress_destroy;
238 void (*cds_lfht_call_rcu)(struct rcu_head *head,
239 void (*func)(struct rcu_head *head));
240 void (*cds_lfht_synchronize_rcu)(void);
241 void (*cds_lfht_rcu_read_lock)(void);
242 void (*cds_lfht_rcu_read_unlock)(void);
243 void (*cds_lfht_rcu_thread_offline)(void);
244 void (*cds_lfht_rcu_thread_online)(void);
245 void (*cds_lfht_rcu_register_thread)(void);
246 void (*cds_lfht_rcu_unregister_thread)(void);
247 pthread_attr_t *resize_attr; /* Resize threads attributes */
248 long count; /* global approximate item count */
249 struct ht_items_count *percpu_count; /* per-cpu item count */
250 };
251
252 struct rcu_resize_work {
253 struct rcu_head head;
254 struct cds_lfht *ht;
255 };
256
257 struct partition_resize_work {
258 pthread_t thread_id;
259 struct cds_lfht *ht;
260 unsigned long i, start, len;
261 void (*fct)(struct cds_lfht *ht, unsigned long i,
262 unsigned long start, unsigned long len);
263 };
264
265 enum add_mode {
266 ADD_DEFAULT = 0,
267 ADD_UNIQUE = 1,
268 ADD_REPLACE = 2,
269 };
270
271 static
272 struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
273 unsigned long size,
274 struct cds_lfht_node *node,
275 enum add_mode mode, int dummy);
276
277 /*
278 * Algorithm to reverse bits in a word by lookup table, extended to
279 * 64-bit words.
280 * Source:
281 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
282  * Originally from the public domain.
283 */
284
285 static const uint8_t BitReverseTable256[256] =
286 {
287 #define R2(n) (n), (n) + 2*64, (n) + 1*64, (n) + 3*64
288 #define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
289 #define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
290 R6(0), R6(2), R6(1), R6(3)
291 };
292 #undef R2
293 #undef R4
294 #undef R6
295
296 static
297 uint8_t bit_reverse_u8(uint8_t v)
298 {
299 return BitReverseTable256[v];
300 }
301
302 static __attribute__((unused))
303 uint32_t bit_reverse_u32(uint32_t v)
304 {
305 return ((uint32_t) bit_reverse_u8(v) << 24) |
306 ((uint32_t) bit_reverse_u8(v >> 8) << 16) |
307 ((uint32_t) bit_reverse_u8(v >> 16) << 8) |
308 ((uint32_t) bit_reverse_u8(v >> 24));
309 }
310
311 static __attribute__((unused))
312 uint64_t bit_reverse_u64(uint64_t v)
313 {
314 return ((uint64_t) bit_reverse_u8(v) << 56) |
315 ((uint64_t) bit_reverse_u8(v >> 8) << 48) |
316 ((uint64_t) bit_reverse_u8(v >> 16) << 40) |
317 ((uint64_t) bit_reverse_u8(v >> 24) << 32) |
318 ((uint64_t) bit_reverse_u8(v >> 32) << 24) |
319 ((uint64_t) bit_reverse_u8(v >> 40) << 16) |
320 ((uint64_t) bit_reverse_u8(v >> 48) << 8) |
321 ((uint64_t) bit_reverse_u8(v >> 56));
322 }
323
324 static
325 unsigned long bit_reverse_ulong(unsigned long v)
326 {
327 #if (CAA_BITS_PER_LONG == 32)
328 return bit_reverse_u32(v);
329 #else
330 return bit_reverse_u64(v);
331 #endif
332 }
333
334 /*
335 * fls: returns the position of the most significant bit.
336 * Returns 0 if no bit is set, else returns the position of the most
337 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
338 */
339 #if defined(__i386) || defined(__x86_64)
340 static inline
341 unsigned int fls_u32(uint32_t x)
342 {
343 int r;
344
345 asm("bsrl %1,%0\n\t"
346 "jnz 1f\n\t"
347 "movl $-1,%0\n\t"
348 "1:\n\t"
349 : "=r" (r) : "rm" (x));
350 return r + 1;
351 }
352 #define HAS_FLS_U32
353 #endif
354
355 #if defined(__x86_64)
356 static inline
357 unsigned int fls_u64(uint64_t x)
358 {
359 long r;
360
361 asm("bsrq %1,%0\n\t"
362 "jnz 1f\n\t"
363 "movq $-1,%0\n\t"
364 "1:\n\t"
365 : "=r" (r) : "rm" (x));
366 return r + 1;
367 }
368 #define HAS_FLS_U64
369 #endif
370
371 #ifndef HAS_FLS_U64
372 static __attribute__((unused))
373 unsigned int fls_u64(uint64_t x)
374 {
375 unsigned int r = 64;
376
377 if (!x)
378 return 0;
379
380 if (!(x & 0xFFFFFFFF00000000ULL)) {
381 x <<= 32;
382 r -= 32;
383 }
384 if (!(x & 0xFFFF000000000000ULL)) {
385 x <<= 16;
386 r -= 16;
387 }
388 if (!(x & 0xFF00000000000000ULL)) {
389 x <<= 8;
390 r -= 8;
391 }
392 if (!(x & 0xF000000000000000ULL)) {
393 x <<= 4;
394 r -= 4;
395 }
396 if (!(x & 0xC000000000000000ULL)) {
397 x <<= 2;
398 r -= 2;
399 }
400 if (!(x & 0x8000000000000000ULL)) {
401 x <<= 1;
402 r -= 1;
403 }
404 return r;
405 }
406 #endif
407
408 #ifndef HAS_FLS_U32
409 static __attribute__((unused))
410 unsigned int fls_u32(uint32_t x)
411 {
412 unsigned int r = 32;
413
414 if (!x)
415 return 0;
416 if (!(x & 0xFFFF0000U)) {
417 x <<= 16;
418 r -= 16;
419 }
420 if (!(x & 0xFF000000U)) {
421 x <<= 8;
422 r -= 8;
423 }
424 if (!(x & 0xF0000000U)) {
425 x <<= 4;
426 r -= 4;
427 }
428 if (!(x & 0xC0000000U)) {
429 x <<= 2;
430 r -= 2;
431 }
432 if (!(x & 0x80000000U)) {
433 x <<= 1;
434 r -= 1;
435 }
436 return r;
437 }
438 #endif
439
440 unsigned int fls_ulong(unsigned long x)
441 {
442 #if (CAA_BITS_PER_LONG == 32)
443 return fls_u32(x);
444 #else
445 return fls_u64(x);
446 #endif
447 }
448
449 /*
450 * Return the minimum order for which x <= (1UL << order).
451 * Return -1 if x is 0.
452 */
453 int get_count_order_u32(uint32_t x)
454 {
455 if (!x)
456 return -1;
457
458 return fls_u32(x - 1);
459 }
460
461 /*
462 * Return the minimum order for which x <= (1UL << order).
463 * Return -1 if x is 0.
464 */
465 int get_count_order_ulong(unsigned long x)
466 {
467 if (!x)
468 return -1;
469
470 return fls_ulong(x - 1);
471 }
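
/*
 * Examples for the order helpers above (same semantics for the u32 and
 * ulong variants):
 *   get_count_order_ulong(0) == -1
 *   get_count_order_ulong(1) ==  0
 *   get_count_order_ulong(4) ==  2
 *   get_count_order_ulong(5) ==  3
 *   get_count_order_ulong(8) ==  3
 *   get_count_order_ulong(9) ==  4
 */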
472
473 #ifdef POISON_FREE
474 #define poison_free(ptr) \
475 do { \
476 memset(ptr, 0x42, sizeof(*(ptr))); \
477 free(ptr); \
478 } while (0)
479 #else
480 #define poison_free(ptr) free(ptr)
481 #endif
482
483 static
484 void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth);
485
486 /*
487 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
488 * available, then we support hash table item accounting.
489  * In the unfortunate event that the number of CPUs reported is
490  * inaccurate, we use modulo arithmetic on the number of CPUs we got.
491 */
492 #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
493
494 static
495 void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
496 unsigned long count);
497
498 static long nr_cpus_mask = -1;
499
500 static
501 struct ht_items_count *alloc_per_cpu_items_count(void)
502 {
503 struct ht_items_count *count;
504
505 switch (nr_cpus_mask) {
506 case -2:
507 return NULL;
508 case -1:
509 {
510 long maxcpus;
511
512 maxcpus = sysconf(_SC_NPROCESSORS_CONF);
513 if (maxcpus <= 0) {
514 nr_cpus_mask = -2;
515 return NULL;
516 }
517 /*
518 * round up number of CPUs to next power of two, so we
519 * can use & for modulo.
520 */
521 maxcpus = 1UL << get_count_order_ulong(maxcpus);
522 nr_cpus_mask = maxcpus - 1;
523 }
524 /* Fall-through */
525 default:
526 return calloc(nr_cpus_mask + 1, sizeof(*count));
527 }
528 }
529
530 static
531 void free_per_cpu_items_count(struct ht_items_count *count)
532 {
533 poison_free(count);
534 }
535
536 static
537 int ht_get_cpu(void)
538 {
539 int cpu;
540
541 assert(nr_cpus_mask >= 0);
542 cpu = sched_getcpu();
543 if (unlikely(cpu < 0))
544 return cpu;
545 else
546 return cpu & nr_cpus_mask;
547 }
548
549 static
550 void ht_count_add(struct cds_lfht *ht, unsigned long size)
551 {
552 unsigned long percpu_count;
553 int cpu;
554
555 if (unlikely(!ht->percpu_count))
556 return;
557 cpu = ht_get_cpu();
558 if (unlikely(cpu < 0))
559 return;
560 percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1);
561 if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
562 long count;
563
564 dbg_printf("add percpu %lu\n", percpu_count);
565 count = uatomic_add_return(&ht->count,
566 1UL << COUNT_COMMIT_ORDER);
567 /* If power of 2 */
568 if (!(count & (count - 1))) {
569 if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
570 return;
571 dbg_printf("add set global %ld\n", count);
572 cds_lfht_resize_lazy_count(ht, size,
573 count >> (CHAIN_LEN_TARGET - 1));
574 }
575 }
576 }
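
/*
 * Worked example of the counting scheme above, using the constants
 * defined in this file (COUNT_COMMIT_ORDER == 10, CHAIN_LEN_TARGET == 1,
 * CHAIN_LEN_RESIZE_THRESHOLD == 3): each CPU folds its local "add"
 * counter into ht->count once every 1024 additions. When the global
 * count reaches a power of two and (count >> 3) >= size (roughly 8
 * nodes per bucket on average), a lazy resize targeting
 * count >> (CHAIN_LEN_TARGET - 1) == count buckets is requested.
 */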
577
578 static
579 void ht_count_del(struct cds_lfht *ht, unsigned long size)
580 {
581 unsigned long percpu_count;
582 int cpu;
583
584 if (unlikely(!ht->percpu_count))
585 return;
586 cpu = ht_get_cpu();
587 if (unlikely(cpu < 0))
588 return;
589 percpu_count = uatomic_add_return(&ht->percpu_count[cpu].del, 1);
590 if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
591 long count;
592
593 dbg_printf("del percpu %lu\n", percpu_count);
594 count = uatomic_add_return(&ht->count,
595 -(1UL << COUNT_COMMIT_ORDER));
596 /* If power of 2 */
597 if (!(count & (count - 1))) {
598 if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
599 return;
600 dbg_printf("del set global %ld\n", count);
601 /*
602 * Don't shrink table if the number of nodes is below a
603 * certain threshold.
604 */
605 if (count < (1UL << COUNT_COMMIT_ORDER) * (nr_cpus_mask + 1))
606 return;
607 cds_lfht_resize_lazy_count(ht, size,
608 count >> (CHAIN_LEN_TARGET - 1));
609 }
610 }
611 }
612
613 #else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
614
615 static const long nr_cpus_mask = -2;
616
617 static
618 struct ht_items_count *alloc_per_cpu_items_count(void)
619 {
620 return NULL;
621 }
622
623 static
624 void free_per_cpu_items_count(struct ht_items_count *count)
625 {
626 }
627
628 static
629 void ht_count_add(struct cds_lfht *ht, unsigned long size)
630 {
631 }
632
633 static
634 void ht_count_del(struct cds_lfht *ht, unsigned long size)
635 {
636 }
637
638 #endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
639
640
641 static
642 void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
643 {
644 unsigned long count;
645
646 if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
647 return;
648 count = uatomic_read(&ht->count);
649 /*
650 * Use bucket-local length for small table expand and for
651 * environments lacking per-cpu data support.
652 */
653 if (count >= (1UL << COUNT_COMMIT_ORDER))
654 return;
655 if (chain_len > 100)
656 dbg_printf("WARNING: large chain length: %u.\n",
657 chain_len);
658 if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
659 cds_lfht_resize_lazy(ht, size,
660 get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
661 }
662
663 static
664 struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
665 {
666 return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
667 }
668
669 static
670 int is_removed(struct cds_lfht_node *node)
671 {
672 return ((unsigned long) node) & REMOVED_FLAG;
673 }
674
675 static
676 struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
677 {
678 return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
679 }
680
681 static
682 int is_dummy(struct cds_lfht_node *node)
683 {
684 return ((unsigned long) node) & DUMMY_FLAG;
685 }
686
687 static
688 struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node)
689 {
690 return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG);
691 }
692
693 static
694 struct cds_lfht_node *get_end(void)
695 {
696 return (struct cds_lfht_node *) END_VALUE;
697 }
698
699 static
700 int is_end(struct cds_lfht_node *node)
701 {
702 return clear_flag(node) == (struct cds_lfht_node *) END_VALUE;
703 }
704
705 static
706 unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
707 {
708 unsigned long old1, old2;
709
710 old1 = uatomic_read(ptr);
711 do {
712 old2 = old1;
713 if (old2 >= v)
714 return old2;
715 } while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
716 return v;
717 }
718
719 static
720 struct _cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
721 unsigned long hash)
722 {
723 unsigned long index, order;
724
725 assert(size > 0);
726 index = hash & (size - 1);
727 order = get_count_order_ulong(index + 1);
728
729 dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
730 hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));
731
732 return &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
733 }
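
/*
 * Worked example of the bucket lookup arithmetic above, for size == 8
 * and a hash whose low bits give index == 5:
 *   order       = get_count_order_ulong(5 + 1) == 3
 *   array index = 5 & ((1UL << (3 - 1)) - 1)   == 1
 * so the dummy node is ht->t.tbl[3]->nodes[1]. Index 0 maps to
 * ht->t.tbl[0]->nodes[0], index 1 to ht->t.tbl[1]->nodes[0], and
 * indexes 2-3 to ht->t.tbl[2]->nodes[0..1], matching the per-order
 * dummy node table layout described in the file header.
 */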
734
735 /*
736 * Remove all logically deleted nodes from a bucket up to a certain node key.
737 */
738 static
739 void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
740 {
741 struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
742
743 assert(!is_dummy(dummy));
744 assert(!is_removed(dummy));
745 assert(!is_dummy(node));
746 assert(!is_removed(node));
747 for (;;) {
748 iter_prev = dummy;
749 /* We can always skip the dummy node initially */
750 iter = rcu_dereference(iter_prev->p.next);
751 assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
752 /*
753 * We should never be called with dummy (start of chain)
754 * and logically removed node (end of path compression
755 * marker) being the actual same node. This would be a
756 * bug in the algorithm implementation.
757 */
758 assert(dummy != node);
759 for (;;) {
760 if (unlikely(is_end(iter)))
761 return;
762 if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
763 return;
764 next = rcu_dereference(clear_flag(iter)->p.next);
765 if (likely(is_removed(next)))
766 break;
767 iter_prev = clear_flag(iter);
768 iter = next;
769 }
770 assert(!is_removed(iter));
771 if (is_dummy(iter))
772 new_next = flag_dummy(clear_flag(next));
773 else
774 new_next = clear_flag(next);
775 if (is_removed(iter))
776 new_next = flag_removed(new_next);
777 (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
778 }
779 return;
780 }
781
782 static
783 int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
784 struct cds_lfht_node *old_node,
785 struct cds_lfht_node *ret_next,
786 struct cds_lfht_node *new_node)
787 {
788 struct cds_lfht_node *dummy, *old_next;
789 struct _cds_lfht_node *lookup;
790 int flagged = 0;
791
792 if (!old_node) /* Return -ENOENT if asked to replace NULL node */
793 goto end;
794
795 assert(!is_removed(old_node));
796 assert(!is_dummy(old_node));
797 assert(!is_removed(new_node));
798 assert(!is_dummy(new_node));
799 assert(new_node != old_node);
800 do {
801 /* Insert after node to be replaced */
802 old_next = ret_next;
803 if (is_removed(old_next)) {
804 /*
805 * Too late, the old node has been removed under us
806 * between lookup and replace. Fail.
807 */
808 goto end;
809 }
810 assert(!is_dummy(old_next));
811 assert(new_node != clear_flag(old_next));
812 new_node->p.next = clear_flag(old_next);
813 /*
814 * Here is the whole trick for lock-free replace: we add
815 * the replacement node _after_ the node we want to
816 * replace by atomically setting its next pointer at the
817 * same time we set its removal flag. Given that
818 * the lookups/get next use an iterator aware of the
819 * next pointer, they will either skip the old node due
820 * to the removal flag and see the new node, or use
821 * the old node, but will not see the new one.
822 */
823 ret_next = uatomic_cmpxchg(&old_node->p.next,
824 old_next, flag_removed(new_node));
825 } while (ret_next != old_next);
826
827 /* We performed the replacement. */
828 flagged = 1;
829
830 /*
831 * Ensure that the old node is not visible to readers anymore:
832 * lookup for the node, and remove it (along with any other
833 * logically removed node) if found.
834 */
835 lookup = lookup_bucket(ht, size, bit_reverse_ulong(old_node->p.reverse_hash));
836 dummy = (struct cds_lfht_node *) lookup;
837 _cds_lfht_gc_bucket(dummy, new_node);
838 end:
839 /*
840 	 * Only the flagging action indicates that we (and no other thread)
841 	 * replaced the node in the hash table.
842 */
843 if (flagged) {
844 assert(is_removed(rcu_dereference(old_node->p.next)));
845 return 0;
846 } else {
847 return -ENOENT;
848 }
849 }
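
/*
 * Sketch of the lock-free replace performed above (before/after view of
 * the single cmpxchg on old_node->p.next):
 *
 *   before:  prev -> old_node -> succ
 *   after:   prev -> old_node -> new_node -> succ
 *                    (old_node->p.next now carries REMOVED_FLAG)
 *
 * Readers iterating with the removal-aware next pointer either skip
 * old_node (removed flag set) and see new_node, or had already read
 * old_node and simply never see new_node. The subsequent
 * _cds_lfht_gc_bucket() call unlinks old_node from its predecessor.
 */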
850
851 static
852 struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
853 unsigned long size,
854 struct cds_lfht_node *node,
855 enum add_mode mode, int dummy)
856 {
857 struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
858 *dummy_node, *return_node;
859 struct _cds_lfht_node *lookup;
860
861 assert(!is_dummy(node));
862 assert(!is_removed(node));
863 if (!size) {
864 assert(dummy);
865 node->p.next = flag_dummy(get_end());
866 return node; /* Initial first add (head) */
867 }
868 lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
869 for (;;) {
870 uint32_t chain_len = 0;
871
872 /*
873 * iter_prev points to the non-removed node prior to the
874 * insert location.
875 */
876 iter_prev = (struct cds_lfht_node *) lookup;
877 /* We can always skip the dummy node initially */
878 iter = rcu_dereference(iter_prev->p.next);
879 assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
880 for (;;) {
881 if (unlikely(is_end(iter)))
882 goto insert;
883 if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
884 goto insert;
885 next = rcu_dereference(clear_flag(iter)->p.next);
886 if (unlikely(is_removed(next)))
887 goto gc_node;
888 if ((mode == ADD_UNIQUE || mode == ADD_REPLACE)
889 && !is_dummy(next)
890 && !ht->compare_fct(node->key, node->key_len,
891 clear_flag(iter)->key,
892 clear_flag(iter)->key_len)) {
893 if (mode == ADD_UNIQUE)
894 return clear_flag(iter);
895 else /* mode == ADD_REPLACE */
896 goto replace;
897 }
898 /* Only account for identical reverse hash once */
899 if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
900 && !is_dummy(next))
901 check_resize(ht, size, ++chain_len);
902 iter_prev = clear_flag(iter);
903 iter = next;
904 }
905
906 insert:
907 assert(node != clear_flag(iter));
908 assert(!is_removed(iter_prev));
909 assert(!is_removed(iter));
910 assert(iter_prev != node);
911 if (!dummy)
912 node->p.next = clear_flag(iter);
913 else
914 node->p.next = flag_dummy(clear_flag(iter));
915 if (is_dummy(iter))
916 new_node = flag_dummy(node);
917 else
918 new_node = node;
919 if (uatomic_cmpxchg(&iter_prev->p.next, iter,
920 new_node) != iter) {
921 continue; /* retry */
922 } else {
923 if (mode == ADD_REPLACE)
924 return_node = NULL;
925 else /* ADD_DEFAULT and ADD_UNIQUE */
926 return_node = node;
927 goto gc_end;
928 }
929
930 replace:
931
932 if (!_cds_lfht_replace(ht, size, clear_flag(iter), next,
933 node)) {
934 return_node = clear_flag(iter);
935 goto end; /* gc already done */
936 } else {
937 continue; /* retry */
938 }
939
940 gc_node:
941 assert(!is_removed(iter));
942 if (is_dummy(iter))
943 new_next = flag_dummy(clear_flag(next));
944 else
945 new_next = clear_flag(next);
946 (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
947 /* retry */
948 }
949 gc_end:
950 /* Garbage collect logically removed nodes in the bucket */
951 dummy_node = (struct cds_lfht_node *) lookup;
952 _cds_lfht_gc_bucket(dummy_node, node);
953 end:
954 return return_node;
955 }
956
957 static
958 int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
959 struct cds_lfht_node *node,
960 int dummy_removal)
961 {
962 struct cds_lfht_node *dummy, *next, *old;
963 struct _cds_lfht_node *lookup;
964 int flagged = 0;
965
966 if (!node) /* Return -ENOENT if asked to delete NULL node */
967 goto end;
968
969 /* logically delete the node */
970 assert(!is_dummy(node));
971 assert(!is_removed(node));
972 old = rcu_dereference(node->p.next);
973 do {
974 struct cds_lfht_node *new_next;
975
976 next = old;
977 if (unlikely(is_removed(next)))
978 goto end;
979 if (dummy_removal)
980 assert(is_dummy(next));
981 else
982 assert(!is_dummy(next));
983 new_next = flag_removed(next);
984 old = uatomic_cmpxchg(&node->p.next, next, new_next);
985 } while (old != next);
986
987 /* We performed the (logical) deletion. */
988 flagged = 1;
989
990 /*
991 * Ensure that the node is not visible to readers anymore: lookup for
992 * the node, and remove it (along with any other logically removed node)
993 * if found.
994 */
995 lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
996 dummy = (struct cds_lfht_node *) lookup;
997 _cds_lfht_gc_bucket(dummy, node);
998 end:
999 /*
1000 	 * Only the flagging action indicates that we (and no other thread)
1001 	 * removed the node from the hash table.
1002 */
1003 if (flagged) {
1004 assert(is_removed(rcu_dereference(node->p.next)));
1005 return 0;
1006 } else {
1007 return -ENOENT;
1008 }
1009 }
1010
1011 static
1012 void *partition_resize_thread(void *arg)
1013 {
1014 struct partition_resize_work *work = arg;
1015
1016 work->ht->cds_lfht_rcu_register_thread();
1017 work->fct(work->ht, work->i, work->start, work->len);
1018 work->ht->cds_lfht_rcu_unregister_thread();
1019 return NULL;
1020 }
1021
1022 static
1023 void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
1024 unsigned long len,
1025 void (*fct)(struct cds_lfht *ht, unsigned long i,
1026 unsigned long start, unsigned long len))
1027 {
1028 unsigned long partition_len;
1029 struct partition_resize_work *work;
1030 int thread, ret;
1031 unsigned long nr_threads;
1032
1033 /*
1034 	 * Note: nr_cpus_mask + 1 is always a power of 2.
1035 * We spawn just the number of threads we need to satisfy the minimum
1036 * partition size, up to the number of CPUs in the system.
1037 */
1038 if (nr_cpus_mask > 0) {
1039 nr_threads = min(nr_cpus_mask + 1,
1040 len >> MIN_PARTITION_PER_THREAD_ORDER);
1041 } else {
1042 nr_threads = 1;
1043 }
1044 partition_len = len >> get_count_order_ulong(nr_threads);
1045 work = calloc(nr_threads, sizeof(*work));
1046 assert(work);
1047 for (thread = 0; thread < nr_threads; thread++) {
1048 work[thread].ht = ht;
1049 work[thread].i = i;
1050 work[thread].len = partition_len;
1051 work[thread].start = thread * partition_len;
1052 work[thread].fct = fct;
1053 ret = pthread_create(&(work[thread].thread_id), ht->resize_attr,
1054 partition_resize_thread, &work[thread]);
1055 assert(!ret);
1056 }
1057 for (thread = 0; thread < nr_threads; thread++) {
1058 ret = pthread_join(work[thread].thread_id, NULL);
1059 assert(!ret);
1060 }
1061 free(work);
1062 }
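
/*
 * Example of the partitioning above: with 4 CPUs (nr_cpus_mask == 3)
 * and len == 1UL << 20 dummy nodes to process,
 *   nr_threads    = min(4, len >> MIN_PARTITION_PER_THREAD_ORDER)
 *                 = min(4, 256) = 4
 *   partition_len = len >> get_count_order_ulong(4) = 1UL << 18
 * so thread n works on the range [n * partition_len, (n + 1) * partition_len).
 */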
1063
1064 /*
1065 * Holding RCU read lock to protect _cds_lfht_add against memory
1066 * reclaim that could be performed by other call_rcu worker threads (ABA
1067 * problem).
1068 *
1069 * When we reach a certain length, we can split this population phase over
1070 * many worker threads, based on the number of CPUs available in the system.
1071  * This should therefore keep the expansion from lagging too far behind
1072  * concurrent insertion threads, by relying on the scheduler's ability to
1073  * schedule dummy node population fairly with insertions.
1074 */
1075 static
1076 void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
1077 unsigned long start, unsigned long len)
1078 {
1079 unsigned long j;
1080
1081 ht->cds_lfht_rcu_read_lock();
1082 for (j = start; j < start + len; j++) {
1083 struct cds_lfht_node *new_node =
1084 (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
1085
1086 dbg_printf("init populate: i %lu j %lu hash %lu\n",
1087 i, j, !i ? 0 : (1UL << (i - 1)) + j);
1088 new_node->p.reverse_hash =
1089 bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
1090 (void) _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)),
1091 new_node, ADD_DEFAULT, 1);
1092 }
1093 ht->cds_lfht_rcu_read_unlock();
1094 }
1095
1096 static
1097 void init_table_populate(struct cds_lfht *ht, unsigned long i,
1098 unsigned long len)
1099 {
1100 assert(nr_cpus_mask != -1);
1101 if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
1102 ht->cds_lfht_rcu_thread_online();
1103 init_table_populate_partition(ht, i, 0, len);
1104 ht->cds_lfht_rcu_thread_offline();
1105 return;
1106 }
1107 partition_resize_helper(ht, i, len, init_table_populate_partition);
1108 }
1109
1110 static
1111 void init_table(struct cds_lfht *ht,
1112 unsigned long first_order, unsigned long len_order)
1113 {
1114 unsigned long i, end_order;
1115
1116 dbg_printf("init table: first_order %lu end_order %lu\n",
1117 first_order, first_order + len_order);
1118 end_order = first_order + len_order;
1119 for (i = first_order; i < end_order; i++) {
1120 unsigned long len;
1121
1122 len = !i ? 1 : 1UL << (i - 1);
1123 dbg_printf("init order %lu len: %lu\n", i, len);
1124
1125 /* Stop expand if the resize target changes under us */
1126 if (CMM_LOAD_SHARED(ht->t.resize_target) < (!i ? 1 : (1UL << i)))
1127 break;
1128
1129 ht->t.tbl[i] = calloc(1, len * sizeof(struct _cds_lfht_node));
1130 assert(ht->t.tbl[i]);
1131
1132 /*
1133 * Set all dummy nodes reverse hash values for a level and
1134 * link all dummy nodes into the table.
1135 */
1136 init_table_populate(ht, i, len);
1137
1138 /*
1139 * Update table size.
1140 */
1141 cmm_smp_wmb(); /* populate data before RCU size */
1142 CMM_STORE_SHARED(ht->t.size, !i ? 1 : (1UL << i));
1143
1144 dbg_printf("init new size: %lu\n", !i ? 1 : (1UL << i));
1145 if (CMM_LOAD_SHARED(ht->in_progress_destroy))
1146 break;
1147 }
1148 }
1149
1150 /*
1151  * Holding RCU read lock to protect _cds_lfht_del against memory
1152 * reclaim that could be performed by other call_rcu worker threads (ABA
1153 * problem).
1154 * For a single level, we logically remove and garbage collect each node.
1155 *
1156 * As a design choice, we perform logical removal and garbage collection on a
1157 * node-per-node basis to simplify this algorithm. We also assume keeping good
1158  * cache locality of the operation would outweigh the possible performance gain
1159 * that could be achieved by batching garbage collection for multiple levels.
1160 * However, this would have to be justified by benchmarks.
1161 *
1162 * Concurrent removal and add operations are helping us perform garbage
1163 * collection of logically removed nodes. We guarantee that all logically
1164 * removed nodes have been garbage-collected (unlinked) before call_rcu is
1165  * invoked to free a whole level of dummy nodes (after a grace period).
1166 *
1167 * Logical removal and garbage collection can therefore be done in batch or on a
1168 * node-per-node basis, as long as the guarantee above holds.
1169 *
1170 * When we reach a certain length, we can split this removal over many worker
1171 * threads, based on the number of CPUs available in the system. This should
1172  * take care of not letting the resize process lag behind too many concurrent
1173 * updater threads actively inserting into the hash table.
1174 */
1175 static
1176 void remove_table_partition(struct cds_lfht *ht, unsigned long i,
1177 unsigned long start, unsigned long len)
1178 {
1179 unsigned long j;
1180
1181 ht->cds_lfht_rcu_read_lock();
1182 for (j = start; j < start + len; j++) {
1183 struct cds_lfht_node *fini_node =
1184 (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
1185
1186 dbg_printf("remove entry: i %lu j %lu hash %lu\n",
1187 i, j, !i ? 0 : (1UL << (i - 1)) + j);
1188 fini_node->p.reverse_hash =
1189 bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
1190 (void) _cds_lfht_del(ht, !i ? 0 : (1UL << (i - 1)),
1191 fini_node, 1);
1192 }
1193 ht->cds_lfht_rcu_read_unlock();
1194 }
1195
1196 static
1197 void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
1198 {
1199
1200 assert(nr_cpus_mask != -1);
1201 if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
1202 ht->cds_lfht_rcu_thread_online();
1203 remove_table_partition(ht, i, 0, len);
1204 ht->cds_lfht_rcu_thread_offline();
1205 return;
1206 }
1207 partition_resize_helper(ht, i, len, remove_table_partition);
1208 }
1209
1210 static
1211 void fini_table(struct cds_lfht *ht,
1212 unsigned long first_order, unsigned long len_order)
1213 {
1214 long i, end_order;
1215 void *free_by_rcu = NULL;
1216
1217 dbg_printf("fini table: first_order %lu end_order %lu\n",
1218 first_order, first_order + len_order);
1219 end_order = first_order + len_order;
1220 assert(first_order > 0);
1221 for (i = end_order - 1; i >= first_order; i--) {
1222 unsigned long len;
1223
1224 len = !i ? 1 : 1UL << (i - 1);
1225 dbg_printf("fini order %lu len: %lu\n", i, len);
1226
1227 /* Stop shrink if the resize target changes under us */
1228 if (CMM_LOAD_SHARED(ht->t.resize_target) > (1UL << (i - 1)))
1229 break;
1230
1231 cmm_smp_wmb(); /* populate data before RCU size */
1232 CMM_STORE_SHARED(ht->t.size, 1UL << (i - 1));
1233
1234 /*
1235 * We need to wait for all add operations to reach Q.S. (and
1236 * thus use the new table for lookups) before we can start
1237 		 * releasing the old dummy nodes. Otherwise their lookup could
1238 		 * return a logically removed node as the insert position.
1239 */
1240 ht->cds_lfht_synchronize_rcu();
1241 if (free_by_rcu)
1242 free(free_by_rcu);
1243
1244 /*
1245 * Set "removed" flag in dummy nodes about to be removed.
1246 * Unlink all now-logically-removed dummy node pointers.
1247 * Concurrent add/remove operation are helping us doing
1248 * the gc.
1249 */
1250 remove_table(ht, i, len);
1251
1252 free_by_rcu = ht->t.tbl[i];
1253
1254 dbg_printf("fini new size: %lu\n", 1UL << i);
1255 if (CMM_LOAD_SHARED(ht->in_progress_destroy))
1256 break;
1257 }
1258
1259 if (free_by_rcu) {
1260 ht->cds_lfht_synchronize_rcu();
1261 free(free_by_rcu);
1262 }
1263 }
1264
1265 struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
1266 cds_lfht_compare_fct compare_fct,
1267 unsigned long hash_seed,
1268 unsigned long init_size,
1269 int flags,
1270 void (*cds_lfht_call_rcu)(struct rcu_head *head,
1271 void (*func)(struct rcu_head *head)),
1272 void (*cds_lfht_synchronize_rcu)(void),
1273 void (*cds_lfht_rcu_read_lock)(void),
1274 void (*cds_lfht_rcu_read_unlock)(void),
1275 void (*cds_lfht_rcu_thread_offline)(void),
1276 void (*cds_lfht_rcu_thread_online)(void),
1277 void (*cds_lfht_rcu_register_thread)(void),
1278 void (*cds_lfht_rcu_unregister_thread)(void),
1279 pthread_attr_t *attr)
1280 {
1281 struct cds_lfht *ht;
1282 unsigned long order;
1283
1284 /* init_size must be power of two */
1285 if (init_size && (init_size & (init_size - 1)))
1286 return NULL;
1287 ht = calloc(1, sizeof(struct cds_lfht));
1288 assert(ht);
1289 ht->hash_fct = hash_fct;
1290 ht->compare_fct = compare_fct;
1291 ht->hash_seed = hash_seed;
1292 ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
1293 ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
1294 ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock;
1295 ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock;
1296 ht->cds_lfht_rcu_thread_offline = cds_lfht_rcu_thread_offline;
1297 ht->cds_lfht_rcu_thread_online = cds_lfht_rcu_thread_online;
1298 ht->cds_lfht_rcu_register_thread = cds_lfht_rcu_register_thread;
1299 ht->cds_lfht_rcu_unregister_thread = cds_lfht_rcu_unregister_thread;
1300 ht->resize_attr = attr;
1301 ht->percpu_count = alloc_per_cpu_items_count();
1302 /* this mutex should not nest in read-side C.S. */
1303 pthread_mutex_init(&ht->resize_mutex, NULL);
1304 order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE)) + 1;
1305 ht->flags = flags;
1306 ht->cds_lfht_rcu_thread_offline();
1307 pthread_mutex_lock(&ht->resize_mutex);
1308 ht->t.resize_target = 1UL << (order - 1);
1309 init_table(ht, 0, order);
1310 pthread_mutex_unlock(&ht->resize_mutex);
1311 ht->cds_lfht_rcu_thread_online();
1312 return ht;
1313 }
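
/*
 * Sizing example for the constructor above: with init_size == 4,
 *   order = get_count_order_ulong(max(4, MIN_TABLE_SIZE)) + 1 == 3
 *   ht->t.resize_target = 1UL << (order - 1) == 4
 * and init_table(ht, 0, 3) allocates and populates the dummy node
 * tables for orders 0, 1 and 2, publishing a table of size 4.
 */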
1314
1315 void cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len,
1316 struct cds_lfht_iter *iter)
1317 {
1318 struct cds_lfht_node *node, *next, *dummy_node;
1319 struct _cds_lfht_node *lookup;
1320 unsigned long hash, reverse_hash, size;
1321
1322 hash = ht->hash_fct(key, key_len, ht->hash_seed);
1323 reverse_hash = bit_reverse_ulong(hash);
1324
1325 size = rcu_dereference(ht->t.size);
1326 lookup = lookup_bucket(ht, size, hash);
1327 dummy_node = (struct cds_lfht_node *) lookup;
1328 /* We can always skip the dummy node initially */
1329 node = rcu_dereference(dummy_node->p.next);
1330 node = clear_flag(node);
1331 for (;;) {
1332 if (unlikely(is_end(node))) {
1333 node = next = NULL;
1334 break;
1335 }
1336 if (unlikely(node->p.reverse_hash > reverse_hash)) {
1337 node = next = NULL;
1338 break;
1339 }
1340 next = rcu_dereference(node->p.next);
1341 if (likely(!is_removed(next))
1342 && !is_dummy(next)
1343 && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
1344 break;
1345 }
1346 node = clear_flag(next);
1347 }
1348 assert(!node || !is_dummy(rcu_dereference(node->p.next)));
1349 iter->node = node;
1350 iter->next = next;
1351 }
1352
1353 void cds_lfht_next_duplicate(struct cds_lfht *ht, struct cds_lfht_iter *iter)
1354 {
1355 struct cds_lfht_node *node, *next;
1356 unsigned long reverse_hash;
1357 void *key;
1358 size_t key_len;
1359
1360 node = iter->node;
1361 reverse_hash = node->p.reverse_hash;
1362 key = node->key;
1363 key_len = node->key_len;
1364 next = iter->next;
1365 node = clear_flag(next);
1366
1367 for (;;) {
1368 if (unlikely(is_end(node))) {
1369 node = next = NULL;
1370 break;
1371 }
1372 if (unlikely(node->p.reverse_hash > reverse_hash)) {
1373 node = next = NULL;
1374 break;
1375 }
1376 next = rcu_dereference(node->p.next);
1377 if (likely(!is_removed(next))
1378 && !is_dummy(next)
1379 && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
1380 break;
1381 }
1382 node = clear_flag(next);
1383 }
1384 assert(!node || !is_dummy(rcu_dereference(node->p.next)));
1385 iter->node = node;
1386 iter->next = next;
1387 }
1388
1389 void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
1390 {
1391 struct cds_lfht_node *node, *next;
1392
1393 node = clear_flag(iter->next);
1394 for (;;) {
1395 if (unlikely(is_end(node))) {
1396 node = next = NULL;
1397 break;
1398 }
1399 next = rcu_dereference(node->p.next);
1400 if (likely(!is_removed(next))
1401 && !is_dummy(next)) {
1402 break;
1403 }
1404 node = clear_flag(next);
1405 }
1406 assert(!node || !is_dummy(rcu_dereference(node->p.next)));
1407 iter->node = node;
1408 iter->next = next;
1409 }
1410
1411 void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
1412 {
1413 struct _cds_lfht_node *lookup;
1414
1415 /*
1416 * Get next after first dummy node. The first dummy node is the
1417 * first node of the linked list.
1418 */
1419 lookup = &ht->t.tbl[0]->nodes[0];
1420 iter->next = lookup->next;
1421 cds_lfht_next(ht, iter);
1422 }
1423
1424 void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
1425 {
1426 unsigned long hash, size;
1427
1428 hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
1429 node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
1430
1431 size = rcu_dereference(ht->t.size);
1432 (void) _cds_lfht_add(ht, size, node, ADD_DEFAULT, 0);
1433 ht_count_add(ht, size);
1434 }
1435
1436 struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
1437 struct cds_lfht_node *node)
1438 {
1439 unsigned long hash, size;
1440 struct cds_lfht_node *ret;
1441
1442 hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
1443 node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
1444
1445 size = rcu_dereference(ht->t.size);
1446 ret = _cds_lfht_add(ht, size, node, ADD_UNIQUE, 0);
1447 if (ret == node)
1448 ht_count_add(ht, size);
1449 return ret;
1450 }
1451
1452 struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
1453 struct cds_lfht_node *node)
1454 {
1455 unsigned long hash, size;
1456 struct cds_lfht_node *ret;
1457
1458 hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
1459 node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
1460
1461 size = rcu_dereference(ht->t.size);
1462 ret = _cds_lfht_add(ht, size, node, ADD_REPLACE, 0);
1463 if (ret == NULL)
1464 ht_count_add(ht, size);
1465 return ret;
1466 }
1467
1468 int cds_lfht_replace(struct cds_lfht *ht, struct cds_lfht_iter *old_iter,
1469 struct cds_lfht_node *new_node)
1470 {
1471 unsigned long size;
1472
1473 size = rcu_dereference(ht->t.size);
1474 return _cds_lfht_replace(ht, size, old_iter->node, old_iter->next,
1475 new_node);
1476 }
1477
1478 int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_iter *iter)
1479 {
1480 unsigned long size;
1481 int ret;
1482
1483 size = rcu_dereference(ht->t.size);
1484 ret = _cds_lfht_del(ht, size, iter->node, 0);
1485 if (!ret)
1486 ht_count_del(ht, size);
1487 return ret;
1488 }
1489
1490 static
1491 int cds_lfht_delete_dummy(struct cds_lfht *ht)
1492 {
1493 struct cds_lfht_node *node;
1494 struct _cds_lfht_node *lookup;
1495 unsigned long order, i, size;
1496
1497 /* Check that the table is empty */
1498 lookup = &ht->t.tbl[0]->nodes[0];
1499 node = (struct cds_lfht_node *) lookup;
1500 do {
1501 node = clear_flag(node)->p.next;
1502 if (!is_dummy(node))
1503 return -EPERM;
1504 assert(!is_removed(node));
1505 } while (!is_end(node));
1506 /*
1507 * size accessed without rcu_dereference because hash table is
1508 * being destroyed.
1509 */
1510 size = ht->t.size;
1511 /* Internal sanity check: all nodes left should be dummy */
1512 for (order = 0; order < get_count_order_ulong(size) + 1; order++) {
1513 unsigned long len;
1514
1515 len = !order ? 1 : 1UL << (order - 1);
1516 for (i = 0; i < len; i++) {
1517 dbg_printf("delete order %lu i %lu hash %lu\n",
1518 order, i,
1519 bit_reverse_ulong(ht->t.tbl[order]->nodes[i].reverse_hash));
1520 assert(is_dummy(ht->t.tbl[order]->nodes[i].next));
1521 }
1522 poison_free(ht->t.tbl[order]);
1523 }
1524 return 0;
1525 }
1526
1527 /*
1528  * Should only be called when no concurrent readers or writers can
1529 * possibly access the table.
1530 */
1531 int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
1532 {
1533 int ret;
1534
1535 /* Wait for in-flight resize operations to complete */
1536 _CMM_STORE_SHARED(ht->in_progress_destroy, 1);
1537 cmm_smp_mb(); /* Store destroy before load resize */
1538 while (uatomic_read(&ht->in_progress_resize))
1539 poll(NULL, 0, 100); /* wait for 100ms */
1540 ret = cds_lfht_delete_dummy(ht);
1541 if (ret)
1542 return ret;
1543 free_per_cpu_items_count(ht->percpu_count);
1544 if (attr)
1545 *attr = ht->resize_attr;
1546 poison_free(ht);
1547 return ret;
1548 }
1549
1550 void cds_lfht_count_nodes(struct cds_lfht *ht,
1551 long *approx_before,
1552 unsigned long *count,
1553 unsigned long *removed,
1554 long *approx_after)
1555 {
1556 struct cds_lfht_node *node, *next;
1557 struct _cds_lfht_node *lookup;
1558 unsigned long nr_dummy = 0;
1559
1560 *approx_before = 0;
1561 if (nr_cpus_mask >= 0) {
1562 int i;
1563
1564 for (i = 0; i < nr_cpus_mask + 1; i++) {
1565 *approx_before += uatomic_read(&ht->percpu_count[i].add);
1566 *approx_before -= uatomic_read(&ht->percpu_count[i].del);
1567 }
1568 }
1569
1570 *count = 0;
1571 *removed = 0;
1572
1573 /* Count non-dummy nodes in the table */
1574 lookup = &ht->t.tbl[0]->nodes[0];
1575 node = (struct cds_lfht_node *) lookup;
1576 do {
1577 next = rcu_dereference(node->p.next);
1578 if (is_removed(next)) {
1579 if (!is_dummy(next))
1580 (*removed)++;
1581 else
1582 (nr_dummy)++;
1583 } else if (!is_dummy(next))
1584 (*count)++;
1585 else
1586 (nr_dummy)++;
1587 node = clear_flag(next);
1588 } while (!is_end(node));
1589 dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
1590 *approx_after = 0;
1591 if (nr_cpus_mask >= 0) {
1592 int i;
1593
1594 for (i = 0; i < nr_cpus_mask + 1; i++) {
1595 *approx_after += uatomic_read(&ht->percpu_count[i].add);
1596 *approx_after -= uatomic_read(&ht->percpu_count[i].del);
1597 }
1598 }
1599 }
1600
1601 /* called with resize mutex held */
1602 static
1603 void _do_cds_lfht_grow(struct cds_lfht *ht,
1604 unsigned long old_size, unsigned long new_size)
1605 {
1606 unsigned long old_order, new_order;
1607
1608 old_order = get_count_order_ulong(old_size) + 1;
1609 new_order = get_count_order_ulong(new_size) + 1;
1610 dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
1611 old_size, old_order, new_size, new_order);
1612 assert(new_size > old_size);
1613 init_table(ht, old_order, new_order - old_order);
1614 }
1615
1616 /* called with resize mutex held */
1617 static
1618 void _do_cds_lfht_shrink(struct cds_lfht *ht,
1619 unsigned long old_size, unsigned long new_size)
1620 {
1621 unsigned long old_order, new_order;
1622
1623 new_size = max(new_size, MIN_TABLE_SIZE);
1624 old_order = get_count_order_ulong(old_size) + 1;
1625 new_order = get_count_order_ulong(new_size) + 1;
1626 dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
1627 old_size, old_order, new_size, new_order);
1628 assert(new_size < old_size);
1629
1630 /* Remove and unlink all dummy nodes to remove. */
1631 fini_table(ht, new_order, old_order - new_order);
1632 }
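
/*
 * Shrink example for the helper above: going from old_size == 8 to
 * new_size == 2 gives old_order == 4 and new_order == 2, so
 * fini_table(ht, 2, 2) walks orders 3 then 2, each time publishing the
 * smaller size, waiting for a grace period, then logically removing and
 * unlinking that order's dummy nodes (bucket indexes 4-7, then 2-3)
 * before the level can be freed.
 */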
1633
1634
1635 /* called with resize mutex held */
1636 static
1637 void _do_cds_lfht_resize(struct cds_lfht *ht)
1638 {
1639 unsigned long new_size, old_size;
1640
1641 /*
1642 * Resize table, re-do if the target size has changed under us.
1643 */
1644 do {
1645 assert(uatomic_read(&ht->in_progress_resize));
1646 if (CMM_LOAD_SHARED(ht->in_progress_destroy))
1647 break;
1648 ht->t.resize_initiated = 1;
1649 old_size = ht->t.size;
1650 new_size = CMM_LOAD_SHARED(ht->t.resize_target);
1651 if (old_size < new_size)
1652 _do_cds_lfht_grow(ht, old_size, new_size);
1653 else if (old_size > new_size)
1654 _do_cds_lfht_shrink(ht, old_size, new_size);
1655 ht->t.resize_initiated = 0;
1656 /* write resize_initiated before read resize_target */
1657 cmm_smp_mb();
1658 } while (ht->t.size != CMM_LOAD_SHARED(ht->t.resize_target));
1659 }
1660
1661 static
1662 unsigned long resize_target_update(struct cds_lfht *ht, unsigned long size,
1663 int growth_order)
1664 {
1665 return _uatomic_max(&ht->t.resize_target,
1666 size << growth_order);
1667 }
1668
1669 static
1670 void resize_target_update_count(struct cds_lfht *ht,
1671 unsigned long count)
1672 {
1673 count = max(count, MIN_TABLE_SIZE);
1674 uatomic_set(&ht->t.resize_target, count);
1675 }
1676
1677 void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
1678 {
1679 resize_target_update_count(ht, new_size);
1680 CMM_STORE_SHARED(ht->t.resize_initiated, 1);
1681 ht->cds_lfht_rcu_thread_offline();
1682 pthread_mutex_lock(&ht->resize_mutex);
1683 _do_cds_lfht_resize(ht);
1684 pthread_mutex_unlock(&ht->resize_mutex);
1685 ht->cds_lfht_rcu_thread_online();
1686 }
1687
1688 static
1689 void do_resize_cb(struct rcu_head *head)
1690 {
1691 struct rcu_resize_work *work =
1692 caa_container_of(head, struct rcu_resize_work, head);
1693 struct cds_lfht *ht = work->ht;
1694
1695 ht->cds_lfht_rcu_thread_offline();
1696 pthread_mutex_lock(&ht->resize_mutex);
1697 _do_cds_lfht_resize(ht);
1698 pthread_mutex_unlock(&ht->resize_mutex);
1699 ht->cds_lfht_rcu_thread_online();
1700 poison_free(work);
1701 cmm_smp_mb(); /* finish resize before decrement */
1702 uatomic_dec(&ht->in_progress_resize);
1703 }
1704
1705 static
1706 void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth)
1707 {
1708 struct rcu_resize_work *work;
1709 unsigned long target_size;
1710
1711 target_size = resize_target_update(ht, size, growth);
1712 /* Store resize_target before read resize_initiated */
1713 cmm_smp_mb();
1714 if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) {
1715 uatomic_inc(&ht->in_progress_resize);
1716 cmm_smp_mb(); /* increment resize count before load destroy */
1717 if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
1718 uatomic_dec(&ht->in_progress_resize);
1719 return;
1720 }
1721 work = malloc(sizeof(*work));
1722 work->ht = ht;
1723 ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
1724 CMM_STORE_SHARED(ht->t.resize_initiated, 1);
1725 }
1726 }
1727
1728 #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
1729
1730 static
1731 void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
1732 unsigned long count)
1733 {
1734 struct rcu_resize_work *work;
1735
1736 if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
1737 return;
1738 resize_target_update_count(ht, count);
1739 /* Store resize_target before read resize_initiated */
1740 cmm_smp_mb();
1741 if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
1742 uatomic_inc(&ht->in_progress_resize);
1743 cmm_smp_mb(); /* increment resize count before load destroy */
1744 if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
1745 uatomic_dec(&ht->in_progress_resize);
1746 return;
1747 }
1748 work = malloc(sizeof(*work));
1749 work->ht = ht;
1750 ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
1751 CMM_STORE_SHARED(ht->t.resize_initiated, 1);
1752 }
1753 }
1754
1755 #endif