rculfhash: Remove leftover hash_seed field
5e28c532 1/*
abc490a1
MD
2 * rculfhash.c
3 *
1475579c 4 * Userspace RCU library - Lock-Free Resizable RCU Hash Table
abc490a1
MD
5 *
6 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
5e28c532
MD
21 */
22
e753ff5a
MD
23/*
24 * Based on the following articles:
25 * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
26 * extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
27 * - Michael, M. M. High performance dynamic lock-free hash tables
28 * and list-based sets. In Proceedings of the fourteenth annual ACM
29 * symposium on Parallel algorithms and architectures, ACM Press,
30 * (2002), 73-82.
31 *
1475579c 32 * Some specifics of this Lock-Free Resizable RCU Hash Table
e753ff5a
MD
33 * implementation:
34 *
35 * - RCU read-side critical section allows readers to perform hash
36 * table lookups and use the returned objects safely by delaying
37 * memory reclamation for a grace period.
38 * - Add and remove operations are lock-free, and do not need to
39 * allocate memory. They need to be executed within RCU read-side
40 * critical section to ensure the objects they read are valid and to
41 * deal with the cmpxchg ABA problem.
42 * - add and add_unique operations are supported. add_unique checks if
43 * the node key already exists in the hash table. It ensures no key
44 * duplicate exists.
45 * - The resize operation executes concurrently with add/remove/lookup.
46 * - Hash table nodes are contained within a split-ordered list. This
47 * list is ordered by incrementing reversed-bits-hash value.
48 * - An index of dummy nodes is kept. These dummy nodes are the hash
49 * table "buckets", and they are also chained together in the
50 * split-ordered list, which allows recursive expansion.
1475579c
MD
51 * - The resize operation for small tables only allows expanding the hash table.
52 * It is triggered automatically by detecting long chains in the add
53 * operation.
54 * - The resize operation for larger tables (and available through an
55 * API) allows both expanding and shrinking the hash table.
4c42f1b8 56 * - Split-counters are used to keep track of the number of
1475579c 57 * nodes within the hash table for automatic resize triggering.
e753ff5a
MD
58 * - Resize operation initiated by long chain detection is executed by a
59 * call_rcu thread, which preserves the lock-freedom of add and remove.
60 * - Resize operations are protected by a mutex.
61 * - The removal operation is split in two parts: first, a "removed"
62 * flag is set in the next pointer within the node to remove. Then,
63 * a "garbage collection" is performed in the bucket containing the
64 * removed node (from the start of the bucket up to the removed node).
65 * All encountered nodes with "removed" flag set in their next
66 * pointers are removed from the linked-list. If the cmpxchg used for
67 * removal fails (due to concurrent garbage-collection or concurrent
68 * add), we retry from the beginning of the bucket. This ensures that
69 * the node with "removed" flag set is removed from the hash table
70 * (not visible to lookups anymore) before the RCU read-side critical
71 * section held across removal ends. Furthermore, this ensures that
72 * the node with "removed" flag set is removed from the linked-list
73 * before its memory is reclaimed. Only the thread whose removal
74 * successfully set the "removed" flag (with a cmpxchg) in a node's
75 * next pointer is considered to have succeeded its removal (and thus
76 * owns the node to reclaim). Because we garbage-collect starting from
77 * an invariant node (the start-of-bucket dummy node) up to the
78 * "removed" node (or find a reverse-hash that is higher), we are sure
79 * that a successful traversal of the chain leads to a chain that is
80 * present in the linked-list (the start node is never removed) and
81 * that it does not contain the "removed" node anymore, even if
82 * concurrent delete/add operations are changing the structure of the
83 * list concurrently.
29e669f6
MD
84 * - The add operation performs garbage collection of buckets if it
85 * encounters nodes with removed flag set in the bucket where it wants
86 * to add its new node. This ensures lock-freedom of the add operation by
87 * helping the remover unlink nodes from the list rather than waiting
88 * for it to do so.
e753ff5a
MD
89 * - An RCU "order table" indexed by log2(hash index) is copied and
90 * expanded by the resize operation. This order table allows finding
91 * the "dummy node" tables.
92 * - There is one dummy node table per hash index order. The size of
93 * each dummy node table is half the number of hashes contained in
93d46c39
LJ
94 * this order (except for order 0).
95 * - synchronize_rcu is used to garbage-collect the old dummy node table.
e753ff5a
MD
96 * - The per-order dummy node tables contain a compact version of the
97 * hash table nodes. These tables are invariant after they are
98 * populated into the hash table.
93d46c39
LJ
99 *
100 * Dummy node tables:
101 *
102 * hash table    hash table      the last        all dummy node tables
103 * order         size            dummy node      0  1  2  3  4  5  6(index)
104 *                               table size
105 * 0             1               1               1
106 * 1             2               1               1  1
107 * 2             4               2               1  1  2
108 * 3             8               4               1  1  2  4
109 * 4             16              8               1  1  2  4  8
110 * 5             32              16              1  1  2  4  8  16
111 * 6             64              32              1  1  2  4  8  16 32
112 *
113 * When growing/shrinking, we only focus on the last dummy node table
114 * whose size is (!order ? 1 : (1 << (order - 1))).
115 *
116 * Example for growing/shrinking:
117 * grow hash table from order 5 to 6: init the index=6 dummy node table
118 * shrink hash table from order 6 to 5: fini the index=6 dummy node table
119 *
1475579c
MD
120 * A bit of ascii art explanation:
121 *
122 * The order index is off by one compared to the actual power of 2,
123 * because we use index 0 to deal with the 0 special-case.
124 *
125 * This shows the nodes for a small table ordered by reversed bits:
126 *
127 * bits   reverse
128 * 0  000        000
129 * 4  100        001
130 * 2  010        010
131 * 6  110        011
132 * 1  001        100
133 * 5  101        101
134 * 3  011        110
135 * 7  111        111
136 *
137 * This shows the nodes in order of non-reversed bits, linked by
138 * reversed-bit order.
139 *
140 * order              bits   reverse
141 * 0               0  000        000
0adc36a8
LJ
142 * 1               |  1  001        100             <-
143 * 2               |  |  2  010        010    <-     |
f6fdd688 144 *                 |  |  |  3  011        110      | <- |
1475579c
MD
145 * 3               -> |  |  |  4  100        001      |    |
146 *                    -> |  |     5  101        101      |
147 *                       -> |        6  110        011
148 *                          ->          7  111        111
e753ff5a
MD
149 */
150
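/*
 * Illustration (not part of the implementation): how a key's bucket and
 * split-order position relate. All names below are local to this sketch;
 * "hash" stands for a caller-provided hash value.
 *
 *	unsigned long size = 8;				(current size, power of 2)
 *	unsigned long bucket = hash & (size - 1);	(bucket index)
 *	unsigned long pos = bit_reverse_ulong(hash);	(split-order sort key)
 *
 * Because the list is sorted by reversed-bits hash, growing the table
 * only inserts new dummy nodes between existing ones: no node ever has
 * to move, which is what lets resize run concurrently with lookups.
 */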
2ed95849
MD
151#define _LGPL_SOURCE
152#include <stdlib.h>
e0ba718a
MD
153#include <errno.h>
154#include <assert.h>
155#include <stdio.h>
abc490a1 156#include <stdint.h>
f000907d 157#include <string.h>
e0ba718a 158
15cfbec7 159#include "config.h"
2ed95849 160#include <urcu.h>
abc490a1 161#include <urcu-call-rcu.h>
a42cc659
MD
162#include <urcu/arch.h>
163#include <urcu/uatomic.h>
a42cc659 164#include <urcu/compiler.h>
abc490a1 165#include <urcu/rculfhash.h>
5e28c532 166#include <stdio.h>
464a1ec9 167#include <pthread.h>
44395fb7 168
f9830efd 169#ifdef DEBUG
f0c29ed7 170#define dbg_printf(fmt, args...) printf("[debug rculfhash] " fmt, ## args)
f9830efd 171#else
e753ff5a 172#define dbg_printf(fmt, args...)
f9830efd
MD
173#endif
174
f8994aee 175/*
4c42f1b8 176 * Split-counters lazily update the global counter every 1024
f8994aee
MD
177 * additions/removals. They automatically keep track of required resizes.
178 * We use the bucket length as an indicator of the need to expand for
179 * small tables and machines lacking per-cpu data support.
180 */
181#define COUNT_COMMIT_ORDER 10
4ddbb355 182#define DEFAULT_SPLIT_COUNT_MASK 0xFUL
6ea6bc67
MD
183#define CHAIN_LEN_TARGET 1
184#define CHAIN_LEN_RESIZE_THRESHOLD 3
2ed95849 185
cd95516d 186/*
76a73da8 187 * Define the minimum table size.
cd95516d 188 */
c9edd44a 189#define MIN_TABLE_SIZE 1
cd95516d 190
4105056a
MD
191#if (CAA_BITS_PER_LONG == 32)
192#define MAX_TABLE_ORDER 32
193#else
194#define MAX_TABLE_ORDER 64
195#endif
196
b7d619b0
MD
197/*
198 * Minimum number of dummy nodes to touch per thread to parallelize grow/shrink.
199 */
6083a889
MD
200#define MIN_PARTITION_PER_THREAD_ORDER 12
201#define MIN_PARTITION_PER_THREAD (1UL << MIN_PARTITION_PER_THREAD_ORDER)
b7d619b0 202
4105056a
MD
203#ifndef min
204#define min(a, b) ((a) < (b) ? (a) : (b))
205#endif
206
abc490a1
MD
207#ifndef max
208#define max(a, b) ((a) > (b) ? (a) : (b))
209#endif
2ed95849 210
d95bd160
MD
211/*
212 * The removed flag needs to be updated atomically with the pointer.
48ed1c18 213 * It indicates that no node must attach to the node scheduled for
b198f0fd 214 * removal, and that node garbage collection must be performed.
d95bd160
MD
215 * The dummy flag does not need to be updated atomically with the
216 * pointer, but it is added as a pointer low bit flag to save space.
217 */
d37166c6 218#define REMOVED_FLAG (1UL << 0)
b198f0fd
MD
219#define DUMMY_FLAG (1UL << 1)
220#define FLAGS_MASK ((1UL << 2) - 1)
d37166c6 221
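/*
 * Pointer-tagging sketch (illustrative): hash table nodes are aligned,
 * so the two low bits of a next pointer are available to carry the
 * flags above:
 *
 *	int removed = ((unsigned long) next) & REMOVED_FLAG;
 *	struct cds_lfht_node *ptr = (struct cds_lfht_node *)
 *		(((unsigned long) next) & ~FLAGS_MASK);
 *
 * The is_removed(), is_dummy(), clear_flag(), flag_removed() and
 * flag_dummy() helpers below encapsulate exactly these operations.
 */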
bb7b2f26 222/* Value of the end pointer. Should not interact with flags. */
f9c80341 223#define END_VALUE NULL
bb7b2f26 224
7f52427b
MD
225/*
226 * ht_items_count: Split-counters counting the number of node addition
227 * and removal in the table. Only used if the CDS_LFHT_ACCOUNTING flag
228 * is set at hash table creation.
229 *
230 * These are free-running counters, never reset to zero. They count the
231 * number of add/remove operations, and every (1 << COUNT_COMMIT_ORDER)
232 * operations they update the global counter. We choose a power-of-2 value
233 * for the trigger to deal with 32 or 64-bit overflow of the counter.
234 */
df44348d 235struct ht_items_count {
860d07e8 236 unsigned long add, del;
df44348d
MD
237} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
238
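/*
 * Accounting sketch (illustrative): each updater increments the add or
 * del field of one split counter, so an approximate item count can be
 * derived at any time by summing the deltas:
 *
 *	long approx = 0;
 *	for (i = 0; i < split_count_mask + 1; i++) {
 *		approx += uatomic_read(&ht->split_count[i].add);
 *		approx -= uatomic_read(&ht->split_count[i].del);
 *	}
 *
 * This is what cds_lfht_count_nodes() below does to compute its
 * approx_before and approx_after outputs.
 */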
7f52427b
MD
239/*
240 * rcu_level: Contains the per order-index-level dummy node table. The
241 * size of each dummy node table is half the number of hashes contained
242 * in this order (except for order 0). The minimum allocation size
243 * parameter allows combining the dummy node arrays of the lowermost
244 * levels to improve cache locality for small index orders.
245 */
1475579c 246struct rcu_level {
0d14ceb2 247 /* Note: manually update allocation length when adding a field */
1475579c
MD
248 struct _cds_lfht_node nodes[0];
249};
250
7f52427b
MD
251/*
252 * rcu_table: Contains the size and desired new size if a resize
253 * operation is in progress, as well as the statically-sized array of
254 * rcu_level pointers.
255 */
395270b6 256struct rcu_table {
4105056a 257 unsigned long size; /* always a power of 2, shared (RCU) */
f9830efd 258 unsigned long resize_target;
11519af6 259 int resize_initiated;
4105056a 260 struct rcu_level *tbl[MAX_TABLE_ORDER];
395270b6
MD
261};
262
7f52427b
MD
263/*
264 * cds_lfht: Top-level data structure representing a lock-free hash
265 * table. Defined in the implementation file to make it an opaque
266 * cookie to users.
267 */
14044b37 268struct cds_lfht {
4105056a 269 struct rcu_table t;
5488222b
LJ
270 unsigned long min_alloc_order;
271 unsigned long min_alloc_size;
b8af5011 272 int flags;
5f511391
MD
273 /*
274 * We need to put the work threads offline (QSBR) when taking this
275 * mutex, because we use synchronize_rcu within this mutex critical
276 * section, which waits on read-side critical sections, and could
277 * therefore cause grace-period deadlock if we hold off RCU G.P.
278 * completion.
279 */
464a1ec9 280 pthread_mutex_t resize_mutex; /* resize mutex: add/del mutex */
33c7c748 281 unsigned int in_progress_resize, in_progress_destroy;
14044b37 282 void (*cds_lfht_call_rcu)(struct rcu_head *head,
abc490a1 283 void (*func)(struct rcu_head *head));
1475579c 284 void (*cds_lfht_synchronize_rcu)(void);
01dbfa62
MD
285 void (*cds_lfht_rcu_read_lock)(void);
286 void (*cds_lfht_rcu_read_unlock)(void);
5f511391
MD
287 void (*cds_lfht_rcu_thread_offline)(void);
288 void (*cds_lfht_rcu_thread_online)(void);
b7d619b0
MD
289 void (*cds_lfht_rcu_register_thread)(void);
290 void (*cds_lfht_rcu_unregister_thread)(void);
291 pthread_attr_t *resize_attr; /* Resize threads attributes */
7de5ccfd 292 long count; /* global approximate item count */
4c42f1b8 293 struct ht_items_count *split_count; /* split item count */
2ed95849
MD
294};
295
7f52427b
MD
296/*
297 * rcu_resize_work: Contains arguments passed to RCU worker thread
298 * responsible for performing lazy resize.
299 */
abc490a1
MD
300struct rcu_resize_work {
301 struct rcu_head head;
14044b37 302 struct cds_lfht *ht;
abc490a1 303};
2ed95849 304
7f52427b
MD
305/*
306 * partition_resize_work: Contains arguments passed to worker threads
307 * executing the hash table resize on partitions of the hash table
308 * assigned to each processor's worker thread.
309 */
b7d619b0 310struct partition_resize_work {
1af6e26e 311 pthread_t thread_id;
b7d619b0
MD
312 struct cds_lfht *ht;
313 unsigned long i, start, len;
314 void (*fct)(struct cds_lfht *ht, unsigned long i,
315 unsigned long start, unsigned long len);
316};
317
76a73da8 318static
83beee94 319void _cds_lfht_add(struct cds_lfht *ht,
0422d92c 320 cds_lfht_match_fct match,
83beee94
MD
321 unsigned long size,
322 struct cds_lfht_node *node,
323 struct cds_lfht_iter *unique_ret,
324 int dummy);
48ed1c18 325
abc490a1
MD
326/*
327 * Algorithm to reverse bits in a word by lookup table, extended to
328 * 64-bit words.
f9830efd 329 * Source:
abc490a1 330 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
f9830efd 331 * Originally from Public Domain.
abc490a1
MD
332 */
333
334static const uint8_t BitReverseTable256[256] =
2ed95849 335{
abc490a1
MD
336#define R2(n) (n), (n) + 2*64, (n) + 1*64, (n) + 3*64
337#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
338#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
339 R6(0), R6(2), R6(1), R6(3)
340};
341#undef R2
342#undef R4
343#undef R6
2ed95849 344
abc490a1
MD
345static
346uint8_t bit_reverse_u8(uint8_t v)
347{
348 return BitReverseTable256[v];
349}
ab7d5fc6 350
abc490a1
MD
351static __attribute__((unused))
352uint32_t bit_reverse_u32(uint32_t v)
353{
354 return ((uint32_t) bit_reverse_u8(v) << 24) |
355 ((uint32_t) bit_reverse_u8(v >> 8) << 16) |
356 ((uint32_t) bit_reverse_u8(v >> 16) << 8) |
357 ((uint32_t) bit_reverse_u8(v >> 24));
2ed95849
MD
358}
359
abc490a1
MD
360static __attribute__((unused))
361uint64_t bit_reverse_u64(uint64_t v)
2ed95849 362{
abc490a1
MD
363 return ((uint64_t) bit_reverse_u8(v) << 56) |
364 ((uint64_t) bit_reverse_u8(v >> 8) << 48) |
365 ((uint64_t) bit_reverse_u8(v >> 16) << 40) |
366 ((uint64_t) bit_reverse_u8(v >> 24) << 32) |
367 ((uint64_t) bit_reverse_u8(v >> 32) << 24) |
368 ((uint64_t) bit_reverse_u8(v >> 40) << 16) |
369 ((uint64_t) bit_reverse_u8(v >> 48) << 8) |
370 ((uint64_t) bit_reverse_u8(v >> 56));
371}
372
373static
374unsigned long bit_reverse_ulong(unsigned long v)
375{
376#if (CAA_BITS_PER_LONG == 32)
377 return bit_reverse_u32(v);
378#else
379 return bit_reverse_u64(v);
380#endif
381}
382
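/*
 * Examples (illustrative): bit_reverse_u8(0x01) == 0x80, and on a
 * 64-bit build bit_reverse_ulong(1UL) == 1UL << 63. The lookup table
 * trades 256 bytes of read-only data for one load per reversed byte.
 */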
f9830efd 383/*
24365af7
MD
384 * fls: returns the position of the most significant bit.
385 * Returns 0 if no bit is set, else returns the position of the most
386 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
f9830efd 387 */
24365af7
MD
388#if defined(__i386) || defined(__x86_64)
389static inline
390unsigned int fls_u32(uint32_t x)
f9830efd 391{
24365af7
MD
392 int r;
393
394 asm("bsrl %1,%0\n\t"
395 "jnz 1f\n\t"
396 "movl $-1,%0\n\t"
397 "1:\n\t"
398 : "=r" (r) : "rm" (x));
399 return r + 1;
400}
401#define HAS_FLS_U32
402#endif
403
404#if defined(__x86_64)
405static inline
406unsigned int fls_u64(uint64_t x)
407{
408 long r;
409
410 asm("bsrq %1,%0\n\t"
411 "jnz 1f\n\t"
412 "movq $-1,%0\n\t"
413 "1:\n\t"
414 : "=r" (r) : "rm" (x));
415 return r + 1;
416}
417#define HAS_FLS_U64
418#endif
419
420#ifndef HAS_FLS_U64
421static __attribute__((unused))
422unsigned int fls_u64(uint64_t x)
423{
424 unsigned int r = 64;
425
426 if (!x)
427 return 0;
428
429 if (!(x & 0xFFFFFFFF00000000ULL)) {
430 x <<= 32;
431 r -= 32;
432 }
433 if (!(x & 0xFFFF000000000000ULL)) {
434 x <<= 16;
435 r -= 16;
436 }
437 if (!(x & 0xFF00000000000000ULL)) {
438 x <<= 8;
439 r -= 8;
440 }
441 if (!(x & 0xF000000000000000ULL)) {
442 x <<= 4;
443 r -= 4;
444 }
445 if (!(x & 0xC000000000000000ULL)) {
446 x <<= 2;
447 r -= 2;
448 }
449 if (!(x & 0x8000000000000000ULL)) {
450 x <<= 1;
451 r -= 1;
452 }
453 return r;
454}
455#endif
456
457#ifndef HAS_FLS_U32
458static __attribute__((unused))
459unsigned int fls_u32(uint32_t x)
460{
461 unsigned int r = 32;
f9830efd 462
24365af7
MD
463 if (!x)
464 return 0;
465 if (!(x & 0xFFFF0000U)) {
466 x <<= 16;
467 r -= 16;
468 }
469 if (!(x & 0xFF000000U)) {
470 x <<= 8;
471 r -= 8;
472 }
473 if (!(x & 0xF0000000U)) {
474 x <<= 4;
475 r -= 4;
476 }
477 if (!(x & 0xC0000000U)) {
478 x <<= 2;
479 r -= 2;
480 }
481 if (!(x & 0x80000000U)) {
482 x <<= 1;
483 r -= 1;
484 }
485 return r;
486}
487#endif
488
489unsigned int fls_ulong(unsigned long x)
f9830efd 490{
6887cc5e 491#if (CAA_BITS_PER_LONG == 32)
24365af7
MD
492 return fls_u32(x);
493#else
494 return fls_u64(x);
495#endif
496}
f9830efd 497
920f8ef6
LJ
498/*
499 * Return the minimum order for which x <= (1UL << order).
500 * Return -1 if x is 0.
501 */
24365af7
MD
502int get_count_order_u32(uint32_t x)
503{
920f8ef6
LJ
504 if (!x)
505 return -1;
24365af7 506
920f8ef6 507 return fls_u32(x - 1);
24365af7
MD
508}
509
920f8ef6
LJ
510/*
511 * Return the minimum order for which x <= (1UL << order).
512 * Return -1 if x is 0.
513 */
24365af7
MD
514int get_count_order_ulong(unsigned long x)
515{
920f8ef6
LJ
516 if (!x)
517 return -1;
24365af7 518
920f8ef6 519 return fls_ulong(x - 1);
f9830efd
MD
520}
521
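/*
 * Examples (illustrative): get_count_order_ulong(1) == 0,
 * get_count_order_ulong(4) == 2, get_count_order_ulong(5) == 3:
 * the smallest order such that x <= (1UL << order).
 */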
98808fb1 522#ifdef POISON_FREE
5afadd12
LJ
523#define poison_free(ptr) \
524 do { \
525 if (ptr) { \
526 memset(ptr, 0x42, sizeof(*(ptr))); \
527 free(ptr); \
528 } \
98808fb1
MD
529 } while (0)
530#else
531#define poison_free(ptr) free(ptr)
532#endif
533
f9830efd 534static
ab65b890 535void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth);
f9830efd 536
f8994aee 537static
4105056a 538void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
f8994aee
MD
539 unsigned long count);
540
df44348d 541static long nr_cpus_mask = -1;
4c42f1b8
LJ
542static long split_count_mask = -1;
543
4ddbb355 544#if defined(HAVE_SYSCONF)
4c42f1b8
LJ
545static void ht_init_nr_cpus_mask(void)
546{
547 long maxcpus;
548
549 maxcpus = sysconf(_SC_NPROCESSORS_CONF);
550 if (maxcpus <= 0) {
551 nr_cpus_mask = -2;
552 return;
553 }
554 /*
555 * round up number of CPUs to next power of two, so we
556 * can use & for modulo.
557 */
558 maxcpus = 1UL << get_count_order_ulong(maxcpus);
559 nr_cpus_mask = maxcpus - 1;
560}
4ddbb355
LJ
561#else /* #if defined(HAVE_SYSCONF) */
562static void ht_init_nr_cpus_mask(void)
563{
564 nr_cpus_mask = -2;
565}
566#endif /* #else #if defined(HAVE_SYSCONF) */
df44348d
MD
567
568static
5afadd12 569void alloc_split_items_count(struct cds_lfht *ht)
df44348d
MD
570{
571 struct ht_items_count *count;
572
4c42f1b8
LJ
573 if (nr_cpus_mask == -1) {
574 ht_init_nr_cpus_mask();
4ddbb355
LJ
575 if (nr_cpus_mask < 0)
576 split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
577 else
578 split_count_mask = nr_cpus_mask;
df44348d 579 }
4c42f1b8 580
4ddbb355 581 assert(split_count_mask >= 0);
5afadd12
LJ
582
583 if (ht->flags & CDS_LFHT_ACCOUNTING) {
584 ht->split_count = calloc(split_count_mask + 1, sizeof(*count));
585 assert(ht->split_count);
586 } else {
587 ht->split_count = NULL;
588 }
df44348d
MD
589}
590
591static
5afadd12 592void free_split_items_count(struct cds_lfht *ht)
df44348d 593{
5afadd12 594 poison_free(ht->split_count);
df44348d
MD
595}
596
14360f1c 597#if defined(HAVE_SCHED_GETCPU)
df44348d 598static
14360f1c 599int ht_get_split_count_index(unsigned long hash)
df44348d
MD
600{
601 int cpu;
602
4c42f1b8 603 assert(split_count_mask >= 0);
df44348d 604 cpu = sched_getcpu();
8ed51e04 605 if (caa_unlikely(cpu < 0))
14360f1c 606 return hash & split_count_mask;
df44348d 607 else
4c42f1b8 608 return cpu & split_count_mask;
df44348d 609}
14360f1c
LJ
610#else /* #if defined(HAVE_SCHED_GETCPU) */
611static
612int ht_get_split_count_index(unsigned long hash)
613{
614 return hash & split_count_mask;
615}
616#endif /* #else #if defined(HAVE_SCHED_GETCPU) */
df44348d
MD
617
618static
14360f1c 619void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
df44348d 620{
4c42f1b8
LJ
621 unsigned long split_count;
622 int index;
df44348d 623
8ed51e04 624 if (caa_unlikely(!ht->split_count))
3171717f 625 return;
14360f1c 626 index = ht_get_split_count_index(hash);
4c42f1b8 627 split_count = uatomic_add_return(&ht->split_count[index].add, 1);
8ed51e04 628 if (caa_unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
e3ecefd6 629 long count;
df44348d 630
4c42f1b8 631 dbg_printf("add split count %lu\n", split_count);
df44348d
MD
632 count = uatomic_add_return(&ht->count,
633 1UL << COUNT_COMMIT_ORDER);
634 /* If power of 2 */
635 if (!(count & (count - 1))) {
4105056a 636 if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
f8994aee 637 return;
e3ecefd6 638 dbg_printf("add set global %ld\n", count);
4105056a 639 cds_lfht_resize_lazy_count(ht, size,
6ea6bc67 640 count >> (CHAIN_LEN_TARGET - 1));
df44348d
MD
641 }
642 }
643}
644
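/*
 * Numeric sketch (illustrative): with COUNT_COMMIT_ORDER == 10, each
 * split counter publishes its updates to the global ht->count in
 * batches of 1024. The power-of-2 test above then runs the resize
 * check only when the global count crosses 1024, 2048, 4096, ...,
 * keeping the common add path down to one per-counter atomic increment.
 */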
645static
14360f1c 646void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
df44348d 647{
4c42f1b8
LJ
648 unsigned long split_count;
649 int index;
df44348d 650
8ed51e04 651 if (caa_unlikely(!ht->split_count))
3171717f 652 return;
14360f1c 653 index = ht_get_split_count_index(hash);
4c42f1b8 654 split_count = uatomic_add_return(&ht->split_count[index].del, 1);
8ed51e04 655 if (caa_unlikely(!(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
e3ecefd6 656 long count;
df44348d 657
4c42f1b8 658 dbg_printf("del split count %lu\n", split_count);
df44348d 659 count = uatomic_add_return(&ht->count,
3171717f 660 -(1UL << COUNT_COMMIT_ORDER));
df44348d
MD
661 /* If power of 2 */
662 if (!(count & (count - 1))) {
4105056a 663 if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
f8994aee 664 return;
e3ecefd6
MD
665 dbg_printf("del set global %ld\n", count);
666 /*
c941bb9e 667 * Don't shrink table if the number of nodes is below a
e3ecefd6
MD
668 * certain threshold.
669 */
4c42f1b8 670 if (count < (1UL << COUNT_COMMIT_ORDER) * (split_count_mask + 1))
e3ecefd6 671 return;
4105056a 672 cds_lfht_resize_lazy_count(ht, size,
6ea6bc67 673 count >> (CHAIN_LEN_TARGET - 1));
df44348d
MD
674 }
675 }
676}
677
f9830efd 678static
4105056a 679void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
f9830efd 680{
f8994aee
MD
681 unsigned long count;
682
b8af5011
MD
683 if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
684 return;
f8994aee
MD
685 count = uatomic_read(&ht->count);
686 /*
687 * Use bucket-local length for small table expand and for
688 * environments lacking per-cpu data support.
689 */
690 if (count >= (1UL << COUNT_COMMIT_ORDER))
691 return;
24365af7 692 if (chain_len > 100)
f0c29ed7 693 dbg_printf("WARNING: large chain length: %u.\n",
24365af7 694 chain_len);
3390d470 695 if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
ab65b890 696 cds_lfht_resize_lazy_grow(ht, size,
01370f0b 697 get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
f9830efd
MD
698}
699
abc490a1 700static
14044b37 701struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
abc490a1 702{
14044b37 703 return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
abc490a1
MD
704}
705
706static
14044b37 707int is_removed(struct cds_lfht_node *node)
abc490a1 708{
d37166c6 709 return ((unsigned long) node) & REMOVED_FLAG;
abc490a1
MD
710}
711
712static
14044b37 713struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
abc490a1 714{
14044b37 715 return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
abc490a1
MD
716}
717
f5596c94 718static
14044b37 719int is_dummy(struct cds_lfht_node *node)
f5596c94
MD
720{
721 return ((unsigned long) node) & DUMMY_FLAG;
722}
723
724static
14044b37 725struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node)
f5596c94 726{
14044b37 727 return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG);
f5596c94 728}
bb7b2f26
MD
729
730static
731struct cds_lfht_node *get_end(void)
732{
733 return (struct cds_lfht_node *) END_VALUE;
734}
735
736static
737int is_end(struct cds_lfht_node *node)
738{
739 return clear_flag(node) == (struct cds_lfht_node *) END_VALUE;
740}
741
abc490a1 742static
ab65b890
LJ
743unsigned long _uatomic_xchg_monotonic_increase(unsigned long *ptr,
744 unsigned long v)
abc490a1
MD
745{
746 unsigned long old1, old2;
747
748 old1 = uatomic_read(ptr);
749 do {
750 old2 = old1;
751 if (old2 >= v)
f9830efd 752 return old2;
abc490a1 753 } while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
ab65b890 754 return old2;
abc490a1
MD
755}
756
f4a9cc0b
LJ
757static
758struct _cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
759 unsigned long hash)
760{
761 unsigned long index, order;
762
763 assert(size > 0);
764 index = hash & (size - 1);
ef6e6171
LJ
765
766 if (index < ht->min_alloc_size) {
767 dbg_printf("lookup hash %lu index %lu order 0 aridx 0\n",
768 hash, index);
769 return &ht->t.tbl[0]->nodes[index];
770 }
a4ea2223
LJ
771 /*
772 * equivalent to get_count_order_ulong(index + 1), but optimizes
773 * away the non-existing 0 special-case for
774 * get_count_order_ulong.
775 */
776 order = fls_ulong(index);
f4a9cc0b 777 dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
ef6e6171
LJ
778 hash, index, order, index & ((1UL << (order - 1)) - 1));
779 return &ht->t.tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)];
f4a9cc0b
LJ
780}
781
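/*
 * Worked example (illustrative), assuming min_alloc_size == 1: for
 * size == 16 and hash == 13, index == 13, order == fls_ulong(13) == 4,
 * so the dummy node is found in ht->t.tbl[4] at offset
 * 13 & ((1UL << 3) - 1) == 5. In general, the order-k table holds the
 * dummy nodes for indexes [2^(k-1), 2^k).
 */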
273399de
MD
782/*
783 * Remove all logically deleted nodes from a bucket up to a certain node key.
784 */
785static
f9c80341 786void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
273399de 787{
14044b37 788 struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
273399de 789
c90201ac
MD
790 assert(!is_dummy(dummy));
791 assert(!is_removed(dummy));
792 assert(!is_dummy(node));
793 assert(!is_removed(node));
273399de
MD
794 for (;;) {
795 iter_prev = dummy;
796 /* We can always skip the dummy node initially */
cc4fcb10 797 iter = rcu_dereference(iter_prev->p.next);
b4cb483f 798 assert(!is_removed(iter));
cc4fcb10 799 assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
bd4db153
MD
800 /*
801 * We should never be called with dummy (start of chain)
802 * and logically removed node (end of path compression
803 * marker) being the actual same node. This would be a
804 * bug in the algorithm implementation.
805 */
806 assert(dummy != node);
273399de 807 for (;;) {
8ed51e04 808 if (caa_unlikely(is_end(iter)))
f9c80341 809 return;
8ed51e04 810 if (caa_likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
f9c80341 811 return;
cc4fcb10 812 next = rcu_dereference(clear_flag(iter)->p.next);
8ed51e04 813 if (caa_unlikely(is_removed(next)))
273399de 814 break;
b453eae1 815 iter_prev = clear_flag(iter);
273399de
MD
816 iter = next;
817 }
b198f0fd 818 assert(!is_removed(iter));
f5596c94
MD
819 if (is_dummy(iter))
820 new_next = flag_dummy(clear_flag(next));
821 else
822 new_next = clear_flag(next);
823 (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
273399de 824 }
f9c80341 825 return;
273399de
MD
826}
827
9357c415
MD
828static
829int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
830 struct cds_lfht_node *old_node,
3fb86f26 831 struct cds_lfht_node *old_next,
9357c415
MD
832 struct cds_lfht_node *new_node)
833{
3fb86f26 834 struct cds_lfht_node *dummy, *ret_next;
9357c415 835 struct _cds_lfht_node *lookup;
9357c415
MD
836
837 if (!old_node) /* Return -ENOENT if asked to replace NULL node */
7801dadd 838 return -ENOENT;
9357c415
MD
839
840 assert(!is_removed(old_node));
841 assert(!is_dummy(old_node));
842 assert(!is_removed(new_node));
843 assert(!is_dummy(new_node));
844 assert(new_node != old_node);
3fb86f26 845 for (;;) {
9357c415 846 /* Insert after node to be replaced */
9357c415
MD
847 if (is_removed(old_next)) {
848 /*
849 * Too late, the old node has been removed under us
850 * between lookup and replace. Fail.
851 */
7801dadd 852 return -ENOENT;
9357c415
MD
853 }
854 assert(!is_dummy(old_next));
855 assert(new_node != clear_flag(old_next));
856 new_node->p.next = clear_flag(old_next);
857 /*
858 * Here is the whole trick for lock-free replace: we add
859 * the replacement node _after_ the node we want to
860 * replace by atomically setting its next pointer at the
861 * same time we set its removal flag. Given that
862 * the lookups/get next use an iterator aware of the
863 * next pointer, they will either skip the old node due
864 * to the removal flag and see the new node, or use
865 * the old node, but will not see the new one.
866 */
867 ret_next = uatomic_cmpxchg(&old_node->p.next,
868 old_next, flag_removed(new_node));
3fb86f26 869 if (ret_next == old_next)
7801dadd 870 break; /* We performed the replacement. */
3fb86f26
LJ
871 old_next = ret_next;
872 }
9357c415 873
9357c415
MD
874 /*
875 * Ensure that the old node is not visible to readers anymore:
876 * lookup for the node, and remove it (along with any other
877 * logically removed node) if found.
878 */
f4a9cc0b 879 lookup = lookup_bucket(ht, size, bit_reverse_ulong(old_node->p.reverse_hash));
9357c415
MD
880 dummy = (struct cds_lfht_node *) lookup;
881 _cds_lfht_gc_bucket(dummy, new_node);
7801dadd
LJ
882
883 assert(is_removed(rcu_dereference(old_node->p.next)));
884 return 0;
9357c415
MD
885}
886
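/*
 * Timeline sketch of the replace trick above (illustrative):
 *
 *	before:  ... -> old_node -> old_next -> ...
 *	cmpxchg: old_node->next = flag_removed(new_node),
 *	         with new_node->next already set to old_next
 *	after:   ... -> old_node (removed) -> new_node -> old_next -> ...
 *
 * Readers that reach old_node skip it because of the removed flag and
 * land on new_node; readers that already fetched old_node keep using it
 * but never see new_node. The _cds_lfht_gc_bucket() pass then unlinks
 * old_node for good.
 */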
83beee94
MD
887/*
888 * A non-NULL unique_ret pointer uses the "add unique" (or uniquify) add
889 * mode. A NULL unique_ret allows creation of duplicate keys.
890 */
abc490a1 891static
83beee94 892void _cds_lfht_add(struct cds_lfht *ht,
0422d92c 893 cds_lfht_match_fct match,
83beee94
MD
894 unsigned long size,
895 struct cds_lfht_node *node,
896 struct cds_lfht_iter *unique_ret,
897 int dummy)
abc490a1 898{
14044b37 899 struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
960c9e4f 900 *return_node;
14044b37 901 struct _cds_lfht_node *lookup;
abc490a1 902
c90201ac
MD
903 assert(!is_dummy(node));
904 assert(!is_removed(node));
f4a9cc0b 905 lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
abc490a1 906 for (;;) {
adc0de68 907 uint32_t chain_len = 0;
abc490a1 908
11519af6
MD
909 /*
910 * iter_prev points to the non-removed node prior to the
911 * insert location.
11519af6 912 */
14044b37 913 iter_prev = (struct cds_lfht_node *) lookup;
11519af6 914 /* We can always skip the dummy node initially */
cc4fcb10
MD
915 iter = rcu_dereference(iter_prev->p.next);
916 assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
abc490a1 917 for (;;) {
8ed51e04 918 if (caa_unlikely(is_end(iter)))
273399de 919 goto insert;
8ed51e04 920 if (caa_likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
273399de 921 goto insert;
238cc06e 922
194fdbd1
LJ
923 /* dummy node is the first node of the identical-hash-value chain */
924 if (dummy && clear_flag(iter)->p.reverse_hash == node->p.reverse_hash)
925 goto insert;
238cc06e 926
cc4fcb10 927 next = rcu_dereference(clear_flag(iter)->p.next);
8ed51e04 928 if (caa_unlikely(is_removed(next)))
9dba85be 929 goto gc_node;
238cc06e
LJ
930
931 /* uniquely add */
83beee94 932 if (unique_ret
1b81fe1a 933 && !is_dummy(next)
238cc06e
LJ
934 && clear_flag(iter)->p.reverse_hash == node->p.reverse_hash) {
935 struct cds_lfht_iter d_iter = { .node = node, .next = iter, };
936
937 /*
938 * uniquely adding inserts the node as the first
939 * node of the identical-hash-value node chain.
940 *
941 * This semantic ensures no duplicated keys
942 * should ever be observable in the table
943 * (including observing nodes one by one
944 * during forward iteration)
945 */
0422d92c 946 cds_lfht_next_duplicate(ht, match, &d_iter);
238cc06e
LJ
947 if (!d_iter.node)
948 goto insert;
949
950 *unique_ret = d_iter;
83beee94 951 return;
48ed1c18 952 }
238cc06e 953
11519af6 954 /* Only account for identical reverse hash once */
24365af7
MD
955 if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
956 && !is_dummy(next))
4105056a 957 check_resize(ht, size, ++chain_len);
11519af6 958 iter_prev = clear_flag(iter);
273399de 959 iter = next;
abc490a1 960 }
48ed1c18 961
273399de 962 insert:
7ec59d3b 963 assert(node != clear_flag(iter));
11519af6 964 assert(!is_removed(iter_prev));
c90201ac 965 assert(!is_removed(iter));
f000907d 966 assert(iter_prev != node);
f9c80341 967 if (!dummy)
1b81fe1a 968 node->p.next = clear_flag(iter);
f9c80341
MD
969 else
970 node->p.next = flag_dummy(clear_flag(iter));
f5596c94
MD
971 if (is_dummy(iter))
972 new_node = flag_dummy(node);
973 else
974 new_node = node;
cc4fcb10 975 if (uatomic_cmpxchg(&iter_prev->p.next, iter,
48ed1c18 976 new_node) != iter) {
273399de 977 continue; /* retry */
48ed1c18 978 } else {
83beee94 979 return_node = node;
960c9e4f 980 goto end;
48ed1c18
MD
981 }
982
9dba85be
MD
983 gc_node:
984 assert(!is_removed(iter));
f5596c94
MD
985 if (is_dummy(iter))
986 new_next = flag_dummy(clear_flag(next));
987 else
988 new_next = clear_flag(next);
989 (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
273399de 990 /* retry */
464a1ec9 991 }
9357c415 992end:
83beee94
MD
993 if (unique_ret) {
994 unique_ret->node = return_node;
995 /* unique_ret->next left unset, never used. */
996 }
abc490a1 997}
464a1ec9 998
abc490a1 999static
860d07e8 1000int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
4105056a 1001 struct cds_lfht_node *node,
b198f0fd 1002 int dummy_removal)
abc490a1 1003{
14044b37
MD
1004 struct cds_lfht_node *dummy, *next, *old;
1005 struct _cds_lfht_node *lookup;
5e28c532 1006
9357c415 1007 if (!node) /* Return -ENOENT if asked to delete NULL node */
743f9143 1008 return -ENOENT;
9357c415 1009
7ec59d3b 1010 /* logically delete the node */
c90201ac
MD
1011 assert(!is_dummy(node));
1012 assert(!is_removed(node));
cc4fcb10 1013 old = rcu_dereference(node->p.next);
7ec59d3b 1014 do {
48ed1c18
MD
1015 struct cds_lfht_node *new_next;
1016
7ec59d3b 1017 next = old;
8ed51e04 1018 if (caa_unlikely(is_removed(next)))
743f9143 1019 return -ENOENT;
1475579c
MD
1020 if (dummy_removal)
1021 assert(is_dummy(next));
1022 else
1023 assert(!is_dummy(next));
48ed1c18 1024 new_next = flag_removed(next);
48ed1c18 1025 old = uatomic_cmpxchg(&node->p.next, next, new_next);
7ec59d3b 1026 } while (old != next);
7ec59d3b 1027 /* We performed the (logical) deletion. */
7ec59d3b
MD
1028
1029 /*
1030 * Ensure that the node is not visible to readers anymore: lookup for
273399de
MD
1031 * the node, and remove it (along with any other logically removed node)
1032 * if found.
11519af6 1033 */
f4a9cc0b 1034 lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
14044b37 1035 dummy = (struct cds_lfht_node *) lookup;
f9c80341 1036 _cds_lfht_gc_bucket(dummy, node);
743f9143
LJ
1037
1038 assert(is_removed(rcu_dereference(node->p.next)));
1039 return 0;
abc490a1 1040}
2ed95849 1041
b7d619b0
MD
1042static
1043void *partition_resize_thread(void *arg)
1044{
1045 struct partition_resize_work *work = arg;
1046
1047 work->ht->cds_lfht_rcu_register_thread();
1048 work->fct(work->ht, work->i, work->start, work->len);
1049 work->ht->cds_lfht_rcu_unregister_thread();
1050 return NULL;
1051}
1052
1053static
1054void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
1055 unsigned long len,
1056 void (*fct)(struct cds_lfht *ht, unsigned long i,
1057 unsigned long start, unsigned long len))
1058{
1059 unsigned long partition_len;
1060 struct partition_resize_work *work;
6083a889
MD
1061 int thread, ret;
1062 unsigned long nr_threads;
b7d619b0 1063
6083a889
MD
1064 /*
1065 * Note: nr_cpus_mask + 1 is always a power of 2.
1066 * We spawn just the number of threads we need to satisfy the minimum
1067 * partition size, up to the number of CPUs in the system.
1068 */
91452a6a
MD
1069 if (nr_cpus_mask > 0) {
1070 nr_threads = min(nr_cpus_mask + 1,
1071 len >> MIN_PARTITION_PER_THREAD_ORDER);
1072 } else {
1073 nr_threads = 1;
1074 }
6083a889
MD
1075 partition_len = len >> get_count_order_ulong(nr_threads);
1076 work = calloc(nr_threads, sizeof(*work));
b7d619b0 1077 assert(work);
6083a889
MD
1078 for (thread = 0; thread < nr_threads; thread++) {
1079 work[thread].ht = ht;
1080 work[thread].i = i;
1081 work[thread].len = partition_len;
1082 work[thread].start = thread * partition_len;
1083 work[thread].fct = fct;
1af6e26e 1084 ret = pthread_create(&(work[thread].thread_id), ht->resize_attr,
6083a889 1085 partition_resize_thread, &work[thread]);
b7d619b0
MD
1086 assert(!ret);
1087 }
6083a889 1088 for (thread = 0; thread < nr_threads; thread++) {
1af6e26e 1089 ret = pthread_join(work[thread].thread_id, NULL);
b7d619b0
MD
1090 assert(!ret);
1091 }
1092 free(work);
b7d619b0
MD
1093}
1094
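/*
 * Numeric sketch (illustrative): with nr_cpus_mask == 7 (8 CPUs) and
 * len == 1UL << 20, nr_threads == min(8, len >> 12) == 8, so each
 * worker handles a partition of 1UL << 17 dummy nodes. For small
 * lengths (len < 2 * MIN_PARTITION_PER_THREAD) the callers below skip
 * thread creation and do the work inline instead.
 */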
e8de508e
MD
1095/*
1096 * Holding RCU read lock to protect _cds_lfht_add against memory
1097 * reclaim that could be performed by other call_rcu worker threads (ABA
1098 * problem).
9ee0fc9a 1099 *
b7d619b0 1100 * When we reach a certain length, we can split this population phase over
9ee0fc9a
MD
1101 * many worker threads, based on the number of CPUs available in the system.
1102 * This should therefore take care of not letting the expand lag behind too
1103 * many concurrent insertion threads by using the scheduler's ability to
1104 * schedule dummy node population fairly with insertions.
e8de508e 1105 */
4105056a 1106static
b7d619b0
MD
1107void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
1108 unsigned long start, unsigned long len)
4105056a
MD
1109{
1110 unsigned long j;
1111
5488222b 1112 assert(i > ht->min_alloc_order);
4105056a 1113 ht->cds_lfht_rcu_read_lock();
b7d619b0 1114 for (j = start; j < start + len; j++) {
4105056a
MD
1115 struct cds_lfht_node *new_node =
1116 (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
1117
dc1da8f6 1118 dbg_printf("init populate: i %lu j %lu hash %lu\n",
4f6e90b7 1119 i, j, (1UL << (i - 1)) + j);
dc1da8f6 1120 new_node->p.reverse_hash =
4f6e90b7 1121 bit_reverse_ulong((1UL << (i - 1)) + j);
0422d92c 1122 _cds_lfht_add(ht, NULL, 1UL << (i - 1),
83beee94 1123 new_node, NULL, 1);
4105056a
MD
1124 }
1125 ht->cds_lfht_rcu_read_unlock();
b7d619b0
MD
1126}
1127
1128static
1129void init_table_populate(struct cds_lfht *ht, unsigned long i,
1130 unsigned long len)
1131{
1132 assert(nr_cpus_mask != -1);
6083a889 1133 if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
b7d619b0
MD
1134 ht->cds_lfht_rcu_thread_online();
1135 init_table_populate_partition(ht, i, 0, len);
1136 ht->cds_lfht_rcu_thread_offline();
1137 return;
1138 }
1139 partition_resize_helper(ht, i, len, init_table_populate_partition);
4105056a
MD
1140}
1141
abc490a1 1142static
4105056a 1143void init_table(struct cds_lfht *ht,
93d46c39 1144 unsigned long first_order, unsigned long last_order)
24365af7 1145{
93d46c39 1146 unsigned long i;
24365af7 1147
93d46c39
LJ
1148 dbg_printf("init table: first_order %lu last_order %lu\n",
1149 first_order, last_order);
5488222b 1150 assert(first_order > ht->min_alloc_order);
93d46c39 1151 for (i = first_order; i <= last_order; i++) {
4105056a 1152 unsigned long len;
24365af7 1153
4f6e90b7 1154 len = 1UL << (i - 1);
f0c29ed7 1155 dbg_printf("init order %lu len: %lu\n", i, len);
4d676753
MD
1156
1157 /* Stop expand if the resize target changes under us */
4f6e90b7 1158 if (CMM_LOAD_SHARED(ht->t.resize_target) < (1UL << i))
4d676753
MD
1159 break;
1160
0d14ceb2 1161 ht->t.tbl[i] = calloc(1, len * sizeof(struct _cds_lfht_node));
b7d619b0 1162 assert(ht->t.tbl[i]);
4105056a 1163
4105056a 1164 /*
dc1da8f6
MD
1165 * Set all dummy nodes reverse hash values for a level and
1166 * link all dummy nodes into the table.
4105056a 1167 */
dc1da8f6 1168 init_table_populate(ht, i, len);
4105056a 1169
f9c80341
MD
1170 /*
1171 * Update table size.
1172 */
1173 cmm_smp_wmb(); /* populate data before RCU size */
4f6e90b7 1174 CMM_STORE_SHARED(ht->t.size, 1UL << i);
f9c80341 1175
4f6e90b7 1176 dbg_printf("init new size: %lu\n", 1UL << i);
4105056a
MD
1177 if (CMM_LOAD_SHARED(ht->in_progress_destroy))
1178 break;
1179 }
1180}
1181
e8de508e
MD
1182/*
1183 * Holding RCU read lock to protect _cds_lfht_remove against memory
1184 * reclaim that could be performed by other call_rcu worker threads (ABA
1185 * problem).
1186 * For a single level, we logically remove and garbage collect each node.
1187 *
1188 * As a design choice, we perform logical removal and garbage collection on a
1189 * node-per-node basis to simplify this algorithm. We also assume keeping good
1190 * cache locality of the operation would outweigh the possible performance gain
1191 * that could be achieved by batching garbage collection for multiple levels.
1192 * However, this would have to be justified by benchmarks.
1193 *
1194 * Concurrent removal and add operations are helping us perform garbage
1195 * collection of logically removed nodes. We guarantee that all logically
1196 * removed nodes have been garbage-collected (unlinked) before call_rcu is
1197 * invoked to free a hole level of dummy nodes (after a grace period).
1198 *
1199 * Logical removal and garbage collection can therefore be done in batch or on a
1200 * node-per-node basis, as long as the guarantee above holds.
9ee0fc9a 1201 *
b7d619b0
MD
1202 * When we reach a certain length, we can split this removal over many worker
1203 * threads, based on the number of CPUs available in the system. This should
1204 * take care of not letting the resize process lag behind too many concurrent
9ee0fc9a 1205 * updater threads actively inserting into the hash table.
e8de508e 1206 */
4105056a 1207static
b7d619b0
MD
1208void remove_table_partition(struct cds_lfht *ht, unsigned long i,
1209 unsigned long start, unsigned long len)
4105056a
MD
1210{
1211 unsigned long j;
1212
5488222b 1213 assert(i > ht->min_alloc_order);
4105056a 1214 ht->cds_lfht_rcu_read_lock();
b7d619b0 1215 for (j = start; j < start + len; j++) {
4105056a
MD
1216 struct cds_lfht_node *fini_node =
1217 (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
1218
1219 dbg_printf("remove entry: i %lu j %lu hash %lu\n",
4f6e90b7 1220 i, j, (1UL << (i - 1)) + j);
4105056a 1221 fini_node->p.reverse_hash =
4f6e90b7
LJ
1222 bit_reverse_ulong((1UL << (i - 1)) + j);
1223 (void) _cds_lfht_del(ht, 1UL << (i - 1), fini_node, 1);
abc490a1 1224 }
4105056a 1225 ht->cds_lfht_rcu_read_unlock();
b7d619b0
MD
1226}
1227
1228static
1229void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
1230{
1231
1232 assert(nr_cpus_mask != -1);
6083a889 1233 if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
b7d619b0
MD
1234 ht->cds_lfht_rcu_thread_online();
1235 remove_table_partition(ht, i, 0, len);
1236 ht->cds_lfht_rcu_thread_offline();
1237 return;
1238 }
1239 partition_resize_helper(ht, i, len, remove_table_partition);
2ed95849
MD
1240}
1241
1475579c 1242static
4105056a 1243void fini_table(struct cds_lfht *ht,
93d46c39 1244 unsigned long first_order, unsigned long last_order)
1475579c 1245{
93d46c39 1246 long i;
0d14ceb2 1247 void *free_by_rcu = NULL;
1475579c 1248
93d46c39
LJ
1249 dbg_printf("fini table: first_order %lu last_order %lu\n",
1250 first_order, last_order);
5488222b 1251 assert(first_order > ht->min_alloc_order);
93d46c39 1252 for (i = last_order; i >= first_order; i--) {
4105056a 1253 unsigned long len;
1475579c 1254
4f6e90b7 1255 len = 1UL << (i - 1);
1475579c 1256 dbg_printf("fini order %lu len: %lu\n", i, len);
4105056a 1257
4d676753
MD
1258 /* Stop shrink if the resize target changes under us */
1259 if (CMM_LOAD_SHARED(ht->t.resize_target) > (1UL << (i - 1)))
1260 break;
1261
1262 cmm_smp_wmb(); /* populate data before RCU size */
1263 CMM_STORE_SHARED(ht->t.size, 1UL << (i - 1));
1264
1265 /*
1266 * We need to wait for all add operations to reach Q.S. (and
1267 * thus use the new table for lookups) before we can start
1268 * releasing the old dummy nodes. Otherwise their lookup will
1269 * return a logically removed node as insert position.
1270 */
1271 ht->cds_lfht_synchronize_rcu();
0d14ceb2
LJ
1272 if (free_by_rcu)
1273 free(free_by_rcu);
4d676753 1274
21263e21 1275 /*
4105056a
MD
1276 * Set "removed" flag in dummy nodes about to be removed.
1277 * Unlink all now-logically-removed dummy node pointers.
1278 * Concurrent add/remove operations are helping us do
1279 * the gc.
21263e21 1280 */
4105056a
MD
1281 remove_table(ht, i, len);
1282
0d14ceb2 1283 free_by_rcu = ht->t.tbl[i];
4105056a
MD
1284
1285 dbg_printf("fini new size: %lu\n", 1UL << i);
1475579c
MD
1286 if (CMM_LOAD_SHARED(ht->in_progress_destroy))
1287 break;
1288 }
0d14ceb2
LJ
1289
1290 if (free_by_rcu) {
1291 ht->cds_lfht_synchronize_rcu();
1292 free(free_by_rcu);
1293 }
1475579c
MD
1294}
1295
ff0d69de
LJ
1296static
1297void cds_lfht_create_dummy(struct cds_lfht *ht, unsigned long size)
1298{
1299 struct _cds_lfht_node *prev, *node;
1300 unsigned long order, len, i, j;
1301
5488222b 1302 ht->t.tbl[0] = calloc(1, ht->min_alloc_size * sizeof(struct _cds_lfht_node));
ff0d69de
LJ
1303 assert(ht->t.tbl[0]);
1304
1305 dbg_printf("create dummy: order %lu index %lu hash %lu\n", 0, 0, 0);
1306 ht->t.tbl[0]->nodes[0].next = flag_dummy(get_end());
1307 ht->t.tbl[0]->nodes[0].reverse_hash = 0;
1308
1309 for (order = 1; order < get_count_order_ulong(size) + 1; order++) {
1310 len = 1UL << (order - 1);
5488222b 1311 if (order <= ht->min_alloc_order) {
eb631bf2 1312 ht->t.tbl[order] = (struct rcu_level *) (ht->t.tbl[0]->nodes + len);
5488222b
LJ
1313 } else {
1314 ht->t.tbl[order] = calloc(1, len * sizeof(struct _cds_lfht_node));
1315 assert(ht->t.tbl[order]);
1316 }
ff0d69de
LJ
1317
1318 i = 0;
1319 prev = ht->t.tbl[i]->nodes;
1320 for (j = 0; j < len; j++) {
1321 if (j & (j - 1)) { /* Between powers of 2 */
1322 prev++;
1323 } else if (j) { /* At each power of 2 */
1324 i++;
1325 prev = ht->t.tbl[i]->nodes;
1326 }
1327
1328 node = &ht->t.tbl[order]->nodes[j];
1329 dbg_printf("create dummy: order %lu index %lu hash %lu\n",
1330 order, j, j + len);
1331 node->next = prev->next;
1332 assert(is_dummy(node->next));
1333 node->reverse_hash = bit_reverse_ulong(j + len);
1334 prev->next = flag_dummy((struct cds_lfht_node *)node);
1335 }
1336 }
1337}
1338
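/*
 * Construction sketch (illustrative): for order == 2 (len == 2), the
 * loop above creates the dummy nodes for hashes 2 and 3 and links each
 * one right after the existing dummy node for hashes 0 and 1
 * respectively, since reverse(2) sorts immediately after reverse(0) and
 * reverse(3) immediately after reverse(1). The j & (j - 1) test walks
 * "prev" through the lower-order tables in that same interleaved order.
 */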
0422d92c 1339struct cds_lfht *_cds_lfht_new(unsigned long init_size,
5488222b 1340 unsigned long min_alloc_size,
b8af5011 1341 int flags,
14044b37 1342 void (*cds_lfht_call_rcu)(struct rcu_head *head,
1475579c 1343 void (*func)(struct rcu_head *head)),
01dbfa62
MD
1344 void (*cds_lfht_synchronize_rcu)(void),
1345 void (*cds_lfht_rcu_read_lock)(void),
5f511391
MD
1346 void (*cds_lfht_rcu_read_unlock)(void),
1347 void (*cds_lfht_rcu_thread_offline)(void),
b7d619b0
MD
1348 void (*cds_lfht_rcu_thread_online)(void),
1349 void (*cds_lfht_rcu_register_thread)(void),
1350 void (*cds_lfht_rcu_unregister_thread)(void),
1351 pthread_attr_t *attr)
abc490a1 1352{
14044b37 1353 struct cds_lfht *ht;
24365af7 1354 unsigned long order;
abc490a1 1355
5488222b
LJ
1356 /* min_alloc_size must be power of two */
1357 if (!min_alloc_size || (min_alloc_size & (min_alloc_size - 1)))
1358 return NULL;
8129be4e 1359 /* init_size must be power of two */
5488222b 1360 if (!init_size || (init_size & (init_size - 1)))
8129be4e 1361 return NULL;
5488222b
LJ
1362 min_alloc_size = max(min_alloc_size, MIN_TABLE_SIZE);
1363 init_size = max(init_size, min_alloc_size);
14044b37 1364 ht = calloc(1, sizeof(struct cds_lfht));
b7d619b0 1365 assert(ht);
b5d6b20f 1366 ht->flags = flags;
14044b37 1367 ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
1475579c 1368 ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
01dbfa62
MD
1369 ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock;
1370 ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock;
5f511391
MD
1371 ht->cds_lfht_rcu_thread_offline = cds_lfht_rcu_thread_offline;
1372 ht->cds_lfht_rcu_thread_online = cds_lfht_rcu_thread_online;
b7d619b0
MD
1373 ht->cds_lfht_rcu_register_thread = cds_lfht_rcu_register_thread;
1374 ht->cds_lfht_rcu_unregister_thread = cds_lfht_rcu_unregister_thread;
1375 ht->resize_attr = attr;
5afadd12 1376 alloc_split_items_count(ht);
abc490a1
MD
1377 /* this mutex should not nest in read-side C.S. */
1378 pthread_mutex_init(&ht->resize_mutex, NULL);
5488222b 1379 order = get_count_order_ulong(init_size);
93d46c39 1380 ht->t.resize_target = 1UL << order;
5488222b
LJ
1381 ht->min_alloc_size = min_alloc_size;
1382 ht->min_alloc_order = get_count_order_ulong(min_alloc_size);
bcbd36fc
LJ
1383 cds_lfht_create_dummy(ht, 1UL << order);
1384 ht->t.size = 1UL << order;
abc490a1
MD
1385 return ht;
1386}
1387
0422d92c
MD
1388void cds_lfht_lookup(struct cds_lfht *ht, cds_lfht_match_fct match,
1389 unsigned long hash, void *key, struct cds_lfht_iter *iter)
2ed95849 1390{
bb7b2f26 1391 struct cds_lfht_node *node, *next, *dummy_node;
14044b37 1392 struct _cds_lfht_node *lookup;
0422d92c 1393 unsigned long reverse_hash, size;
2ed95849 1394
abc490a1 1395 reverse_hash = bit_reverse_ulong(hash);
464a1ec9 1396
4105056a 1397 size = rcu_dereference(ht->t.size);
f4a9cc0b 1398 lookup = lookup_bucket(ht, size, hash);
bb7b2f26
MD
1399 dummy_node = (struct cds_lfht_node *) lookup;
1400 /* We can always skip the dummy node initially */
1401 node = rcu_dereference(dummy_node->p.next);
bb7b2f26 1402 node = clear_flag(node);
2ed95849 1403 for (;;) {
8ed51e04 1404 if (caa_unlikely(is_end(node))) {
96ad1112 1405 node = next = NULL;
abc490a1 1406 break;
bb7b2f26 1407 }
8ed51e04 1408 if (caa_unlikely(node->p.reverse_hash > reverse_hash)) {
96ad1112 1409 node = next = NULL;
abc490a1 1410 break;
2ed95849 1411 }
1b81fe1a 1412 next = rcu_dereference(node->p.next);
7f52427b 1413 assert(node == clear_flag(node));
8ed51e04 1414 if (caa_likely(!is_removed(next))
1b81fe1a 1415 && !is_dummy(next)
7f52427b 1416 && node->p.reverse_hash == reverse_hash
0422d92c 1417 && caa_likely(match(node, key))) {
273399de 1418 break;
2ed95849 1419 }
1b81fe1a 1420 node = clear_flag(next);
2ed95849 1421 }
1b81fe1a 1422 assert(!node || !is_dummy(rcu_dereference(node->p.next)));
adc0de68
MD
1423 iter->node = node;
1424 iter->next = next;
abc490a1 1425}
e0ba718a 1426
0422d92c
MD
1427void cds_lfht_next_duplicate(struct cds_lfht *ht, cds_lfht_match_fct match,
1428 struct cds_lfht_iter *iter)
a481e5ff 1429{
adc0de68 1430 struct cds_lfht_node *node, *next;
a481e5ff
MD
1431 unsigned long reverse_hash;
1432 void *key;
a481e5ff 1433
adc0de68 1434 node = iter->node;
a481e5ff
MD
1435 reverse_hash = node->p.reverse_hash;
1436 key = node->key;
adc0de68 1437 next = iter->next;
a481e5ff
MD
1438 node = clear_flag(next);
1439
1440 for (;;) {
8ed51e04 1441 if (caa_unlikely(is_end(node))) {
96ad1112 1442 node = next = NULL;
a481e5ff 1443 break;
bb7b2f26 1444 }
8ed51e04 1445 if (caa_unlikely(node->p.reverse_hash > reverse_hash)) {
96ad1112 1446 node = next = NULL;
a481e5ff
MD
1447 break;
1448 }
1449 next = rcu_dereference(node->p.next);
8ed51e04 1450 if (caa_likely(!is_removed(next))
a481e5ff 1451 && !is_dummy(next)
0422d92c 1452 && caa_likely(match(node->key, key))) {
a481e5ff
MD
1453 break;
1454 }
1455 node = clear_flag(next);
1456 }
1457 assert(!node || !is_dummy(rcu_dereference(node->p.next)));
adc0de68
MD
1458 iter->node = node;
1459 iter->next = next;
a481e5ff
MD
1460}
1461
4e9b9fbf
MD
1462void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
1463{
1464 struct cds_lfht_node *node, *next;
1465
853395e1 1466 node = clear_flag(iter->next);
4e9b9fbf 1467 for (;;) {
8ed51e04 1468 if (caa_unlikely(is_end(node))) {
4e9b9fbf
MD
1469 node = next = NULL;
1470 break;
1471 }
1472 next = rcu_dereference(node->p.next);
8ed51e04 1473 if (caa_likely(!is_removed(next))
4e9b9fbf
MD
1474 && !is_dummy(next)) {
1475 break;
1476 }
1477 node = clear_flag(next);
1478 }
1479 assert(!node || !is_dummy(rcu_dereference(node->p.next)));
1480 iter->node = node;
1481 iter->next = next;
1482}
1483
1484void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
1485{
1486 struct _cds_lfht_node *lookup;
1487
1488 /*
1489 * Get next after first dummy node. The first dummy node is the
1490 * first node of the linked list.
1491 */
1492 lookup = &ht->t.tbl[0]->nodes[0];
853395e1 1493 iter->next = lookup->next;
4e9b9fbf
MD
1494 cds_lfht_next(ht, iter);
1495}
1496
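/*
 * Iteration sketch (illustrative; must be called from within an RCU
 * read-side critical section). "handle_node" is a hypothetical caller
 * function:
 *
 *	struct cds_lfht_iter iter;
 *
 *	for (cds_lfht_first(ht, &iter); iter.node != NULL;
 *			cds_lfht_next(ht, &iter))
 *		handle_node(iter.node);
 *
 * cds_lfht_next_duplicate() similarly advances only across nodes that
 * share the key of the node found by cds_lfht_lookup().
 */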
0422d92c
MD
1497void cds_lfht_add(struct cds_lfht *ht, unsigned long hash,
1498 struct cds_lfht_node *node)
abc490a1 1499{
0422d92c 1500 unsigned long size;
ab7d5fc6 1501
cc4fcb10 1502 node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
4105056a 1503 size = rcu_dereference(ht->t.size);
0422d92c 1504 _cds_lfht_add(ht, NULL, size, node, NULL, 0);
14360f1c 1505 ht_count_add(ht, size, hash);
3eca1b8c
MD
1506}
1507
14044b37 1508struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
0422d92c
MD
1509 cds_lfht_match_fct match,
1510 unsigned long hash,
48ed1c18 1511 struct cds_lfht_node *node)
3eca1b8c 1512{
0422d92c 1513 unsigned long size;
83beee94 1514 struct cds_lfht_iter iter;
3eca1b8c 1515
cc4fcb10 1516 node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
4105056a 1517 size = rcu_dereference(ht->t.size);
0422d92c 1518 _cds_lfht_add(ht, match, size, node, &iter, 0);
83beee94 1519 if (iter.node == node)
14360f1c 1520 ht_count_add(ht, size, hash);
83beee94 1521 return iter.node;
2ed95849
MD
1522}
1523
9357c415 1524struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
0422d92c
MD
1525 cds_lfht_match_fct match,
1526 unsigned long hash,
48ed1c18
MD
1527 struct cds_lfht_node *node)
1528{
0422d92c 1529 unsigned long size;
83beee94 1530 struct cds_lfht_iter iter;
48ed1c18 1531
48ed1c18 1532 node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
48ed1c18 1533 size = rcu_dereference(ht->t.size);
83beee94 1534 for (;;) {
0422d92c 1535 _cds_lfht_add(ht, match, size, node, &iter, 0);
83beee94 1536 if (iter.node == node) {
14360f1c 1537 ht_count_add(ht, size, hash);
83beee94
MD
1538 return NULL;
1539 }
1540
1541 if (!_cds_lfht_replace(ht, size, iter.node, iter.next, node))
1542 return iter.node;
1543 }
48ed1c18
MD
1544}
1545
9357c415
MD
1546int cds_lfht_replace(struct cds_lfht *ht, struct cds_lfht_iter *old_iter,
1547 struct cds_lfht_node *new_node)
1548{
1549 unsigned long size;
1550
1551 size = rcu_dereference(ht->t.size);
1552 return _cds_lfht_replace(ht, size, old_iter->node, old_iter->next,
1553 new_node);
1554}
1555
1556int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_iter *iter)
2ed95849 1557{
14360f1c 1558 unsigned long size, hash;
df44348d 1559 int ret;
abc490a1 1560
4105056a 1561 size = rcu_dereference(ht->t.size);
9357c415 1562 ret = _cds_lfht_del(ht, size, iter->node, 0);
14360f1c
LJ
1563 if (!ret) {
1564 hash = bit_reverse_ulong(iter->node->p.reverse_hash);
1565 ht_count_del(ht, size, hash);
1566 }
df44348d 1567 return ret;
2ed95849 1568}

static
int cds_lfht_delete_dummy(struct cds_lfht *ht)
{
	struct cds_lfht_node *node;
	struct _cds_lfht_node *lookup;
	unsigned long order, i, size;

	/* Check that the table is empty */
	lookup = &ht->t.tbl[0]->nodes[0];
	node = (struct cds_lfht_node *) lookup;
	do {
		node = clear_flag(node)->p.next;
		if (!is_dummy(node))
			return -EPERM;
		assert(!is_removed(node));
	} while (!is_end(node));
	/*
	 * size accessed without rcu_dereference because hash table is
	 * being destroyed.
	 */
	size = ht->t.size;
	/* Internal sanity check: all nodes left should be dummy */
	for (order = 0; order < get_count_order_ulong(size) + 1; order++) {
		unsigned long len;

		len = !order ? 1 : 1UL << (order - 1);
		for (i = 0; i < len; i++) {
			dbg_printf("delete order %lu i %lu hash %lu\n",
				order, i,
				bit_reverse_ulong(ht->t.tbl[order]->nodes[i].reverse_hash));
			assert(is_dummy(ht->t.tbl[order]->nodes[i].next));
		}

		if (order == ht->min_alloc_order)
			poison_free(ht->t.tbl[0]);
		else if (order > ht->min_alloc_order)
			poison_free(ht->t.tbl[order]);
		/* Nothing to delete for order < ht->min_alloc_order */
	}
	return 0;
}
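
/*
 * Sketch of the per-order bucket index freed above (derived from the
 * len computation: order 0 holds 1 dummy node, order n >= 1 holds
 * 2^(n-1)). For a table of size 8:
 *
 *	order 0: t.tbl[0]->nodes[0]	(bucket 0)
 *	order 1: t.tbl[1]->nodes[0]	(bucket 1)
 *	order 2: t.tbl[2]->nodes[0..1]	(buckets 2-3)
 *	order 3: t.tbl[3]->nodes[0..3]	(buckets 4-7)
 *
 * Orders up to min_alloc_order all live inside the single t.tbl[0]
 * allocation, hence the one poison_free(ht->t.tbl[0]) when order
 * reaches min_alloc_order.
 */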

/*
 * Should only be called once no concurrent readers or writers can
 * possibly access the table anymore.
 */
int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
{
	int ret;

	/* Wait for in-flight resize operations to complete */
	_CMM_STORE_SHARED(ht->in_progress_destroy, 1);
	cmm_smp_mb();	/* Store destroy before load resize */
	while (uatomic_read(&ht->in_progress_resize))
		poll(NULL, 0, 100);	/* wait for 100ms */
	ret = cds_lfht_delete_dummy(ht);
	if (ret)
		return ret;
	free_split_items_count(ht);
	if (attr)
		*attr = ht->resize_attr;
	poison_free(ht);
	return ret;
}
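
/*
 * Illustrative teardown sequence (a sketch; quiescing the updaters and
 * readers first is the caller's responsibility and depends on the RCU
 * flavor in use):
 *
 *	pthread_attr_t *attr = NULL;
 *	int ret;
 *
 *	... stop all updater threads, wait for readers to leave their
 *	... read-side critical sections, then:
 *	ret = cds_lfht_destroy(ht, &attr);
 *	assert(!ret);	/* -EPERM if the table was not empty */
 */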

void cds_lfht_count_nodes(struct cds_lfht *ht,
		long *approx_before,
		unsigned long *count,
		unsigned long *removed,
		long *approx_after)
{
	struct cds_lfht_node *node, *next;
	struct _cds_lfht_node *lookup;
	unsigned long nr_dummy = 0;

	*approx_before = 0;
	if (ht->split_count) {
		int i;

		for (i = 0; i < split_count_mask + 1; i++) {
			*approx_before += uatomic_read(&ht->split_count[i].add);
			*approx_before -= uatomic_read(&ht->split_count[i].del);
		}
	}

	*count = 0;
	*removed = 0;

	/* Count non-dummy nodes in the table */
	lookup = &ht->t.tbl[0]->nodes[0];
	node = (struct cds_lfht_node *) lookup;
	do {
		next = rcu_dereference(node->p.next);
		if (is_removed(next)) {
			if (!is_dummy(next))
				(*removed)++;
			else
				nr_dummy++;
		} else if (!is_dummy(next))
			(*count)++;
		else
			nr_dummy++;
		node = clear_flag(next);
	} while (!is_end(node));
	dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
	*approx_after = 0;
	if (ht->split_count) {
		int i;

		for (i = 0; i < split_count_mask + 1; i++) {
			*approx_after += uatomic_read(&ht->split_count[i].add);
			*approx_after -= uatomic_read(&ht->split_count[i].del);
		}
	}
}
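
/*
 * Sketch of a caller gathering statistics (must run within an RCU
 * read-side critical section, since the walk uses rcu_dereference):
 *
 *	long approx_before, approx_after;
 *	unsigned long count, removed;
 *
 *	rcu_read_lock();
 *	cds_lfht_count_nodes(ht, &approx_before, &count, &removed,
 *			&approx_after);
 *	rcu_read_unlock();
 *
 * approx_before and approx_after are signed on purpose: the split
 * counters are summed without synchronization, so concurrent updates
 * can make the aggregated value transiently negative.
 */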

/* called with resize mutex held */
static
void _do_cds_lfht_grow(struct cds_lfht *ht,
		unsigned long old_size, unsigned long new_size)
{
	unsigned long old_order, new_order;

	old_order = get_count_order_ulong(old_size);
	new_order = get_count_order_ulong(new_size);
	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
		   old_size, old_order, new_size, new_order);
	assert(new_size > old_size);
	init_table(ht, old_order + 1, new_order);
}
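
/*
 * Worked example of the order arithmetic (sizes are powers of two):
 * growing from 4 to 16 buckets yields old_order = 2 and new_order = 4,
 * so init_table(ht, 3, 4) creates the order-3 level (buckets 4-7) and
 * the order-4 level (buckets 8-15), leaving existing levels untouched.
 */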

/* called with resize mutex held */
static
void _do_cds_lfht_shrink(struct cds_lfht *ht,
		unsigned long old_size, unsigned long new_size)
{
	unsigned long old_order, new_order;

	new_size = max(new_size, ht->min_alloc_size);
	old_order = get_count_order_ulong(old_size);
	new_order = get_count_order_ulong(new_size);
	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
		   old_size, old_order, new_size, new_order);
	assert(new_size < old_size);

	/* Remove and unlink all dummy nodes above the new size. */
	fini_table(ht, new_order + 1, old_order);
}

/* called with resize mutex held */
static
void _do_cds_lfht_resize(struct cds_lfht *ht)
{
	unsigned long new_size, old_size;

	/*
	 * Resize table, re-do if the target size has changed under us.
	 */
	do {
		assert(uatomic_read(&ht->in_progress_resize));
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
		ht->t.resize_initiated = 1;
		old_size = ht->t.size;
		new_size = CMM_LOAD_SHARED(ht->t.resize_target);
		if (old_size < new_size)
			_do_cds_lfht_grow(ht, old_size, new_size);
		else if (old_size > new_size)
			_do_cds_lfht_shrink(ht, old_size, new_size);
		ht->t.resize_initiated = 0;
		/* write resize_initiated before read resize_target */
		cmm_smp_mb();
	} while (ht->t.size != CMM_LOAD_SHARED(ht->t.resize_target));
}

static
unsigned long resize_target_grow(struct cds_lfht *ht, unsigned long new_size)
{
	return _uatomic_xchg_monotonic_increase(&ht->t.resize_target, new_size);
}

static
void resize_target_update_count(struct cds_lfht *ht,
		unsigned long count)
{
	count = max(count, ht->min_alloc_size);
	uatomic_set(&ht->t.resize_target, count);
}

void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
{
	resize_target_update_count(ht, new_size);
	CMM_STORE_SHARED(ht->t.resize_initiated, 1);
	ht->cds_lfht_rcu_thread_offline();
	pthread_mutex_lock(&ht->resize_mutex);
	_do_cds_lfht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	ht->cds_lfht_rcu_thread_online();
}
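
/*
 * Synchronous resize from an application thread (sketch). The target
 * size is clamped to min_alloc_size, and the caller must not be inside
 * an RCU read-side critical section, since the thread is put offline
 * for the duration of the resize:
 *
 *	cds_lfht_resize(ht, 4096);
 */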

static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct cds_lfht *ht = work->ht;

	ht->cds_lfht_rcu_thread_offline();
	pthread_mutex_lock(&ht->resize_mutex);
	_do_cds_lfht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	ht->cds_lfht_rcu_thread_online();
	poison_free(work);
	cmm_smp_mb();	/* finish resize before decrement */
	uatomic_dec(&ht->in_progress_resize);
}

static
void __cds_lfht_resize_lazy_launch(struct cds_lfht *ht)
{
	struct rcu_resize_work *work;

	/* Store resize_target before read resize_initiated */
	cmm_smp_mb();
	if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
		uatomic_inc(&ht->in_progress_resize);
		cmm_smp_mb();	/* increment resize count before load destroy */
		if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
			uatomic_dec(&ht->in_progress_resize);
			return;
		}
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(ht->t.resize_initiated, 1);
	}
}

static
void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth)
{
	unsigned long target_size = size << growth;

	if (resize_target_grow(ht, target_size) >= target_size)
		return;

	__cds_lfht_resize_lazy_launch(ht);
}

/*
 * We favor grow operations over shrink. A shrink operation never occurs
 * if a grow operation is queued for lazy execution. A grow operation
 * cancels any pending shrink lazy execution.
 */
static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
				unsigned long count)
{
	if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
		return;
	count = max(count, ht->min_alloc_size);
	if (count == size)
		return;		/* Already the right size, no resize needed */
	if (count > size) {	/* lazy grow */
		if (resize_target_grow(ht, count) >= count)
			return;
	} else {		/* lazy shrink */
		for (;;) {
			unsigned long s;

			s = uatomic_cmpxchg(&ht->t.resize_target, size, count);
			if (s == size)
				break;	/* cmpxchg succeeded: shrink target installed */
			if (s > size)
				return;	/* a grow is (or was just) in progress */
			if (s <= count)
				return;	/* another thread already shrinks at least this far */
			size = s;
		}
	}
	__cds_lfht_resize_lazy_launch(ht);
}
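
/*
 * Worked walk-through of the shrink negotiation above, with
 * illustrative values size = 64, count = 16. If resize_target still
 * holds 64, the cmpxchg installs 16 and the resize is launched. A
 * return of 128 means a grow is pending and wins: the shrink is
 * dropped. A return of 8 means another thread already requested a
 * deeper shrink: nothing left to do. A return of 32 means the current
 * size moved: retry the cmpxchg against 32.
 */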