Commit | Line | Data |
---|---|---|
5e28c532 | 1 | /* |
abc490a1 MD |
2 | * rculfhash.c |
3 | * | |
1475579c | 4 | * Userspace RCU library - Lock-Free Resizable RCU Hash Table |
abc490a1 MD |
5 | * |
6 | * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com> | |
7 | * | |
8 | * This library is free software; you can redistribute it and/or | |
9 | * modify it under the terms of the GNU Lesser General Public | |
10 | * License as published by the Free Software Foundation; either | |
11 | * version 2.1 of the License, or (at your option) any later version. | |
12 | * | |
13 | * This library is distributed in the hope that it will be useful, | |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
16 | * Lesser General Public License for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU Lesser General Public | |
19 | * License along with this library; if not, write to the Free Software | |
20 | * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA | |
5e28c532 MD |
21 | */ |
22 | ||
e753ff5a MD |
23 | /* |
24 | * Based on the following articles: | |
25 | * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free | |
26 | * extensible hash tables. J. ACM 53, 3 (May 2006), 379-405. | |
27 | * - Michael, M. M. High performance dynamic lock-free hash tables | |
28 | * and list-based sets. In Proceedings of the fourteenth annual ACM | |
29 | * symposium on Parallel algorithms and architectures, ACM Press, | |
30 | * (2002), 73-82. | |
31 | * | |
1475579c | 32 | * Some specificities of this Lock-Free Resizable RCU Hash Table |
e753ff5a MD |
33 | * implementation: |
34 | * | |
35 | * - RCU read-side critical section allows readers to perform hash
36 | * table lookups and use the returned objects safely by delaying
37 | * memory reclaim by a grace period.
38 | * - Add and remove operations are lock-free, and do not need to | |
39 | * allocate memory. They need to be executed within an RCU read-side
40 | * critical section to ensure the objects they read are valid and to | |
41 | * deal with the cmpxchg ABA problem. | |
42 | * - add and add_unique operations are supported. add_unique checks if | |
43 | * the node key already exists in the hash table. It ensures that no
44 | * duplicate key exists.
45 | * - The resize operation executes concurrently with add/remove/lookup. | |
46 | * - Hash table nodes are contained within a split-ordered list. This | |
47 | * list is ordered by incrementing reversed-bits-hash value. | |
48 | * - An index of dummy nodes is kept. These dummy nodes are the hash | |
49 | * table "buckets", and they are also chained together in the | |
50 | * split-ordered list, which allows recursive expansion. | |
1475579c MD |
51 | * - The resize operation for small tables only allows expanding the hash table. |
52 | * It is triggered automatically by detecting long chains in the add | |
53 | * operation. | |
54 | * - The resize operation for larger tables (and available through an | |
55 | * API) allows both expanding and shrinking the hash table. | |
56 | * - Per-CPU split-counters are used to keep track of the number of
57 | * nodes within the hash table for automatic resize triggering. | |
e753ff5a MD |
58 | * - Resize operation initiated by long chain detection is executed by a |
59 | * call_rcu thread, which keeps lock-freedom of add and remove. | |
60 | * - Resize operations are protected by a mutex. | |
61 | * - The removal operation is split in two parts: first, a "removed" | |
62 | * flag is set in the next pointer within the node to remove. Then, | |
63 | * a "garbage collection" is performed in the bucket containing the | |
64 | * removed node (from the start of the bucket up to the removed node). | |
65 | * All encountered nodes with "removed" flag set in their next | |
66 | * pointers are removed from the linked-list. If the cmpxchg used for | |
67 | * removal fails (due to concurrent garbage-collection or concurrent | |
68 | * add), we retry from the beginning of the bucket. This ensures that | |
69 | * the node with "removed" flag set is removed from the hash table | |
70 | * (not visible to lookups anymore) before the RCU read-side critical | |
71 | * section held across removal ends. Furthermore, this ensures that | |
72 | * the node with "removed" flag set is removed from the linked-list | |
73 | * before its memory is reclaimed. Only the thread whose cmpxchg
74 | * successfully set the "removed" flag into a node's next pointer is
75 | * considered to have succeeded its removal (and thus owns the node
76 | * to reclaim). Because we garbage-collect starting from an invariant
77 | * node (the start-of-bucket dummy node) up to the "removed" node (or
78 | * until we find a higher reverse-hash), we are sure that a successful
79 | * traversal of the chain leads to a chain that is present in the
80 | * linked-list (the start node is never removed) and that it does not
81 | * contain the "removed" node anymore, even if concurrent delete/add
82 | * operations are changing the structure of the list at the same
83 | * time.
29e669f6 MD |
84 | * - The add operation performs garbage collection of buckets if it
85 | * encounters nodes with removed flag set in the bucket where it wants
86 | * to add its new node. This ensures lock-freedom of the add operation
87 | * by helping the remover unlink nodes from the list rather than
88 | * waiting for it to do so.
e753ff5a MD |
89 | * - An RCU "order table" indexed by log2(hash index) is copied and
90 | * expanded by the resize operation. This order table allows finding | |
91 | * the "dummy node" tables. | |
92 | * - There is one dummy node table per hash index order. The size of | |
93 | * each dummy node table is half the number of hashes contained in | |
94 | * this order. | |
95 | * - call_rcu is used to garbage-collect the old order table. | |
96 | * - The per-order dummy node tables contain a compact version of the | |
97 | * hash table nodes. These tables are invariant after they are | |
98 | * populated into the hash table. | |
1475579c MD |
99 | * |
100 | * A bit of ascii art explanation: | |
101 | * | |
102 | * The order index is off by one compared to the actual power of 2,
103 | * because we use index 0 to deal with the 0 special-case.
104 | * | |
105 | * This shows the nodes for a small table ordered by reversed bits: | |
106 | * | |
107 | * bits reverse | |
108 | * 0 000 000 | |
109 | * 4 100 001 | |
110 | * 2 010 010 | |
111 | * 6 110 011 | |
112 | * 1 001 100 | |
113 | * 5 101 101 | |
114 | * 3 011 110 | |
115 | * 7 111 111 | |
116 | * | |
117 | * This shows the nodes in order of non-reversed bits, linked by | |
118 | * reversed-bit order. | |
119 | * | |
120 | * order bits reverse | |
121 | * 0 0 000 000 | |
122 | * | | |
f6fdd688 MD |
123 | * 1 | 1 001 100 <- <- |
124 | * | | | | | |
125 | * 2 | | 2 010 010 | | | |
126 | * | | | 3 011 110 | <- | | |
127 | * | | | | | | | | |
1475579c MD |
128 | * 3 -> | | | 4 100 001 | | |
129 | * -> | | 5 101 101 | | |
130 | * -> | 6 110 011 | |
131 | * -> 7 111 111 | |
e753ff5a MD |
132 | */ |
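/*
 * Editor's worked example (not part of the original file): for a table
 * of size 8, the order tables described above hold the dummy nodes as
 * t.tbl[0] = { dummy 0 }, t.tbl[1] = { dummy 1 },
 * t.tbl[2] = { dummies 2-3 }, t.tbl[3] = { dummies 4-7 }.
 * Each order i > 0 thus holds 1UL << (i - 1) dummy nodes, half the
 * number of hashes contained in that order, matching the
 * "len = !i ? 1 : 1UL << (i - 1)" computation in init_table() below.
 */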
133 | ||
2ed95849 MD |
134 | #define _LGPL_SOURCE |
135 | #include <stdlib.h> | |
e0ba718a MD |
136 | #include <errno.h> |
137 | #include <assert.h> | |
138 | #include <stdio.h> | |
abc490a1 | 139 | #include <stdint.h> |
f000907d | 140 | #include <string.h> |
e0ba718a | 141 | |
df44348d | 142 | #include "config.h" |
2ed95849 | 143 | #include <urcu.h> |
abc490a1 | 144 | #include <urcu-call-rcu.h> |
a42cc659 MD |
145 | #include <urcu/arch.h> |
146 | #include <urcu/uatomic.h> | |
674f7a69 | 147 | #include <urcu/jhash.h> |
a42cc659 | 148 | #include <urcu/compiler.h> |
abc490a1 | 149 | #include <urcu/rculfhash.h> |
5e28c532 | 150 | #include <stdio.h> |
464a1ec9 | 151 | #include <pthread.h> |
44395fb7 | 152 | |
f9830efd | 153 | #ifdef DEBUG |
f0c29ed7 | 154 | #define dbg_printf(fmt, args...) printf("[debug rculfhash] " fmt, ## args) |
f9830efd | 155 | #else |
e753ff5a | 156 | #define dbg_printf(fmt, args...) |
f9830efd MD |
157 | #endif |
158 | ||
f8994aee MD |
159 | /* |
160 | * Per-CPU split-counters lazily update the global counter every 1024
161 | * additions/removals. They automatically keep track of when a resize
162 | * is required. We use the bucket chain length as the expand indicator
163 | * for small tables and for machines lacking per-cpu data support.
164 | */ | |
165 | #define COUNT_COMMIT_ORDER 10 | |
6ea6bc67 MD |
166 | #define CHAIN_LEN_TARGET 1 |
167 | #define CHAIN_LEN_RESIZE_THRESHOLD 3 | |
2ed95849 | 168 | |
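/*
 * Editor's worked example (not part of the original file): with
 * COUNT_COMMIT_ORDER = 10, each per-cpu counter folds its delta into
 * the global ht->count every 2^10 = 1024 additions/removals. With
 * CHAIN_LEN_TARGET = 1 and CHAIN_LEN_RESIZE_THRESHOLD = 3, a bucket
 * chain of 3 or more nodes triggers a lazy resize in check_resize()
 * below (small-table path).
 */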
cd95516d | 169 | /* |
76a73da8 | 170 | * Define the minimum table size. |
cd95516d | 171 | */ |
c9edd44a | 172 | #define MIN_TABLE_SIZE 1 |
cd95516d | 173 | |
4105056a MD |
174 | #if (CAA_BITS_PER_LONG == 32) |
175 | #define MAX_TABLE_ORDER 32 | |
176 | #else | |
177 | #define MAX_TABLE_ORDER 64 | |
178 | #endif | |
179 | ||
b7d619b0 MD |
180 | /* |
181 | * Minimum number of dummy nodes to touch per thread to parallelize grow/shrink. | |
182 | */ | |
183 | #define MIN_PARTITION_PER_THREAD 4096 | |
184 | ||
4105056a MD |
185 | #ifndef min |
186 | #define min(a, b) ((a) < (b) ? (a) : (b)) | |
187 | #endif | |
188 | ||
abc490a1 MD |
189 | #ifndef max |
190 | #define max(a, b) ((a) > (b) ? (a) : (b)) | |
191 | #endif | |
2ed95849 | 192 | |
d95bd160 MD |
193 | /* |
194 | * The removed flag needs to be updated atomically with the pointer. | |
195 | * The dummy flag does not need to be updated atomically with the
196 | * pointer, but it is added as a pointer low bit flag to save space. | |
197 | */ | |
d37166c6 | 198 | #define REMOVED_FLAG (1UL << 0) |
f5596c94 MD |
199 | #define DUMMY_FLAG (1UL << 1) |
200 | #define FLAGS_MASK ((1UL << 2) - 1) | |
d37166c6 | 201 | |
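/*
 * Editor's note (not part of the original file): node pointers are at
 * least 4-byte aligned, so their two low-order bits are free to carry
 * the REMOVED (bit 0) and DUMMY (bit 1) flags; clear_flag(),
 * is_removed() and is_dummy() below mask and test these bits.
 */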
bb7b2f26 | 202 | /* Value of the end pointer. Should not interact with flags. */ |
f9c80341 | 203 | #define END_VALUE NULL |
bb7b2f26 | 204 | |
df44348d | 205 | struct ht_items_count { |
3171717f | 206 | unsigned long add, remove; |
df44348d MD |
207 | } __attribute__((aligned(CAA_CACHE_LINE_SIZE))); |
208 | ||
1475579c MD |
209 | struct rcu_level { |
210 | struct rcu_head head; | |
211 | struct _cds_lfht_node nodes[0]; | |
212 | }; | |
213 | ||
395270b6 | 214 | struct rcu_table { |
4105056a | 215 | unsigned long size; /* always a power of 2, shared (RCU) */ |
f9830efd | 216 | unsigned long resize_target; |
11519af6 | 217 | int resize_initiated; |
4105056a | 218 | struct rcu_level *tbl[MAX_TABLE_ORDER]; |
395270b6 MD |
219 | }; |
220 | ||
14044b37 | 221 | struct cds_lfht { |
4105056a | 222 | struct rcu_table t; |
14044b37 MD |
223 | cds_lfht_hash_fct hash_fct; |
224 | cds_lfht_compare_fct compare_fct; | |
732ad076 | 225 | unsigned long hash_seed; |
b8af5011 | 226 | int flags; |
5f511391 MD |
227 | /* |
228 | * We need to put the work threads offline (QSBR) when taking this | |
229 | * mutex, because we use synchronize_rcu within this mutex critical | |
230 | * section, which waits on read-side critical sections, and could | |
231 | * therefore cause grace-period deadlock if we hold off RCU G.P. | |
232 | * completion. | |
233 | */ | |
464a1ec9 | 234 | pthread_mutex_t resize_mutex; /* resize mutex: add/del mutex */ |
33c7c748 | 235 | unsigned int in_progress_resize, in_progress_destroy; |
14044b37 | 236 | void (*cds_lfht_call_rcu)(struct rcu_head *head, |
abc490a1 | 237 | void (*func)(struct rcu_head *head)); |
1475579c | 238 | void (*cds_lfht_synchronize_rcu)(void); |
01dbfa62 MD |
239 | void (*cds_lfht_rcu_read_lock)(void); |
240 | void (*cds_lfht_rcu_read_unlock)(void); | |
5f511391 MD |
241 | void (*cds_lfht_rcu_thread_offline)(void); |
242 | void (*cds_lfht_rcu_thread_online)(void); | |
b7d619b0 MD |
243 | void (*cds_lfht_rcu_register_thread)(void); |
244 | void (*cds_lfht_rcu_unregister_thread)(void); | |
245 | pthread_attr_t *resize_attr; /* Resize threads attributes */ | |
df44348d MD |
246 | unsigned long count; /* global approximate item count */ |
247 | struct ht_items_count *percpu_count; /* per-cpu item count */ | |
2ed95849 MD |
248 | }; |
249 | ||
abc490a1 MD |
250 | struct rcu_resize_work { |
251 | struct rcu_head head; | |
14044b37 | 252 | struct cds_lfht *ht; |
abc490a1 | 253 | }; |
2ed95849 | 254 | |
b7d619b0 MD |
255 | struct partition_resize_work { |
256 | struct rcu_head head; | |
257 | struct cds_lfht *ht; | |
258 | unsigned long i, start, len; | |
259 | void (*fct)(struct cds_lfht *ht, unsigned long i, | |
260 | unsigned long start, unsigned long len); | |
261 | }; | |
262 | ||
76a73da8 MD |
263 | static |
264 | struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, | |
265 | unsigned long size, | |
266 | struct cds_lfht_node *node, | |
267 | int unique, int dummy); | |
268 | ||
abc490a1 MD |
269 | /* |
270 | * Algorithm to reverse bits in a word by lookup table, extended to | |
271 | * 64-bit words. | |
f9830efd | 272 | * Source: |
abc490a1 | 273 | * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable |
f9830efd | 274 | * Originally in the public domain.
abc490a1 MD |
275 | */ |
276 | ||
277 | static const uint8_t BitReverseTable256[256] = | |
2ed95849 | 278 | { |
abc490a1 MD |
279 | #define R2(n) (n), (n) + 2*64, (n) + 1*64, (n) + 3*64 |
280 | #define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16) | |
281 | #define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 ) | |
282 | R6(0), R6(2), R6(1), R6(3) | |
283 | }; | |
284 | #undef R2 | |
285 | #undef R4 | |
286 | #undef R6 | |
2ed95849 | 287 | |
abc490a1 MD |
288 | static |
289 | uint8_t bit_reverse_u8(uint8_t v) | |
290 | { | |
291 | return BitReverseTable256[v]; | |
292 | } | |
ab7d5fc6 | 293 | |
abc490a1 MD |
294 | static __attribute__((unused)) |
295 | uint32_t bit_reverse_u32(uint32_t v) | |
296 | { | |
297 | return ((uint32_t) bit_reverse_u8(v) << 24) | | |
298 | ((uint32_t) bit_reverse_u8(v >> 8) << 16) | | |
299 | ((uint32_t) bit_reverse_u8(v >> 16) << 8) | | |
300 | ((uint32_t) bit_reverse_u8(v >> 24)); | |
2ed95849 MD |
301 | } |
302 | ||
abc490a1 MD |
303 | static __attribute__((unused)) |
304 | uint64_t bit_reverse_u64(uint64_t v) | |
2ed95849 | 305 | { |
abc490a1 MD |
306 | return ((uint64_t) bit_reverse_u8(v) << 56) | |
307 | ((uint64_t) bit_reverse_u8(v >> 8) << 48) | | |
308 | ((uint64_t) bit_reverse_u8(v >> 16) << 40) | | |
309 | ((uint64_t) bit_reverse_u8(v >> 24) << 32) | | |
310 | ((uint64_t) bit_reverse_u8(v >> 32) << 24) | | |
311 | ((uint64_t) bit_reverse_u8(v >> 40) << 16) | | |
312 | ((uint64_t) bit_reverse_u8(v >> 48) << 8) | | |
313 | ((uint64_t) bit_reverse_u8(v >> 56)); | |
314 | } | |
315 | ||
316 | static | |
317 | unsigned long bit_reverse_ulong(unsigned long v) | |
318 | { | |
319 | #if (CAA_BITS_PER_LONG == 32) | |
320 | return bit_reverse_u32(v); | |
321 | #else | |
322 | return bit_reverse_u64(v); | |
323 | #endif | |
324 | } | |
325 | ||
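/*
 * Editor's sketch (not part of the original file): a tiny self-check
 * of the bit-reversal helpers above.
 */
static __attribute__((unused))
void example_bit_reverse_selfcheck(void)
{
	/* LSB becomes MSB: 00000001b -> 10000000b. */
	assert(bit_reverse_u8(0x01) == 0x80);
	/* 11110000b -> 00001111b. */
	assert(bit_reverse_u8(0xF0) == 0x0F);
	/* Bit reversal is an involution. */
	assert(bit_reverse_ulong(bit_reverse_ulong(42UL)) == 42UL);
}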
f9830efd | 326 | /* |
24365af7 MD |
327 | * fls: find last (most significant) bit set.
328 | * Returns 0 if no bit is set, else returns the position of the most | |
329 | * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit). | |
f9830efd | 330 | */ |
24365af7 MD |
331 | #if defined(__i386) || defined(__x86_64) |
332 | static inline | |
333 | unsigned int fls_u32(uint32_t x) | |
f9830efd | 334 | { |
24365af7 MD |
335 | int r; |
336 | ||
337 | asm("bsrl %1,%0\n\t" | |
338 | "jnz 1f\n\t" | |
339 | "movl $-1,%0\n\t" | |
340 | "1:\n\t" | |
341 | : "=r" (r) : "rm" (x)); | |
342 | return r + 1; | |
343 | } | |
344 | #define HAS_FLS_U32 | |
345 | #endif | |
346 | ||
347 | #if defined(__x86_64) | |
348 | static inline | |
349 | unsigned int fls_u64(uint64_t x) | |
350 | { | |
351 | long r; | |
352 | ||
353 | asm("bsrq %1,%0\n\t" | |
354 | "jnz 1f\n\t" | |
355 | "movq $-1,%0\n\t" | |
356 | "1:\n\t" | |
357 | : "=r" (r) : "rm" (x)); | |
358 | return r + 1; | |
359 | } | |
360 | #define HAS_FLS_U64 | |
361 | #endif | |
362 | ||
363 | #ifndef HAS_FLS_U64 | |
364 | static __attribute__((unused)) | |
365 | unsigned int fls_u64(uint64_t x) | |
366 | { | |
367 | unsigned int r = 64; | |
368 | ||
369 | if (!x) | |
370 | return 0; | |
371 | ||
372 | if (!(x & 0xFFFFFFFF00000000ULL)) { | |
373 | x <<= 32; | |
374 | r -= 32; | |
375 | } | |
376 | if (!(x & 0xFFFF000000000000ULL)) { | |
377 | x <<= 16; | |
378 | r -= 16; | |
379 | } | |
380 | if (!(x & 0xFF00000000000000ULL)) { | |
381 | x <<= 8; | |
382 | r -= 8; | |
383 | } | |
384 | if (!(x & 0xF000000000000000ULL)) { | |
385 | x <<= 4; | |
386 | r -= 4; | |
387 | } | |
388 | if (!(x & 0xC000000000000000ULL)) { | |
389 | x <<= 2; | |
390 | r -= 2; | |
391 | } | |
392 | if (!(x & 0x8000000000000000ULL)) { | |
393 | x <<= 1; | |
394 | r -= 1; | |
395 | } | |
396 | return r; | |
397 | } | |
398 | #endif | |
399 | ||
400 | #ifndef HAS_FLS_U32 | |
401 | static __attribute__((unused)) | |
402 | unsigned int fls_u32(uint32_t x) | |
403 | { | |
404 | unsigned int r = 32; | |
f9830efd | 405 | |
24365af7 MD |
406 | if (!x) |
407 | return 0; | |
408 | if (!(x & 0xFFFF0000U)) { | |
409 | x <<= 16; | |
410 | r -= 16; | |
411 | } | |
412 | if (!(x & 0xFF000000U)) { | |
413 | x <<= 8; | |
414 | r -= 8; | |
415 | } | |
416 | if (!(x & 0xF0000000U)) { | |
417 | x <<= 4; | |
418 | r -= 4; | |
419 | } | |
420 | if (!(x & 0xC0000000U)) { | |
421 | x <<= 2; | |
422 | r -= 2; | |
423 | } | |
424 | if (!(x & 0x80000000U)) { | |
425 | x <<= 1; | |
426 | r -= 1; | |
427 | } | |
428 | return r; | |
429 | } | |
430 | #endif | |
431 | ||
432 | unsigned int fls_ulong(unsigned long x) | |
f9830efd | 433 | { |
24365af7 MD |
434 | #if (CAA_BITS_PER_LONG == 32)
435 | return fls_u32(x); | |
436 | #else | |
437 | return fls_u64(x); | |
438 | #endif | |
439 | } | |
f9830efd | 440 | |
24365af7 MD |
441 | int get_count_order_u32(uint32_t x) |
442 | { | |
443 | int order; | |
444 | ||
445 | order = fls_u32(x) - 1; | |
446 | if (x & (x - 1)) | |
447 | order++; | |
448 | return order; | |
449 | } | |
450 | ||
451 | int get_count_order_ulong(unsigned long x) | |
452 | { | |
453 | int order; | |
454 | ||
455 | order = fls_ulong(x) - 1; | |
456 | if (x & (x - 1)) | |
457 | order++; | |
458 | return order; | |
f9830efd MD |
459 | } |
460 | ||
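/*
 * Editor's sketch (not part of the original file): the
 * get_count_order_* helpers above round up to the order of the next
 * power of two.
 */
static __attribute__((unused))
void example_count_order_selfcheck(void)
{
	assert(get_count_order_u32(1) == 0);	/* 1 == 2^0 */
	assert(get_count_order_u32(8) == 3);	/* exact power of two */
	assert(get_count_order_u32(9) == 4);	/* rounded up to 16 == 2^4 */
}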
98808fb1 MD |
461 | #ifdef POISON_FREE |
462 | #define poison_free(ptr) \ | |
463 | do { \ | |
464 | memset(ptr, 0x42, sizeof(*(ptr))); \ | |
465 | free(ptr); \ | |
466 | } while (0) | |
467 | #else | |
468 | #define poison_free(ptr) free(ptr) | |
469 | #endif | |
470 | ||
f9830efd | 471 | static |
4105056a | 472 | void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth); |
f9830efd | 473 | |
df44348d MD |
474 | /* |
475 | * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are | |
476 | * available, then we support hash table item accounting. | |
477 | * In the unfortunate event that the number of CPUs reported is
478 | * inaccurate, we use modulo arithmetic on the number of CPUs we got. | |
479 | */ | |
df44348d MD |
480 | #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) |
481 | ||
f8994aee | 482 | static |
4105056a | 483 | void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size, |
f8994aee MD |
484 | unsigned long count); |
485 | ||
df44348d MD |
486 | static long nr_cpus_mask = -1; |
487 | ||
488 | static | |
489 | struct ht_items_count *alloc_per_cpu_items_count(void) | |
490 | { | |
491 | struct ht_items_count *count; | |
492 | ||
493 | switch (nr_cpus_mask) { | |
494 | case -2: | |
495 | return NULL; | |
496 | case -1: | |
497 | { | |
498 | long maxcpus; | |
499 | ||
500 | maxcpus = sysconf(_SC_NPROCESSORS_CONF); | |
501 | if (maxcpus <= 0) { | |
502 | nr_cpus_mask = -2; | |
503 | return NULL; | |
504 | } | |
505 | /* | |
506 | * round up number of CPUs to next power of two, so we | |
507 | * can use & for modulo. | |
508 | */ | |
509 | maxcpus = 1UL << get_count_order_ulong(maxcpus); | |
510 | nr_cpus_mask = maxcpus - 1; | |
511 | } | |
512 | /* Fall-through */ | |
513 | default: | |
514 | return calloc(nr_cpus_mask + 1, sizeof(*count)); | |
515 | } | |
516 | } | |
517 | ||
518 | static | |
519 | void free_per_cpu_items_count(struct ht_items_count *count) | |
520 | { | |
98808fb1 | 521 | poison_free(count); |
df44348d MD |
522 | } |
523 | ||
524 | static | |
525 | int ht_get_cpu(void) | |
526 | { | |
527 | int cpu; | |
528 | ||
529 | assert(nr_cpus_mask >= 0); | |
530 | cpu = sched_getcpu(); | |
531 | if (unlikely(cpu < 0)) | |
532 | return cpu; | |
533 | else | |
534 | return cpu & nr_cpus_mask; | |
535 | } | |
536 | ||
537 | static | |
4105056a | 538 | void ht_count_add(struct cds_lfht *ht, unsigned long size) |
df44348d | 539 | { |
3171717f | 540 | unsigned long percpu_count; |
df44348d MD |
541 | int cpu; |
542 | ||
543 | if (unlikely(!ht->percpu_count)) | |
3171717f | 544 | return; |
df44348d MD |
545 | cpu = ht_get_cpu(); |
546 | if (unlikely(cpu < 0)) | |
3171717f MD |
547 | return; |
548 | percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1); | |
df44348d MD |
549 | if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) { |
550 | unsigned long count; | |
551 | ||
552 | dbg_printf("add percpu %lu\n", percpu_count); | |
553 | count = uatomic_add_return(&ht->count, | |
554 | 1UL << COUNT_COMMIT_ORDER); | |
555 | /* If power of 2 */ | |
556 | if (!(count & (count - 1))) { | |
4105056a | 557 | if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size) |
f8994aee MD |
558 | return; |
559 | dbg_printf("add set global %lu\n", count); | |
4105056a | 560 | cds_lfht_resize_lazy_count(ht, size, |
6ea6bc67 | 561 | count >> (CHAIN_LEN_TARGET - 1)); |
df44348d MD |
562 | } |
563 | } | |
564 | } | |
565 | ||
566 | static | |
4105056a | 567 | void ht_count_remove(struct cds_lfht *ht, unsigned long size) |
df44348d MD |
568 | { |
569 | unsigned long percpu_count; | |
3171717f | 570 | int cpu; |
df44348d | 571 | |
3171717f MD |
572 | if (unlikely(!ht->percpu_count)) |
573 | return; | |
574 | cpu = ht_get_cpu(); | |
575 | if (unlikely(cpu < 0)) | |
576 | return; | |
577 | percpu_count = uatomic_add_return(&ht->percpu_count[cpu].remove, -1); | |
df44348d MD |
578 | if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) { |
579 | unsigned long count; | |
580 | ||
581 | dbg_printf("remove percpu %lu\n", percpu_count); | |
582 | count = uatomic_add_return(&ht->count, | |
3171717f | 583 | -(1UL << COUNT_COMMIT_ORDER)); |
df44348d MD |
584 | /* If power of 2 */ |
585 | if (!(count & (count - 1))) { | |
4105056a | 586 | if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size) |
f8994aee MD |
587 | return; |
588 | dbg_printf("remove set global %lu\n", count); | |
4105056a | 589 | cds_lfht_resize_lazy_count(ht, size, |
6ea6bc67 | 590 | count >> (CHAIN_LEN_TARGET - 1)); |
df44348d MD |
591 | } |
592 | } | |
593 | } | |
594 | ||
595 | #else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */ | |
596 | ||
597 | static const long nr_cpus_mask = -1; | |
598 | ||
599 | static | |
600 | struct ht_items_count *alloc_per_cpu_items_count(void) | |
601 | { | |
602 | return NULL; | |
603 | } | |
604 | ||
605 | static | |
606 | void free_per_cpu_items_count(struct ht_items_count *count) | |
607 | { | |
608 | } | |
609 | ||
610 | static | |
4105056a | 611 | void ht_count_add(struct cds_lfht *ht, unsigned long size) |
df44348d MD |
612 | { |
613 | } | |
614 | ||
615 | static | |
4105056a | 616 | void ht_count_remove(struct cds_lfht *ht, unsigned long size) |
df44348d MD |
617 | { |
618 | } | |
619 | ||
620 | #endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */ | |
621 | ||
622 | ||
f9830efd | 623 | static |
4105056a | 624 | void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len) |
f9830efd | 625 | { |
f8994aee MD |
626 | unsigned long count; |
627 | ||
b8af5011 MD |
628 | if (!(ht->flags & CDS_LFHT_AUTO_RESIZE)) |
629 | return; | |
f8994aee MD |
630 | count = uatomic_read(&ht->count); |
631 | /* | |
632 | * Use the bucket-local chain length for small-table expansion and for
633 | * environments lacking per-cpu data support. | |
634 | */ | |
635 | if (count >= (1UL << COUNT_COMMIT_ORDER)) | |
636 | return; | |
24365af7 | 637 | if (chain_len > 100) |
f0c29ed7 | 638 | dbg_printf("WARNING: large chain length: %u.\n", |
24365af7 | 639 | chain_len); |
3390d470 | 640 | if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD) |
4105056a | 641 | cds_lfht_resize_lazy(ht, size, |
01370f0b | 642 | get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1))); |
f9830efd MD |
643 | } |
644 | ||
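/*
 * Editor's worked example (not part of the original file): with
 * CHAIN_LEN_TARGET = 1, a detected chain length of 3 passes
 * get_count_order_u32(3) = 2 as the growth order, i.e. it requests a
 * target size of size << 2 (4x) through cds_lfht_resize_lazy().
 */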
abc490a1 | 645 | static |
14044b37 | 646 | struct cds_lfht_node *clear_flag(struct cds_lfht_node *node) |
abc490a1 | 647 | { |
14044b37 | 648 | return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK); |
abc490a1 MD |
649 | } |
650 | ||
651 | static | |
14044b37 | 652 | int is_removed(struct cds_lfht_node *node) |
abc490a1 | 653 | { |
d37166c6 | 654 | return ((unsigned long) node) & REMOVED_FLAG; |
abc490a1 MD |
655 | } |
656 | ||
657 | static | |
14044b37 | 658 | struct cds_lfht_node *flag_removed(struct cds_lfht_node *node) |
abc490a1 | 659 | { |
14044b37 | 660 | return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG); |
abc490a1 MD |
661 | } |
662 | ||
f5596c94 | 663 | static |
14044b37 | 664 | int is_dummy(struct cds_lfht_node *node) |
f5596c94 MD |
665 | { |
666 | return ((unsigned long) node) & DUMMY_FLAG; | |
667 | } | |
668 | ||
669 | static | |
14044b37 | 670 | struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node) |
f5596c94 | 671 | { |
14044b37 | 672 | return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG); |
f5596c94 | 673 | } |
bb7b2f26 MD |
674 | |
675 | static | |
676 | struct cds_lfht_node *get_end(void) | |
677 | { | |
678 | return (struct cds_lfht_node *) END_VALUE; | |
679 | } | |
680 | ||
681 | static | |
682 | int is_end(struct cds_lfht_node *node) | |
683 | { | |
684 | return clear_flag(node) == (struct cds_lfht_node *) END_VALUE; | |
685 | } | |
686 | ||
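/*
 * Editor's sketch (not part of the original file): the add, remove and
 * lookup paths below all locate a bucket's dummy node with the same
 * index arithmetic, gathered here for illustration.
 */
static __attribute__((unused))
struct _cds_lfht_node *example_bucket_of(struct cds_lfht *ht,
		unsigned long hash, unsigned long size)
{
	unsigned long index, order;

	index = hash & (size - 1);	/* bucket index; size is a power of 2 */
	order = get_count_order_ulong(index + 1);	/* which order table */
	/* offset of the dummy node within its order table */
	return &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
}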
abc490a1 | 687 | static |
f9830efd | 688 | unsigned long _uatomic_max(unsigned long *ptr, unsigned long v) |
abc490a1 MD |
689 | { |
690 | unsigned long old1, old2; | |
691 | ||
692 | old1 = uatomic_read(ptr); | |
693 | do { | |
694 | old2 = old1; | |
695 | if (old2 >= v) | |
f9830efd | 696 | return old2; |
abc490a1 | 697 | } while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2); |
f9830efd | 698 | return v; |
abc490a1 MD |
699 | } |
700 | ||
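/*
 * Editor's note (not part of the original file): _uatomic_max()
 * atomically raises *ptr to v if v is larger, returning the resulting
 * maximum: with *ptr == 4, _uatomic_max(ptr, 8) stores and returns 8;
 * with *ptr == 16, it stores nothing and returns 16.
 */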
1475579c MD |
701 | static |
702 | void cds_lfht_free_level(struct rcu_head *head) | |
703 | { | |
704 | struct rcu_level *l = | |
705 | caa_container_of(head, struct rcu_level, head); | |
98808fb1 | 706 | poison_free(l); |
1475579c MD |
707 | } |
708 | ||
273399de MD |
709 | /* |
710 | * Remove all logically deleted nodes from a bucket up to a certain node key. | |
711 | */ | |
712 | static | |
f9c80341 | 713 | void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node) |
273399de | 714 | { |
14044b37 | 715 | struct cds_lfht_node *iter_prev, *iter, *next, *new_next; |
273399de | 716 | |
c90201ac MD |
717 | assert(!is_dummy(dummy)); |
718 | assert(!is_removed(dummy)); | |
719 | assert(!is_dummy(node)); | |
720 | assert(!is_removed(node)); | |
273399de MD |
721 | for (;;) { |
722 | iter_prev = dummy; | |
723 | /* We can always skip the dummy node initially */ | |
cc4fcb10 MD |
724 | iter = rcu_dereference(iter_prev->p.next); |
725 | assert(iter_prev->p.reverse_hash <= node->p.reverse_hash); | |
bd4db153 MD |
726 | /* |
727 | * We should never be called with dummy (start of chain) | |
728 | * and logically removed node (end of path compression | |
729 | * marker) being the actual same node. This would be a | |
730 | * bug in the algorithm implementation. | |
731 | */ | |
732 | assert(dummy != node); | |
273399de | 733 | for (;;) { |
bb7b2f26 | 734 | if (unlikely(is_end(iter))) |
f9c80341 | 735 | return; |
76412f24 | 736 | if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)) |
f9c80341 | 737 | return; |
cc4fcb10 | 738 | next = rcu_dereference(clear_flag(iter)->p.next); |
76412f24 | 739 | if (likely(is_removed(next))) |
273399de | 740 | break; |
b453eae1 | 741 | iter_prev = clear_flag(iter); |
273399de MD |
742 | iter = next; |
743 | } | |
744 | assert(!is_removed(iter)); | |
f5596c94 MD |
745 | if (is_dummy(iter)) |
746 | new_next = flag_dummy(clear_flag(next)); | |
747 | else | |
748 | new_next = clear_flag(next); | |
749 | (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next); | |
273399de | 750 | } |
f9c80341 | 751 | return; |
273399de MD |
752 | } |
753 | ||
abc490a1 | 754 | static |
4105056a MD |
755 | struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, |
756 | unsigned long size, | |
757 | struct cds_lfht_node *node, | |
758 | int unique, int dummy) | |
abc490a1 | 759 | { |
14044b37 | 760 | struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next, |
f5596c94 | 761 | *dummy_node; |
14044b37 | 762 | struct _cds_lfht_node *lookup; |
24365af7 | 763 | unsigned long hash, index, order; |
abc490a1 | 764 | |
c90201ac MD |
765 | assert(!is_dummy(node)); |
766 | assert(!is_removed(node)); | |
4105056a | 767 | if (!size) { |
f5596c94 | 768 | assert(dummy); |
bb7b2f26 | 769 | node->p.next = flag_dummy(get_end()); |
18117871 MD |
770 | return node; /* Initial first add (head) */ |
771 | } | |
cc4fcb10 | 772 | hash = bit_reverse_ulong(node->p.reverse_hash); |
abc490a1 | 773 | for (;;) { |
f9830efd | 774 | uint32_t chain_len = 0; |
abc490a1 | 775 | |
11519af6 MD |
776 | /* |
777 | * iter_prev points to the non-removed node prior to the | |
778 | * insert location. | |
11519af6 | 779 | */ |
4105056a | 780 | index = hash & (size - 1); |
24365af7 | 781 | order = get_count_order_ulong(index + 1); |
4105056a | 782 | lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
14044b37 | 783 | iter_prev = (struct cds_lfht_node *) lookup; |
11519af6 | 784 | /* We can always skip the dummy node initially */ |
cc4fcb10 MD |
785 | iter = rcu_dereference(iter_prev->p.next); |
786 | assert(iter_prev->p.reverse_hash <= node->p.reverse_hash); | |
abc490a1 | 787 | for (;;) { |
bb7b2f26 | 788 | if (unlikely(is_end(iter))) |
273399de | 789 | goto insert; |
76412f24 | 790 | if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)) |
273399de | 791 | goto insert; |
cc4fcb10 | 792 | next = rcu_dereference(clear_flag(iter)->p.next); |
76412f24 | 793 | if (unlikely(is_removed(next))) |
9dba85be | 794 | goto gc_node; |
e43f23f8 | 795 | if (unique |
1b81fe1a | 796 | && !is_dummy(next) |
e43f23f8 MD |
797 | && !ht->compare_fct(node->key, node->key_len, |
798 | clear_flag(iter)->key, | |
799 | clear_flag(iter)->key_len)) | |
18117871 | 800 | return clear_flag(iter); |
11519af6 | 801 | /* Only account for identical reverse hash once */ |
24365af7 MD |
802 | if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash |
803 | && !is_dummy(next)) | |
4105056a | 804 | check_resize(ht, size, ++chain_len); |
11519af6 | 805 | iter_prev = clear_flag(iter); |
273399de | 806 | iter = next; |
abc490a1 | 807 | } |
273399de | 808 | insert: |
7ec59d3b | 809 | assert(node != clear_flag(iter)); |
11519af6 | 810 | assert(!is_removed(iter_prev)); |
c90201ac | 811 | assert(!is_removed(iter)); |
f000907d | 812 | assert(iter_prev != node); |
f9c80341 | 813 | if (!dummy) |
1b81fe1a | 814 | node->p.next = clear_flag(iter); |
f9c80341 MD |
815 | else |
816 | node->p.next = flag_dummy(clear_flag(iter)); | |
f5596c94 MD |
817 | if (is_dummy(iter)) |
818 | new_node = flag_dummy(node); | |
819 | else | |
820 | new_node = node; | |
cc4fcb10 | 821 | if (uatomic_cmpxchg(&iter_prev->p.next, iter, |
f5596c94 | 822 | new_node) != iter) |
273399de | 823 | continue; /* retry */ |
11519af6 | 824 | else |
273399de | 825 | goto gc_end; |
9dba85be MD |
826 | gc_node: |
827 | assert(!is_removed(iter)); | |
f5596c94 MD |
828 | if (is_dummy(iter)) |
829 | new_next = flag_dummy(clear_flag(next)); | |
830 | else | |
831 | new_next = clear_flag(next); | |
832 | (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next); | |
273399de | 833 | /* retry */ |
464a1ec9 | 834 | } |
273399de MD |
835 | gc_end: |
836 | /* Garbage collect logically removed nodes in the bucket */ | |
4105056a | 837 | index = hash & (size - 1); |
24365af7 | 838 | order = get_count_order_ulong(index + 1); |
4105056a | 839 | lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))]; |
14044b37 | 840 | dummy_node = (struct cds_lfht_node *) lookup; |
f9c80341 | 841 | _cds_lfht_gc_bucket(dummy_node, node); |
18117871 | 842 | return node; |
abc490a1 | 843 | } |
464a1ec9 | 844 | |
abc490a1 | 845 | static |
4105056a MD |
846 | int _cds_lfht_remove(struct cds_lfht *ht, unsigned long size, |
847 | struct cds_lfht_node *node, | |
848 | int dummy_removal) | |
abc490a1 | 849 | { |
14044b37 MD |
850 | struct cds_lfht_node *dummy, *next, *old; |
851 | struct _cds_lfht_node *lookup; | |
abc490a1 | 852 | int flagged = 0; |
24365af7 | 853 | unsigned long hash, index, order; |
5e28c532 | 854 | |
7ec59d3b | 855 | /* logically delete the node */ |
c90201ac MD |
856 | assert(!is_dummy(node)); |
857 | assert(!is_removed(node)); | |
cc4fcb10 | 858 | old = rcu_dereference(node->p.next); |
7ec59d3b MD |
859 | do { |
860 | next = old; | |
76412f24 | 861 | if (unlikely(is_removed(next))) |
7ec59d3b | 862 | goto end; |
1475579c MD |
863 | if (dummy_removal) |
864 | assert(is_dummy(next)); | |
865 | else | |
866 | assert(!is_dummy(next)); | |
cc4fcb10 | 867 | old = uatomic_cmpxchg(&node->p.next, next, |
7ec59d3b MD |
868 | flag_removed(next)); |
869 | } while (old != next); | |
870 | ||
871 | /* We performed the (logical) deletion. */ | |
872 | flagged = 1; | |
873 | ||
874 | /* | |
875 | * Ensure that the node is not visible to readers anymore: lookup for | |
273399de MD |
876 | * the node, and remove it (along with any other logically removed node) |
877 | * if found. | |
11519af6 | 878 | */ |
cc4fcb10 | 879 | hash = bit_reverse_ulong(node->p.reverse_hash); |
4105056a MD |
880 | assert(size > 0); |
881 | index = hash & (size - 1); | |
24365af7 | 882 | order = get_count_order_ulong(index + 1); |
4105056a | 883 | lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))]; |
14044b37 | 884 | dummy = (struct cds_lfht_node *) lookup; |
f9c80341 | 885 | _cds_lfht_gc_bucket(dummy, node); |
2ed95849 | 886 | end: |
11519af6 MD |
887 | /* |
888 | * Only the flagging action indicated that we (and no other) | |
889 | * removed the node from the hash. | |
890 | */ | |
7ec59d3b | 891 | if (flagged) { |
cc4fcb10 | 892 | assert(is_removed(rcu_dereference(node->p.next))); |
11519af6 | 893 | return 0; |
7ec59d3b | 894 | } else |
11519af6 | 895 | return -ENOENT; |
abc490a1 | 896 | } |
2ed95849 | 897 | |
b7d619b0 MD |
898 | static |
899 | void *partition_resize_thread(void *arg) | |
900 | { | |
901 | struct partition_resize_work *work = arg; | |
902 | ||
903 | work->ht->cds_lfht_rcu_register_thread(); | |
904 | work->fct(work->ht, work->i, work->start, work->len); | |
905 | work->ht->cds_lfht_rcu_unregister_thread(); | |
906 | return NULL; | |
907 | } | |
908 | ||
909 | static | |
910 | void partition_resize_helper(struct cds_lfht *ht, unsigned long i, | |
911 | unsigned long len, | |
912 | void (*fct)(struct cds_lfht *ht, unsigned long i, | |
913 | unsigned long start, unsigned long len)) | |
914 | { | |
915 | unsigned long partition_len; | |
916 | struct partition_resize_work *work; | |
917 | int cpu, ret; | |
918 | pthread_t *thread_id; | |
919 | ||
920 | /* Note: nr_cpus_mask + 1 is always power of 2 */ | |
921 | partition_len = len >> get_count_order_ulong(nr_cpus_mask + 1); | |
922 | work = calloc(nr_cpus_mask + 1, sizeof(*work)); | |
923 | thread_id = calloc(nr_cpus_mask + 1, sizeof(*thread_id)); | |
924 | assert(work); | |
925 | for (cpu = 0; cpu < nr_cpus_mask + 1; cpu++) { | |
926 | work[cpu].ht = ht; | |
927 | work[cpu].i = i; | |
928 | work[cpu].len = partition_len; | |
929 | work[cpu].start = cpu * partition_len; | |
930 | work[cpu].fct = fct; | |
931 | ret = pthread_create(&thread_id[cpu], ht->resize_attr, | |
932 | partition_resize_thread, &work[cpu]); | |
933 | assert(!ret); | |
934 | } | |
935 | for (cpu = 0; cpu < nr_cpus_mask + 1; cpu++) { | |
936 | ret = pthread_join(thread_id[cpu], NULL); | |
937 | assert(!ret); | |
938 | } | |
939 | free(work); | |
940 | free(thread_id); | |
941 | } | |
942 | ||
e8de508e MD |
943 | /* |
944 | * Holding RCU read lock to protect _cds_lfht_add against memory | |
945 | * reclaim that could be performed by other call_rcu worker threads (ABA | |
946 | * problem). | |
9ee0fc9a | 947 | * |
b7d619b0 | 948 | * When we reach a certain length, we can split this population phase over |
9ee0fc9a MD |
949 | * many worker threads, based on the number of CPUs available in the system. |
950 | * This should therefore take care of not having the expand lagging behind too | |
951 | * many concurrent insertion threads by using the scheduler's ability to | |
952 | * schedule dummy node population fairly with insertions. | |
e8de508e | 953 | */ |
4105056a | 954 | static |
b7d619b0 MD |
955 | void init_table_populate_partition(struct cds_lfht *ht, unsigned long i, |
956 | unsigned long start, unsigned long len) | |
4105056a MD |
957 | { |
958 | unsigned long j; | |
959 | ||
960 | ht->cds_lfht_rcu_read_lock(); | |
b7d619b0 | 961 | for (j = start; j < start + len; j++) { |
4105056a MD |
962 | struct cds_lfht_node *new_node = |
963 | (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j]; | |
964 | ||
dc1da8f6 | 965 | dbg_printf("init populate: i %lu j %lu hash %lu\n", |
4105056a | 966 | i, j, !i ? 0 : (1UL << (i - 1)) + j); |
dc1da8f6 MD |
967 | new_node->p.reverse_hash = |
968 | bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j); | |
4105056a MD |
969 | (void) _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)), |
970 | new_node, 0, 1); | |
971 | if (CMM_LOAD_SHARED(ht->in_progress_destroy)) | |
972 | break; | |
973 | } | |
974 | ht->cds_lfht_rcu_read_unlock(); | |
b7d619b0 MD |
975 | } |
976 | ||
977 | static | |
978 | void init_table_populate(struct cds_lfht *ht, unsigned long i, | |
979 | unsigned long len) | |
980 | { | |
981 | assert(nr_cpus_mask != -1); | |
982 | if (nr_cpus_mask < 0 || len < (nr_cpus_mask + 1) * MIN_PARTITION_PER_THREAD) { | |
983 | ht->cds_lfht_rcu_thread_online(); | |
984 | init_table_populate_partition(ht, i, 0, len); | |
985 | ht->cds_lfht_rcu_thread_offline(); | |
986 | return; | |
987 | } | |
988 | partition_resize_helper(ht, i, len, init_table_populate_partition); | |
4105056a MD |
989 | } |
990 | ||
abc490a1 | 991 | static |
4105056a | 992 | void init_table(struct cds_lfht *ht, |
24365af7 MD |
993 | unsigned long first_order, unsigned long len_order) |
994 | { | |
995 | unsigned long i, end_order; | |
996 | ||
f0c29ed7 | 997 | dbg_printf("init table: first_order %lu end_order %lu\n", |
24365af7 MD |
998 | first_order, first_order + len_order); |
999 | end_order = first_order + len_order; | |
24365af7 | 1000 | for (i = first_order; i < end_order; i++) { |
4105056a | 1001 | unsigned long len; |
24365af7 MD |
1002 | |
1003 | len = !i ? 1 : 1UL << (i - 1); | |
f0c29ed7 | 1004 | dbg_printf("init order %lu len: %lu\n", i, len); |
4d676753 MD |
1005 | |
1006 | /* Stop expand if the resize target changes under us */ | |
1007 | if (CMM_LOAD_SHARED(ht->t.resize_target) < (!i ? 1 : (1UL << i))) | |
1008 | break; | |
1009 | ||
4105056a | 1010 | ht->t.tbl[i] = calloc(1, sizeof(struct rcu_level) |
1475579c | 1011 | + (len * sizeof(struct _cds_lfht_node))); |
b7d619b0 | 1012 | assert(ht->t.tbl[i]); |
4105056a | 1013 | |
4105056a | 1014 | /* |
dc1da8f6 MD |
1015 | * Set all dummy nodes reverse hash values for a level and |
1016 | * link all dummy nodes into the table. | |
4105056a | 1017 | */ |
dc1da8f6 | 1018 | init_table_populate(ht, i, len); |
4105056a | 1019 | |
f9c80341 MD |
1020 | /* |
1021 | * Update table size. | |
1022 | */ | |
1023 | cmm_smp_wmb(); /* populate data before RCU size */ | |
1024 | CMM_STORE_SHARED(ht->t.size, !i ? 1 : (1UL << i)); | |
1025 | ||
4105056a MD |
1026 | dbg_printf("init new size: %lu\n", !i ? 1 : (1UL << i)); |
1027 | if (CMM_LOAD_SHARED(ht->in_progress_destroy)) | |
1028 | break; | |
1029 | } | |
1030 | } | |
1031 | ||
e8de508e MD |
1032 | /* |
1033 | * Holding RCU read lock to protect _cds_lfht_remove against memory | |
1034 | * reclaim that could be performed by other call_rcu worker threads (ABA | |
1035 | * problem). | |
1036 | * For a single level, we logically remove and garbage collect each node. | |
1037 | * | |
1038 | * As a design choice, we perform logical removal and garbage collection on a | |
1039 | * node-per-node basis to simplify this algorithm. We also assume keeping good | |
1041 | * cache locality of the operation would outweigh the possible performance gain
1041 | * that could be achieved by batching garbage collection for multiple levels. | |
1042 | * However, this would have to be justified by benchmarks. | |
1043 | * | |
1044 | * Concurrent removal and add operations are helping us perform garbage | |
1045 | * collection of logically removed nodes. We guarantee that all logically | |
1046 | * removed nodes have been garbage-collected (unlinked) before call_rcu is | |
1048 | * invoked to free a whole level of dummy nodes (after a grace period).
1048 | * | |
1049 | * Logical removal and garbage collection can therefore be done in batch or on a | |
1050 | * node-per-node basis, as long as the guarantee above holds. | |
9ee0fc9a | 1051 | * |
b7d619b0 MD |
1052 | * When we reach a certain length, we can split this removal over many worker |
1053 | * threads, based on the number of CPUs available in the system. This should | |
1054 | * take care of not letting the resize process lag behind too many concurrent
9ee0fc9a | 1055 | * updater threads actively inserting into the hash table. |
e8de508e | 1056 | */ |
4105056a | 1057 | static |
b7d619b0 MD |
1058 | void remove_table_partition(struct cds_lfht *ht, unsigned long i, |
1059 | unsigned long start, unsigned long len) | |
4105056a MD |
1060 | { |
1061 | unsigned long j; | |
1062 | ||
1063 | ht->cds_lfht_rcu_read_lock(); | |
b7d619b0 | 1064 | for (j = start; j < start + len; j++) { |
4105056a MD |
1065 | struct cds_lfht_node *fini_node = |
1066 | (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j]; | |
1067 | ||
1068 | dbg_printf("remove entry: i %lu j %lu hash %lu\n", | |
1069 | i, j, !i ? 0 : (1UL << (i - 1)) + j); | |
1070 | fini_node->p.reverse_hash = | |
1071 | bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j); | |
1072 | (void) _cds_lfht_remove(ht, !i ? 0 : (1UL << (i - 1)), | |
1073 | fini_node, 1); | |
33c7c748 MD |
1074 | if (CMM_LOAD_SHARED(ht->in_progress_destroy)) |
1075 | break; | |
abc490a1 | 1076 | } |
4105056a | 1077 | ht->cds_lfht_rcu_read_unlock(); |
b7d619b0 MD |
1078 | } |
1079 | ||
1080 | static | |
1081 | void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len) | |
1082 | { | |
1083 | ||
1084 | assert(nr_cpus_mask != -1); | |
1085 | if (nr_cpus_mask < 0 || len < (nr_cpus_mask + 1) * MIN_PARTITION_PER_THREAD) { | |
1086 | ht->cds_lfht_rcu_thread_online(); | |
1087 | remove_table_partition(ht, i, 0, len); | |
1088 | ht->cds_lfht_rcu_thread_offline(); | |
1089 | return; | |
1090 | } | |
1091 | partition_resize_helper(ht, i, len, remove_table_partition); | |
2ed95849 MD |
1092 | } |
1093 | ||
1475579c | 1094 | static |
4105056a | 1095 | void fini_table(struct cds_lfht *ht, |
1475579c MD |
1096 | unsigned long first_order, unsigned long len_order) |
1097 | { | |
1098 | long i, end_order; | |
1099 | ||
1100 | dbg_printf("fini table: first_order %lu end_order %lu\n", | |
1101 | first_order, first_order + len_order); | |
1102 | end_order = first_order + len_order; | |
1103 | assert(first_order > 0); | |
1475579c | 1104 | for (i = end_order - 1; i >= first_order; i--) { |
4105056a | 1105 | unsigned long len; |
1475579c MD |
1106 | |
1107 | len = !i ? 1 : 1UL << (i - 1); | |
1108 | dbg_printf("fini order %lu len: %lu\n", i, len); | |
4105056a | 1109 | |
4d676753 MD |
1110 | /* Stop shrink if the resize target changes under us */ |
1111 | if (CMM_LOAD_SHARED(ht->t.resize_target) > (1UL << (i - 1))) | |
1112 | break; | |
1113 | ||
1114 | cmm_smp_wmb(); /* populate data before RCU size */ | |
1115 | CMM_STORE_SHARED(ht->t.size, 1UL << (i - 1)); | |
1116 | ||
1117 | /* | |
1118 | * We need to wait for all add operations to reach Q.S. (and | |
1119 | * thus use the new table for lookups) before we can start | |
1120 | * releasing the old dummy nodes. Otherwise their lookup will | |
1121 | * return a logically removed node as insert position. | |
1122 | */ | |
1123 | ht->cds_lfht_synchronize_rcu(); | |
1124 | ||
21263e21 | 1125 | /* |
4105056a MD |
1126 | * Set "removed" flag in dummy nodes about to be removed. |
1127 | * Unlink all now-logically-removed dummy node pointers. | |
1128 | * Concurrent add/remove operation are helping us doing | |
1129 | * the gc. | |
21263e21 | 1130 | */ |
4105056a MD |
1131 | remove_table(ht, i, len); |
1132 | ||
1133 | ht->cds_lfht_call_rcu(&ht->t.tbl[i]->head, cds_lfht_free_level); | |
1134 | ||
1135 | dbg_printf("fini new size: %lu\n", 1UL << i); | |
1475579c MD |
1136 | if (CMM_LOAD_SHARED(ht->in_progress_destroy)) |
1137 | break; | |
1138 | } | |
1475579c MD |
1139 | } |
1140 | ||
7a9dcf9b | 1141 | struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct, |
14044b37 MD |
1142 | cds_lfht_compare_fct compare_fct, |
1143 | unsigned long hash_seed, | |
1144 | unsigned long init_size, | |
b8af5011 | 1145 | int flags, |
14044b37 | 1146 | void (*cds_lfht_call_rcu)(struct rcu_head *head, |
1475579c | 1147 | void (*func)(struct rcu_head *head)), |
01dbfa62 MD |
1148 | void (*cds_lfht_synchronize_rcu)(void), |
1149 | void (*cds_lfht_rcu_read_lock)(void), | |
5f511391 MD |
1150 | void (*cds_lfht_rcu_read_unlock)(void), |
1151 | void (*cds_lfht_rcu_thread_offline)(void), | |
b7d619b0 MD |
1152 | void (*cds_lfht_rcu_thread_online)(void), |
1153 | void (*cds_lfht_rcu_register_thread)(void), | |
1154 | void (*cds_lfht_rcu_unregister_thread)(void), | |
1155 | pthread_attr_t *attr) | |
abc490a1 | 1156 | { |
14044b37 | 1157 | struct cds_lfht *ht; |
24365af7 | 1158 | unsigned long order; |
abc490a1 | 1159 | |
8129be4e | 1160 | /* init_size must be power of two */ |
49619ea0 | 1161 | if (init_size && (init_size & (init_size - 1))) |
8129be4e | 1162 | return NULL; |
14044b37 | 1163 | ht = calloc(1, sizeof(struct cds_lfht)); |
b7d619b0 | 1164 | assert(ht); |
abc490a1 | 1165 | ht->hash_fct = hash_fct; |
732ad076 MD |
1166 | ht->compare_fct = compare_fct; |
1167 | ht->hash_seed = hash_seed; | |
14044b37 | 1168 | ht->cds_lfht_call_rcu = cds_lfht_call_rcu; |
1475579c | 1169 | ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu; |
01dbfa62 MD |
1170 | ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock; |
1171 | ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock; | |
5f511391 MD |
1172 | ht->cds_lfht_rcu_thread_offline = cds_lfht_rcu_thread_offline; |
1173 | ht->cds_lfht_rcu_thread_online = cds_lfht_rcu_thread_online; | |
b7d619b0 MD |
1174 | ht->cds_lfht_rcu_register_thread = cds_lfht_rcu_register_thread; |
1175 | ht->cds_lfht_rcu_unregister_thread = cds_lfht_rcu_unregister_thread; | |
1176 | ht->resize_attr = attr; | |
df44348d | 1177 | ht->percpu_count = alloc_per_cpu_items_count(); |
abc490a1 MD |
1178 | /* this mutex should not nest in read-side C.S. */ |
1179 | pthread_mutex_init(&ht->resize_mutex, NULL); | |
cd95516d | 1180 | order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE)) + 1; |
b8af5011 | 1181 | ht->flags = flags; |
5f511391 | 1182 | ht->cds_lfht_rcu_thread_offline(); |
f000907d | 1183 | pthread_mutex_lock(&ht->resize_mutex); |
4d676753 | 1184 | ht->t.resize_target = 1UL << (order - 1); |
4105056a | 1185 | init_table(ht, 0, order); |
f000907d | 1186 | pthread_mutex_unlock(&ht->resize_mutex); |
5f511391 | 1187 | ht->cds_lfht_rcu_thread_online(); |
abc490a1 MD |
1188 | return ht; |
1189 | } | |
1190 | ||
14044b37 | 1191 | struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len) |
2ed95849 | 1192 | { |
bb7b2f26 | 1193 | struct cds_lfht_node *node, *next, *dummy_node; |
14044b37 | 1194 | struct _cds_lfht_node *lookup; |
4105056a | 1195 | unsigned long hash, reverse_hash, index, order, size; |
2ed95849 | 1196 | |
732ad076 | 1197 | hash = ht->hash_fct(key, key_len, ht->hash_seed); |
abc490a1 | 1198 | reverse_hash = bit_reverse_ulong(hash); |
464a1ec9 | 1199 | |
4105056a MD |
1200 | size = rcu_dereference(ht->t.size); |
1201 | index = hash & (size - 1); | |
24365af7 | 1202 | order = get_count_order_ulong(index + 1); |
4105056a | 1203 | lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
f0c29ed7 | 1204 | dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n", |
554c284e | 1205 | hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1))); |
bb7b2f26 MD |
1206 | dummy_node = (struct cds_lfht_node *) lookup; |
1207 | /* We can always skip the dummy node initially */ | |
1208 | node = rcu_dereference(dummy_node->p.next); | |
bb7b2f26 | 1209 | node = clear_flag(node); |
2ed95849 | 1210 | for (;;) { |
bb7b2f26 MD |
1211 | if (unlikely(is_end(node))) { |
1212 | node = NULL; | |
abc490a1 | 1213 | break; |
bb7b2f26 | 1214 | } |
cc4fcb10 | 1215 | if (unlikely(node->p.reverse_hash > reverse_hash)) { |
abc490a1 MD |
1216 | node = NULL; |
1217 | break; | |
2ed95849 | 1218 | } |
1b81fe1a MD |
1219 | next = rcu_dereference(node->p.next); |
1220 | if (likely(!is_removed(next)) | |
1221 | && !is_dummy(next) | |
49c2e2d6 | 1222 | && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) { |
273399de | 1223 | break; |
2ed95849 | 1224 | } |
1b81fe1a | 1225 | node = clear_flag(next); |
2ed95849 | 1226 | } |
1b81fe1a | 1227 | assert(!node || !is_dummy(rcu_dereference(node->p.next))); |
abc490a1 MD |
1228 | return node; |
1229 | } | |
e0ba718a | 1230 | |
a481e5ff MD |
1231 | struct cds_lfht_node *cds_lfht_next(struct cds_lfht *ht, |
1232 | struct cds_lfht_node *node) | |
1233 | { | |
1234 | struct cds_lfht_node *next; | |
1235 | unsigned long reverse_hash; | |
1236 | void *key; | |
1237 | size_t key_len; | |
1238 | ||
1239 | reverse_hash = node->p.reverse_hash; | |
1240 | key = node->key; | |
1241 | key_len = node->key_len; | |
1242 | next = rcu_dereference(node->p.next); | |
1243 | node = clear_flag(next); | |
1244 | ||
1245 | for (;;) { | |
bb7b2f26 MD |
1246 | if (unlikely(is_end(node))) { |
1247 | node = NULL; | |
a481e5ff | 1248 | break; |
bb7b2f26 | 1249 | } |
a481e5ff MD |
1250 | if (unlikely(node->p.reverse_hash > reverse_hash)) { |
1251 | node = NULL; | |
1252 | break; | |
1253 | } | |
1254 | next = rcu_dereference(node->p.next); | |
1255 | if (likely(!is_removed(next)) | |
1256 | && !is_dummy(next) | |
1257 | && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) { | |
1258 | break; | |
1259 | } | |
1260 | node = clear_flag(next); | |
1261 | } | |
1262 | assert(!node || !is_dummy(rcu_dereference(node->p.next))); | |
1263 | return node; | |
1264 | } | |
1265 | ||
14044b37 | 1266 | void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node) |
abc490a1 | 1267 | { |
4105056a | 1268 | unsigned long hash, size; |
ab7d5fc6 | 1269 | |
49c2e2d6 | 1270 | hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed); |
cc4fcb10 | 1271 | node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash); |
2ed95849 | 1272 | |
4105056a MD |
1273 | size = rcu_dereference(ht->t.size); |
1274 | (void) _cds_lfht_add(ht, size, node, 0, 0); | |
1275 | ht_count_add(ht, size); | |
3eca1b8c MD |
1276 | } |
1277 | ||
14044b37 MD |
1278 | struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht, |
1279 | struct cds_lfht_node *node) | |
3eca1b8c | 1280 | { |
4105056a | 1281 | unsigned long hash, size; |
df44348d | 1282 | struct cds_lfht_node *ret; |
3eca1b8c | 1283 | |
49c2e2d6 | 1284 | hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed); |
cc4fcb10 | 1285 | node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash); |
3eca1b8c | 1286 | |
4105056a MD |
1287 | size = rcu_dereference(ht->t.size); |
1288 | ret = _cds_lfht_add(ht, size, node, 1, 0); | |
17f31d1b | 1289 | if (ret == node) |
4105056a | 1290 | ht_count_add(ht, size); |
df44348d | 1291 | return ret; |
2ed95849 MD |
1292 | } |
1293 | ||
14044b37 | 1294 | int cds_lfht_remove(struct cds_lfht *ht, struct cds_lfht_node *node) |
2ed95849 | 1295 | { |
4105056a | 1296 | unsigned long size; |
df44348d | 1297 | int ret; |
abc490a1 | 1298 | |
4105056a MD |
1299 | size = rcu_dereference(ht->t.size); |
1300 | ret = _cds_lfht_remove(ht, size, node, 0); | |
df44348d | 1301 | if (!ret) |
4105056a | 1302 | ht_count_remove(ht, size); |
df44348d | 1303 | return ret; |
2ed95849 | 1304 | } |
ab7d5fc6 | 1305 | |
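/*
 * Editor's usage sketch (not part of the original file): lookup and
 * removal must happen within one RCU read-side critical section, and
 * reclaim must be deferred by a grace period, as the header comment
 * requires. Assumes the public struct cds_lfht_node embeds a
 * struct rcu_head named "head"; free_node_cb is a caller-supplied
 * callback.
 */
static __attribute__((unused))
int example_lookup_and_remove(struct cds_lfht *ht, void *key, size_t key_len,
		void (*free_node_cb)(struct rcu_head *head))
{
	struct cds_lfht_node *node;
	int ret = -ENOENT;

	ht->cds_lfht_rcu_read_lock();
	node = cds_lfht_lookup(ht, key, key_len);
	if (node) {
		ret = cds_lfht_remove(ht, node);
		if (!ret)	/* we own the node: defer its reclaim */
			ht->cds_lfht_call_rcu(&node->head, free_node_cb);
	}
	ht->cds_lfht_rcu_read_unlock();
	return ret;
}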
abc490a1 | 1306 | static |
14044b37 | 1307 | int cds_lfht_delete_dummy(struct cds_lfht *ht) |
674f7a69 | 1308 | { |
14044b37 MD |
1309 | struct cds_lfht_node *node; |
1310 | struct _cds_lfht_node *lookup; | |
4105056a | 1311 | unsigned long order, i, size; |
674f7a69 | 1312 | |
abc490a1 | 1313 | /* Check that the table is empty */ |
4105056a | 1314 | lookup = &ht->t.tbl[0]->nodes[0]; |
14044b37 | 1315 | node = (struct cds_lfht_node *) lookup; |
abc490a1 | 1316 | do { |
1b81fe1a MD |
1317 | node = clear_flag(node)->p.next; |
1318 | if (!is_dummy(node)) | |
abc490a1 | 1319 | return -EPERM; |
273399de | 1320 | assert(!is_removed(node)); |
bb7b2f26 | 1321 | } while (!is_end(node)); |
4105056a MD |
1322 | /* |
1323 | * size accessed without rcu_dereference because hash table is | |
1324 | * being destroyed. | |
1325 | */ | |
1326 | size = ht->t.size; | |
abc490a1 | 1327 | /* Internal sanity check: all nodes left should be dummy */ |
4105056a | 1328 | for (order = 0; order < get_count_order_ulong(size) + 1; order++) { |
24365af7 MD |
1329 | unsigned long len; |
1330 | ||
1331 | len = !order ? 1 : 1UL << (order - 1); | |
1332 | for (i = 0; i < len; i++) { | |
f0c29ed7 | 1333 | dbg_printf("delete order %lu i %lu hash %lu\n", |
24365af7 | 1334 | order, i, |
4105056a MD |
1335 | bit_reverse_ulong(ht->t.tbl[order]->nodes[i].reverse_hash)); |
1336 | assert(is_dummy(ht->t.tbl[order]->nodes[i].next)); | |
24365af7 | 1337 | } |
4105056a | 1338 | poison_free(ht->t.tbl[order]); |
674f7a69 | 1339 | } |
abc490a1 | 1340 | return 0; |
674f7a69 MD |
1341 | } |
1342 | ||
1343 | /* | |
1344 | * Should only be called when no more concurrent readers nor writers can | |
1345 | * possibly access the table. | |
1346 | */ | |
b7d619b0 | 1347 | int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr) |
674f7a69 | 1348 | { |
5e28c532 MD |
1349 | int ret; |
1350 | ||
848d4088 | 1351 | /* Wait for in-flight resize operations to complete */ |
33c7c748 | 1352 | CMM_STORE_SHARED(ht->in_progress_destroy, 1); |
848d4088 MD |
1353 | while (uatomic_read(&ht->in_progress_resize)) |
1354 | poll(NULL, 0, 100); /* wait for 100ms */ | |
14044b37 | 1355 | ret = cds_lfht_delete_dummy(ht); |
abc490a1 MD |
1356 | if (ret) |
1357 | return ret; | |
df44348d | 1358 | free_per_cpu_items_count(ht->percpu_count); |
b7d619b0 MD |
1359 | if (attr) |
1360 | *attr = ht->resize_attr; | |
98808fb1 | 1361 | poison_free(ht); |
5e28c532 | 1362 | return ret; |
674f7a69 MD |
1363 | } |
1364 | ||
14044b37 | 1365 | void cds_lfht_count_nodes(struct cds_lfht *ht, |
273399de MD |
1366 | unsigned long *count, |
1367 | unsigned long *removed) | |
1368 | { | |
14044b37 MD |
1369 | struct cds_lfht_node *node, *next; |
1370 | struct _cds_lfht_node *lookup; | |
24365af7 | 1371 | unsigned long nr_dummy = 0; |
273399de MD |
1372 | |
1373 | *count = 0; | |
1374 | *removed = 0; | |
1375 | ||
24365af7 | 1376 | /* Count non-dummy nodes in the table */ |
4105056a | 1377 | lookup = &ht->t.tbl[0]->nodes[0]; |
14044b37 | 1378 | node = (struct cds_lfht_node *) lookup; |
273399de | 1379 | do { |
cc4fcb10 | 1380 | next = rcu_dereference(node->p.next); |
273399de | 1381 | if (is_removed(next)) { |
1b81fe1a | 1382 | assert(!is_dummy(next)); |
273399de | 1383 | (*removed)++; |
1b81fe1a | 1384 | } else if (!is_dummy(next)) |
273399de | 1385 | (*count)++; |
24365af7 MD |
1386 | else |
1387 | (nr_dummy)++; | |
273399de | 1388 | node = clear_flag(next); |
bb7b2f26 | 1389 | } while (!is_end(node)); |
f0c29ed7 | 1390 | dbg_printf("number of dummy nodes: %lu\n", nr_dummy); |
273399de MD |
1391 | } |
1392 | ||
1475579c | 1393 | /* called with resize mutex held */ |
abc490a1 | 1394 | static |
4105056a | 1395 | void _do_cds_lfht_grow(struct cds_lfht *ht, |
1475579c | 1396 | unsigned long old_size, unsigned long new_size) |
abc490a1 | 1397 | { |
1475579c | 1398 | unsigned long old_order, new_order; |
1475579c MD |
1399 | |
1400 | old_order = get_count_order_ulong(old_size) + 1; | |
1401 | new_order = get_count_order_ulong(new_size) + 1; | |
1402 | dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
1403 | old_size, old_order, new_size, new_order); | |
1475579c | 1404 | assert(new_size > old_size); |
4105056a | 1405 | init_table(ht, old_order, new_order - old_order); |
abc490a1 MD |
1406 | } |
1407 | ||
1408 | /* called with resize mutex held */ | |
1409 | static | |
4105056a | 1410 | void _do_cds_lfht_shrink(struct cds_lfht *ht, |
1475579c | 1411 | unsigned long old_size, unsigned long new_size) |
464a1ec9 | 1412 | { |
1475579c | 1413 | unsigned long old_order, new_order; |
464a1ec9 | 1414 | |
cd95516d | 1415 | new_size = max(new_size, MIN_TABLE_SIZE); |
24365af7 | 1416 | old_order = get_count_order_ulong(old_size) + 1; |
24365af7 | 1417 | new_order = get_count_order_ulong(new_size) + 1; |
df44348d | 1418 | dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
df03fab8 | 1419 | old_size, old_order, new_size, new_order); |
1475579c | 1420 | assert(new_size < old_size); |
1475579c | 1421 | |
4105056a MD |
1422 | /* Remove and unlink all dummy nodes to remove. */ |
1423 | fini_table(ht, new_order, old_order - new_order); | |
464a1ec9 MD |
1424 | } |
1425 | ||
1475579c MD |
1426 | |
1427 | /* called with resize mutex held */ | |
1428 | static | |
1429 | void _do_cds_lfht_resize(struct cds_lfht *ht) | |
1430 | { | |
1431 | unsigned long new_size, old_size; | |
4105056a MD |
1432 | |
1433 | /* | |
1434 | * Resize table, re-do if the target size has changed under us. | |
1435 | */ | |
1436 | do { | |
1437 | ht->t.resize_initiated = 1; | |
1438 | old_size = ht->t.size; | |
1439 | new_size = CMM_LOAD_SHARED(ht->t.resize_target); | |
1440 | if (old_size < new_size) | |
1441 | _do_cds_lfht_grow(ht, old_size, new_size); | |
1442 | else if (old_size > new_size) | |
1443 | _do_cds_lfht_shrink(ht, old_size, new_size); | |
1444 | ht->t.resize_initiated = 0; | |
1445 | /* write resize_initiated before read resize_target */ | |
1446 | cmm_smp_mb(); | |
4d676753 | 1447 | } while (ht->t.size != CMM_LOAD_SHARED(ht->t.resize_target)); |
1475579c MD |
1448 | } |
1449 | ||
abc490a1 | 1450 | static |
4105056a | 1451 | unsigned long resize_target_update(struct cds_lfht *ht, unsigned long size, |
f9830efd | 1452 | int growth_order) |
464a1ec9 | 1453 | { |
4105056a MD |
1454 | return _uatomic_max(&ht->t.resize_target, |
1455 | size << growth_order); | |
464a1ec9 MD |
1456 | } |
1457 | ||
1475579c | 1458 | static |
4105056a | 1459 | void resize_target_update_count(struct cds_lfht *ht, |
b8af5011 | 1460 | unsigned long count) |
1475579c | 1461 | { |
cd95516d | 1462 | count = max(count, MIN_TABLE_SIZE); |
4105056a | 1463 | uatomic_set(&ht->t.resize_target, count); |
1475579c MD |
1464 | } |
1465 | ||
1466 | void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size) | |
464a1ec9 | 1467 | { |
4105056a MD |
1468 | resize_target_update_count(ht, new_size); |
1469 | CMM_STORE_SHARED(ht->t.resize_initiated, 1); | |
5f511391 | 1470 | ht->cds_lfht_rcu_thread_offline(); |
1475579c MD |
1471 | pthread_mutex_lock(&ht->resize_mutex); |
1472 | _do_cds_lfht_resize(ht); | |
1473 | pthread_mutex_unlock(&ht->resize_mutex); | |
5f511391 | 1474 | ht->cds_lfht_rcu_thread_online(); |
abc490a1 | 1475 | } |
464a1ec9 | 1476 | |
abc490a1 MD |
1477 | static |
1478 | void do_resize_cb(struct rcu_head *head) | |
1479 | { | |
1480 | struct rcu_resize_work *work = | |
1481 | caa_container_of(head, struct rcu_resize_work, head); | |
14044b37 | 1482 | struct cds_lfht *ht = work->ht; |
abc490a1 | 1483 | |
5f511391 | 1484 | ht->cds_lfht_rcu_thread_offline(); |
abc490a1 | 1485 | pthread_mutex_lock(&ht->resize_mutex); |
14044b37 | 1486 | _do_cds_lfht_resize(ht); |
abc490a1 | 1487 | pthread_mutex_unlock(&ht->resize_mutex); |
5f511391 | 1488 | ht->cds_lfht_rcu_thread_online(); |
98808fb1 | 1489 | poison_free(work); |
848d4088 MD |
1490 | cmm_smp_mb(); /* finish resize before decrement */ |
1491 | uatomic_dec(&ht->in_progress_resize); | |
464a1ec9 MD |
1492 | } |
1493 | ||
abc490a1 | 1494 | static |
4105056a | 1495 | void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth) |
ab7d5fc6 | 1496 | { |
abc490a1 | 1497 | struct rcu_resize_work *work; |
f9830efd | 1498 | unsigned long target_size; |
abc490a1 | 1499 | |
4105056a MD |
1500 | target_size = resize_target_update(ht, size, growth); |
1501 | /* Store resize_target before read resize_initiated */ | |
1502 | cmm_smp_mb(); | |
1503 | if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) { | |
848d4088 MD |
1504 | uatomic_inc(&ht->in_progress_resize); |
1505 | cmm_smp_mb(); /* increment resize count before calling it */ | |
f9830efd MD |
1506 | work = malloc(sizeof(*work)); |
1507 | work->ht = ht; | |
14044b37 | 1508 | ht->cds_lfht_call_rcu(&work->head, do_resize_cb); |
4105056a | 1509 | CMM_STORE_SHARED(ht->t.resize_initiated, 1); |
f9830efd | 1510 | } |
ab7d5fc6 | 1511 | } |
3171717f | 1512 | |
f8994aee MD |
1513 | #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) |
1514 | ||
3171717f | 1515 | static |
4105056a | 1516 | void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size, |
3171717f MD |
1517 | unsigned long count) |
1518 | { | |
1519 | struct rcu_resize_work *work; | |
3171717f | 1520 | |
b8af5011 MD |
1521 | if (!(ht->flags & CDS_LFHT_AUTO_RESIZE)) |
1522 | return; | |
4105056a MD |
1523 | resize_target_update_count(ht, count); |
1524 | /* Store resize_target before read resize_initiated */ | |
1525 | cmm_smp_mb(); | |
1526 | if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) { | |
3171717f MD |
1527 | uatomic_inc(&ht->in_progress_resize); |
1528 | cmm_smp_mb(); /* increment resize count before calling it */ | |
1529 | work = malloc(sizeof(*work)); | |
1530 | work->ht = ht; | |
1531 | ht->cds_lfht_call_rcu(&work->head, do_resize_cb); | |
4105056a | 1532 | CMM_STORE_SHARED(ht->t.resize_initiated, 1); |
3171717f MD |
1533 | } |
1534 | } | |
f8994aee MD |
1535 | |
1536 | #endif |