rculfhash: introduce REMOVED_FLAG and FLAGS_MASK
[urcu.git] / rculfhash.c
/*
 * rculfhash.c
 *
 * Userspace RCU library - Lock-Free Expandable RCU Hash Table
 *
 * Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <pthread.h>
#include <poll.h>	/* for the poll() call in ht_destroy() */

#include <urcu.h>
#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>

#define DEBUG			/* Test */

#ifdef DEBUG
#define dbg_printf(args...)	printf(args)
#else
#define dbg_printf(args...)
#endif

#define CHAIN_LEN_TARGET		1
#define CHAIN_LEN_RESIZE_THRESHOLD	2

#ifndef max
#define max(a, b)	((a) > (b) ? (a) : (b))
#endif

#define REMOVED_FLAG		(1UL << 0)
#define FLAGS_MASK		((1UL << 1) - 1)

struct rcu_table {
	unsigned long size;	/* always a power of 2 */
	unsigned long resize_target;
	int resize_initiated;
	struct rcu_head head;
	struct rcu_ht_node *tbl[0];
};

struct rcu_ht {
	struct rcu_table *t;		/* shared */
	ht_hash_fct hash_fct;
	ht_compare_fct compare_fct;
	unsigned long hash_seed;
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	unsigned int in_progress_resize;
	void (*ht_call_rcu)(struct rcu_head *head,
			void (*func)(struct rcu_head *head));
};

struct rcu_resize_work {
	struct rcu_head head;
	struct rcu_ht *ht;
};

/*
 * Algorithm to reverse bits in a word by lookup table, extended to
 * 64-bit words.
 * Source:
 * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
 * Originally from Public Domain.
 */

static const uint8_t BitReverseTable256[256] =
{
#define R2(n)	(n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
#define R4(n)	R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
#define R6(n)	R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
	R6(0), R6(2), R6(1), R6(3)
};
#undef R2
#undef R4
#undef R6

static
uint8_t bit_reverse_u8(uint8_t v)
{
	return BitReverseTable256[v];
}

static __attribute__((unused))
uint32_t bit_reverse_u32(uint32_t v)
{
	return ((uint32_t) bit_reverse_u8(v) << 24) |
		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
		((uint32_t) bit_reverse_u8(v >> 24));
}

static __attribute__((unused))
uint64_t bit_reverse_u64(uint64_t v)
{
	return ((uint64_t) bit_reverse_u8(v) << 56) |
		((uint64_t) bit_reverse_u8(v >> 8) << 48) |
		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
		((uint64_t) bit_reverse_u8(v >> 56));
}

static
unsigned long bit_reverse_ulong(unsigned long v)
{
#if (CAA_BITS_PER_LONG == 32)
	return bit_reverse_u32(v);
#else
	return bit_reverse_u64(v);
#endif
}

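/*
 * Editor's illustrative sketch (not part of the upstream file): the
 * table is a split-ordered list, so nodes are kept sorted by their
 * *bit-reversed* hash. Keys that share a bucket at size 4 simply split
 * between two adjacent buckets at size 8, without moving any node.
 */
static __attribute__((unused))
void reverse_hash_order_example(void)
{
	/* Hashes 1 and 5 share bucket 1 of a size-4 table... */
	assert((1UL & 3) == (5UL & 3));
	/*
	 * ...and their reversed hashes sort them next to each other in
	 * the chain, so doubling the table only splices a new dummy node
	 * between them instead of rehashing.
	 */
	assert(bit_reverse_ulong(1UL) < bit_reverse_ulong(5UL));
}
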
/*
 * Algorithm to find the log2 of a 32-bit unsigned integer.
 * source: http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup
 * Originally from Public Domain.
 */
static const char LogTable256[256] =
{
#define LT(n)	n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n
	-1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
	LT(4), LT(5), LT(5), LT(6), LT(6), LT(6), LT(6),
	LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7)
};
#undef LT

uint32_t log2_u32(uint32_t v)
{
	uint32_t t, tt;

	if ((tt = (v >> 16)))
		return (t = (tt >> 8))
				? 24 + LogTable256[t]
				: 16 + LogTable256[tt];
	else
		return (t = (v >> 8))
				? 8 + LogTable256[t]
				: LogTable256[v];
}

static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth);

static
void check_resize(struct rcu_ht *ht, struct rcu_table *t,
		  uint32_t chain_len)
{
	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
		ht_resize_lazy(ht, t,
			log2_u32(chain_len - CHAIN_LEN_TARGET - 1));
}

static
struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}

static
int is_removed(struct rcu_ht_node *node)
{
	return ((unsigned long) node) & REMOVED_FLAG;
}

static
struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
{
	return (struct rcu_ht_node *) (((unsigned long) node) | REMOVED_FLAG);
}

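/*
 * Editor's illustrative sketch (not upstream code): round-trip of the
 * low-bit tagging scheme above. It assumes nodes are at least 2-byte
 * aligned (any real allocator guarantees this), so bit 0 of a node
 * pointer is always free to carry REMOVED_FLAG.
 */
static __attribute__((unused))
void flag_roundtrip_example(struct rcu_ht_node *node)
{
	struct rcu_ht_node *tagged;

	assert(((unsigned long) node & FLAGS_MASK) == 0);
	tagged = flag_removed(node);	/* set the logical-removal bit */
	assert(is_removed(tagged));
	assert(clear_flag(tagged) == node);	/* original pointer recovered */
}
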
static
unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
{
	unsigned long old1, old2;

	old1 = uatomic_read(ptr);
	do {
		old2 = old1;
		if (old2 >= v)
			return old2;
	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
	return v;
}

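/*
 * Editor's illustrative sketch (not upstream code): _uatomic_max() only
 * ever moves the target value upward, which is what concurrent resize
 * requests need: the largest requested size wins.
 */
static __attribute__((unused))
void uatomic_max_example(void)
{
	unsigned long target = 8;

	(void) _uatomic_max(&target, 4);	/* 4 < 8: no change */
	assert(target == 8);
	(void) _uatomic_max(&target, 16);	/* 16 > 8: raised */
	assert(target == 16);
}
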
/*
 * Remove all logically deleted nodes from a bucket up to a certain node key.
 */
static
void _ht_gc_bucket(struct rcu_ht_node *dummy, struct rcu_ht_node *node)
{
	struct rcu_ht_node *iter_prev, *iter, *next;

	for (;;) {
		iter_prev = dummy;
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		for (;;) {
			if (unlikely(!iter))
				return;
			if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
				return;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (is_removed(next))
				break;
			iter_prev = iter;
			iter = next;
		}
		assert(!is_removed(iter));
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, clear_flag(next));
	}
}

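/*
 * Editor's note (not upstream text): a node is "logically deleted" when
 * its own next pointer carries REMOVED_FLAG. The cmpxchg in
 * _ht_gc_bucket() swings iter_prev->p.next from iter to
 * clear_flag(next), physically unlinking one such node; on cmpxchg
 * failure the outer loop restarts from the dummy head, so the helper
 * stays correct against concurrent adds and removals.
 */
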
static
struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
			    struct rcu_ht_node *node, int unique)
{
	struct rcu_ht_node *iter_prev, *dummy, *iter, *next;
	unsigned long hash;

	if (!t->size) {
		assert(node->p.dummy);
		return node;	/* Initial first add (head) */
	}
	hash = bit_reverse_ulong(node->p.reverse_hash);
	for (;;) {
		uint32_t chain_len = 0;

		/*
		 * iter_prev points to the non-removed node prior to the
		 * insert location.
		 */
		iter_prev = rcu_dereference(t->tbl[hash & (t->size - 1)]);
		/* We can always skip the dummy node initially */
		iter = rcu_dereference(iter_prev->p.next);
		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
		for (;;) {
			if (unlikely(!iter))
				goto insert;
			if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
				goto insert;
			next = rcu_dereference(clear_flag(iter)->p.next);
			if (is_removed(next))
				goto gc_node;
			if (unique
			    && !clear_flag(iter)->p.dummy
			    && !ht->compare_fct(node->key, node->key_len,
						clear_flag(iter)->key,
						clear_flag(iter)->key_len))
				return clear_flag(iter);
			/* Only account for identical reverse hash once */
			if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash)
				check_resize(ht, t, ++chain_len);
			iter_prev = clear_flag(iter);
			iter = next;
		}
	insert:
		assert(node != clear_flag(iter));
		assert(!is_removed(iter_prev));
		assert(iter_prev != node);
		node->p.next = iter;
		if (uatomic_cmpxchg(&iter_prev->p.next, iter,
				    node) != iter)
			continue;	/* retry */
		else
			goto gc_end;
	gc_node:
		assert(!is_removed(iter));
		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, clear_flag(next));
		/* retry */
	}
gc_end:
	/* Garbage collect logically removed nodes in the bucket */
	dummy = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	_ht_gc_bucket(dummy, node);
	return node;
}

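/*
 * Editor's note (not upstream text): _ht_add() returns its node
 * argument on successful insertion. With unique set, it instead returns
 * the already-present node when an equal key is found, which is how
 * ht_add_unique() below reports duplicates to its caller.
 */
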
static
int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
{
	struct rcu_ht_node *dummy, *next, *old;
	int flagged = 0;
	unsigned long hash;

	/* logically delete the node */
	old = rcu_dereference(node->p.next);
	do {
		next = old;
		if (is_removed(next))
			goto end;
		assert(!node->p.dummy);
		old = uatomic_cmpxchg(&node->p.next, next,
				      flag_removed(next));
	} while (old != next);

	/* We performed the (logical) deletion. */
	flagged = 1;

	/*
	 * Ensure that the node is not visible to readers anymore: look up
	 * the node, and remove it (along with any other logically removed
	 * nodes) if found.
	 */
	hash = bit_reverse_ulong(node->p.reverse_hash);
	dummy = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	_ht_gc_bucket(dummy, node);
end:
	/*
	 * Only the flagging action indicates that we (and no other thread)
	 * removed the node from the hash table.
	 */
	if (flagged) {
		assert(is_removed(rcu_dereference(node->p.next)));
		return 0;
	} else
		return -ENOENT;
}

static
void init_table(struct rcu_ht *ht, struct rcu_table *t,
		unsigned long first, unsigned long len)
{
	unsigned long i, end;

	end = first + len;
	for (i = first; i < end; i++) {
		/* Update table size when power of two */
		if (i != 0 && !(i & (i - 1)))
			t->size = i;
		t->tbl[i] = calloc(1, sizeof(struct _rcu_ht_node));
		t->tbl[i]->p.dummy = 1;
		t->tbl[i]->p.reverse_hash = bit_reverse_ulong(i);
		(void) _ht_add(ht, t, t->tbl[i], 0);
	}
	t->resize_target = t->size = end;
	t->resize_initiated = 0;
}

struct rcu_ht *ht_new(ht_hash_fct hash_fct,
		      ht_compare_fct compare_fct,
		      unsigned long hash_seed,
		      unsigned long init_size,
		      void (*ht_call_rcu)(struct rcu_head *head,
				void (*func)(struct rcu_head *head)))
{
	struct rcu_ht *ht;

	ht = calloc(1, sizeof(struct rcu_ht));
	ht->hash_fct = hash_fct;
	ht->compare_fct = compare_fct;
	ht->hash_seed = hash_seed;
	ht->ht_call_rcu = ht_call_rcu;
	ht->in_progress_resize = 0;
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	ht->t = calloc(1, sizeof(struct rcu_table)
		       + (max(init_size, 1) * sizeof(struct rcu_ht_node *)));
	ht->t->size = 0;
	pthread_mutex_lock(&ht->resize_mutex);
	init_table(ht, ht->t, 0, max(init_size, 1));
	pthread_mutex_unlock(&ht->resize_mutex);
	return ht;
}

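/*
 * Editor's usage sketch (not upstream code; the exact callback
 * prototypes are declared in urcu/rculfhash.h). The caller supplies its
 * own hash and compare functions, a seed, an initial bucket count, and
 * the call_rcu() implementation of its urcu flavor:
 *
 *	ht = ht_new(my_hash_fct, my_compare_fct, 0x42UL, 1024, call_rcu);
 *	...
 *	rcu_read_lock();
 *	node = ht_lookup(ht, key, key_len);
 *	rcu_read_unlock();
 *	...
 *	ret = ht_destroy(ht);
 */
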
struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	unsigned long hash, reverse_hash;

	hash = ht->hash_fct(key, key_len, ht->hash_seed);
	reverse_hash = bit_reverse_ulong(hash);

	t = rcu_dereference(ht->t);
	node = rcu_dereference(t->tbl[hash & (t->size - 1)]);
	for (;;) {
		if (unlikely(!node))
			break;
		if (unlikely(node->p.reverse_hash > reverse_hash)) {
			node = NULL;
			break;
		}
		if (likely(!is_removed(rcu_dereference(node->p.next)))
		    && !node->p.dummy
		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
			break;
		}
		node = clear_flag(rcu_dereference(node->p.next));
	}
	assert(!node || !node->p.dummy);
	return node;
}

void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;
	unsigned long hash;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	(void) _ht_add(ht, t, node, 0);
}

struct rcu_ht_node *ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;
	unsigned long hash;

	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);

	t = rcu_dereference(ht->t);
	return _ht_add(ht, t, node, 1);
}

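/*
 * Editor's usage sketch (not upstream code): ht_add_unique() returns
 * the node that ends up in the table, so comparing the return value
 * against the argument detects duplicate keys:
 *
 *	ret = ht_add_unique(ht, node);
 *	if (ret != node)
 *		... key already present; ret points to the existing node
 */
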
int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
{
	struct rcu_table *t;

	t = rcu_dereference(ht->t);
	return _ht_remove(ht, t, node);
}

static
int ht_delete_dummy(struct rcu_ht *ht)
{
	struct rcu_table *t;
	struct rcu_ht_node *node;
	unsigned long i;

	t = ht->t;
	/* Check that the table is empty */
	node = t->tbl[0];
	do {
		if (!node->p.dummy)
			return -EPERM;
		node = node->p.next;
		assert(!is_removed(node));
	} while (node);
	/* Internal sanity check: all nodes left should be dummy */
	for (i = 0; i < t->size; i++) {
		assert(t->tbl[i]->p.dummy);
		free(t->tbl[i]);
	}
	return 0;
}

/*
 * Should only be called when no concurrent readers or writers can
 * possibly access the table anymore.
 */
int ht_destroy(struct rcu_ht *ht)
{
	int ret;

	/* Wait for in-flight resize operations to complete */
	while (uatomic_read(&ht->in_progress_resize))
		poll(NULL, 0, 100);	/* wait for 100ms */
	ret = ht_delete_dummy(ht);
	if (ret)
		return ret;
	free(ht->t);
	free(ht);
	return ret;
}

void ht_count_nodes(struct rcu_ht *ht,
		    unsigned long *count,
		    unsigned long *removed)
{
	struct rcu_table *t;
	struct rcu_ht_node *node, *next;

	*count = 0;
	*removed = 0;

	t = rcu_dereference(ht->t);
	/* Count non-dummy nodes in the table */
	node = rcu_dereference(t->tbl[0]);
	do {
		next = rcu_dereference(node->p.next);
		if (is_removed(next)) {
			assert(!node->p.dummy);
			(*removed)++;
		} else if (!node->p.dummy)
			(*count)++;
		node = clear_flag(next);
	} while (node);
}

static
void ht_free_table_cb(struct rcu_head *head)
{
	struct rcu_table *t =
		caa_container_of(head, struct rcu_table, head);
	free(t);
}

/* called with resize mutex held */
static
void _do_ht_resize(struct rcu_ht *ht)
{
	unsigned long new_size, old_size;
	struct rcu_table *new_t, *old_t;

	old_t = ht->t;
	old_size = old_t->size;

	new_size = CMM_LOAD_SHARED(old_t->resize_target);
	dbg_printf("rculfhash: resize from %lu to %lu buckets\n",
		   old_size, new_size);
	if (old_size == new_size)
		return;
	new_t = malloc(sizeof(struct rcu_table)
			+ (new_size * sizeof(struct rcu_ht_node *)));
	assert(new_size > old_size);
	memcpy(&new_t->tbl, &old_t->tbl,
	       old_size * sizeof(struct rcu_ht_node *));
	init_table(ht, new_t, old_size, new_size - old_size);
	/* Changing table and size atomically wrt lookups */
	rcu_assign_pointer(ht->t, new_t);
	ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
}

static
unsigned long resize_target_update(struct rcu_table *t,
				   int growth_order)
{
	return _uatomic_max(&t->resize_target,
			    t->size << growth_order);
}

void ht_resize(struct rcu_ht *ht, int growth)
{
	struct rcu_table *t = rcu_dereference(ht->t);
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (t->size < target_size) {
		CMM_STORE_SHARED(t->resize_initiated, 1);
		pthread_mutex_lock(&ht->resize_mutex);
		_do_ht_resize(ht);
		pthread_mutex_unlock(&ht->resize_mutex);
	}
}

static
void do_resize_cb(struct rcu_head *head)
{
	struct rcu_resize_work *work =
		caa_container_of(head, struct rcu_resize_work, head);
	struct rcu_ht *ht = work->ht;

	pthread_mutex_lock(&ht->resize_mutex);
	_do_ht_resize(ht);
	pthread_mutex_unlock(&ht->resize_mutex);
	free(work);
	cmm_smp_mb();	/* finish resize before decrement */
	uatomic_dec(&ht->in_progress_resize);
}

static
void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
{
	struct rcu_resize_work *work;
	unsigned long target_size;

	target_size = resize_target_update(t, growth);
	if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
		uatomic_inc(&ht->in_progress_resize);
		cmm_smp_mb();	/* increment resize count before calling it */
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->ht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(t->resize_initiated, 1);
	}
}
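
/*
 * Editor's note (not upstream text): resize_initiated and
 * in_progress_resize play different roles. The former keeps writers
 * from queueing duplicate resize work against the same table; the
 * latter lets ht_destroy() wait, via its poll() loop, until every
 * call_rcu-deferred do_resize_cb() has run to completion.
 */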