X-Git-Url: https://git.lttng.org/?p=urcu.git;a=blobdiff_plain;f=rculfhash.c;h=98547851b210a64e25788eef3cca2bc87f7ce8e0;hp=da04b9dce7ced2659a0b89ee6fd806d3dc97c439;hb=92cfe223501f564d3bd726a6c580702daeef4198;hpb=a2974903f8e4efaadc75e2d5a0dbade288c0cd31

diff --git a/rculfhash.c b/rculfhash.c
index da04b9d..9854785 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -20,6 +20,80 @@
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
+/*
+ * Based on the following articles:
+ * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
+ *   extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
+ * - Michael, M. M. High performance dynamic lock-free hash tables
+ *   and list-based sets. In Proceedings of the fourteenth annual ACM
+ *   symposium on Parallel algorithms and architectures, ACM Press,
+ *   (2002), 73-82.
+ *
+ * Some specificities of this Lock-Free Expandable RCU Hash Table
+ * implementation:
+ *
+ * - The RCU read-side critical section allows readers to perform hash
+ *   table lookups and use the returned objects safely, by delaying
+ *   memory reclamation for a grace period.
+ * - Add and remove operations are lock-free, and do not need to
+ *   allocate memory. They need to be executed within an RCU read-side
+ *   critical section to ensure the objects they read are valid and to
+ *   deal with the cmpxchg ABA problem.
+ * - add and add_unique operations are supported. add_unique checks
+ *   whether the node key is already present in the hash table. It
+ *   ensures that no duplicate key exists.
+ * - The resize operation executes concurrently with add/remove/lookup.
+ * - Hash table nodes are contained within a split-ordered list. This
+ *   list is ordered by incrementing reversed-bits-hash value.
+ * - An index of dummy nodes is kept. These dummy nodes are the hash
+ *   table "buckets", and they are also chained together in the
+ *   split-ordered list, which allows recursive expansion.
+ * - The resize operation only allows expanding the hash table.
+ *   It is triggered either through an API call or automatically by
+ *   detecting long chains in the add operation.
+ * - A resize operation initiated by long chain detection is executed
+ *   by a call_rcu thread, which preserves lock-freedom of add and
+ *   remove.
+ * - Resize operations are protected by a mutex.
+ * - The removal operation is split in two parts: first, a "removed"
+ *   flag is set in the next pointer within the node to remove. Then,
+ *   a "garbage collection" is performed in the bucket containing the
+ *   removed node (from the start of the bucket up to the removed node).
+ *   All encountered nodes with the "removed" flag set in their next
+ *   pointers are removed from the linked-list. If the cmpxchg used for
+ *   removal fails (due to concurrent garbage collection or a concurrent
+ *   add), we retry from the beginning of the bucket. This ensures that
+ *   the node with the "removed" flag set is removed from the hash table
+ *   (not visible to lookups anymore) before the RCU read-side critical
+ *   section held across removal ends. Furthermore, this ensures that
+ *   the node with the "removed" flag set is removed from the linked-list
+ *   before its memory is reclaimed. Only the thread that successfully
+ *   set the "removed" flag (with a cmpxchg) into a node's next pointer
+ *   is considered to have succeeded its removal (and thus owns the node
+ *   to reclaim).
+ *   Because we garbage-collect starting from an invariant
+ *   node (the start-of-bucket dummy node) up to the "removed" node (or
+ *   until we find a higher reverse-hash), we are sure that a successful
+ *   traversal of the chain leads to a chain that is present in the
+ *   linked-list (the start node is never removed) and that it does not
+ *   contain the "removed" node anymore, even if concurrent delete/add
+ *   operations are changing the structure of the list concurrently.
+ * - The add operation performs garbage collection of buckets if it
+ *   encounters nodes with the removed flag set in the bucket where it
+ *   wants to add its new node. This ensures lock-freedom of the add
+ *   operation by helping the remover unlink nodes from the list rather
+ *   than waiting for it to do so.
+ * - An RCU "order table" indexed by log2(hash index) is copied and
+ *   expanded by the resize operation. This order table allows finding
+ *   the "dummy node" tables (a standalone index-mapping sketch follows
+ *   this hunk).
+ * - There is one dummy node table per hash index order. The size of
+ *   each dummy node table is half the number of hashes contained in
+ *   this order.
+ * - call_rcu is used to garbage-collect the old order table.
+ * - The per-order dummy node tables contain a compact version of the
+ *   hash table nodes. These tables are invariant after they are
+ *   populated into the hash table.
+ */
+
 #define _LGPL_SOURCE
 #include
 #include
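To make the order-table indexing concrete before the code that uses it, here is a minimal standalone sketch (hypothetical, not part of the patch; count_order is a portable stand-in for the patch's get_count_order_ulong) reproducing the index -> (order, array index) arithmetic used below by _ht_add, _ht_remove and ht_lookup:

#include <stdio.h>

/* Hypothetical stand-in for get_count_order_ulong(): ceil(log2(x)). */
static unsigned long count_order(unsigned long x)
{
	unsigned long order = 0, v;

	for (v = x; v > 1; v >>= 1)	/* floor(log2(x)), i.e. fls(x) - 1 */
		order++;
	if (x & (x - 1))
		order++;		/* round up for non-powers of two */
	return order;
}

int main(void)
{
	unsigned long index;

	for (index = 0; index < 8; index++) {
		unsigned long order = count_order(index + 1);
		/* Order 0 only ever holds index 0; guard the shift. */
		unsigned long aridx =
			order ? (index & ((1UL << (order - 1)) - 1)) : 0;

		printf("index %lu -> tbl[%lu][%lu]\n", index, order, aridx);
	}
	return 0;
}

For indices 0..7 this prints tbl[0][0], tbl[1][0], tbl[2][0], tbl[2][1], then tbl[3][0] through tbl[3][3]: order 0 holds bucket 0, and each higher order n holds the 2^(n-1) buckets whose index has its most significant bit at position n, i.e. half the hashes of that order.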
@@ -38,30 +112,34 @@
 #include
 #include
 
-#define DEBUG		/* Test */
-
 #ifdef DEBUG
-#define dbg_printf(args...)	printf(args)
+#define dbg_printf(fmt, args...)	printf(fmt, ## args)
 #else
-#define dbg_printf(args...)
+#define dbg_printf(fmt, args...)
 #endif
 
-#define CHAIN_LEN_TARGET		1
-#define CHAIN_LEN_RESIZE_THRESHOLD	2
+#define CHAIN_LEN_TARGET		4
+#define CHAIN_LEN_RESIZE_THRESHOLD	8
 
 #ifndef max
 #define max(a, b)	((a) > (b) ? (a) : (b))
 #endif
 
+/*
+ * The removed flag needs to be updated atomically with the pointer.
+ * The dummy flag does not require to be updated atomically with the
+ * pointer, but it is added as a pointer low bit flag to save space.
+ */
 #define REMOVED_FLAG		(1UL << 0)
-#define FLAGS_MASK		((1UL << 1) - 1)
+#define DUMMY_FLAG		(1UL << 1)
+#define FLAGS_MASK		((1UL << 2) - 1)
 
 struct rcu_table {
 	unsigned long size;	/* always a power of 2 */
 	unsigned long resize_target;
 	int resize_initiated;
 	struct rcu_head head;
-	struct rcu_ht_node *tbl[0];
+	struct _rcu_ht_node *tbl[0];
 };
 
 struct rcu_ht {
@@ -70,7 +148,7 @@ struct rcu_ht {
 	ht_compare_fct compare_fct;
 	unsigned long hash_seed;
 	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
-	unsigned int in_progress_resize;
+	unsigned int in_progress_resize, in_progress_destroy;
 	void (*ht_call_rcu)(struct rcu_head *head,
 		      void (*func)(struct rcu_head *head));
 };
@@ -138,30 +216,138 @@ unsigned long bit_reverse_ulong(unsigned long v)
 }
 
 /*
- * Algorithm to find the log2 of a 32-bit unsigned integer.
- * source: http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup
- * Originally from Public Domain.
+ * fls: returns the position of the most significant bit.
+ * Returns 0 if no bit is set, else returns the position of the most
+ * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
  */
-static const char LogTable256[256] =
+#if defined(__i386) || defined(__x86_64)
+static inline
+unsigned int fls_u32(uint32_t x)
 {
-#define LT(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n
-	-1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
-	LT(4), LT(5), LT(5), LT(6), LT(6), LT(6), LT(6),
-	LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7)
-};
+	int r;
+
+	asm("bsrl %1,%0\n\t"
+	    "jnz 1f\n\t"
+	    "movl $-1,%0\n\t"
+	    "1:\n\t"
+	    : "=r" (r) : "rm" (x));
+	return r + 1;
+}
+#define HAS_FLS_U32
+#endif
+
+#if defined(__x86_64)
+static inline
+unsigned int fls_u64(uint64_t x)
+{
+	long r;
+
+	asm("bsrq %1,%0\n\t"
+	    "jnz 1f\n\t"
+	    "movq $-1,%0\n\t"
+	    "1:\n\t"
+	    : "=r" (r) : "rm" (x));
+	return r + 1;
+}
+#define HAS_FLS_U64
+#endif
 
-uint32_t log2_u32(uint32_t v)
+#ifndef HAS_FLS_U64
+static __attribute__((unused))
+unsigned int fls_u64(uint64_t x)
 {
-	uint32_t t, tt;
+	unsigned int r = 64;
 
-	if ((tt = (v >> 16)))
-		return (t = (tt >> 8))
-			? 24 + LogTable256[t]
-			: 16 + LogTable256[tt];
-	else
-		return (t = (v >> 8))
-			? 8 + LogTable256[t]
-			: LogTable256[v];
+	if (!x)
+		return 0;
+
+	if (!(x & 0xFFFFFFFF00000000ULL)) {
+		x <<= 32;
+		r -= 32;
+	}
+	if (!(x & 0xFFFF000000000000ULL)) {
+		x <<= 16;
+		r -= 16;
+	}
+	if (!(x & 0xFF00000000000000ULL)) {
+		x <<= 8;
+		r -= 8;
+	}
+	if (!(x & 0xF000000000000000ULL)) {
+		x <<= 4;
+		r -= 4;
+	}
+	if (!(x & 0xC000000000000000ULL)) {
+		x <<= 2;
+		r -= 2;
+	}
+	if (!(x & 0x8000000000000000ULL)) {
+		x <<= 1;
+		r -= 1;
+	}
+	return r;
+}
+#endif
+
+#ifndef HAS_FLS_U32
+static __attribute__((unused))
+unsigned int fls_u32(uint32_t x)
+{
+	unsigned int r = 32;
+
+	if (!x)
+		return 0;
+	if (!(x & 0xFFFF0000U)) {
+		x <<= 16;
+		r -= 16;
+	}
+	if (!(x & 0xFF000000U)) {
+		x <<= 8;
+		r -= 8;
+	}
+	if (!(x & 0xF0000000U)) {
+		x <<= 4;
+		r -= 4;
+	}
+	if (!(x & 0xC0000000U)) {
+		x <<= 2;
+		r -= 2;
+	}
+	if (!(x & 0x80000000U)) {
+		x <<= 1;
+		r -= 1;
+	}
+	return r;
+}
+#endif
+
+unsigned int fls_ulong(unsigned long x)
+{
+#if (CAA_BITS_PER_LONG == 32)
+	return fls_u32(x);
+#else
+	return fls_u64(x);
+#endif
+}
+
+int get_count_order_u32(uint32_t x)
+{
+	int order;
+
+	order = fls_u32(x) - 1;
+	if (x & (x - 1))
+		order++;
+	return order;
+}
+
+int get_count_order_ulong(unsigned long x)
+{
+	int order;
+
+	order = fls_ulong(x) - 1;
+	if (x & (x - 1))
+		order++;
+	return order;
 }
 
 static
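The fls/get_count_order pair above replaces the old LogTable256 lookup; note the convention: fls returns a 1-based bit position and 0 when no bit is set, so get_count_order computes ceil(log2(x)). A small hypothetical check (not part of the patch), exercising the portable fallback algorithm:

#include <assert.h>
#include <stdint.h>

/* Same algorithm as the patch's portable fls_u32 fallback. */
static unsigned int fls_u32_ref(uint32_t x)
{
	unsigned int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) { x <<= 16; r -= 16; }
	if (!(x & 0xFF000000U)) { x <<= 8;  r -= 8; }
	if (!(x & 0xF0000000U)) { x <<= 4;  r -= 4; }
	if (!(x & 0xC0000000U)) { x <<= 2;  r -= 2; }
	if (!(x & 0x80000000U)) { x <<= 1;  r -= 1; }
	return r;
}

static int get_count_order_u32_ref(uint32_t x)
{
	int order = fls_u32_ref(x) - 1;

	if (x & (x - 1))
		order++;	/* round up for non-powers of two */
	return order;
}

int main(void)
{
	assert(fls_u32_ref(0) == 0);			/* no bit set */
	assert(fls_u32_ref(1) == 1);			/* bit 0 -> position 1 */
	assert(fls_u32_ref(8) == 4);
	assert(get_count_order_u32_ref(8) == 3);	/* exact power of two */
	assert(get_count_order_u32_ref(9) == 4);	/* rounded up */
	return 0;
}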
@@ -171,9 +357,12 @@ static
 void check_resize(struct rcu_ht *ht, struct rcu_table *t,
 		  uint32_t chain_len)
 {
+	if (chain_len > 100)
+		dbg_printf("rculfhash: WARNING: large chain length: %u.\n",
+			   chain_len);
 	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
 		ht_resize_lazy(ht, t,
-			log2_u32(chain_len - CHAIN_LEN_TARGET - 1));
+			get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
 }
 
 static
@@ -194,6 +383,18 @@ struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
 	return (struct rcu_ht_node *) (((unsigned long) node) | REMOVED_FLAG);
 }
 
+static
+int is_dummy(struct rcu_ht_node *node)
+{
+	return ((unsigned long) node) & DUMMY_FLAG;
+}
+
+static
+struct rcu_ht_node *flag_dummy(struct rcu_ht_node *node)
+{
+	return (struct rcu_ht_node *) (((unsigned long) node) | DUMMY_FLAG);
+}
+
 static
 unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
 {
@@ -214,7 +415,7 @@ unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
 static
 void _ht_gc_bucket(struct rcu_ht_node *dummy, struct rcu_ht_node *node)
 {
-	struct rcu_ht_node *iter_prev, *iter, *next;
+	struct rcu_ht_node *iter_prev, *iter, *next, *new_next;
 
 	for (;;) {
 		iter_prev = dummy;
@@ -224,28 +425,35 @@ void _ht_gc_bucket(struct rcu_ht_node *dummy, struct rcu_ht_node *node)
 		for (;;) {
 			if (unlikely(!clear_flag(iter)))
 				return;
-			if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
+			if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
 				return;
 			next = rcu_dereference(clear_flag(iter)->p.next);
-			if (is_removed(next))
+			if (likely(is_removed(next)))
 				break;
-			iter_prev = iter;
+			iter_prev = clear_flag(iter);
 			iter = next;
 		}
 		assert(!is_removed(iter));
-		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, clear_flag(next));
+		if (is_dummy(iter))
+			new_next = flag_dummy(clear_flag(next));
+		else
+			new_next = clear_flag(next);
+		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
 	}
 }
 
 static
 struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
-			    struct rcu_ht_node *node, int unique)
+			    struct rcu_ht_node *node, int unique, int dummy)
 {
-	struct rcu_ht_node *iter_prev, *dummy, *iter, *next;
-	unsigned long hash;
+	struct rcu_ht_node *iter_prev, *iter, *next, *new_node, *new_next,
+			*dummy_node;
+	struct _rcu_ht_node *lookup;
+	unsigned long hash, index, order;
 
 	if (!t->size) {
-		assert(node->p.dummy);
+		assert(dummy);
+		node->p.next = flag_dummy(NULL);
 		return node;	/* Initial first add (head) */
 	}
 	hash = bit_reverse_ulong(node->p.reverse_hash);
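The is_dummy/flag_dummy/clear_flag helpers and the cmpxchg retries in _ht_gc_bucket all rely on node pointers having their two low bits free. A minimal standalone sketch of that tagging scheme (hypothetical, not part of the patch):

#include <assert.h>
#include <stdlib.h>

#define REMOVED_FLAG	(1UL << 0)
#define DUMMY_FLAG	(1UL << 1)
#define FLAGS_MASK	((1UL << 2) - 1)

struct node { struct node *next; };

static struct node *clear_flag(struct node *p)
{
	return (struct node *) (((unsigned long) p) & ~FLAGS_MASK);
}

static int is_removed(struct node *p)
{
	return ((unsigned long) p) & REMOVED_FLAG;
}

static struct node *flag_removed(struct node *p)
{
	return (struct node *) (((unsigned long) p) | REMOVED_FLAG);
}

int main(void)
{
	/* malloc guarantees alignment, so the two low bits are zero. */
	struct node *n = malloc(sizeof(*n));
	struct node *tagged = flag_removed(n);

	assert(is_removed(tagged));		/* flag is visible... */
	assert(clear_flag(tagged) == n);	/* ...and strips back off */
	free(n);
	return 0;
}

Because the flag travels inside the same word as the pointer, a single cmpxchg on the next field atomically publishes both the link and its removed/dummy state, which is what makes the two-phase removal described in the header comment possible.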
@@ -256,26 +464,30 @@ struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
 	 * iter_prev points to the non-removed node prior to the
 	 * insert location.
 	 */
-	iter_prev = rcu_dereference(t->tbl[hash & (t->size - 1)]);
+	index = hash & (t->size - 1);
+	order = get_count_order_ulong(index + 1);
+	lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
+	iter_prev = (struct rcu_ht_node *) lookup;
 	/* We can always skip the dummy node initially */
 	iter = rcu_dereference(iter_prev->p.next);
 	assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
 	for (;;) {
 		if (unlikely(!clear_flag(iter)))
 			goto insert;
-		if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
+		if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
 			goto insert;
 		next = rcu_dereference(clear_flag(iter)->p.next);
-		if (is_removed(next))
+		if (unlikely(is_removed(next)))
 			goto gc_node;
 		if (unique
-		    && !clear_flag(iter)->p.dummy
+		    && !is_dummy(next)
 		    && !ht->compare_fct(node->key, node->key_len,
 					clear_flag(iter)->key,
 					clear_flag(iter)->key_len))
 			return clear_flag(iter);
 		/* Only account for identical reverse hash once */
-		if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash)
+		if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
+		    && !is_dummy(next))
 			check_resize(ht, t, ++chain_len);
 		iter_prev = clear_flag(iter);
 		iter = next;
@@ -284,21 +496,35 @@ struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
 		assert(node != clear_flag(iter));
 		assert(!is_removed(iter_prev));
 		assert(iter_prev != node);
-		node->p.next = iter;
+		if (!dummy)
+			node->p.next = clear_flag(iter);
+		else
+			node->p.next = flag_dummy(clear_flag(iter));
+		if (is_dummy(iter))
+			new_node = flag_dummy(node);
+		else
+			new_node = node;
 		if (uatomic_cmpxchg(&iter_prev->p.next, iter,
-				    node) != iter)
+				    new_node) != iter)
 			continue;	/* retry */
 		else
 			goto gc_end;
 	gc_node:
 		assert(!is_removed(iter));
-		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, clear_flag(next));
+		if (is_dummy(iter))
+			new_next = flag_dummy(clear_flag(next));
+		else
+			new_next = clear_flag(next);
+		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
 		/* retry */
 	}
 gc_end:
 	/* Garbage collect logically removed nodes in the bucket */
-	dummy = rcu_dereference(t->tbl[hash & (t->size - 1)]);
-	_ht_gc_bucket(dummy, node);
+	index = hash & (t->size - 1);
+	order = get_count_order_ulong(index + 1);
+	lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
+	dummy_node = (struct rcu_ht_node *) lookup;
+	_ht_gc_bucket(dummy_node, node);
 	return node;
 }
 
@@ -306,16 +532,17 @@ static
 int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
 {
 	struct rcu_ht_node *dummy, *next, *old;
+	struct _rcu_ht_node *lookup;
 	int flagged = 0;
-	unsigned long hash;
+	unsigned long hash, index, order;
 
 	/* logically delete the node */
 	old = rcu_dereference(node->p.next);
 	do {
 		next = old;
-		if (is_removed(next))
+		if (unlikely(is_removed(next)))
 			goto end;
-		assert(!node->p.dummy);
+		assert(!is_dummy(next));
 		old = uatomic_cmpxchg(&node->p.next, next,
 				      flag_removed(next));
 	} while (old != next);
@@ -329,7 +556,10 @@ int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
 	 * if found.
 	 */
 	hash = bit_reverse_ulong(node->p.reverse_hash);
-	dummy = rcu_dereference(t->tbl[hash & (t->size - 1)]);
+	index = hash & (t->size - 1);
+	order = get_count_order_ulong(index + 1);
+	lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
+	dummy = (struct rcu_ht_node *) lookup;
 	_ht_gc_bucket(dummy, node);
 end:
 	/*
@@ -345,21 +575,38 @@ end:
 static
 void init_table(struct rcu_ht *ht, struct rcu_table *t,
-		unsigned long first, unsigned long len)
-{
-	unsigned long i, end;
-
-	end = first + len;
-	for (i = first; i < end; i++) {
-		/* Update table size when power of two */
-		if (i != 0 && !(i & (i - 1)))
-			t->size = i;
-		t->tbl[i] = calloc(1, sizeof(struct _rcu_ht_node));
-		t->tbl[i]->p.dummy = 1;
-		t->tbl[i]->p.reverse_hash = bit_reverse_ulong(i);
-		(void) _ht_add(ht, t, t->tbl[i], 0);
+		unsigned long first_order, unsigned long len_order)
+{
+	unsigned long i, end_order;
+
+	dbg_printf("rculfhash: init table: first_order %lu end_order %lu\n",
+		   first_order, first_order + len_order);
+	end_order = first_order + len_order;
+	t->size = !first_order ? 0 : (1UL << (first_order - 1));
+	for (i = first_order; i < end_order; i++) {
+		unsigned long j, len;
+
+		len = !i ? 1 : 1UL << (i - 1);
+		dbg_printf("rculfhash: init order %lu len: %lu\n", i, len);
+		t->tbl[i] = calloc(len, sizeof(struct _rcu_ht_node));
+		for (j = 0; j < len; j++) {
+			dbg_printf("rculfhash: init entry: i %lu j %lu hash %lu\n",
+				   i, j, !i ? 0 : (1UL << (i - 1)) + j);
+			struct rcu_ht_node *new_node =
+				(struct rcu_ht_node *) &t->tbl[i][j];
+			new_node->p.reverse_hash =
+				bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
+			(void) _ht_add(ht, t, new_node, 0, 1);
+			if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+				break;
+		}
+		/* Update table size */
+		t->size = !i ? 1 : (1UL << i);
+		dbg_printf("rculfhash: init new size: %lu\n", t->size);
+		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+			break;
 	}
-	t->resize_target = t->size = end;
+	t->resize_target = t->size;
 	t->resize_initiated = 0;
 }
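init_table above assigns each dummy node the bit-reversed value of its bucket index. A hypothetical 8-bit illustration (not part of the patch) of why that keeps the split-ordered list sorted across expansions:

#include <stdio.h>

/* Reverse the bit order of an 8-bit value (toy version of
 * bit_reverse_ulong, narrowed so the numbers are easy to eyeball). */
static unsigned char bit_reverse_u8(unsigned char v)
{
	unsigned char r = 0;
	int i;

	for (i = 0; i < 8; i++)
		r |= ((v >> i) & 1) << (7 - i);
	return r;
}

int main(void)
{
	unsigned int i;

	/* Buckets 0-3 exist at table size 4; 4-7 appear at size 8. */
	for (i = 0; i < 8; i++)
		printf("bucket %u -> reverse-hash key %u\n",
		       i, bit_reverse_u8((unsigned char) i));
	return 0;
}

This prints keys 0, 128, 64, 192, 32, 160, 96, 224; sorted by key the buckets appear in the order 0, 4, 2, 6, 1, 5, 3, 7. Every dummy node introduced by doubling the table lands between two already-linked nodes, so expansion inserts new buckets without ever reordering existing ones, which is what lets init_table populate new orders while lookups run concurrently.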
@@ -371,6 +618,7 @@ struct rcu_ht *ht_new(ht_hash_fct hash_fct,
 		      void (*func)(struct rcu_head *head)))
 {
 	struct rcu_ht *ht;
+	unsigned long order;
 
 	ht = calloc(1, sizeof(struct rcu_ht));
 	ht->hash_fct = hash_fct;
@@ -380,11 +628,12 @@ struct rcu_ht *ht_new(ht_hash_fct hash_fct,
 	ht->in_progress_resize = 0;
 	/* this mutex should not nest in read-side C.S. */
 	pthread_mutex_init(&ht->resize_mutex, NULL);
+	order = get_count_order_ulong(max(init_size, 1)) + 1;
 	ht->t = calloc(1, sizeof(struct rcu_table)
-		       + (max(init_size, 1) * sizeof(struct rcu_ht_node *)));
+		       + (order * sizeof(struct _rcu_ht_node *)));
 	ht->t->size = 0;
 	pthread_mutex_lock(&ht->resize_mutex);
-	init_table(ht, ht->t, 0, max(init_size, 1));
+	init_table(ht, ht->t, 0, order);
 	pthread_mutex_unlock(&ht->resize_mutex);
 	return ht;
 }
@@ -392,14 +641,20 @@ struct rcu_ht *ht_new(ht_hash_fct hash_fct,
 struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
 {
 	struct rcu_table *t;
-	struct rcu_ht_node *node;
-	unsigned long hash, reverse_hash;
+	struct rcu_ht_node *node, *next;
+	struct _rcu_ht_node *lookup;
+	unsigned long hash, reverse_hash, index, order;
 
 	hash = ht->hash_fct(key, key_len, ht->hash_seed);
 	reverse_hash = bit_reverse_ulong(hash);
 
 	t = rcu_dereference(ht->t);
-	node = rcu_dereference(t->tbl[hash & (t->size - 1)]);
+	index = hash & (t->size - 1);
+	order = get_count_order_ulong(index + 1);
+	lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
+	dbg_printf("rculfhash: lookup hash %lu index %lu order %lu aridx %lu\n",
+		   hash, index, order, index & ((1UL << (order - 1)) - 1));
+	node = (struct rcu_ht_node *) lookup;
 	for (;;) {
 		if (unlikely(!node))
 			break;
@@ -407,14 +662,15 @@ struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
 			node = NULL;
 			break;
 		}
-		if (likely(!is_removed(rcu_dereference(node->p.next)))
-		    && !node->p.dummy
+		next = rcu_dereference(node->p.next);
+		if (likely(!is_removed(next))
+		    && !is_dummy(next)
 		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
 				break;
 		}
-		node = clear_flag(rcu_dereference(node->p.next));
+		node = clear_flag(next);
 	}
-	assert(!node || !node->p.dummy);
+	assert(!node || !is_dummy(rcu_dereference(node->p.next)));
 	return node;
 }
 
@@ -427,7 +683,7 @@ void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
 	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
 
 	t = rcu_dereference(ht->t);
-	(void) _ht_add(ht, t, node, 0);
+	(void) _ht_add(ht, t, node, 0, 0);
 }
 
 struct rcu_ht_node *ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
@@ -439,7 +695,7 @@ struct rcu_ht_node *ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
 	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
 
 	t = rcu_dereference(ht->t);
-	return _ht_add(ht, t, node, 1);
+	return _ht_add(ht, t, node, 1, 0);
 }
 
 int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
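For orientation, here is a hypothetical caller sketch (not from this patch; the header path and the full ht_new prototype should be checked against rculfhash.h in this tree) showing the read-side discipline required by the header comment: lookups, adds, and removes must all run inside an RCU read-side critical section, and a returned node is only safe to use until the matching unlock.

#include <stddef.h>
#include <urcu.h>		/* rcu_read_lock()/rcu_read_unlock() */
#include <urcu/rculfhash.h>	/* assumed header location for this era */

extern struct rcu_ht *ht;	/* created elsewhere via ht_new() */

void example_lookup(void *key, size_t key_len)
{
	struct rcu_ht_node *node;

	rcu_read_lock();
	node = ht_lookup(ht, key, key_len);
	if (node) {
		/* Use the enclosing object here; it is guaranteed not
		 * to be reclaimed before rcu_read_unlock(). */
	}
	rcu_read_unlock();
}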
@@ -455,21 +711,31 @@ int ht_delete_dummy(struct rcu_ht *ht)
 {
 	struct rcu_table *t;
 	struct rcu_ht_node *node;
-	unsigned long i;
+	struct _rcu_ht_node *lookup;
+	unsigned long order, i;
 
 	t = ht->t;
 	/* Check that the table is empty */
-	node = t->tbl[0];
+	lookup = &t->tbl[0][0];
+	node = (struct rcu_ht_node *) lookup;
 	do {
-		if (!node->p.dummy)
+		node = clear_flag(node)->p.next;
+		if (!is_dummy(node))
 			return -EPERM;
-		node = node->p.next;
 		assert(!is_removed(node));
 	} while (clear_flag(node));
 	/* Internal sanity check: all nodes left should be dummy */
-	for (i = 0; i < t->size; i++) {
-		assert(t->tbl[i]->p.dummy);
-		free(t->tbl[i]);
+	for (order = 0; order < get_count_order_ulong(t->size) + 1; order++) {
+		unsigned long len;
+
+		len = !order ? 1 : 1UL << (order - 1);
+		for (i = 0; i < len; i++) {
+			dbg_printf("rculfhash: delete order %lu i %lu hash %lu\n",
+				   order, i,
+				   bit_reverse_ulong(t->tbl[order][i].reverse_hash));
+			assert(is_dummy(t->tbl[order][i].next));
+		}
+		free(t->tbl[order]);
 	}
 	return 0;
 }
@@ -483,6 +749,7 @@ int ht_destroy(struct rcu_ht *ht)
 	int ret;
 
 	/* Wait for in-flight resize operations to complete */
+	CMM_STORE_SHARED(ht->in_progress_destroy, 1);
 	while (uatomic_read(&ht->in_progress_resize))
 		poll(NULL, 0, 100);	/* wait for 100ms */
 	ret = ht_delete_dummy(ht);
@@ -499,22 +766,28 @@ void ht_count_nodes(struct rcu_ht *ht,
 {
 	struct rcu_table *t;
 	struct rcu_ht_node *node, *next;
+	struct _rcu_ht_node *lookup;
+	unsigned long nr_dummy = 0;
 
 	*count = 0;
 	*removed = 0;
 
 	t = rcu_dereference(ht->t);
-	/* Check that the table is empty */
-	node = rcu_dereference(t->tbl[0]);
+	/* Count non-dummy nodes in the table */
+	lookup = &t->tbl[0][0];
+	node = (struct rcu_ht_node *) lookup;
 	do {
 		next = rcu_dereference(node->p.next);
 		if (is_removed(next)) {
-			assert(!node->p.dummy);
+			assert(!is_dummy(next));
			(*removed)++;
-		} else if (!node->p.dummy)
+		} else if (!is_dummy(next))
 			(*count)++;
+		else
+			(nr_dummy)++;
 		node = clear_flag(next);
 	} while (node);
+	dbg_printf("rculfhash: number of dummy nodes: %lu\n", nr_dummy);
 }
 
 static
@@ -529,23 +802,25 @@ void ht_free_table_cb(struct rcu_head *head)
 static
 void _do_ht_resize(struct rcu_ht *ht)
 {
-	unsigned long new_size, old_size;
+	unsigned long new_size, old_size, old_order, new_order;
 	struct rcu_table *new_t, *old_t;
 
 	old_t = ht->t;
 	old_size = old_t->size;
+	old_order = get_count_order_ulong(old_size) + 1;
 
 	new_size = CMM_LOAD_SHARED(old_t->resize_target);
-	dbg_printf("rculfhash: resize from %lu to %lu buckets\n",
-		   old_size, new_size);
 	if (old_size == new_size)
 		return;
+	new_order = get_count_order_ulong(new_size) + 1;
+	printf("rculfhash: resize from %lu (order %lu) to %lu (order %lu) buckets\n",
+	       old_size, old_order, new_size, new_order);
 	new_t = malloc(sizeof(struct rcu_table)
-		       + (new_size * sizeof(struct rcu_ht_node *)));
+		       + (new_order * sizeof(struct _rcu_ht_node *)));
 	assert(new_size > old_size);
 	memcpy(&new_t->tbl, &old_t->tbl,
-	       old_size * sizeof(struct rcu_ht_node *));
-	init_table(ht, new_t, old_size, new_size - old_size);
+	       old_order * sizeof(struct _rcu_ht_node *));
+	init_table(ht, new_t, old_order, new_order - old_order);
 	/* Changing table and size atomically wrt lookups */
 	rcu_assign_pointer(ht->t, new_t);
 	ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
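The resize path above copies only the existing per-order table pointers and then initializes the new orders, so the old dummy node tables are shared rather than reallocated. A hypothetical arithmetic check (not part of the patch) for growing from 4 to 8 buckets:

#include <stdio.h>

/* Hypothetical stand-in for get_count_order_ulong(): ceil(log2(x)). */
static unsigned long get_count_order(unsigned long x)
{
	unsigned long order = 0, v;

	for (v = x; v > 1; v >>= 1)
		order++;
	if (x & (x - 1))
		order++;
	return order;
}

int main(void)
{
	unsigned long old_size = 4, new_size = 8;
	unsigned long old_order = get_count_order(old_size) + 1;	/* 3 */
	unsigned long new_order = get_count_order(new_size) + 1;	/* 4 */

	printf("copy %lu order pointers, init orders %lu..%lu\n",
	       old_order, old_order, new_order - 1);
	/* -> copy 3 order pointers, init orders 3..3: only tbl[3]
	 *    (four new dummy nodes for buckets 4-7) is allocated;
	 *    tbl[0..2] are shared with the old table until call_rcu
	 *    reclaims the old struct rcu_table shell. */
	return 0;
}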