X-Git-Url: https://git.lttng.org/?a=blobdiff_plain;f=rculfhash.c;h=5ad75833a37d77ba4f347d68834d0127e8bc574d;hb=9d72a73f9c1fbbdd686a2e39a96f1917d5bb4f96;hp=7e8165021f65116a1ed6e4386f102c1c7d5a9907;hpb=04db56f85e2cd2abb05e4ef14990e99a1058d0df;p=urcu.git

diff --git a/rculfhash.c b/rculfhash.c
index 7e81650..5ad7583 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -4,6 +4,7 @@
  * Userspace RCU library - Lock-Free Resizable RCU Hash Table
  *
  * Copyright 2010-2011 - Mathieu Desnoyers
+ * Copyright 2011 - Lai Jiangshan
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
@@ -45,7 +46,7 @@
  * - The resize operation executes concurrently with add/remove/lookup.
  * - Hash table nodes are contained within a split-ordered list. This
  *   list is ordered by incrementing reversed-bits-hash value.
- * - An index of dummy nodes is kept. These dummy nodes are the hash
+ * - An index of bucket nodes is kept. These bucket nodes are the hash
  *   table "buckets", and they are also chained together in the
  *   split-ordered list, which allows recursive expansion.
  * - The resize operation for small tables only allows expanding the hash table.
@@ -74,7 +75,7 @@
  *   successfully set the "removed" flag (with a cmpxchg) into a node's
  *   next pointer is considered to have succeeded its removal (and thus
  *   owns the node to reclaim). Because we garbage-collect starting from
- *   an invariant node (the start-of-bucket dummy node) up to the
+ *   an invariant node (the start-of-bucket bucket node) up to the
  *   "removed" node (or find a reverse-hash that is higher), we are sure
  *   that a successful traversal of the chain leads to a chain that is
  *   present in the linked-list (the start node is never removed) and
@@ -88,19 +89,19 @@
  *   for it to do so.
  * - A RCU "order table" indexed by log2(hash index) is copied and
  *   expanded by the resize operation. This order table allows finding
- *   the "dummy node" tables.
- * - There is one dummy node table per hash index order. The size of
- *   each dummy node table is half the number of hashes contained in
+ *   the "bucket node" tables.
+ * - There is one bucket node table per hash index order. The size of
+ *   each bucket node table is half the number of hashes contained in
  *   this order (except for order 0).
- * - synchronzie_rcu is used to garbage-collect the old dummy node table.
- * - The per-order dummy node tables contain a compact version of the
+ * - synchronize_rcu is used to garbage-collect the old bucket node table.
+ * - The per-order bucket node tables contain a compact version of the
  *   hash table nodes. These tables are invariant after they are
  *   populated into the hash table.
  *
- * Dummy node tables:
+ * Bucket node tables:
  *
- * hash table	hash table	the last	all dummy node tables
- * order	size		dummy node	0   1   2   3   4   5   6(index)
+ * hash table	hash table	the last	all bucket node tables
+ * order	size		bucket node	0   1   2   3   4   5   6(index)
  *			table size
  * 0		1		1		1
  * 1		2		1		1   1
@@ -110,12 +111,12 @@
  * 5		32		16		1   1   2   4   8  16
  * 6		64		32		1   1   2   4   8  16  32
  *
- * When growing/shrinking, we only focus on the last dummy node table
+ * When growing/shrinking, we only focus on the last bucket node table
  * whose size is (!order ? 1 : (1 << (order - 1))).
 *
 * Example for growing/shrinking:
- * grow hash table from order 5 to 6: init the index=6 dummy node table
- * shrink hash table from order 6 to 5: fini the index=6 dummy node table
+ * grow hash table from order 5 to 6: init the index=6 bucket node table
+ * shrink hash table from order 6 to 5: fini the index=6 bucket node table
 *
 * A bit of ascii art explanation:
 *
@@ -195,7 +196,7 @@
 #endif

 /*
- * Minimum number of dummy nodes to touch per thread to parallelize grow/shrink.
+ * Minimum number of bucket nodes to touch per thread to parallelize grow/shrink.
 */
 #define MIN_PARTITION_PER_THREAD_ORDER	12
 #define MIN_PARTITION_PER_THREAD	(1UL << MIN_PARTITION_PER_THREAD_ORDER)
@@ -212,11 +213,11 @@
 * The removed flag needs to be updated atomically with the pointer.
 * It indicates that no node must attach to the node scheduled for
 * removal, and that node garbage collection must be performed.
- * The dummy flag does not require to be updated atomically with the
+ * The bucket flag does not need to be updated atomically with the
 * pointer, but it is added as a pointer low bit flag to save space.
 */
 #define REMOVED_FLAG		(1UL << 0)
-#define DUMMY_FLAG		(1UL << 1)
+#define BUCKET_FLAG		(1UL << 1)
 #define FLAGS_MASK		((1UL << 2) - 1)

 /* Value of the end pointer. Should not interact with flags. */
@@ -237,10 +238,10 @@ struct ht_items_count {
 } __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

 /*
- * rcu_level: Contains the per order-index-level dummy node table. The
- * size of each dummy node table is half the number of hashes contained
+ * rcu_level: Contains the per order-index-level bucket node table. The
+ * size of each bucket node table is half the number of hashes contained
 * in this order (except for order 0). The minimum allocation size
- * parameter allows combining the dummy node arrays of the lowermost
+ * parameter allows combining the bucket node arrays of the lowermost
 * levels to improve cache locality for small index orders.
*/ struct rcu_level { @@ -318,11 +319,11 @@ struct partition_resize_work { static void _cds_lfht_add(struct cds_lfht *ht, cds_lfht_match_fct match, - void *key, + const void *key, unsigned long size, struct cds_lfht_node *node, struct cds_lfht_iter *unique_ret, - int dummy); + int bucket); /* * Algorithm to reverse bits in a word by lookup table, extended to @@ -717,15 +718,15 @@ struct cds_lfht_node *flag_removed(struct cds_lfht_node *node) } static -int is_dummy(struct cds_lfht_node *node) +int is_bucket(struct cds_lfht_node *node) { - return ((unsigned long) node) & DUMMY_FLAG; + return ((unsigned long) node) & BUCKET_FLAG; } static -struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node) +struct cds_lfht_node *flag_bucket(struct cds_lfht_node *node) { - return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG); + return (struct cds_lfht_node *) (((unsigned long) node) | BUCKET_FLAG); } static @@ -755,18 +756,14 @@ unsigned long _uatomic_xchg_monotonic_increase(unsigned long *ptr, return old2; } -static -struct cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size, - unsigned long hash) +static inline +struct cds_lfht_node *bucket_at(struct cds_lfht *ht, unsigned long index) { - unsigned long index, order; - - assert(size > 0); - index = hash & (size - 1); + unsigned long order; - if (index < ht->min_alloc_size) { - dbg_printf("lookup hash %lu index %lu order 0 aridx 0\n", - hash, index); + if ((__builtin_constant_p(index) && index == 0) + || index < ht->min_alloc_size) { + dbg_printf("bucket index %lu order 0 aridx 0\n", index); return &ht->t.tbl[0]->nodes[index]; } /* @@ -775,36 +772,44 @@ struct cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size, * get_count_order_ulong. */ order = fls_ulong(index); - dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n", - hash, index, order, index & ((1UL << (order - 1)) - 1)); + dbg_printf("bucket index %lu order %lu aridx %lu\n", + index, order, index & ((1UL << (order - 1)) - 1)); return &ht->t.tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)]; } +static inline +struct cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size, + unsigned long hash) +{ + assert(size > 0); + return bucket_at(ht, hash & (size - 1)); +} + /* * Remove all logically deleted nodes from a bucket up to a certain node key. */ static -void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node) +void _cds_lfht_gc_bucket(struct cds_lfht_node *bucket, struct cds_lfht_node *node) { struct cds_lfht_node *iter_prev, *iter, *next, *new_next; - assert(!is_dummy(dummy)); - assert(!is_removed(dummy)); - assert(!is_dummy(node)); + assert(!is_bucket(bucket)); + assert(!is_removed(bucket)); + assert(!is_bucket(node)); assert(!is_removed(node)); for (;;) { - iter_prev = dummy; - /* We can always skip the dummy node initially */ + iter_prev = bucket; + /* We can always skip the bucket node initially */ iter = rcu_dereference(iter_prev->next); assert(!is_removed(iter)); assert(iter_prev->reverse_hash <= node->reverse_hash); /* - * We should never be called with dummy (start of chain) + * We should never be called with bucket (start of chain) * and logically removed node (end of path compression * marker) being the actual same node. This would be a * bug in the algorithm implementation. 
*/ - assert(dummy != node); + assert(bucket != node); for (;;) { if (caa_unlikely(is_end(iter))) return; @@ -817,8 +822,8 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node iter = next; } assert(!is_removed(iter)); - if (is_dummy(iter)) - new_next = flag_dummy(clear_flag(next)); + if (is_bucket(iter)) + new_next = flag_bucket(clear_flag(next)); else new_next = clear_flag(next); (void) uatomic_cmpxchg(&iter_prev->next, iter, new_next); @@ -838,9 +843,9 @@ int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size, return -ENOENT; assert(!is_removed(old_node)); - assert(!is_dummy(old_node)); + assert(!is_bucket(old_node)); assert(!is_removed(new_node)); - assert(!is_dummy(new_node)); + assert(!is_bucket(new_node)); assert(new_node != old_node); for (;;) { /* Insert after node to be replaced */ @@ -851,7 +856,7 @@ int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size, */ return -ENOENT; } - assert(!is_dummy(old_next)); + assert(!is_bucket(old_next)); assert(new_node != clear_flag(old_next)); new_node->next = clear_flag(old_next); /* @@ -890,17 +895,17 @@ int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size, static void _cds_lfht_add(struct cds_lfht *ht, cds_lfht_match_fct match, - void *key, + const void *key, unsigned long size, struct cds_lfht_node *node, struct cds_lfht_iter *unique_ret, - int dummy) + int bucket_flag) { struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next, *return_node; struct cds_lfht_node *bucket; - assert(!is_dummy(node)); + assert(!is_bucket(node)); assert(!is_removed(node)); bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash)); for (;;) { @@ -911,7 +916,7 @@ void _cds_lfht_add(struct cds_lfht *ht, * insert location. */ iter_prev = bucket; - /* We can always skip the dummy node initially */ + /* We can always skip the bucket node initially */ iter = rcu_dereference(iter_prev->next); assert(iter_prev->reverse_hash <= node->reverse_hash); for (;;) { @@ -920,8 +925,8 @@ void _cds_lfht_add(struct cds_lfht *ht, if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash)) goto insert; - /* dummy node is the first node of the identical-hash-value chain */ - if (dummy && clear_flag(iter)->reverse_hash == node->reverse_hash) + /* bucket node is the first node of the identical-hash-value chain */ + if (bucket_flag && clear_flag(iter)->reverse_hash == node->reverse_hash) goto insert; next = rcu_dereference(clear_flag(iter)->next); @@ -930,7 +935,7 @@ void _cds_lfht_add(struct cds_lfht *ht, /* uniquely add */ if (unique_ret - && !is_dummy(next) + && !is_bucket(next) && clear_flag(iter)->reverse_hash == node->reverse_hash) { struct cds_lfht_iter d_iter = { .node = node, .next = iter, }; @@ -953,7 +958,7 @@ void _cds_lfht_add(struct cds_lfht *ht, /* Only account for identical reverse hash once */ if (iter_prev->reverse_hash != clear_flag(iter)->reverse_hash - && !is_dummy(next)) + && !is_bucket(next)) check_resize(ht, size, ++chain_len); iter_prev = clear_flag(iter); iter = next; @@ -964,12 +969,12 @@ void _cds_lfht_add(struct cds_lfht *ht, assert(!is_removed(iter_prev)); assert(!is_removed(iter)); assert(iter_prev != node); - if (!dummy) + if (!bucket_flag) node->next = clear_flag(iter); else - node->next = flag_dummy(clear_flag(iter)); - if (is_dummy(iter)) - new_node = flag_dummy(node); + node->next = flag_bucket(clear_flag(iter)); + if (is_bucket(iter)) + new_node = flag_bucket(node); else new_node = node; if (uatomic_cmpxchg(&iter_prev->next, iter, @@ -982,8 +987,8 @@ void 
_cds_lfht_add(struct cds_lfht *ht,
 gc_node:
 	assert(!is_removed(iter));
-	if (is_dummy(iter))
-		new_next = flag_dummy(clear_flag(next));
+	if (is_bucket(iter))
+		new_next = flag_bucket(clear_flag(next));
 	else
 		new_next = clear_flag(next);
 	(void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
@@ -999,7 +1004,7 @@ end:
 static
 int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
 		struct cds_lfht_node *node,
-		int dummy_removal)
+		int bucket_removal)
 {
 	struct cds_lfht_node *bucket, *next, *old;

@@ -1007,7 +1012,7 @@ int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
 		return -ENOENT;

 	/* logically delete the node */
-	assert(!is_dummy(node));
+	assert(!is_bucket(node));
 	assert(!is_removed(node));
 	old = rcu_dereference(node->next);
 	do {
@@ -1016,10 +1021,10 @@ int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
 		next = old;
 		if (caa_unlikely(is_removed(next)))
 			return -ENOENT;
-		if (dummy_removal)
-			assert(is_dummy(next));
+		if (bucket_removal)
+			assert(is_bucket(next));
 		else
-			assert(!is_dummy(next));
+			assert(!is_bucket(next));
 		new_next = flag_removed(next);
 		old = uatomic_cmpxchg(&node->next, next, new_next);
 	} while (old != next);
@@ -1099,25 +1104,24 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
 * many worker threads, based on the number of CPUs available in the system.
 * This should therefore take care of not having the expand lagging behind too
 * many concurrent insertion threads by using the scheduler's ability to
- * schedule dummy node population fairly with insertions.
+ * schedule bucket node population fairly with insertions.
 */
 static
 void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
				   unsigned long start, unsigned long len)
 {
-	unsigned long j;
+	unsigned long j, size = 1UL << (i - 1);

 	assert(i > ht->min_alloc_order);
 	ht->cds_lfht_rcu_read_lock();
-	for (j = start; j < start + len; j++) {
-		struct cds_lfht_node *new_node = &ht->t.tbl[i]->nodes[j];
-
-		dbg_printf("init populate: i %lu j %lu hash %lu\n",
-			   i, j, (1UL << (i - 1)) + j);
-		new_node->reverse_hash =
-			bit_reverse_ulong((1UL << (i - 1)) + j);
-		_cds_lfht_add(ht, NULL, NULL, 1UL << (i - 1),
-			      new_node, NULL, 1);
+	for (j = size + start; j < size + start + len; j++) {
+		struct cds_lfht_node *new_node = bucket_at(ht, j);
+
+		assert(j >= size && j < (size << 1));
+		dbg_printf("init populate: order %lu index %lu hash %lu\n",
+			   i, j, j);
+		new_node->reverse_hash = bit_reverse_ulong(j);
+		_cds_lfht_add(ht, NULL, NULL, size, new_node, NULL, 1);
 	}
 	ht->cds_lfht_rcu_read_unlock();
 }
@@ -1159,8 +1163,8 @@ void init_table(struct cds_lfht *ht,
 		assert(ht->t.tbl[i]);

 		/*
-		 * Set all dummy nodes reverse hash values for a level and
-		 * link all dummy nodes into the table.
+		 * Set all bucket nodes' reverse hash values for a level and
+		 * link all bucket nodes into the table.
 		 */
 		init_table_populate(ht, i, len);

@@ -1191,7 +1195,7 @@ void init_table(struct cds_lfht *ht,
 * Concurrent removal and add operations are helping us perform garbage
 * collection of logically removed nodes. We guarantee that all logically
 * removed nodes have been garbage-collected (unlinked) before call_rcu is
- * invoked to free a hole level of dummy nodes (after a grace period).
+ * invoked to free a whole level of bucket nodes (after a grace period).
 *
 * Logical removal and garbage collection can therefore be done in batch or on a
 * node-per-node basis, as long as the guarantee above holds.
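The removed-flag protocol used by _cds_lfht_del() above is the crux of the
lock-free removal: whoever wins the cmpxchg that sets REMOVED_FLAG owns the
removal, and physical unlinking is deferred to garbage collection. The
following is a minimal, self-contained C11 sketch of that idea only; the
names (tagged_node, tagged_node_del) and the use of <stdatomic.h> are
illustrative assumptions for a standalone example, not part of rculfhash.c,
which performs the same loop with uatomic_cmpxchg() under RCU protection.

#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>

#define REMOVED_FLAG	(1UL << 0)	/* low bit of the next pointer */

struct tagged_node {
	_Atomic uintptr_t next;		/* successor pointer | flag bits */
};

static int is_removed(uintptr_t next)
{
	return next & REMOVED_FLAG;
}

/*
 * Logical deletion: atomically set REMOVED_FLAG in node->next.
 * Exactly one thread wins the compare-and-exchange and thereby
 * "owns" the node for later unlinking and reclamation; losers
 * observe the flag already set and report -ENOENT, mirroring the
 * behavior of _cds_lfht_del().
 */
static int tagged_node_del(struct tagged_node *node)
{
	uintptr_t old = atomic_load(&node->next);

	do {
		if (is_removed(old))
			return -ENOENT;	/* concurrently removed */
	} while (!atomic_compare_exchange_weak(&node->next, &old,
					       old | REMOVED_FLAG));
	return 0;	/* caller now owns the unlink/free */
}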
@@ -1205,18 +1209,18 @@ static
 void remove_table_partition(struct cds_lfht *ht, unsigned long i,
			    unsigned long start, unsigned long len)
 {
-	unsigned long j;
+	unsigned long j, size = 1UL << (i - 1);

 	assert(i > ht->min_alloc_order);
 	ht->cds_lfht_rcu_read_lock();
-	for (j = start; j < start + len; j++) {
-		struct cds_lfht_node *fini_node = &ht->t.tbl[i]->nodes[j];
-
-		dbg_printf("remove entry: i %lu j %lu hash %lu\n",
-			   i, j, (1UL << (i - 1)) + j);
-		fini_node->reverse_hash =
-			bit_reverse_ulong((1UL << (i - 1)) + j);
-		(void) _cds_lfht_del(ht, 1UL << (i - 1), fini_node, 1);
+	for (j = size + start; j < size + start + len; j++) {
+		struct cds_lfht_node *fini_node = bucket_at(ht, j);
+
+		assert(j >= size && j < (size << 1));
+		dbg_printf("remove entry: order %lu index %lu hash %lu\n",
+			   i, j, j);
+		fini_node->reverse_hash = bit_reverse_ulong(j);
+		(void) _cds_lfht_del(ht, size, fini_node, 1);
 	}
 	ht->cds_lfht_rcu_read_unlock();
 }
@@ -1261,7 +1265,7 @@ void fini_table(struct cds_lfht *ht,
		/*
		 * We need to wait for all add operations to reach Q.S. (and
		 * thus use the new table for lookups) before we can start
-		 * releasing the old dummy nodes. Otherwise their lookup will
+		 * releasing the old bucket nodes. Otherwise their lookup will
		 * return a logically removed node as insert position.
		 */
		ht->cds_lfht_synchronize_rcu();
@@ -1269,8 +1273,8 @@ void fini_table(struct cds_lfht *ht,
			free(free_by_rcu);

		/*
-		 * Set "removed" flag in dummy nodes about to be removed.
-		 * Unlink all now-logically-removed dummy node pointers.
+		 * Set "removed" flag in bucket nodes about to be removed.
+		 * Unlink all now-logically-removed bucket node pointers.
		 * Concurrent add/remove operations are helping us do
		 * the gc.
		 */
@@ -1290,17 +1294,18 @@
 }

 static
-void cds_lfht_create_dummy(struct cds_lfht *ht, unsigned long size)
+void cds_lfht_create_bucket(struct cds_lfht *ht, unsigned long size)
 {
	struct cds_lfht_node *prev, *node;
-	unsigned long order, len, i, j;
+	unsigned long order, len, i;

	ht->t.tbl[0] = calloc(1, ht->min_alloc_size * sizeof(struct cds_lfht_node));
	assert(ht->t.tbl[0]);

-	dbg_printf("create dummy: order %lu index %lu hash %lu\n", 0, 0, 0);
-	ht->t.tbl[0]->nodes[0].next = flag_dummy(get_end());
-	ht->t.tbl[0]->nodes[0].reverse_hash = 0;
+	dbg_printf("create bucket: order 0 index 0 hash 0\n");
+	node = bucket_at(ht, 0);
+	node->next = flag_bucket(get_end());
+	node->reverse_hash = 0;

	for (order = 1; order < get_count_order_ulong(size) + 1; order++) {
		len = 1UL << (order - 1);
@@ -1311,23 +1316,29 @@ void cds_lfht_create_dummy(struct cds_lfht *ht, unsigned long size)
			assert(ht->t.tbl[order]);
		}

-		i = 0;
-		prev = ht->t.tbl[i]->nodes;
-		for (j = 0; j < len; j++) {
-			if (j & (j - 1)) {	/* Between power of 2 */
-				prev++;
-			} else if (j) {		/* At each power of 2 */
-				i++;
-				prev = ht->t.tbl[i]->nodes;
-			}
+		for (i = 0; i < len; i++) {
+			/*
+			 * We are initializing the bucket node with
+			 * hash (len + i), which is also the bucket of
+			 * index (len + i), and inserting it into the
+			 * hash table. It must therefore be linked after
+			 * the bucket of index (len + i) & (len - 1) = i.
+			 * Because no other node, bucket or not, with a
+			 * larger index/hash has been inserted yet, the
+			 * new bucket node is linked directly after the
+			 * bucket node of index i.
+ */ + prev = bucket_at(ht, i); + node = bucket_at(ht, len + i); - node = &ht->t.tbl[order]->nodes[j]; - dbg_printf("create dummy: order %lu index %lu hash %lu\n", - order, j, j + len); + dbg_printf("create bucket: order %lu index %lu hash %lu\n", + order, len + i, len + i); + node->reverse_hash = bit_reverse_ulong(len + i); + + /* insert after prev */ + assert(is_bucket(prev->next)); node->next = prev->next; - assert(is_dummy(node->next)); - node->reverse_hash = bit_reverse_ulong(j + len); - prev->next = flag_dummy(node); + prev->next = flag_bucket(node); } } } @@ -1376,13 +1387,14 @@ struct cds_lfht *_cds_lfht_new(unsigned long init_size, ht->t.resize_target = 1UL << order; ht->min_alloc_size = min_alloc_size; ht->min_alloc_order = get_count_order_ulong(min_alloc_size); - cds_lfht_create_dummy(ht, 1UL << order); + cds_lfht_create_bucket(ht, 1UL << order); ht->t.size = 1UL << order; return ht; } -void cds_lfht_lookup(struct cds_lfht *ht, cds_lfht_match_fct match, - unsigned long hash, void *key, struct cds_lfht_iter *iter) +void cds_lfht_lookup(struct cds_lfht *ht, unsigned long hash, + cds_lfht_match_fct match, const void *key, + struct cds_lfht_iter *iter) { struct cds_lfht_node *node, *next, *bucket; unsigned long reverse_hash, size; @@ -1391,7 +1403,7 @@ void cds_lfht_lookup(struct cds_lfht *ht, cds_lfht_match_fct match, size = rcu_dereference(ht->t.size); bucket = lookup_bucket(ht, size, hash); - /* We can always skip the dummy node initially */ + /* We can always skip the bucket node initially */ node = rcu_dereference(bucket->next); node = clear_flag(node); for (;;) { @@ -1406,20 +1418,20 @@ void cds_lfht_lookup(struct cds_lfht *ht, cds_lfht_match_fct match, next = rcu_dereference(node->next); assert(node == clear_flag(node)); if (caa_likely(!is_removed(next)) - && !is_dummy(next) + && !is_bucket(next) && node->reverse_hash == reverse_hash && caa_likely(match(node, key))) { break; } node = clear_flag(next); } - assert(!node || !is_dummy(rcu_dereference(node->next))); + assert(!node || !is_bucket(rcu_dereference(node->next))); iter->node = node; iter->next = next; } void cds_lfht_next_duplicate(struct cds_lfht *ht, cds_lfht_match_fct match, - void *key, struct cds_lfht_iter *iter) + const void *key, struct cds_lfht_iter *iter) { struct cds_lfht_node *node, *next; unsigned long reverse_hash; @@ -1440,13 +1452,13 @@ void cds_lfht_next_duplicate(struct cds_lfht *ht, cds_lfht_match_fct match, } next = rcu_dereference(node->next); if (caa_likely(!is_removed(next)) - && !is_dummy(next) + && !is_bucket(next) && caa_likely(match(node, key))) { break; } node = clear_flag(next); } - assert(!node || !is_dummy(rcu_dereference(node->next))); + assert(!node || !is_bucket(rcu_dereference(node->next))); iter->node = node; iter->next = next; } @@ -1463,26 +1475,23 @@ void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter) } next = rcu_dereference(node->next); if (caa_likely(!is_removed(next)) - && !is_dummy(next)) { + && !is_bucket(next)) { break; } node = clear_flag(next); } - assert(!node || !is_dummy(rcu_dereference(node->next))); + assert(!node || !is_bucket(rcu_dereference(node->next))); iter->node = node; iter->next = next; } void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter) { - struct cds_lfht_node *lookup; - /* - * Get next after first dummy node. The first dummy node is the + * Get next after first bucket node. The first bucket node is the * first node of the linked list. 
 */
-	lookup = &ht->t.tbl[0]->nodes[0];
-	iter->next = lookup->next;
+	iter->next = bucket_at(ht, 0)->next;
	cds_lfht_next(ht, iter);
 }

@@ -1498,9 +1507,9 @@ void cds_lfht_add(struct cds_lfht *ht, unsigned long hash,
 }

 struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
-		cds_lfht_match_fct match,
-		void *key,
		unsigned long hash,
+		cds_lfht_match_fct match,
+		const void *key,
		struct cds_lfht_node *node)
 {
	unsigned long size;
@@ -1515,9 +1524,9 @@ struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
 }

 struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
-		cds_lfht_match_fct match,
-		void *key,
		unsigned long hash,
+		cds_lfht_match_fct match,
+		const void *key,
		struct cds_lfht_node *node)
 {
	unsigned long size;
@@ -1562,16 +1571,16 @@ int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_iter *iter)
 }

 static
-int cds_lfht_delete_dummy(struct cds_lfht *ht)
+int cds_lfht_delete_bucket(struct cds_lfht *ht)
 {
	struct cds_lfht_node *node;
	unsigned long order, i, size;

	/* Check that the table is empty */
-	node = &ht->t.tbl[0]->nodes[0];
+	node = bucket_at(ht, 0);
	do {
		node = clear_flag(node)->next;
-		if (!is_dummy(node))
+		if (!is_bucket(node))
			return -EPERM;
		assert(!is_removed(node));
	} while (!is_end(node));
@@ -1580,7 +1589,7 @@ int cds_lfht_delete_dummy(struct cds_lfht *ht)
	 * being destroyed.
	 */
	size = ht->t.size;
-	/* Internal sanity check: all nodes left should be dummy */
+	/* Internal sanity check: all nodes left should be bucket nodes */
	for (order = 0; order < get_count_order_ulong(size) + 1; order++) {
		unsigned long len;

@@ -1589,7 +1598,7 @@ int cds_lfht_delete_dummy(struct cds_lfht *ht)
			dbg_printf("delete order %lu i %lu hash %lu\n",
				order, i,
				bit_reverse_ulong(ht->t.tbl[order]->nodes[i].reverse_hash));
-			assert(is_dummy(ht->t.tbl[order]->nodes[i].next));
+			assert(is_bucket(ht->t.tbl[order]->nodes[i].next));
		}

		if (order == ht->min_alloc_order)
@@ -1614,7 +1623,7 @@ int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
	cmm_smp_mb();	/* Store destroy before load resize */
	while (uatomic_read(&ht->in_progress_resize))
		poll(NULL, 0, 100);	/* wait for 100ms */
-	ret = cds_lfht_delete_dummy(ht);
+	ret = cds_lfht_delete_bucket(ht);
	if (ret)
		return ret;
	free_split_items_count(ht);
@@ -1631,7 +1640,7 @@ void cds_lfht_count_nodes(struct cds_lfht *ht,
		long *approx_after)
 {
	struct cds_lfht_node *node, *next;
-	unsigned long nr_dummy = 0;
+	unsigned long nr_bucket = 0;

	*approx_before = 0;
	if (ht->split_count) {
@@ -1646,22 +1655,22 @@ void cds_lfht_count_nodes(struct cds_lfht *ht,
	*count = 0;
	*removed = 0;

-	/* Count non-dummy nodes in the table */
-	node = &ht->t.tbl[0]->nodes[0];
+	/* Count non-bucket nodes in the table */
+	node = bucket_at(ht, 0);
	do {
		next = rcu_dereference(node->next);
		if (is_removed(next)) {
-			if (!is_dummy(next))
+			if (!is_bucket(next))
				(*removed)++;
			else
-				(nr_dummy)++;
-		} else if (!is_dummy(next))
+				(nr_bucket)++;
+		} else if (!is_bucket(next))
			(*count)++;
		else
-			(nr_dummy)++;
+			(nr_bucket)++;
		node = clear_flag(next);
	} while (!is_end(node));
-	dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
+	dbg_printf("number of bucket nodes: %lu\n", nr_bucket);
	*approx_after = 0;
	if (ht->split_count) {
		int i;
@@ -1702,7 +1711,7 @@ void _do_cds_lfht_shrink(struct cds_lfht *ht,
			old_size, old_order, new_size, new_order);
	assert(new_size < old_size);

-	/* Remove and unlink all dummy nodes to remove. */
+	/* Remove and unlink all bucket nodes that must be removed. */
	fini_table(ht, new_order + 1, old_order);
 }
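Beyond the dummy-to-bucket rename, the externally visible change in this diff
is that cds_lfht_lookup(), cds_lfht_add_unique() and cds_lfht_add_replace()
now take the hash before the match function, with a const-qualified key.
A sketch of an updated call site follows; struct my_entry, my_match() and
my_lookup() are hypothetical caller code, not part of this patch, and the
calling thread is assumed to be registered with an RCU flavor.

#include <stddef.h>
#include <urcu/compiler.h>	/* caa_container_of() */
#include <urcu/rculfhash.h>

struct my_entry {
	int key;
	struct cds_lfht_node node;	/* node embedded in user data */
};

/* Match callback: the key parameter is now const void *, per this patch. */
static int my_match(struct cds_lfht_node *node, const void *key)
{
	struct my_entry *e = caa_container_of(node, struct my_entry, node);

	return e->key == *(const int *) key;
}

/*
 * Lookup using the new argument order: hash before match/key.
 * Caller must hold rcu_read_lock() and may only dereference the
 * returned entry within that read-side critical section.
 */
static struct my_entry *my_lookup(struct cds_lfht *ht, unsigned long hash,
		int key)
{
	struct cds_lfht_iter iter;
	struct cds_lfht_node *node;

	cds_lfht_lookup(ht, hash, my_match, &key, &iter);
	node = cds_lfht_iter_get_node(&iter);
	return node ? caa_container_of(node, struct my_entry, node) : NULL;
}

Insertion follows the same reordering: a unique add under the new prototype
reads cds_lfht_add_unique(ht, hash, my_match, &entry->key, &entry->node).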