diff --git a/rculfhash.c b/rculfhash.c
index ec799dc..20d389d 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -1,6 +1,97 @@
+/*
+ * rculfhash.c
+ *
+ * Userspace RCU library - Lock-Free Expandable RCU Hash Table
+ *
+ * Copyright 2010-2011 - Mathieu Desnoyers
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
 /*
- * TODO: keys are currently assumed <= sizeof(void *). Key target never freed.
+ * Based on the following articles:
+ * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
+ *   extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
+ * - Michael, M. M. High performance dynamic lock-free hash tables
+ *   and list-based sets. In Proceedings of the fourteenth annual ACM
+ *   symposium on Parallel algorithms and architectures, ACM Press,
+ *   (2002), 73-82.
+ *
+ * Some specificities of this Lock-Free Expandable RCU Hash Table
+ * implementation:
+ *
+ * - An RCU read-side critical section allows readers to perform hash
+ *   table lookups and use the returned objects safely, by delaying
+ *   memory reclaim by at least a grace period.
+ * - Add and remove operations are lock-free, and do not need to
+ *   allocate memory. They need to be executed within an RCU read-side
+ *   critical section to ensure the objects they read are valid and to
+ *   deal with the cmpxchg ABA problem.
+ * - add and add_unique operations are supported. add_unique checks
+ *   whether the node key already exists in the hash table. It ensures
+ *   that no key duplicates exist.
+ * - The resize operation executes concurrently with add/remove/lookup.
+ * - Hash table nodes are contained within a split-ordered list. This
+ *   list is ordered by incrementing reversed-bits-hash value.
+ * - An index of dummy nodes is kept. These dummy nodes are the hash
+ *   table "buckets", and they are also chained together in the
+ *   split-ordered list, which allows recursive expansion.
+ * - The resize operation only allows expanding the hash table.
+ *   It is triggered either through an API call or automatically by
+ *   detecting long chains in the add operation.
+ * - A resize operation initiated by long chain detection is executed
+ *   by a call_rcu thread, which preserves the lock-freedom of add and
+ *   remove.
+ * - Resize operations are protected by a mutex.
+ * - The removal operation is split in two parts: first, a "removed"
+ *   flag is set in the next pointer within the node to remove. Then,
+ *   a "garbage collection" is performed in the bucket containing the
+ *   removed node (from the start of the bucket up to the removed node).
+ *   All encountered nodes with "removed" flag set in their next
+ *   pointers are removed from the linked-list. If the cmpxchg used for
+ *   removal fails (due to concurrent garbage-collection or concurrent
+ *   add), we retry from the beginning of the bucket. This ensures that
+ *   the node with "removed" flag set is removed from the hash table
+ *   (not visible to lookups anymore) before the RCU read-side critical
+ *   section held across removal ends. Furthermore, this ensures that
+ *   the node with "removed" flag set is removed from the linked-list
+ *   before its memory is reclaimed. Only the thread whose removal
+ *   successfully set the "removed" flag (with a cmpxchg) in a node's
+ *   next pointer is considered to have succeeded its removal (and thus
+ *   owns the node to reclaim). Because we garbage-collect starting from
+ *   an invariant node (the start-of-bucket dummy node) up to the
+ *   "removed" node (or find a reverse-hash that is higher), we are sure
+ *   that a successful traversal of the chain leads to a chain that is
+ *   present in the linked-list (the start node is never removed) and
+ *   that it does not contain the "removed" node anymore, even if
+ *   concurrent delete/add operations are changing the structure of the
+ *   list concurrently.
+ * - The add operation performs garbage collection of buckets if it
+ *   encounters nodes with removed flag set in the bucket where it wants
+ *   to add its new node. This ensures lock-freedom of the add operation
+ *   by helping the remover unlink nodes from the list rather than
+ *   waiting for it to do so.
+ * - An RCU "order table" indexed by log2(hash index) is copied and
+ *   expanded by the resize operation. This order table allows finding
+ *   the "dummy node" tables.
+ * - There is one dummy node table per hash index order. The size of
+ *   each dummy node table is half the number of hashes contained in
+ *   this order.
+ * - call_rcu is used to garbage-collect the old order table.
+ * - The per-order dummy node tables contain a compact version of the
+ *   hash table nodes. These tables are invariant after they are
+ *   populated into the hash table.
  */

 #define _LGPL_SOURCE
@@ -8,478 +99,1043 @@
 #include
 #include
 #include
+#include
+#include
+#include "config.h"
 #include
-#include
+#include
 #include
 #include
 #include
 #include
+#include
 #include
 #include
-#include
+
+#ifdef DEBUG
+#define dbg_printf(fmt, args...)	printf("[debug rculfhash] " fmt, ## args)
+#else
+#define dbg_printf(fmt, args...)
+#endif

 /*
- * Maximum number of hash table buckets: 256M on 64-bit.
- * Should take about 512MB max if we assume 1 node per 4 buckets.
+ * Per-CPU split-counters lazily update the global counter each time
+ * 1024 additions/removals have accumulated; they automatically keep
+ * track of whether a resize is required.
+ * We use the bucket length as an indicator of the need to expand, for
+ * small tables and for machines lacking per-cpu data support.
  */
-#define MAX_HT_BUCKETS ((256 << 10) / sizeof(void *))
+#define COUNT_COMMIT_ORDER		10
+#define CHAIN_LEN_TARGET		1
+#define CHAIN_LEN_RESIZE_THRESHOLD	3

-/* node flags */
-#define NODE_STOLEN (1 << 0)
+#ifndef max
+#define max(a, b)	((a) > (b) ? (a) : (b))
+#endif

-struct rcu_ht_node;
+/*
+ * The removed flag needs to be updated atomically with the pointer.
+ * The dummy flag does not need to be updated atomically with the
+ * pointer, but it is added as a pointer low bit flag to save space.
+ */
+#define REMOVED_FLAG	(1UL << 0)
+#define DUMMY_FLAG	(1UL << 1)
+#define FLAGS_MASK	((1UL << 2) - 1)
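Because nodes are allocated with at least word alignment, the two low bits of a next pointer are always zero, which is what lets REMOVED_FLAG and DUMMY_FLAG ride inside the pointer itself. A minimal standalone sketch of the tag/untag round trip (clear_flag mirrors the helper defined further down in this patch):

#include <assert.h>
#include <stdlib.h>

#define REMOVED_FLAG	(1UL << 0)
#define FLAGS_MASK	((1UL << 2) - 1)

struct node { struct node *next; };

static struct node *clear_flag(struct node *p)
{
	return (struct node *) ((unsigned long) p & ~FLAGS_MASK);
}

int main(void)
{
	struct node *n = malloc(sizeof(*n));
	/* Tag the pointer: logically removed, but still traversable. */
	struct node *tagged = (struct node *) ((unsigned long) n | REMOVED_FLAG);

	assert(((unsigned long) n & FLAGS_MASK) == 0);	/* alignment gives free bits */
	assert(clear_flag(tagged) == n);		/* the tag strips cleanly */
	free(n);
	return 0;
}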

-struct rcu_ht_node {
-	struct rcu_ht_node *next;
-	void *key;
-	void *data;
-	unsigned int flags;
-};
+struct ht_items_count {
+	unsigned long add, remove;
+} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

 struct rcu_table {
-	unsigned long size;
-	struct rcu_ht_node *tbl[0];
+	unsigned long size;	/* always a power of 2 */
+	unsigned long resize_target;
+	int resize_initiated;
+	struct rcu_head head;
+	struct _cds_lfht_node *tbl[0];
 };

-struct rcu_ht {
+struct cds_lfht {
 	struct rcu_table *t;		/* shared */
-	ht_hash_fct hash_fct;
-	void (*free_fct)(void *data);	/* fct to free data */
-	uint32_t keylen;
-	uint32_t hashseed;
+	cds_lfht_hash_fct hash_fct;
+	cds_lfht_compare_fct compare_fct;
+	unsigned long hash_seed;
 	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
-	int resize_ongoing;		/* fast-path resize check */
+	unsigned int in_progress_resize, in_progress_destroy;
+	void (*cds_lfht_call_rcu)(struct rcu_head *head,
+			void (*func)(struct rcu_head *head));
+	unsigned long count;		/* global approximate item count */
+	struct ht_items_count *percpu_count;	/* per-cpu item count */
 };

-struct rcu_ht *ht_new(ht_hash_fct hash_fct, void (*free_fct)(void *data),
-		      unsigned long init_size, uint32_t keylen,
-		      uint32_t hashseed)
+struct rcu_resize_work {
+	struct rcu_head head;
+	struct cds_lfht *ht;
+};
+
+/*
+ * Algorithm to reverse bits in a word by lookup table, extended to
+ * 64-bit words.
+ * Source:
+ * http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
+ * Originally from Public Domain.
+ */
+
+static const uint8_t BitReverseTable256[256] =
 {
-	struct rcu_ht *ht;
+#define R2(n) (n),   (n) + 2*64,     (n) + 1*64,     (n) + 3*64
+#define R4(n) R2(n), R2((n) + 2*16), R2((n) + 1*16), R2((n) + 3*16)
+#define R6(n) R4(n), R4((n) + 2*4 ), R4((n) + 1*4 ), R4((n) + 3*4 )
+	R6(0), R6(2), R6(1), R6(3)
+};
+#undef R2
+#undef R4
+#undef R6

-	ht = calloc(1, sizeof(struct rcu_ht));
-	ht->hash_fct = hash_fct;
-	ht->free_fct = free_fct;
-	ht->keylen = keylen;
-	ht->hashseed = hashseed;
-	/* this mutex should not nest in read-side C.S. */
-	pthread_mutex_init(&ht->resize_mutex, NULL);
-	ht->resize_ongoing = 0;	/* shared */
-	ht->t = calloc(1, sizeof(struct rcu_table)
-		       + (init_size * sizeof(struct rcu_ht_node *)));
-	ht->t->size = init_size;
-	return ht;
+static
+uint8_t bit_reverse_u8(uint8_t v)
+{
+	return BitReverseTable256[v];
 }

-void *ht_lookup(struct rcu_ht *ht, void *key)
+static __attribute__((unused))
+uint32_t bit_reverse_u32(uint32_t v)
 {
-	struct rcu_table *t;
-	unsigned long hash;
-	struct rcu_ht_node *node;
-	void *ret;
+	return ((uint32_t) bit_reverse_u8(v) << 24) |
+		((uint32_t) bit_reverse_u8(v >> 8) << 16) |
+		((uint32_t) bit_reverse_u8(v >> 16) << 8) |
+		((uint32_t) bit_reverse_u8(v >> 24));
+}

-	rcu_read_lock();
-	t = rcu_dereference(ht->t);
-	smp_read_barrier_depends();	/* read t before size and table */
-	hash = ht->hash_fct(key, ht->keylen, ht->hashseed) % t->size;
-	smp_read_barrier_depends();	/* read size before links */
-	node = rcu_dereference(t->tbl[hash]);
-	for (;;) {
-		if (likely(!node)) {
-			ret = NULL;
-			break;
-		}
-		if (node->key == key) {
-			ret = node->data;
-			break;
-		}
-		node = rcu_dereference(node->next);
-	}
-	rcu_read_unlock();
+static __attribute__((unused))
+uint64_t bit_reverse_u64(uint64_t v)
+{
+	return ((uint64_t) bit_reverse_u8(v) << 56) |
+		((uint64_t) bit_reverse_u8(v >> 8)  << 48) |
+		((uint64_t) bit_reverse_u8(v >> 16) << 40) |
+		((uint64_t) bit_reverse_u8(v >> 24) << 32) |
+		((uint64_t) bit_reverse_u8(v >> 32) << 24) |
+		((uint64_t) bit_reverse_u8(v >> 40) << 16) |
+		((uint64_t) bit_reverse_u8(v >> 48) << 8) |
+		((uint64_t) bit_reverse_u8(v >> 56));
+}

-	return ret;
+static
+unsigned long bit_reverse_ulong(unsigned long v)
+{
+#if (CAA_BITS_PER_LONG == 32)
+	return bit_reverse_u32(v);
+#else
+	return bit_reverse_u64(v);
+#endif
 }
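Reversing the hash bits is what makes bucket splitting cheap: when the table doubles, the nodes of bucket i that belong in bucket i + size are exactly those that sort after the new dummy node in reversed-bit order, so no node ever has to be relocated. A quick standalone check of 8-bit reversal against a naive loop (independent of the lookup table above):

#include <assert.h>
#include <stdint.h>

static uint8_t naive_reverse_u8(uint8_t v)
{
	uint8_t r = 0;
	int i;

	for (i = 0; i < 8; i++)		/* move bit i to bit 7 - i */
		r |= (uint8_t) (((v >> i) & 1) << (7 - i));
	return r;
}

int main(void)
{
	assert(naive_reverse_u8(0x01) == 0x80);
	assert(naive_reverse_u8(0xF0) == 0x0F);
	assert(naive_reverse_u8(0xA5) == 0xA5);	/* palindromic bit pattern */
	return 0;
}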

 /*
- * Will re-try until either:
- * - The key is already there (-EEXIST)
- * - We successfully add the key at the head of a table bucket.
+ * fls: returns the position of the most significant bit.
+ * Returns 0 if no bit is set, else returns the position of the most
+ * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
  */
-int ht_add(struct rcu_ht *ht, void *key, void *data)
+#if defined(__i386) || defined(__x86_64)
+static inline
+unsigned int fls_u32(uint32_t x)
 {
-	struct rcu_ht_node *node, *old_head, *new_head;
-	struct rcu_table *t;
-	unsigned long hash;
-	int ret = 0;
-
-	new_head = calloc(1, sizeof(struct rcu_ht_node));
-	new_head->key = key;
-	new_head->data = data;
-	new_head->flags = 0;
-	/* here comes the fun and tricky part.
-	 * Add at the beginning with a cmpxchg.
-	 * Hold a read lock between the moment the first element is read
-	 * and the nodes traversal (to find duplicates). This ensures
-	 * the head pointer has not been reclaimed when cmpxchg is done.
-	 * Always adding at the head ensures that we would have to
-	 * re-try if a new item has been added concurrently. So we ensure that
-	 * we never add duplicates. */
-retry:
-	rcu_read_lock();
-
-	if (unlikely(LOAD_SHARED(ht->resize_ongoing))) {
-		rcu_read_unlock();
-		/*
-		 * Wait for resize to complete before continuing.
-		 */
-		ret = pthread_mutex_lock(&ht->resize_mutex);
-		assert(!ret);
-		ret = pthread_mutex_unlock(&ht->resize_mutex);
-		assert(!ret);
-		goto retry;
+	int r;
+
+	asm("bsrl %1,%0\n\t"
+	    "jnz 1f\n\t"
+	    "movl $-1,%0\n\t"
+	    "1:\n\t"
+	    : "=r" (r) : "rm" (x));
+	return r + 1;
+}
+#define HAS_FLS_U32
+#endif
+
+#if defined(__x86_64)
+static inline
+unsigned int fls_u64(uint64_t x)
+{
+	long r;
+
+	asm("bsrq %1,%0\n\t"
+	    "jnz 1f\n\t"
+	    "movq $-1,%0\n\t"
+	    "1:\n\t"
+	    : "=r" (r) : "rm" (x));
+	return r + 1;
+}
+#define HAS_FLS_U64
+#endif
+
+#ifndef HAS_FLS_U64
+static __attribute__((unused))
+unsigned int fls_u64(uint64_t x)
+{
+	unsigned int r = 64;
+
+	if (!x)
+		return 0;
+
+	if (!(x & 0xFFFFFFFF00000000ULL)) {
+		x <<= 32;
+		r -= 32;
+	}
+	if (!(x & 0xFFFF000000000000ULL)) {
+		x <<= 16;
+		r -= 16;
+	}
+	if (!(x & 0xFF00000000000000ULL)) {
+		x <<= 8;
+		r -= 8;
+	}
+	if (!(x & 0xF000000000000000ULL)) {
+		x <<= 4;
+		r -= 4;
 	}
+	if (!(x & 0xC000000000000000ULL)) {
+		x <<= 2;
+		r -= 2;
+	}
+	if (!(x & 0x8000000000000000ULL)) {
+		x <<= 1;
+		r -= 1;
+	}
+	return r;
+}
+#endif

-	t = rcu_dereference(ht->t);
-	/* no read barrier needed, because no concurrency with resize */
-	hash = ht->hash_fct(key, ht->keylen, ht->hashseed) % t->size;
+#ifndef HAS_FLS_U32
+static __attribute__((unused))
+unsigned int fls_u32(uint32_t x)
+{
+	unsigned int r = 32;

-	old_head = node = rcu_dereference(t->tbl[hash]);
-	for (;;) {
-		if (likely(!node)) {
-			break;
-		}
-		if (node->key == key) {
-			ret = -EEXIST;
-			goto end;
-		}
-		node = rcu_dereference(node->next);
+	if (!x)
+		return 0;
+	if (!(x & 0xFFFF0000U)) {
+		x <<= 16;
+		r -= 16;
 	}
-	new_head->next = old_head;
-	if (rcu_cmpxchg_pointer(&t->tbl[hash], old_head, new_head) != old_head)
-		goto restart;
-end:
-	rcu_read_unlock();
-	return ret;
+	if (!(x & 0xFF000000U)) {
+		x <<= 8;
+		r -= 8;
+	}
+	if (!(x & 0xF0000000U)) {
+		x <<= 4;
+		r -= 4;
+	}
+	if (!(x & 0xC0000000U)) {
+		x <<= 2;
+		r -= 2;
+	}
+	if (!(x & 0x80000000U)) {
+		x <<= 1;
+		r -= 1;
+	}
+	return r;
+}
+#endif

-	/* restart loop, release and re-take the read lock to be kind to GP */
-restart:
-	rcu_read_unlock();
-	goto retry;
+unsigned int fls_ulong(unsigned long x)
+{
+#if (CAA_BITS_PER_LONG == 32)
+	return fls_u32(x);
+#else
+	return fls_u64(x);
+#endif
 }

+int get_count_order_u32(uint32_t x)
+{
+	int order;
+
+	order = fls_u32(x) - 1;
+	if (x & (x - 1))
+		order++;
+	return order;
+}
+
+int get_count_order_ulong(unsigned long x)
+{
+	int order;
+
+	order = fls_ulong(x) - 1;
+	if (x & (x - 1))
+		order++;
+	return order;
+}
+
+static
+void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth);
+
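Worked examples for the two helpers above: fls returns the 1-based position of the most significant set bit (0 for 0), and get_count_order rounds up to the exponent of the next power of two. A standalone sketch using the same fallback algorithm as fls_u32:

#include <assert.h>
#include <stdint.h>

static unsigned int sketch_fls_u32(uint32_t x)
{
	unsigned int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) { x <<= 16; r -= 16; }
	if (!(x & 0xFF000000U)) { x <<= 8;  r -= 8;  }
	if (!(x & 0xF0000000U)) { x <<= 4;  r -= 4;  }
	if (!(x & 0xC0000000U)) { x <<= 2;  r -= 2;  }
	if (!(x & 0x80000000U)) { x <<= 1;  r -= 1;  }
	return r;
}

static int sketch_count_order_u32(uint32_t x)
{
	int order = sketch_fls_u32(x) - 1;

	if (x & (x - 1))	/* not a power of two: round up */
		order++;
	return order;
}

int main(void)
{
	assert(sketch_fls_u32(0) == 0);
	assert(sketch_fls_u32(1) == 1);
	assert(sketch_fls_u32(8) == 4);
	assert(sketch_count_order_u32(8) == 3);	/* 8 == 1 << 3 */
	assert(sketch_count_order_u32(9) == 4);	/* rounds up to 16 */
	return 0;
}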
 /*
- * Restart until we successfully remove the entry, or no entry is left
- * ((void *)(unsigned long)-ENOENT).
- * Deal with concurrent stealers by doing an extra verification pass to check
- * that no element in the list are still pointing to the element stolen.
- * This could happen if two concurrent steal for consecutive objects are
- * executed. A pointer to an object being stolen could be saved by the
- * concurrent stealer for the previous object.
- * Also, given that in this precise scenario, another stealer can also want to
- * delete the doubly-referenced object; use a "stolen" flag to let only one
- * stealer delete the object.
+ * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
+ * available, then we support hash table item accounting.
+ * In the unfortunate event the number of CPUs reported is inaccurate,
+ * we use modulo arithmetic on the number of CPUs we got.
  */
-void *ht_steal(struct rcu_ht *ht, void *key)
+#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+
+static
+void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
+				unsigned long count);
+
+static long nr_cpus_mask = -1;
+
+static
+struct ht_items_count *alloc_per_cpu_items_count(void)
 {
-	struct rcu_ht_node **prev, *node, *del_node = NULL;
-	struct rcu_table *t;
-	unsigned long hash;
-	void *data;
-	int ret;
+	struct ht_items_count *count;

-retry:
-	rcu_read_lock();
+	switch (nr_cpus_mask) {
+	case -2:
+		return NULL;
+	case -1:
+	{
+		long maxcpus;

-	if (unlikely(LOAD_SHARED(ht->resize_ongoing))) {
-		rcu_read_unlock();
+		maxcpus = sysconf(_SC_NPROCESSORS_CONF);
+		if (maxcpus <= 0) {
+			nr_cpus_mask = -2;
+			return NULL;
+		}
 		/*
-		 * Wait for resize to complete before continuing.
+		 * round up number of CPUs to next power of two, so we
+		 * can use & for modulo.
 		 */
-		ret = pthread_mutex_lock(&ht->resize_mutex);
-		assert(!ret);
-		ret = pthread_mutex_unlock(&ht->resize_mutex);
-		assert(!ret);
-		goto retry;
+		maxcpus = 1UL << get_count_order_ulong(maxcpus);
+		nr_cpus_mask = maxcpus - 1;
 	}
+	/* Fall-through */
+	default:
+		return calloc(nr_cpus_mask + 1, sizeof(*count));
+	}
+}

-	t = rcu_dereference(ht->t);
-	/* no read barrier needed, because no concurrency with resize */
-	hash = ht->hash_fct(key, ht->keylen, ht->hashseed) % t->size;
+static
+void free_per_cpu_items_count(struct ht_items_count *count)
+{
+	free(count);
+}

-	prev = &t->tbl[hash];
-	node = rcu_dereference(*prev);
-	for (;;) {
-		if (likely(!node)) {
-			if (del_node) {
-				goto end;
-			} else {
-				goto error;
-			}
+static
+int ht_get_cpu(void)
+{
+	int cpu;
+
+	assert(nr_cpus_mask >= 0);
+	cpu = sched_getcpu();
+	if (unlikely(cpu < 0))
+		return cpu;
+	else
+		return cpu & nr_cpus_mask;
+}
+
+static
+void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
+{
+	unsigned long percpu_count;
+	int cpu;
+
+	if (unlikely(!ht->percpu_count))
+		return;
+	cpu = ht_get_cpu();
+	if (unlikely(cpu < 0))
+		return;
+	percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1);
+	if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
+		unsigned long count;
+
+		dbg_printf("add percpu %lu\n", percpu_count);
+		count = uatomic_add_return(&ht->count,
+				1UL << COUNT_COMMIT_ORDER);
+		/* If power of 2 */
+		if (!(count & (count - 1))) {
+			if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < t->size)
+				return;
+			dbg_printf("add set global %lu\n", count);
+			cds_lfht_resize_lazy_count(ht, t,
+				count >> (CHAIN_LEN_TARGET - 1));
 		}
-		if (node->key == key) {
-			break;
+	}
+}
+
+static
+void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
+{
+	unsigned long percpu_count;
+	int cpu;
+
+	if (unlikely(!ht->percpu_count))
+		return;
+	cpu = ht_get_cpu();
+	if (unlikely(cpu < 0))
+		return;
+	percpu_count = uatomic_add_return(&ht->percpu_count[cpu].remove, -1);
+	if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
+		unsigned long count;
+
+		dbg_printf("remove percpu %lu\n", percpu_count);
+		count = uatomic_add_return(&ht->count,
+				-(1UL << COUNT_COMMIT_ORDER));
+		/* If power of 2 */
+		if (!(count & (count - 1))) {
+			if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= t->size)
+				return;
+			dbg_printf("remove set global %lu\n", count);
+			cds_lfht_resize_lazy_count(ht, t,
+				count >> (CHAIN_LEN_TARGET - 1));
+		}
+	}
+}
+
+#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+
+static const long nr_cpus_mask = -1;
+
+static
+struct ht_items_count *alloc_per_cpu_items_count(void)
+{
+	return NULL;
+}
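The accounting functions above only touch the shared ht->count when a per-CPU counter crosses a multiple of 2^COUNT_COMMIT_ORDER (1024), so the global counter is approximate by design. A single-threaded sketch of that commit test, with the uatomic operations replaced by plain arithmetic:

#include <stdio.h>

#define COUNT_COMMIT_ORDER	10

int main(void)
{
	unsigned long percpu = 0, global = 0, i;

	for (i = 0; i < 3000; i++) {
		percpu++;
		/* Same test as ht_count_add: did the low 10 bits wrap to 0? */
		if (!(percpu & ((1UL << COUNT_COMMIT_ORDER) - 1)))
			global += 1UL << COUNT_COMMIT_ORDER;
	}
	printf("local %lu, committed %lu\n", percpu, global);	/* 3000, 2048 */
	return 0;
}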

+static
+void free_per_cpu_items_count(struct ht_items_count *count)
+{
+}
+
+static
+void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
+{
+}
+
+static
+void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
+{
+}
+
+#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+
+
+static
+void check_resize(struct cds_lfht *ht, struct rcu_table *t,
+		  uint32_t chain_len)
+{
+	unsigned long count;
+
+	count = uatomic_read(&ht->count);
+	/*
+	 * Use bucket-local length for small table expand and for
+	 * environments lacking per-cpu data support.
+	 */
+	if (count >= (1UL << COUNT_COMMIT_ORDER))
+		return;
+	if (chain_len > 100)
+		dbg_printf("WARNING: large chain length: %u.\n",
+			   chain_len);
+	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
+		cds_lfht_resize_lazy(ht, t,
+			get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
+}
+
+static
+struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
+{
+	return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
+}
+
+static
+int is_removed(struct cds_lfht_node *node)
+{
+	return ((unsigned long) node) & REMOVED_FLAG;
+}
+
+static
+struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
+{
+	return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
+}
+
+static
+int is_dummy(struct cds_lfht_node *node)
+{
+	return ((unsigned long) node) & DUMMY_FLAG;
+}
+
+static
+struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node)
+{
+	return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG);
+}
+
+static
+unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
+{
+	unsigned long old1, old2;
+
+	old1 = uatomic_read(ptr);
+	do {
+		old2 = old1;
+		if (old2 >= v)
+			return old2;
+	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
+	return v;
+}
+
+/*
+ * Remove all logically deleted nodes from a bucket up to a certain node key.
+ */
+static
+void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
+{
+	struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
+
+	for (;;) {
+		iter_prev = dummy;
+		/* We can always skip the dummy node initially */
+		iter = rcu_dereference(iter_prev->p.next);
+		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+		for (;;) {
+			if (unlikely(!clear_flag(iter)))
+				return;
+			if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
+				return;
+			next = rcu_dereference(clear_flag(iter)->p.next);
+			if (likely(is_removed(next)))
+				break;
+			iter_prev = clear_flag(iter);
+			iter = next;
 		}
-		prev = &node->next;
-		node = rcu_dereference(*prev);
+		assert(!is_removed(iter));
+		if (is_dummy(iter))
+			new_next = flag_dummy(clear_flag(next));
+		else
+			new_next = clear_flag(next);
+		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
 	}
+}
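_cds_lfht_gc_bucket walks from the bucket's dummy node and, for each successor whose next pointer carries the removed flag, splices it out by swinging iter_prev's next to the cleared successor. A single-threaded sketch of that splice (a plain store stands in for the uatomic_cmpxchg the real code needs under concurrency):

#include <assert.h>
#include <stddef.h>

#define REMOVED_FLAG	(1UL << 0)

struct node {
	struct node *next;
};

static struct node *clear_flag(struct node *p)
{
	return (struct node *) ((unsigned long) p & ~REMOVED_FLAG);
}

int main(void)
{
	struct node c = { NULL };
	/* b is logically deleted: its own next pointer is tagged. */
	struct node b = { (struct node *) ((unsigned long) &c | REMOVED_FLAG) };
	struct node a = { &b };			/* list: a -> b(removed) -> c */

	a.next = clear_flag(b.next);		/* splice b out of the chain */
	assert(a.next == &c);
	return 0;
}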

+static
+struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
+				struct cds_lfht_node *node, int unique, int dummy)
+{
+	struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
+			*dummy_node;
+	struct _cds_lfht_node *lookup;
+	unsigned long hash, index, order;
+
+	if (!t->size) {
+		assert(dummy);
+		node->p.next = flag_dummy(NULL);
+		return node;	/* Initial first add (head) */
+	}
+	hash = bit_reverse_ulong(node->p.reverse_hash);
+	for (;;) {
+		uint32_t chain_len = 0;

-	if (!del_node) {
 		/*
-		 * Another concurrent thread stole it ? If so, let it deal with
-		 * this. Assume NODE_STOLEN is the only flag. If this changes,
-		 * read flags before cmpxchg.
+		 * iter_prev points to the non-removed node prior to the
+		 * insert location.
 		 */
-		if (cmpxchg(&node->flags, 0, NODE_STOLEN) != 0)
-			goto error;
+		index = hash & (t->size - 1);
+		order = get_count_order_ulong(index + 1);
+		lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
+		iter_prev = (struct cds_lfht_node *) lookup;
+		/* We can always skip the dummy node initially */
+		iter = rcu_dereference(iter_prev->p.next);
+		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+		for (;;) {
+			if (unlikely(!clear_flag(iter)))
+				goto insert;
+			if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
+				goto insert;
+			next = rcu_dereference(clear_flag(iter)->p.next);
+			if (unlikely(is_removed(next)))
+				goto gc_node;
+			if (unique
+			    && !is_dummy(next)
+			    && !ht->compare_fct(node->key, node->key_len,
+						clear_flag(iter)->key,
+						clear_flag(iter)->key_len))
+				return clear_flag(iter);
+			/* Only account for identical reverse hash once */
+			if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
+			    && !is_dummy(next))
+				check_resize(ht, t, ++chain_len);
+			iter_prev = clear_flag(iter);
+			iter = next;
+		}
+	insert:
+		assert(node != clear_flag(iter));
+		assert(!is_removed(iter_prev));
+		assert(iter_prev != node);
+		if (!dummy)
+			node->p.next = clear_flag(iter);
+		else
+			node->p.next = flag_dummy(clear_flag(iter));
+		if (is_dummy(iter))
+			new_node = flag_dummy(node);
+		else
+			new_node = node;
+		if (uatomic_cmpxchg(&iter_prev->p.next, iter,
+				    new_node) != iter)
+			continue;	/* retry */
+		else
+			goto gc_end;
+	gc_node:
+		assert(!is_removed(iter));
+		if (is_dummy(iter))
+			new_next = flag_dummy(clear_flag(next));
+		else
+			new_next = clear_flag(next);
+		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
+		/* retry */
 	}
+gc_end:
+	/* Garbage collect logically removed nodes in the bucket */
+	index = hash & (t->size - 1);
+	order = get_count_order_ulong(index + 1);
+	lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
+	dummy_node = (struct cds_lfht_node *) lookup;
+	_cds_lfht_gc_bucket(dummy_node, node);
+	return node;
+}
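The insert above publishes the new node with a single cmpxchg on the predecessor's next pointer; if any concurrent add or garbage collection changed that pointer since it was read, the cmpxchg fails and the whole walk retries. A compact sketch of the publish step, using GCC's __sync_val_compare_and_swap where the patch uses uatomic_cmpxchg:

#include <assert.h>
#include <stddef.h>

struct node {
	struct node *next;
};

int main(void)
{
	struct node tail = { NULL };
	struct node head = { &tail };
	struct node new_node;

	new_node.next = &tail;	/* link first, then publish atomically */
	if (__sync_val_compare_and_swap(&head.next, &tail, &new_node) == &tail)
		assert(head.next == &new_node);	/* we won the race */
	/* On failure: another writer got there first; re-walk and retry. */
	return 0;
}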

+static
+int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t,
+		struct cds_lfht_node *node)
+{
+	struct cds_lfht_node *dummy, *next, *old;
+	struct _cds_lfht_node *lookup;
+	int flagged = 0;
+	unsigned long hash, index, order;

-	/* Found it ! pointer to object is in "prev" */
-	if (rcu_cmpxchg_pointer(prev, node, node->next) == node)
-		del_node = node;
-	goto restart;
+	/* logically delete the node */
+	old = rcu_dereference(node->p.next);
+	do {
+		next = old;
+		if (unlikely(is_removed(next)))
+			goto end;
+		assert(!is_dummy(next));
+		old = uatomic_cmpxchg(&node->p.next, next,
+				      flag_removed(next));
+	} while (old != next);
+
+	/* We performed the (logical) deletion. */
+	flagged = 1;
+
+	/*
+	 * Ensure that the node is not visible to readers anymore: lookup for
+	 * the node, and remove it (along with any other logically removed node)
+	 * if found.
+	 */
+	hash = bit_reverse_ulong(node->p.reverse_hash);
+	index = hash & (t->size - 1);
+	order = get_count_order_ulong(index + 1);
+	lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
+	dummy = (struct cds_lfht_node *) lookup;
+	_cds_lfht_gc_bucket(dummy, node);
 end:
 	/*
-	 * From that point, we own node. Note that there can still be concurrent
-	 * RCU readers using it. We can free it outside of read lock after a GP.
+	 * Only the flagging action indicated that we (and no other)
+	 * removed the node from the hash.
 	 */
-	rcu_read_unlock();
+	if (flagged) {
+		assert(is_removed(rcu_dereference(node->p.next)));
+		return 0;
+	} else
+		return -ENOENT;
+}

-	data = del_node->data;
-	call_rcu(free, del_node);
-	return data;
+static
+void init_table(struct cds_lfht *ht, struct rcu_table *t,
+		unsigned long first_order, unsigned long len_order)
+{
+	unsigned long i, end_order;

-error:
-	data = (void *)(unsigned long)-ENOENT;
-	rcu_read_unlock();
-	return data;
+	dbg_printf("init table: first_order %lu end_order %lu\n",
+		   first_order, first_order + len_order);
+	end_order = first_order + len_order;
+	t->size = !first_order ? 0 : (1UL << (first_order - 1));
+	for (i = first_order; i < end_order; i++) {
+		unsigned long j, len;

-	/* restart loop, release and re-take the read lock to be kind to GP */
-restart:
-	rcu_read_unlock();
-	goto retry;
+		len = !i ? 1 : 1UL << (i - 1);
+		dbg_printf("init order %lu len: %lu\n", i, len);
+		t->tbl[i] = calloc(len, sizeof(struct _cds_lfht_node));
+		for (j = 0; j < len; j++) {
+			dbg_printf("init entry: i %lu j %lu hash %lu\n",
+				   i, j, !i ? 0 : (1UL << (i - 1)) + j);
+			struct cds_lfht_node *new_node =
+				(struct cds_lfht_node *) &t->tbl[i][j];
+			new_node->p.reverse_hash =
+				bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
+			(void) _cds_lfht_add(ht, t, new_node, 0, 1);
+			if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+				break;
+		}
+		/* Update table size */
+		t->size = !i ? 1 : (1UL << i);
+		dbg_printf("init new size: %lu\n", t->size);
+		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+			break;
+	}
+	t->resize_target = t->size;
+	t->resize_initiated = 0;
 }

-int ht_delete(struct rcu_ht *ht, void *key)
+struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
+			cds_lfht_compare_fct compare_fct,
+			unsigned long hash_seed,
+			unsigned long init_size,
+			void (*cds_lfht_call_rcu)(struct rcu_head *head,
+				void (*func)(struct rcu_head *head)))
 {
-	void *data;
+	struct cds_lfht *ht;
+	unsigned long order;

-	data = ht_steal(ht, key);
-	if (data && data != (void *)(unsigned long)-ENOENT) {
-		if (ht->free_fct)
-			call_rcu(ht->free_fct, data);
-		return 0;
-	} else {
-		return -ENOENT;
-	}
+	/* init_size must be power of two */
+	if (init_size && (init_size & (init_size - 1)))
+		return NULL;
+	ht = calloc(1, sizeof(struct cds_lfht));
+	ht->hash_fct = hash_fct;
+	ht->compare_fct = compare_fct;
+	ht->hash_seed = hash_seed;
+	ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
+	ht->in_progress_resize = 0;
+	ht->percpu_count = alloc_per_cpu_items_count();
+	/* this mutex should not nest in read-side C.S. */
+	pthread_mutex_init(&ht->resize_mutex, NULL);
+	order = get_count_order_ulong(max(init_size, 1)) + 1;
+	ht->t = calloc(1, sizeof(struct rcu_table)
+		       + (order * sizeof(struct _cds_lfht_node *)));
+	ht->t->size = 0;
+	pthread_mutex_lock(&ht->resize_mutex);
+	init_table(ht, ht->t, 0, order);
+	pthread_mutex_unlock(&ht->resize_mutex);
+	return ht;
 }
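A hedged usage sketch for the constructor above. The callback signatures are inferred from the call sites in this patch (hash_fct(key, key_len, seed); compare_fct returns 0 on a match); the toy hash and compare below are illustrative stand-ins, and call_rcu comes from liburcu's urcu-call-rcu.h:

#include <stddef.h>
#include <string.h>
#include <urcu.h>
#include <urcu-call-rcu.h>
/* plus the rculfhash header from this patch (its path is not shown in the diff) */

/* Toy callbacks for illustration only. */
static unsigned long my_hash(void *key, size_t key_len, unsigned long seed)
{
	unsigned long h = seed;
	size_t i;

	for (i = 0; i < key_len; i++)
		h = h * 31 + ((unsigned char *) key)[i];
	return h;
}

static unsigned long my_compare(void *key1, size_t key1_len,
				void *key2, size_t key2_len)
{
	/* 0 means "keys are equal", like memcmp */
	return key1_len != key2_len || memcmp(key1, key2, key1_len);
}

struct cds_lfht *make_table(void)
{
	/* init_size must be a power of two; 1 lets resize grow on demand. */
	return cds_lfht_new(my_hash, my_compare, 0x42UL, 1, call_rcu);
}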
- */ - ret = pthread_mutex_lock(&ht->resize_mutex); - assert(!ret); + hash = ht->hash_fct(key, key_len, ht->hash_seed); + reverse_hash = bit_reverse_ulong(hash); t = rcu_dereference(ht->t); - /* no read barrier needed, because no concurrency with resize */ - for (i = 0; i < t->size; i++) { - rcu_read_lock(); - prev = &t->tbl[i]; - /* - * Cut the head. After that, we own the first element. - */ - node = rcu_xchg_pointer(prev, NULL); - if (!node) { - rcu_read_unlock(); - continue; + index = hash & (t->size - 1); + order = get_count_order_ulong(index + 1); + lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)]; + dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n", + hash, index, order, index & ((1UL << (order - 1)) - 1)); + node = (struct cds_lfht_node *) lookup; + for (;;) { + if (unlikely(!node)) + break; + if (unlikely(node->p.reverse_hash > reverse_hash)) { + node = NULL; + break; } - /* - * We manage a list shared with concurrent writers and readers. - * Note that a concurrent add may or may not be deleted by us, - * depending if it arrives before or after the head is cut. - * "node" points to our first node. Remove first elements - * iteratively. - */ - for (;;) { - inext = NULL; - prev = &node->next; - if (prev) - inext = rcu_xchg_pointer(prev, NULL); - /* - * "node" is the first element of the list we have cut. - * We therefore own it, no concurrent writer may delete - * it. There can only be concurrent lookups. Concurrent - * add can only be done on a bucket head, but we've cut - * it already. inext is also owned by us, because we - * have exchanged it for "NULL". It will therefore be - * safe to use it after a G.P. - */ - rcu_read_unlock(); - if (node->data) - call_rcu(ht->free_fct, node->data); - call_rcu(free, node); - cnt++; - if (likely(!inext)) + next = rcu_dereference(node->p.next); + if (likely(!is_removed(next)) + && !is_dummy(next) + && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) { break; - rcu_read_lock(); - node = inext; } + node = clear_flag(next); } + assert(!node || !is_dummy(rcu_dereference(node->p.next))); + return node; +} - ret = pthread_mutex_unlock(&ht->resize_mutex); - assert(!ret); - return cnt; +struct cds_lfht_node *cds_lfht_next(struct cds_lfht *ht, + struct cds_lfht_node *node) +{ + struct cds_lfht_node *next; + unsigned long reverse_hash; + void *key; + size_t key_len; + + reverse_hash = node->p.reverse_hash; + key = node->key; + key_len = node->key_len; + next = rcu_dereference(node->p.next); + node = clear_flag(next); + + for (;;) { + if (unlikely(!node)) + break; + if (unlikely(node->p.reverse_hash > reverse_hash)) { + node = NULL; + break; + } + next = rcu_dereference(node->p.next); + if (likely(!is_removed(next)) + && !is_dummy(next) + && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) { + break; + } + node = clear_flag(next); + } + assert(!node || !is_dummy(rcu_dereference(node->p.next))); + return node; +} + +void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node) +{ + struct rcu_table *t; + unsigned long hash; + + hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed); + node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash); + + t = rcu_dereference(ht->t); + (void) _cds_lfht_add(ht, t, node, 0, 0); + ht_count_add(ht, t); +} + +struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht, + struct cds_lfht_node *node) +{ + struct rcu_table *t; + unsigned long hash; + struct cds_lfht_node *ret; + + hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed); + 

+void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
+{
+	struct rcu_table *t;
+	unsigned long hash;
+
+	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
+	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
+
+	t = rcu_dereference(ht->t);
+	(void) _cds_lfht_add(ht, t, node, 0, 0);
+	ht_count_add(ht, t);
+}
+
+struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
+				struct cds_lfht_node *node)
+{
+	struct rcu_table *t;
+	unsigned long hash;
+	struct cds_lfht_node *ret;
+
+	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
+	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
+
+	t = rcu_dereference(ht->t);
+	ret = _cds_lfht_add(ht, t, node, 1, 0);
+	if (ret == node)
+		ht_count_add(ht, t);
+	return ret;
+}
+
+int cds_lfht_remove(struct cds_lfht *ht, struct cds_lfht_node *node)
+{
+	struct rcu_table *t;
+	int ret;
+
+	t = rcu_dereference(ht->t);
+	ret = _cds_lfht_remove(ht, t, node);
+	if (!ret)
+		ht_count_remove(ht, t);
+	return ret;
+}
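Callers of add_unique distinguish "inserted" from "key already present" by comparing the return value with the node they passed in (the accounting above uses the same test); cds_lfht_remove returns -ENOENT when a concurrent remover won the flagging race. A hedged sketch:

#include <errno.h>
#include <urcu.h>
/* plus the rculfhash header from this patch (path not shown in the diff) */

int insert_unique(struct cds_lfht *ht, struct cds_lfht_node *node)
{
	struct cds_lfht_node *ret;

	rcu_read_lock();	/* add/remove must run under the read lock */
	ret = cds_lfht_add_unique(ht, node);
	rcu_read_unlock();

	return ret == node ? 0 : -EEXIST;	/* on failure, ret is the holder */
}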

+static
+int cds_lfht_delete_dummy(struct cds_lfht *ht)
+{
+	struct rcu_table *t;
+	struct cds_lfht_node *node;
+	struct _cds_lfht_node *lookup;
+	unsigned long order, i;
+
+	t = ht->t;
+	/* Check that the table is empty */
+	lookup = &t->tbl[0][0];
+	node = (struct cds_lfht_node *) lookup;
+	do {
+		node = clear_flag(node)->p.next;
+		if (!is_dummy(node))
+			return -EPERM;
+		assert(!is_removed(node));
+	} while (clear_flag(node));
+	/* Internal sanity check: all nodes left should be dummy */
+	for (order = 0; order < get_count_order_ulong(t->size) + 1; order++) {
+		unsigned long len;
+
+		len = !order ? 1 : 1UL << (order - 1);
+		for (i = 0; i < len; i++) {
+			dbg_printf("delete order %lu i %lu hash %lu\n",
+				   order, i,
+				   bit_reverse_ulong(t->tbl[order][i].reverse_hash));
+			assert(is_dummy(t->tbl[order][i].next));
+		}
+		free(t->tbl[order]);
+	}
+	return 0;
+}

 /*
  * Should only be called when no more concurrent readers nor writers can
  * possibly access the table.
  */
-int ht_destroy(struct rcu_ht *ht)
+int cds_lfht_destroy(struct cds_lfht *ht)
 {
 	int ret;

-	ret = ht_delete_all(ht);
+	/* Wait for in-flight resize operations to complete */
+	CMM_STORE_SHARED(ht->in_progress_destroy, 1);
+	while (uatomic_read(&ht->in_progress_resize))
+		poll(NULL, 0, 100);	/* wait for 100ms */
+	ret = cds_lfht_delete_dummy(ht);
+	if (ret)
+		return ret;
 	free(ht->t);
+	free_per_cpu_items_count(ht->percpu_count);
 	free(ht);
 	return ret;
 }

-static void ht_resize_grow(struct rcu_ht *ht)
+void cds_lfht_count_nodes(struct cds_lfht *ht,
+		unsigned long *count,
+		unsigned long *removed)
 {
-	unsigned long i, new_size, old_size;
+	struct rcu_table *t;
+	struct cds_lfht_node *node, *next;
+	struct _cds_lfht_node *lookup;
+	unsigned long nr_dummy = 0;
+
+	*count = 0;
+	*removed = 0;
+
+	t = rcu_dereference(ht->t);
+	/* Count non-dummy nodes in the table */
+	lookup = &t->tbl[0][0];
+	node = (struct cds_lfht_node *) lookup;
+	do {
+		next = rcu_dereference(node->p.next);
+		if (is_removed(next)) {
+			assert(!is_dummy(next));
+			(*removed)++;
+		} else if (!is_dummy(next))
+			(*count)++;
+		else
+			(nr_dummy)++;
+		node = clear_flag(next);
+	} while (node);
+	dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
+}
+
+static
+void cds_lfht_free_table_cb(struct rcu_head *head)
+{
+	struct rcu_table *t =
+		caa_container_of(head, struct rcu_table, head);
+	free(t);
+}
+
+/* called with resize mutex held */
+static
+void _do_cds_lfht_resize(struct cds_lfht *ht)
+{
+	unsigned long new_size, old_size, old_order, new_order;
 	struct rcu_table *new_t, *old_t;
-	struct rcu_ht_node *node, *new_node, *tmp;
-	unsigned long hash;

 	old_t = ht->t;
 	old_size = old_t->size;
+	old_order = get_count_order_ulong(old_size) + 1;

-	if (old_size == MAX_HT_BUCKETS)
+	new_size = CMM_LOAD_SHARED(old_t->resize_target);
+	if (old_size == new_size)
 		return;
+	new_order = get_count_order_ulong(new_size) + 1;
+	dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
+		   old_size, old_order, new_size, new_order);
+	new_t = malloc(sizeof(struct rcu_table)
+		       + (new_order * sizeof(struct _cds_lfht_node *)));
+	assert(new_size > old_size);
+	memcpy(&new_t->tbl, &old_t->tbl,
+	       old_order * sizeof(struct _cds_lfht_node *));
+	init_table(ht, new_t, old_order, new_order - old_order);
+	/* Changing table and size atomically wrt lookups */
+	rcu_assign_pointer(ht->t, new_t);
+	ht->cds_lfht_call_rcu(&old_t->head, cds_lfht_free_table_cb);
+}
+
+static
+unsigned long resize_target_update(struct rcu_table *t,
+				   int growth_order)
+{
+	return _uatomic_max(&t->resize_target,
+			    t->size << growth_order);
+}

-	new_size = old_size << 1;
-	new_t = calloc(1, sizeof(struct rcu_table)
-		       + (new_size * sizeof(struct rcu_ht_node *)));
-	new_t->size = new_size;
+void cds_lfht_resize(struct cds_lfht *ht, int growth)
+{
+	struct rcu_table *t = rcu_dereference(ht->t);
+	unsigned long target_size;

-	for (i = 0; i < old_size; i++) {
+	if (growth < 0) {
 		/*
-		 * Re-hash each entry, insert in new table.
-		 * It's important that a reader looking for a key _will_ find it
-		 * if it's in the table.
-		 * Copy each node. (just the node, not ->data)
+		 * Silently refuse to shrink hash table. (not supported)
 		 */
-		node = old_t->tbl[i];
-		while (node) {
-			hash = ht->hash_fct(node->key, ht->keylen, ht->hashseed)
-				% new_size;
-			new_node = malloc(sizeof(struct rcu_ht_node));
-			new_node->key = node->key;
-			new_node->data = node->data;
-			new_node->flags = node->flags;
-			new_node->next = new_t->tbl[hash]; /* link to first */
-			new_t->tbl[hash] = new_node;	   /* add to head */
-			node = node->next;
-		}
+		dbg_printf("shrinking hash table not supported.\n");
+		return;
 	}
-	/* Changing table and size atomically wrt lookups */
-	rcu_assign_pointer(ht->t, new_t);
-
-	/* Ensure all concurrent lookups use new size and table */
-	synchronize_rcu();
-
-	for (i = 0; i < old_size; i++) {
-		node = old_t->tbl[i];
-		while (node) {
-			tmp = node->next;
-			free(node);
-			node = tmp;
-		}
+	target_size = resize_target_update(t, growth);
+	if (t->size < target_size) {
+		CMM_STORE_SHARED(t->resize_initiated, 1);
+		pthread_mutex_lock(&ht->resize_mutex);
+		_do_cds_lfht_resize(ht);
+		pthread_mutex_unlock(&ht->resize_mutex);
 	}
-	free(old_t);
 }

-static void ht_resize_shrink(struct rcu_ht *ht)
+static
+void do_resize_cb(struct rcu_head *head)
 {
-	unsigned long i, new_size;
-	struct rcu_table *new_t, *old_t;
-	struct rcu_ht_node **prev, *node;
+	struct rcu_resize_work *work =
+		caa_container_of(head, struct rcu_resize_work, head);
+	struct cds_lfht *ht = work->ht;

-	old_t = ht->t;
-	if (old_t->size == 1)
-		return;
+	pthread_mutex_lock(&ht->resize_mutex);
+	_do_cds_lfht_resize(ht);
+	pthread_mutex_unlock(&ht->resize_mutex);
+	free(work);
+	cmm_smp_mb();	/* finish resize before decrement */
+	uatomic_dec(&ht->in_progress_resize);
+}

-	new_size = old_t->size >> 1;
+static
+void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth)
+{
+	struct rcu_resize_work *work;
+	unsigned long target_size;

-	for (i = 0; i < new_size; i++) {
-		/* Link end with first entry of i + new_size */
-		prev = &old_t->tbl[i];
-		node = *prev;
-		while (node) {
-			prev = &node->next;
-			node = *prev;
-		}
-		*prev = old_t->tbl[i + new_size];
+	target_size = resize_target_update(t, growth);
+	if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
+		uatomic_inc(&ht->in_progress_resize);
+		cmm_smp_mb();	/* increment resize count before calling it */
+		work = malloc(sizeof(*work));
+		work->ht = ht;
+		ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
+		CMM_STORE_SHARED(t->resize_initiated, 1);
 	}
 }
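Growth targets are monotonic: _uatomic_max only ever raises resize_target, so a concurrent lower request is a harmless no-op, and resize_initiated ensures at most one call_rcu worker is queued per target change. A single-threaded sketch of that contract:

#include <assert.h>

/* Same contract as _uatomic_max above, minus the atomicity. */
static unsigned long max_update(unsigned long *ptr, unsigned long v)
{
	if (*ptr >= v)
		return *ptr;	/* already at or past the target */
	*ptr = v;
	return v;
}

int main(void)
{
	unsigned long target = 256;

	assert(max_update(&target, 128) == 256);	/* never lowers */
	assert(max_update(&target, 1024) == 1024);	/* grows monotonically */
	assert(target == 1024);
	return 0;
}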

-	smp_wmb();	/* write links before changing size */
-	STORE_SHARED(old_t->size, new_size);
-
-	/* Ensure all concurrent lookups use new size */
-	synchronize_rcu();
-
-	new_t = realloc(old_t, sizeof(struct rcu_table)
-			+ (new_size * sizeof(struct rcu_ht_node *)));
-	/* shrinking, pointers should not move */
-	assert(new_t == old_t);
-}
-
-/*
- * growth: >0: *2, <0: /2
- */
-void ht_resize(struct rcu_ht *ht, int growth)
-{
-	int ret;
+#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)

-	ret = pthread_mutex_lock(&ht->resize_mutex);
-	assert(!ret);
-	STORE_SHARED(ht->resize_ongoing, 1);
-	synchronize_rcu();
-	/* All add/remove are waiting on the mutex. */
-	if (growth > 0)
-		ht_resize_grow(ht);
-	else if (growth < 0)
-		ht_resize_shrink(ht);
-	smp_mb();
-	STORE_SHARED(ht->resize_ongoing, 0);
-	ret = pthread_mutex_unlock(&ht->resize_mutex);
-	assert(!ret);
+static
+unsigned long resize_target_update_count(struct rcu_table *t,
+					 unsigned long count)
+{
+	return uatomic_set(&t->resize_target, count);
 }

-/*
- * Expects keys <= than pointer size to be encoded in the pointer itself.
- */
-uint32_t ht_jhash(void *key, uint32_t length, uint32_t initval)
+static
+void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
+				unsigned long count)
 {
-	uint32_t ret;
-	void *vkey;
+	struct rcu_resize_work *work;
+	unsigned long target_size;

-	if (length <= sizeof(void *))
-		vkey = &key;
-	else
-		vkey = key;
-	ret = jhash(vkey, length, initval);
-	return ret;
+	target_size = resize_target_update_count(t, count);
+	if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
+		uatomic_inc(&ht->in_progress_resize);
+		cmm_smp_mb();	/* increment resize count before calling it */
+		work = malloc(sizeof(*work));
+		work->ht = ht;
+		ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
+		CMM_STORE_SHARED(t->resize_initiated, 1);
+	}
 }
+
+#endif
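Putting the new API together, a hedged end-to-end sketch (callbacks as in the earlier constructor sketch; how a node's key/key_len get initialized is not shown in this diff, so it is elided here too):

#include <urcu.h>
#include <urcu-call-rcu.h>
/* plus the rculfhash header from this patch (path not shown in the diff) */

void lifecycle(struct cds_lfht *ht, struct cds_lfht_node *node)
{
	/* node->key / node->key_len must already be set (init not shown). */
	rcu_read_lock();
	cds_lfht_add(ht, node);		/* lock-free insert */
	rcu_read_unlock();

	rcu_read_lock();
	if (!cds_lfht_remove(ht, node)) {
		/* We own the node: reclaim it only after a grace period,
		 * e.g. via call_rcu() on a rcu_head embedded in it. */
	}
	rcu_read_unlock();

	cds_lfht_destroy(ht);	/* only once no readers/writers remain */
}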