* Userspace RCU library - Lock-Free Resizable RCU Hash Table
*
* Copyright 2010-2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright 2011 - Lai Jiangshan <laijs@cn.fujitsu.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* implementation:
*
* - RCU read-side critical section allows readers to perform hash
- * table lookups and use the returned objects safely by delaying
- * memory reclaim of a grace period.
+ * table lookups, as well as traversals, and use the returned objects
+ * safely by allowing memory reclaim to take place only after a grace
+ * period.
* - Add and remove operations are lock-free, and do not need to
* allocate memory. They need to be executed within RCU read-side
* critical section to ensure the objects they read are valid and to
* deal with the cmpxchg ABA problem.
* - add and add_unique operations are supported. add_unique checks if
- * the node key already exists in the hash table. It ensures no key
- * duplicata exists.
- * - The resize operation executes concurrently with add/remove/lookup.
+ * the node key already exists in the hash table. It ensures that a
+ * duplicate key is never inserted when the key is already present.
+ * - The resize operation executes concurrently with
+ * add/add_unique/add_replace/remove/lookup/traversal.
* - Hash table nodes are contained within a split-ordered list. This
* list is ordered by incrementing reversed-bits-hash value.
- * - An index of dummy nodes is kept. These dummy nodes are the hash
- * table "buckets", and they are also chained together in the
- * split-ordered list, which allows recursive expansion.
- * - The resize operation for small tables only allows expanding the hash table.
- * It is triggered automatically by detecting long chains in the add
- * operation.
+ * - An index of bucket nodes is kept. These bucket nodes are the hash
+ * table "buckets". These buckets are internal nodes that allow
+ * performing a fast hash lookup, similarly to a skip list. These
+ * buckets are chained together in the split-ordered list, which
+ * allows recursive expansion by inserting new buckets between the
+ * existing buckets as the table needs to grow.
+ * - The resize operation for small tables only allows expanding the
+ * hash table. It is triggered automatically by detecting long chains
+ * in the add operation.
* - The resize operation for larger tables (and available through an
* API) allows both expanding and shrinking the hash table.
- * - Per-CPU Split-counters are used to keep track of the number of
+ * - Split-counters are used to keep track of the number of
* nodes within the hash table for automatic resize triggering.
* - Resize operation initiated by long chain detection is executed by a
* call_rcu thread, which keeps lock-freedom of add and remove.
* (not visible to lookups anymore) before the RCU read-side critical
* section held across removal ends. Furthermore, this ensures that
* the node with "removed" flag set is removed from the linked-list
- * before its memory is reclaimed. Only the thread which removal
- * successfully set the "removed" flag (with a cmpxchg) into a node's
- * next pointer is considered to have succeeded its removal (and thus
- * owns the node to reclaim). Because we garbage-collect starting from
- * an invariant node (the start-of-bucket dummy node) up to the
- * "removed" node (or find a reverse-hash that is higher), we are sure
- * that a successful traversal of the chain leads to a chain that is
- * present in the linked-list (the start node is never removed) and
- * that is does not contain the "removed" node anymore, even if
- * concurrent delete/add operations are changing the structure of the
- * list concurrently.
- * - The add operation performs gargage collection of buckets if it
- * encounters nodes with removed flag set in the bucket where it wants
- * to add its new node. This ensures lock-freedom of add operation by
+ * before its memory is reclaimed. After setting the "removal" flag,
+ * only the thread whose removal operation is the first to set the
+ * "removal owner" flag (with an xchg) into the node's next pointer is
+ * considered to have succeeded its removal (and thus owns the node to
+ * reclaim).
+ * Because we garbage-collect starting from an invariant node (the
+ * start-of-bucket bucket node) up to the "removed" node (or find a
+ * reverse-hash that is higher), we are sure that a successful
+ * traversal of the chain leads to a chain that is present in the
+ * linked-list (the start node is never removed) and that it does not
+ * contain the "removed" node anymore, even if concurrent delete/add
+ * operations are changing the structure of the list concurrently.
+ * - The add operations perform garbage collection of buckets if they
+ * encounter nodes with removed flag set in the bucket where they want
+ * to add their new node. This ensures lock-freedom of the add
+ * operations by helping the remover unlink nodes from the list rather
+ * than waiting for it to do so.
- * - A RCU "order table" indexed by log2(hash index) is copied and
- * expanded by the resize operation. This order table allows finding
- * the "dummy node" tables.
- * - There is one dummy node table per hash index order. The size of
- * each dummy node table is half the number of hashes contained in
- * this order.
- * - call_rcu is used to garbage-collect the old order table.
- * - The per-order dummy node tables contain a compact version of the
- * hash table nodes. These tables are invariant after they are
- * populated into the hash table.
- *
+ * - There are three memory backends for the hash table buckets: the
+ * "order table", the "chunks", and the "mmap".
+ * - These bucket containers contain a compact version of the hash table
+ * nodes.
+ * - The RCU "order table":
+ * - has a first level table indexed by log2(hash index) which is
+ * copied and expanded by the resize operation. This order table
+ * allows finding the "bucket node" tables.
+ * - There is one bucket node table per hash index order. The size of
+ * each bucket node table is half the number of hashes contained in
+ * this order (except for order 0).
+ * - The RCU "chunks" backend is best suited for close interaction with
+ * a page allocator. It uses a linear array as an index to "chunks",
+ * each of which contains the same number of buckets.
+ * - The RCU "mmap" memory backend uses a single memory map to hold
+ * all buckets.
+ * - synchronize_rcu is used to garbage-collect the old bucket node table.
+ *
+ * Ordering Guarantees:
+ *
+ * To discuss these guarantees, we first define "read" operation as any
+ * of the basic cds_lfht_lookup, cds_lfht_next_duplicate,
+ * cds_lfht_first, cds_lfht_next operations, as well as
+ * cds_lfht_add_unique (failure).
+ *
+ * We define "read traversal" operation as any of the following
+ * groups of operations:
+ * - cds_lfht_lookup followed by iteration with cds_lfht_next_duplicate
+ * (and/or cds_lfht_next, although less common).
+ * - cds_lfht_add_unique (failure) followed by iteration with
+ * cds_lfht_next_duplicate (and/or cds_lfht_next, although less
+ * common).
+ * - cds_lfht_first followed by iteration with cds_lfht_next (and/or
+ * cds_lfht_next_duplicate, although less common).
+ *
+ * We define "write" operations as any of cds_lfht_add,
+ * cds_lfht_add_unique (success), cds_lfht_add_replace, cds_lfht_del.
+ *
+ * When cds_lfht_add_unique succeeds (returns the node passed as
+ * parameter), it acts as a "write" operation. When cds_lfht_add_unique
+ * fails (returns a node different from the one passed as parameter), it
+ * acts as a "read" operation. A cds_lfht_add_unique failure is a
+ * cds_lfht_lookup "read" operation; therefore, any ordering guarantee
+ * referring to "lookup" applies to both "lookup" and
+ * cds_lfht_add_unique (failure).
+ *
+ * We define "prior" and "later" nodes as nodes observable by reads and
+ * read traversals, respectively, before and after a write or sequence
+ * of write operations.
+ *
+ * Hash-table operations are often cascaded, for example, the pointer
+ * returned by a cds_lfht_lookup() might be passed to a cds_lfht_next(),
+ * whose return value might in turn be passed to another hash-table
+ * operation. This entire cascaded series of operations must be enclosed
+ * by a pair of matching rcu_read_lock() and rcu_read_unlock()
+ * operations.
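+ *
+ * For instance, a lookup followed by a traversal of duplicates could
+ * look like this minimal sketch (error handling elided; "match" and
+ * "key" are caller-provided):
+ *
+ *	rcu_read_lock();
+ *	cds_lfht_lookup(ht, hash, match, key, &iter);
+ *	node = cds_lfht_iter_get_node(&iter);
+ *	while (node) {
+ *		... use node ...
+ *		cds_lfht_next_duplicate(ht, match, key, &iter);
+ *		node = cds_lfht_iter_get_node(&iter);
+ *	}
+ *	rcu_read_unlock();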
+ *
+ * The following ordering guarantees are offered by this hash table:
+ *
+ * A.1) "read" after "write": if there is ordering between a write and a
+ * later read, then the read is guaranteed to see the write or some
+ * later write.
+ * A.2) "read traversal" after "write": given that there is dependency
+ * ordering between reads in a "read traversal", if there is
+ * ordering between a write and the first read of the traversal,
+ * then the "read traversal" is guaranteed to see the write or
+ * some later write.
+ * B.1) "write" after "read": if there is ordering between a read and a
+ * later write, then the read will never see the write.
+ * B.2) "write" after "read traversal": given that there is dependency
+ * ordering between reads in a "read traversal", if there is
+ * ordering between the last read of the traversal and a later
+ * write, then the "read traversal" will never see the write.
+ * C) "write" while "read traversal": if a write occurs during a "read
+ * traversal", the traversal may, or may not, see the write.
+ * D.1) "write" after "write": if there is ordering between a write and
+ * a later write, then the later write is guaranteed to see the
+ * effects of the first write.
+ * D.2) Concurrent "write" pairs: The system will assign an arbitrary
+ * order to any pair of concurrent conflicting writes.
+ * Non-conflicting writes (for example, to different keys) are
+ * unordered.
+ * E) If a grace period separates a "del" or "replace" operation
+ * and a subsequent operation, then that subsequent operation is
+ * guaranteed not to see the removed item (see the sketch after
+ * this list).
+ * F) Uniqueness guarantee: given a hash table that does not contain
+ * duplicate items for a given key, there will only be one item in
+ * the hash table after an arbitrary sequence of add_unique and/or
+ * add_replace operations. Note, however, that a pair of
+ * concurrent read operations might well access two different items
+ * with that key.
+ * G.1) If a pair of lookups for a given key are ordered (e.g. by a
+ * memory barrier), then the second lookup will return the same
+ * node as the previous lookup, or some later node.
+ * G.2) A "read traversal" that starts after the end of a prior "read
+ * traversal" (ordered by memory barriers) is guaranteed to see the
+ * same nodes as the previous traversal, or some later nodes.
+ * G.3) Concurrent "read" pairs: concurrent reads are unordered. For
+ * example, if a pair of reads to the same key run concurrently
+ * with an insertion of that same key, the reads remain unordered
+ * regardless of their return values. In other words, you cannot
+ * rely on the values returned by the reads to deduce ordering.
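+ *
+ * As a sketch of guarantee E (illustrative only; error handling
+ * elided, and the enclosing urcu flavor's synchronize_rcu() is
+ * assumed):
+ *
+ *	rcu_read_lock();
+ *	ret = cds_lfht_del(ht, node);
+ *	rcu_read_unlock();
+ *	if (!ret) {
+ *		synchronize_rcu();
+ *		free(node);
+ *	}
+ *
+ * After the grace period, no read or read traversal can see the
+ * deleted node, so its memory can safely be reclaimed.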
+ *
+ * Progress guarantees:
+ *
+ * * Reads are wait-free. These operations always move forward in the
+ * hash table linked list, and this list has no loop.
+ * * Writes are lock-free. Any retry loop performed by a write operation
+ * is triggered by progress made within another update operation.
+ *
+ * Bucket node tables:
+ *
+ * hash table   hash table   the last      all bucket node tables
+ * order        size         bucket node   0  1  2  3  4  5  6(index)
+ *                           table size
+ * 0            1            1             1
+ * 1            2            1             1  1
+ * 2            4            2             1  1  2
+ * 3            8            4             1  1  2  4
+ * 4            16           8             1  1  2  4  8
+ * 5            32           16            1  1  2  4  8  16
+ * 6            64           32            1  1  2  4  8  16 32
+ *
+ * When growing/shrinking, we only focus on the last bucket node table,
+ * whose size is (!order ? 1 : (1 << (order - 1))).
+ *
+ * Example for growing/shrinking:
+ * grow hash table from order 5 to 6: init the index=6 bucket node table
+ * shrink hash table from order 6 to 5: fini the index=6 bucket node table
+ *
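+ * As an illustration of the order-based backend layout above (a
+ * sketch of the index-to-table mapping; the actual lookup is the
+ * backend's bucket_at operation), a bucket index resolves to an
+ * (order, offset) pair as follows:
+ *
+ *	order  = (index == 0) ? 0 : cds_lfht_fls_ulong(index);
+ *	offset = (order == 0) ? 0 : (index & ((1UL << (order - 1)) - 1));
+ *
+ * e.g. bucket index 5 (binary 101) resolves to offset 1 within the
+ * order-3 bucket node table.
+ *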
* A bit of ascii art explanation:
*
- * Order index is the off-by-one compare to the actual power of 2 because
- * we use index 0 to deal with the 0 special-case.
+ * The order index is off by one compared to the actual power of 2,
+ * because we use index 0 to deal with the 0 special-case.
*
* This shows the nodes for a small table ordered by reversed bits:
*
*
* order bits reverse
* 0 0 000 000
- * |
- * 1 | 1 001 100 <- <-
- * | | | |
- * 2 | | 2 010 010 | |
+ * 1 | 1 001 100 <-
+ * 2 | | 2 010 010 <- |
* | | | 3 011 110 | <- |
- * | | | | | | |
* 3 -> | | | 4 100 001 | |
* -> | | 5 101 101 |
* -> | 6 110 011
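+ *
+ * An illustrative lookup on this table (size 4): a lookup for hash 6
+ * (bits 110, reverse 011) starts at bucket 6 & (4 - 1) = 2 (reverse
+ * 010), then follows the split-ordered list in increasing
+ * reverse-hash order (010, 011, ...) until it finds node 6 or a node
+ * with a larger reverse hash.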
*/
#define _LGPL_SOURCE
+#define _GNU_SOURCE
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
+#include <sched.h>
#include "config.h"
#include <urcu.h>
#include <urcu-call-rcu.h>
+#include <urcu-flavor.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
+#include <rculfhash-internal.h>
#include <pthread.h>
-#ifdef DEBUG
-#define dbg_printf(fmt, args...) printf("[debug rculfhash] " fmt, ## args)
-#else
-#define dbg_printf(fmt, args...)
-#endif
-
/*
- * Per-CPU split-counters lazily update the global counter each 1024
+ * Split-counters lazily update the global counter every 1024
* additions/removals. They automatically keep track of when a resize
* is required. We use the bucket length as an indicator of the need to
* expand for small tables and for machines lacking per-cpu data
* support.
*/
#define COUNT_COMMIT_ORDER 10
+#define DEFAULT_SPLIT_COUNT_MASK 0xFUL
#define CHAIN_LEN_TARGET 1
#define CHAIN_LEN_RESIZE_THRESHOLD 3
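+/*
+ * For example, with the values above (CHAIN_LEN_TARGET == 1,
+ * CHAIN_LEN_RESIZE_THRESHOLD == 3), check_resize() below turns a
+ * detected chain of length 3 into a growth request of
+ * cds_lfht_get_count_order_u32(3) == 2 orders, i.e. a 4x larger
+ * table.
+ */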
/*
* Define the minimum table size.
*/
-#define MIN_TABLE_SIZE 1
-
-#if (CAA_BITS_PER_LONG == 32)
-#define MAX_TABLE_ORDER 32
-#else
-#define MAX_TABLE_ORDER 64
-#endif
+#define MIN_TABLE_ORDER 0
+#define MIN_TABLE_SIZE (1UL << MIN_TABLE_ORDER)
/*
- * Minimum number of dummy nodes to touch per thread to parallelize grow/shrink.
+ * Minimum number of bucket nodes to touch per thread to parallelize grow/shrink.
*/
#define MIN_PARTITION_PER_THREAD_ORDER 12
#define MIN_PARTITION_PER_THREAD (1UL << MIN_PARTITION_PER_THREAD_ORDER)
-#ifndef min
-#define min(a, b) ((a) < (b) ? (a) : (b))
-#endif
-
-#ifndef max
-#define max(a, b) ((a) > (b) ? (a) : (b))
-#endif
-
/*
* The removed flag needs to be updated atomically with the pointer.
* It indicates that no node must attach to the node scheduled for
* removal, and that node garbage collection must be performed.
- * The dummy flag does not require to be updated atomically with the
+ * The bucket flag does not need to be updated atomically with the
* pointer, but it is added as a pointer low bit flag to save space.
+ * The "removal owner" flag is used to detect which of the "del"
+ * operations that set the "removed" flag gets to return the removed
+ * node to its caller. Note that the replace operation does not need to
+ * interact with the "removal owner" flag, because it validates that
+ * the "removed" flag is not set before performing its cmpxchg.
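+ *
+ * For instance (illustrative only), a next pointer carrying
+ * (REMOVED_FLAG | REMOVAL_OWNER_FLAG) designates a logically removed
+ * node whose reclaim has already been claimed by one "del" operation;
+ * clear_flag() masks FLAGS_MASK off to recover the successor pointer.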
*/
#define REMOVED_FLAG (1UL << 0)
-#define DUMMY_FLAG (1UL << 1)
-#define FLAGS_MASK ((1UL << 2) - 1)
+#define BUCKET_FLAG (1UL << 1)
+#define REMOVAL_OWNER_FLAG (1UL << 2)
+#define FLAGS_MASK ((1UL << 3) - 1)
/* Value of the end pointer. Should not interact with flags. */
#define END_VALUE NULL
+/*
+ * ht_items_count: Split-counters counting the number of node additions
+ * and removals in the table. Only used if the CDS_LFHT_ACCOUNTING flag
+ * is set at hash table creation.
+ *
+ * These are free-running counters, never reset to zero. They count the
+ * number of add/remove, and trigger every (1 << COUNT_COMMIT_ORDER)
+ * operations to update the global counter. We choose a power-of-2 value
+ * for the trigger to deal with 32 or 64-bit overflow of the counter.
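+ *
+ * For example, with COUNT_COMMIT_ORDER == 10, every 1024th increment
+ * of a given split-counter commits 1024 into the global ht->count;
+ * the power-of-2 test (value & ((1UL << COUNT_COMMIT_ORDER) - 1))
+ * keeps working when the free-running counters eventually wrap.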
+ */
struct ht_items_count {
unsigned long add, del;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
-struct rcu_level {
- struct rcu_head head;
- struct _cds_lfht_node nodes[0];
-};
-
-struct rcu_table {
- unsigned long size; /* always a power of 2, shared (RCU) */
- unsigned long resize_target;
- int resize_initiated;
- struct rcu_level *tbl[MAX_TABLE_ORDER];
-};
-
-struct cds_lfht {
- struct rcu_table t;
- cds_lfht_hash_fct hash_fct;
- cds_lfht_compare_fct compare_fct;
- unsigned long hash_seed;
- int flags;
- /*
- * We need to put the work threads offline (QSBR) when taking this
- * mutex, because we use synchronize_rcu within this mutex critical
- * section, which waits on read-side critical sections, and could
- * therefore cause grace-period deadlock if we hold off RCU G.P.
- * completion.
- */
- pthread_mutex_t resize_mutex; /* resize mutex: add/del mutex */
- unsigned int in_progress_resize, in_progress_destroy;
- void (*cds_lfht_call_rcu)(struct rcu_head *head,
- void (*func)(struct rcu_head *head));
- void (*cds_lfht_synchronize_rcu)(void);
- void (*cds_lfht_rcu_read_lock)(void);
- void (*cds_lfht_rcu_read_unlock)(void);
- void (*cds_lfht_rcu_thread_offline)(void);
- void (*cds_lfht_rcu_thread_online)(void);
- void (*cds_lfht_rcu_register_thread)(void);
- void (*cds_lfht_rcu_unregister_thread)(void);
- pthread_attr_t *resize_attr; /* Resize threads attributes */
- long count; /* global approximate item count */
- struct ht_items_count *percpu_count; /* per-cpu item count */
-};
-
+/*
+ * rcu_resize_work: Contains arguments passed to RCU worker thread
+ * responsible for performing lazy resize.
+ */
struct rcu_resize_work {
struct rcu_head head;
struct cds_lfht *ht;
};
+/*
+ * partition_resize_work: Contains arguments passed to worker threads
+ * executing the hash table resize on the partitions of the hash table
+ * assigned to each of them.
+ */
struct partition_resize_work {
- struct rcu_head head;
+ pthread_t thread_id;
struct cds_lfht *ht;
unsigned long i, start, len;
void (*fct)(struct cds_lfht *ht, unsigned long i,
unsigned long start, unsigned long len);
};
-enum add_mode {
- ADD_DEFAULT = 0,
- ADD_UNIQUE = 1,
- ADD_REPLACE = 2,
-};
-
-static
-struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
- unsigned long size,
- struct cds_lfht_node *node,
- enum add_mode mode, int dummy);
-
/*
* Algorithm to reverse bits in a word by lookup table, extended to
* 64-bit words.
}
#endif
-unsigned int fls_ulong(unsigned long x)
+unsigned int cds_lfht_fls_ulong(unsigned long x)
{
-#if (CAA_BITS_PER_lONG == 32)
+#if (CAA_BITS_PER_LONG == 32)
return fls_u32(x);
#else
return fls_u64(x);
#endif
}
-int get_count_order_u32(uint32_t x)
+/*
+ * Return the minimum order for which x <= (1UL << order).
+ * Return -1 if x is 0.
+ */
+int cds_lfht_get_count_order_u32(uint32_t x)
{
- int order;
+ if (!x)
+ return -1;
- order = fls_u32(x) - 1;
- if (x & (x - 1))
- order++;
- return order;
+ return fls_u32(x - 1);
}
-int get_count_order_ulong(unsigned long x)
+/*
+ * Return the minimum order for which x <= (1UL << order).
+ * Return -1 if x is 0.
+ */
+int cds_lfht_get_count_order_ulong(unsigned long x)
{
- int order;
+ if (!x)
+ return -1;
- order = fls_ulong(x) - 1;
- if (x & (x - 1))
- order++;
- return order;
+ return cds_lfht_fls_ulong(x - 1);
}
-#ifdef POISON_FREE
-#define poison_free(ptr) \
- do { \
- memset(ptr, 0x42, sizeof(*(ptr))); \
- free(ptr); \
- } while (0)
-#else
-#define poison_free(ptr) free(ptr)
-#endif
-
static
-void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth);
-
-/*
- * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
- * available, then we support hash table item accounting.
- * In the unfortunate event the number of CPUs reported would be
- * inaccurate, we use modulo arithmetic on the number of CPUs we got.
- */
-#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth);
static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
unsigned long count);
static long nr_cpus_mask = -1;
+static long split_count_mask = -1;
+static int split_count_order = -1;
+
+#if defined(HAVE_SYSCONF)
+static void ht_init_nr_cpus_mask(void)
+{
+ long maxcpus;
+
+ maxcpus = sysconf(_SC_NPROCESSORS_CONF);
+ if (maxcpus <= 0) {
+ nr_cpus_mask = -2;
+ return;
+ }
+ /*
+ * round up number of CPUs to next power of two, so we
+ * can use & for modulo.
+ */
+ maxcpus = 1UL << cds_lfht_get_count_order_ulong(maxcpus);
+ nr_cpus_mask = maxcpus - 1;
+}
+#else /* #if defined(HAVE_SYSCONF) */
+static void ht_init_nr_cpus_mask(void)
+{
+ nr_cpus_mask = -2;
+}
+#endif /* #else #if defined(HAVE_SYSCONF) */
static
-struct ht_items_count *alloc_per_cpu_items_count(void)
+void alloc_split_items_count(struct cds_lfht *ht)
{
struct ht_items_count *count;
- switch (nr_cpus_mask) {
- case -2:
- return NULL;
- case -1:
- {
- long maxcpus;
-
- maxcpus = sysconf(_SC_NPROCESSORS_CONF);
- if (maxcpus <= 0) {
- nr_cpus_mask = -2;
- return NULL;
- }
- /*
- * round up number of CPUs to next power of two, so we
- * can use & for modulo.
- */
- maxcpus = 1UL << get_count_order_ulong(maxcpus);
- nr_cpus_mask = maxcpus - 1;
+ if (nr_cpus_mask == -1) {
+ ht_init_nr_cpus_mask();
+ if (nr_cpus_mask < 0)
+ split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
+ else
+ split_count_mask = nr_cpus_mask;
+ split_count_order =
+ cds_lfht_get_count_order_ulong(split_count_mask + 1);
}
- /* Fall-through */
- default:
- return calloc(nr_cpus_mask + 1, sizeof(*count));
+
+ assert(split_count_mask >= 0);
+
+ if (ht->flags & CDS_LFHT_ACCOUNTING) {
+ ht->split_count = calloc(split_count_mask + 1, sizeof(*count));
+ assert(ht->split_count);
+ } else {
+ ht->split_count = NULL;
}
}
static
-void free_per_cpu_items_count(struct ht_items_count *count)
+void free_split_items_count(struct cds_lfht *ht)
{
- poison_free(count);
+ poison_free(ht->split_count);
}
+#if defined(HAVE_SCHED_GETCPU)
static
-int ht_get_cpu(void)
+int ht_get_split_count_index(unsigned long hash)
{
int cpu;
- assert(nr_cpus_mask >= 0);
+ assert(split_count_mask >= 0);
cpu = sched_getcpu();
- if (unlikely(cpu < 0))
- return cpu;
+ if (caa_unlikely(cpu < 0))
+ return hash & split_count_mask;
else
- return cpu & nr_cpus_mask;
+ return cpu & split_count_mask;
}
-
+#else /* #if defined(HAVE_SCHED_GETCPU) */
static
-void ht_count_add(struct cds_lfht *ht, unsigned long size)
+int ht_get_split_count_index(unsigned long hash)
{
- unsigned long percpu_count;
- int cpu;
-
- if (unlikely(!ht->percpu_count))
- return;
- cpu = ht_get_cpu();
- if (unlikely(cpu < 0))
- return;
- percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1);
- if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
- long count;
-
- dbg_printf("add percpu %lu\n", percpu_count);
- count = uatomic_add_return(&ht->count,
- 1UL << COUNT_COMMIT_ORDER);
- /* If power of 2 */
- if (!(count & (count - 1))) {
- if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
- return;
- dbg_printf("add set global %ld\n", count);
- cds_lfht_resize_lazy_count(ht, size,
- count >> (CHAIN_LEN_TARGET - 1));
- }
- }
+ return hash & split_count_mask;
}
+#endif /* #else #if defined(HAVE_SCHED_GETCPU) */
static
-void ht_count_del(struct cds_lfht *ht, unsigned long size)
+void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
- unsigned long percpu_count;
- int cpu;
+ unsigned long split_count;
+ int index;
+ long count;
- if (unlikely(!ht->percpu_count))
+ if (caa_unlikely(!ht->split_count))
return;
- cpu = ht_get_cpu();
- if (unlikely(cpu < 0))
+ index = ht_get_split_count_index(hash);
+ split_count = uatomic_add_return(&ht->split_count[index].add, 1);
+ if (caa_likely(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))
return;
- percpu_count = uatomic_add_return(&ht->percpu_count[cpu].del, 1);
- if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
- long count;
-
- dbg_printf("del percpu %lu\n", percpu_count);
- count = uatomic_add_return(&ht->count,
- -(1UL << COUNT_COMMIT_ORDER));
- /* If power of 2 */
- if (!(count & (count - 1))) {
- if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
- return;
- dbg_printf("del set global %ld\n", count);
- /*
- * Don't shrink table if the number of nodes is below a
- * certain threshold.
- */
- if (count < (1UL << COUNT_COMMIT_ORDER) * (nr_cpus_mask + 1))
- return;
- cds_lfht_resize_lazy_count(ht, size,
- count >> (CHAIN_LEN_TARGET - 1));
- }
- }
-}
-
-#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+ /* Only if the number of additions is a multiple of 1UL << COUNT_COMMIT_ORDER */
-static const long nr_cpus_mask = -2;
-
-static
-struct ht_items_count *alloc_per_cpu_items_count(void)
-{
- return NULL;
-}
+ dbg_printf("add split count %lu\n", split_count);
+ count = uatomic_add_return(&ht->count,
+ 1UL << COUNT_COMMIT_ORDER);
+ if (caa_likely(count & (count - 1)))
+ return;
+ /* Only if the global count is a power of 2 */
-static
-void free_per_cpu_items_count(struct ht_items_count *count)
-{
+ if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
+ return;
+ dbg_printf("add set global %ld\n", count);
+ cds_lfht_resize_lazy_count(ht, size,
+ count >> (CHAIN_LEN_TARGET - 1));
}
static
-void ht_count_add(struct cds_lfht *ht, unsigned long size)
+void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
-}
+ unsigned long split_count;
+ int index;
+ long count;
-static
-void ht_count_del(struct cds_lfht *ht, unsigned long size)
-{
-}
+ if (caa_unlikely(!ht->split_count))
+ return;
+ index = ht_get_split_count_index(hash);
+ split_count = uatomic_add_return(&ht->split_count[index].del, 1);
+ if (caa_likely(split_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))
+ return;
+ /* Only if the number of deletions is a multiple of 1UL << COUNT_COMMIT_ORDER */
-#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+ dbg_printf("del split count %lu\n", split_count);
+ count = uatomic_add_return(&ht->count,
+ -(1UL << COUNT_COMMIT_ORDER));
+ if (caa_likely(count & (count - 1)))
+ return;
+ /* Only if the global count is a power of 2 */
+ if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
+ return;
+ dbg_printf("del set global %ld\n", count);
+ /*
+ * Don't shrink table if the number of nodes is below a
+ * certain threshold.
+ */
+ if (count < (1UL << COUNT_COMMIT_ORDER) * (split_count_mask + 1))
+ return;
+ cds_lfht_resize_lazy_count(ht, size,
+ count >> (CHAIN_LEN_TARGET - 1));
+}
static
void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
* Use bucket-local length for small table expand and for
* environments lacking per-cpu data support.
*/
- if (count >= (1UL << COUNT_COMMIT_ORDER))
+ if (count >= (1UL << (COUNT_COMMIT_ORDER + split_count_order)))
return;
if (chain_len > 100)
dbg_printf("WARNING: large chain length: %u.\n",
chain_len);
- if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
- cds_lfht_resize_lazy(ht, size,
- get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
+ if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD) {
+ int growth;
+
+ /*
+ * Ideal growth calculated based on chain length.
+ */
+ growth = cds_lfht_get_count_order_u32(chain_len
+ - (CHAIN_LEN_TARGET - 1));
+ if ((ht->flags & CDS_LFHT_ACCOUNTING)
+ && (size << growth)
+ >= (1UL << (COUNT_COMMIT_ORDER
+ + split_count_order))) {
+ /*
+ * If ideal growth expands the hash table size
+ * beyond the "small hash table" sizes, use the
+ * maximum small hash table size to attempt
+ * expanding the hash table. This only applies
+ * when node accounting is available, otherwise
+ * the chain length is used to expand the hash
+ * table in every case.
+ */
+ growth = COUNT_COMMIT_ORDER + split_count_order
+ - cds_lfht_get_count_order_ulong(size);
+ if (growth <= 0)
+ return;
+ }
+ cds_lfht_resize_lazy_grow(ht, size, growth);
+ }
}
static
}
static
-struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
+int is_bucket(struct cds_lfht_node *node)
+{
+ return ((unsigned long) node) & BUCKET_FLAG;
+}
+
+static
+struct cds_lfht_node *flag_bucket(struct cds_lfht_node *node)
+{
+ return (struct cds_lfht_node *) (((unsigned long) node) | BUCKET_FLAG);
+}
+
+static
+int is_removal_owner(struct cds_lfht_node *node)
{
- return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
+ return ((unsigned long) node) & REMOVAL_OWNER_FLAG;
}
static
-int is_dummy(struct cds_lfht_node *node)
+struct cds_lfht_node *flag_removal_owner(struct cds_lfht_node *node)
{
- return ((unsigned long) node) & DUMMY_FLAG;
+ return (struct cds_lfht_node *) (((unsigned long) node) | REMOVAL_OWNER_FLAG);
}
static
-struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node)
+struct cds_lfht_node *flag_removed_or_removal_owner(struct cds_lfht_node *node)
{
- return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG);
+ return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG | REMOVAL_OWNER_FLAG);
}
static
}
static
-unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
+unsigned long _uatomic_xchg_monotonic_increase(unsigned long *ptr,
+ unsigned long v)
{
unsigned long old1, old2;
if (old2 >= v)
return old2;
} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
- return v;
+ return old2;
}
static
-void cds_lfht_free_level(struct rcu_head *head)
+void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
{
- struct rcu_level *l =
- caa_container_of(head, struct rcu_level, head);
- poison_free(l);
+ ht->mm->alloc_bucket_table(ht, order);
+}
+
+/*
+ * cds_lfht_free_bucket_table() should be called with decreasing order.
+ * When cds_lfht_free_bucket_table(0) is called, it means the whole
+ * lfht is destroyed.
+ */
+static
+void cds_lfht_free_bucket_table(struct cds_lfht *ht, unsigned long order)
+{
+ ht->mm->free_bucket_table(ht, order);
+}
+
+static inline
+struct cds_lfht_node *bucket_at(struct cds_lfht *ht, unsigned long index)
+{
+ return ht->bucket_at(ht, index);
+}
+
+static inline
+struct cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
+ unsigned long hash)
+{
+ assert(size > 0);
+ return bucket_at(ht, hash & (size - 1));
}
/*
* Remove all logically deleted nodes from a bucket up to a certain node key.
*/
static
-void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
+void _cds_lfht_gc_bucket(struct cds_lfht_node *bucket, struct cds_lfht_node *node)
{
struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
- assert(!is_dummy(dummy));
- assert(!is_removed(dummy));
- assert(!is_dummy(node));
+ assert(!is_bucket(bucket));
+ assert(!is_removed(bucket));
+ assert(!is_removal_owner(bucket));
+ assert(!is_bucket(node));
assert(!is_removed(node));
+ assert(!is_removal_owner(node));
for (;;) {
- iter_prev = dummy;
- /* We can always skip the dummy node initially */
- iter = rcu_dereference(iter_prev->p.next);
- assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+ iter_prev = bucket;
+ /* We can always skip the bucket node initially */
+ iter = rcu_dereference(iter_prev->next);
+ assert(!is_removed(iter));
+ assert(!is_removal_owner(iter));
+ assert(iter_prev->reverse_hash <= node->reverse_hash);
/*
- * We should never be called with dummy (start of chain)
+ * We should never be called with bucket (start of chain)
* and logically removed node (end of path compression
* marker) being the actual same node. This would be a
* bug in the algorithm implementation.
*/
- assert(dummy != node);
+ assert(bucket != node);
for (;;) {
- if (unlikely(is_end(iter)))
+ if (caa_unlikely(is_end(iter)))
return;
- if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
+ if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
return;
- next = rcu_dereference(clear_flag(iter)->p.next);
- if (likely(is_removed(next)))
+ next = rcu_dereference(clear_flag(iter)->next);
+ if (caa_likely(is_removed(next)))
break;
iter_prev = clear_flag(iter);
iter = next;
}
assert(!is_removed(iter));
- if (is_dummy(iter))
- new_next = flag_dummy(clear_flag(next));
+ assert(!is_removal_owner(iter));
+ if (is_bucket(iter))
+ new_next = flag_bucket(clear_flag(next));
else
new_next = clear_flag(next);
- if (is_removed(iter))
- new_next = flag_removed(new_next);
- (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
+ (void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
}
- return;
}
static
int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
struct cds_lfht_node *old_node,
- struct cds_lfht_node *ret_next,
+ struct cds_lfht_node *old_next,
struct cds_lfht_node *new_node)
{
- struct cds_lfht_node *dummy, *old_next;
- struct _cds_lfht_node *lookup;
- int flagged = 0;
- unsigned long hash, index, order;
+ struct cds_lfht_node *bucket, *ret_next;
if (!old_node) /* Return -ENOENT if asked to replace NULL node */
- goto end;
+ return -ENOENT;
assert(!is_removed(old_node));
- assert(!is_dummy(old_node));
+ assert(!is_removal_owner(old_node));
+ assert(!is_bucket(old_node));
assert(!is_removed(new_node));
- assert(!is_dummy(new_node));
+ assert(!is_removal_owner(new_node));
+ assert(!is_bucket(new_node));
assert(new_node != old_node);
- do {
+ for (;;) {
/* Insert after node to be replaced */
- old_next = ret_next;
if (is_removed(old_next)) {
/*
* Too late, the old node has been removed under us
* between lookup and replace. Fail.
*/
- goto end;
+ return -ENOENT;
}
- assert(!is_dummy(old_next));
- assert(new_node != clear_flag(old_next));
- new_node->p.next = clear_flag(old_next);
+ assert(old_next == clear_flag(old_next));
+ assert(new_node != old_next);
+ /*
+ * REMOVAL_OWNER flag is _NEVER_ set before the REMOVED
+ * flag. It is either set atomically at the same time
+ * (replace) or after (del).
+ */
+ assert(!is_removal_owner(old_next));
+ new_node->next = old_next;
/*
* Here is the whole trick for lock-free replace: we add
* the replacement node _after_ the node we want to
* next pointer, they will either skip the old node due
* to the removal flag and see the new node, or use
* the old node, but will not see the new one.
+ * This is a replacement of a node with another node
+ * that has the same value: we are therefore not
+ * removing a value from the hash table. We set both the
+ * REMOVED and REMOVAL_OWNER flags atomically so we own
+ * the node after successful cmpxchg.
*/
- ret_next = uatomic_cmpxchg(&old_node->p.next,
- old_next, flag_removed(new_node));
- } while (ret_next != old_next);
-
- /* We performed the replacement. */
- flagged = 1;
+ ret_next = uatomic_cmpxchg(&old_node->next,
+ old_next, flag_removed_or_removal_owner(new_node));
+ if (ret_next == old_next)
+ break; /* We performed the replacement. */
+ old_next = ret_next;
+ }
/*
* Ensure that the old node is not visible to readers anymore:
* lookup for the node, and remove it (along with any other
* logically removed node) if found.
*/
- hash = bit_reverse_ulong(old_node->p.reverse_hash);
- assert(size > 0);
- index = hash & (size - 1);
- order = get_count_order_ulong(index + 1);
- lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
- dummy = (struct cds_lfht_node *) lookup;
- _cds_lfht_gc_bucket(dummy, new_node);
-end:
- /*
- * Only the flagging action indicated that we (and no other)
- * replaced the node from the hash table.
- */
- if (flagged) {
- assert(is_removed(rcu_dereference(old_node->p.next)));
- return 0;
- } else {
- return -ENOENT;
- }
+ bucket = lookup_bucket(ht, size, bit_reverse_ulong(old_node->reverse_hash));
+ _cds_lfht_gc_bucket(bucket, new_node);
+
+ assert(is_removed(CMM_LOAD_SHARED(old_node->next)));
+ return 0;
}
+/*
+ * A non-NULL unique_ret pointer uses the "add unique" (or uniquify) add
+ * mode. A NULL unique_ret allows creation of duplicate keys.
+ */
static
-struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
- unsigned long size,
- struct cds_lfht_node *node,
- enum add_mode mode, int dummy)
+void _cds_lfht_add(struct cds_lfht *ht,
+ unsigned long hash,
+ cds_lfht_match_fct match,
+ const void *key,
+ unsigned long size,
+ struct cds_lfht_node *node,
+ struct cds_lfht_iter *unique_ret,
+ int bucket_flag)
{
struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
- *dummy_node, *return_node;
- struct _cds_lfht_node *lookup;
- unsigned long hash, index, order;
+ *return_node;
+ struct cds_lfht_node *bucket;
- assert(!is_dummy(node));
+ assert(!is_bucket(node));
assert(!is_removed(node));
- if (!size) {
- assert(dummy);
- node->p.next = flag_dummy(get_end());
- return node; /* Initial first add (head) */
- }
- hash = bit_reverse_ulong(node->p.reverse_hash);
+ assert(!is_removal_owner(node));
+ bucket = lookup_bucket(ht, size, hash);
for (;;) {
uint32_t chain_len = 0;
* iter_prev points to the non-removed node prior to the
* insert location.
*/
- index = hash & (size - 1);
- order = get_count_order_ulong(index + 1);
- lookup = &ht->t.tbl[order]->nodes[index & ((!order ? 0 : (1UL << (order - 1))) - 1)];
- iter_prev = (struct cds_lfht_node *) lookup;
- /* We can always skip the dummy node initially */
- iter = rcu_dereference(iter_prev->p.next);
- assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+ iter_prev = bucket;
+ /* We can always skip the bucket node initially */
+ iter = rcu_dereference(iter_prev->next);
+ assert(iter_prev->reverse_hash <= node->reverse_hash);
for (;;) {
- if (unlikely(is_end(iter)))
+ if (caa_unlikely(is_end(iter)))
goto insert;
- if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
+ if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
+ goto insert;
+
+ /* bucket node is the first node of the identical-hash-value chain */
+ if (bucket_flag && clear_flag(iter)->reverse_hash == node->reverse_hash)
goto insert;
- next = rcu_dereference(clear_flag(iter)->p.next);
- if (unlikely(is_removed(next)))
+
+ next = rcu_dereference(clear_flag(iter)->next);
+ if (caa_unlikely(is_removed(next)))
goto gc_node;
- if ((mode == ADD_UNIQUE || mode == ADD_REPLACE)
- && !is_dummy(next)
- && !ht->compare_fct(node->key, node->key_len,
- clear_flag(iter)->key,
- clear_flag(iter)->key_len)) {
- if (mode == ADD_UNIQUE)
- return clear_flag(iter);
- else /* mode == ADD_REPLACE */
- goto replace;
+
+ /* uniquely add */
+ if (unique_ret
+ && !is_bucket(next)
+ && clear_flag(iter)->reverse_hash == node->reverse_hash) {
+ struct cds_lfht_iter d_iter = { .node = node, .next = iter, };
+
+ /*
+ * uniquely adding inserts the node as the first
+ * node of the identical-hash-value node chain.
+ *
+ * This semantic ensures no duplicated keys
+ * should ever be observable in the table
+ * (including when traversing the table node by
+ * node with forward iterations).
+ */
+ cds_lfht_next_duplicate(ht, match, key, &d_iter);
+ if (!d_iter.node)
+ goto insert;
+
+ *unique_ret = d_iter;
+ return;
}
+
/* Only account for identical reverse hash once */
- if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
- && !is_dummy(next))
+ if (iter_prev->reverse_hash != clear_flag(iter)->reverse_hash
+ && !is_bucket(next))
check_resize(ht, size, ++chain_len);
iter_prev = clear_flag(iter);
iter = next;
insert:
assert(node != clear_flag(iter));
assert(!is_removed(iter_prev));
+ assert(!is_removal_owner(iter_prev));
assert(!is_removed(iter));
+ assert(!is_removal_owner(iter));
assert(iter_prev != node);
- if (!dummy)
- node->p.next = clear_flag(iter);
+ if (!bucket_flag)
+ node->next = clear_flag(iter);
else
- node->p.next = flag_dummy(clear_flag(iter));
- if (is_dummy(iter))
- new_node = flag_dummy(node);
+ node->next = flag_bucket(clear_flag(iter));
+ if (is_bucket(iter))
+ new_node = flag_bucket(node);
else
new_node = node;
- if (uatomic_cmpxchg(&iter_prev->p.next, iter,
+ if (uatomic_cmpxchg(&iter_prev->next, iter,
new_node) != iter) {
continue; /* retry */
} else {
- if (mode == ADD_REPLACE)
- return_node = NULL;
- else /* ADD_DEFAULT and ADD_UNIQUE */
- return_node = node;
- goto gc_end;
- }
-
- replace:
-
- if (!_cds_lfht_replace(ht, size, clear_flag(iter), next,
- node)) {
- return_node = clear_flag(iter);
- goto end; /* gc already done */
- } else {
- continue; /* retry */
+ return_node = node;
+ goto end;
}
gc_node:
assert(!is_removed(iter));
- if (is_dummy(iter))
- new_next = flag_dummy(clear_flag(next));
+ assert(!is_removal_owner(iter));
+ if (is_bucket(iter))
+ new_next = flag_bucket(clear_flag(next));
else
new_next = clear_flag(next);
- (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
+ (void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
/* retry */
}
-gc_end:
- /* Garbage collect logically removed nodes in the bucket */
- index = hash & (size - 1);
- order = get_count_order_ulong(index + 1);
- lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
- dummy_node = (struct cds_lfht_node *) lookup;
- _cds_lfht_gc_bucket(dummy_node, node);
end:
- return return_node;
+ if (unique_ret) {
+ unique_ret->node = return_node;
+ /* unique_ret->next left unset, never used. */
+ }
}
static
int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
- struct cds_lfht_node *node,
- int dummy_removal)
+ struct cds_lfht_node *node)
{
- struct cds_lfht_node *dummy, *next, *old;
- struct _cds_lfht_node *lookup;
- int flagged = 0;
- unsigned long hash, index, order;
+ struct cds_lfht_node *bucket, *next;
if (!node) /* Return -ENOENT if asked to delete NULL node */
- goto end;
+ return -ENOENT;
/* logically delete the node */
- assert(!is_dummy(node));
+ assert(!is_bucket(node));
assert(!is_removed(node));
- old = rcu_dereference(node->p.next);
- do {
- struct cds_lfht_node *new_next;
-
- next = old;
- if (unlikely(is_removed(next)))
- goto end;
- if (dummy_removal)
- assert(is_dummy(next));
- else
- assert(!is_dummy(next));
- new_next = flag_removed(next);
- old = uatomic_cmpxchg(&node->p.next, next, new_next);
- } while (old != next);
+ assert(!is_removal_owner(node));
+ /*
+ * We are first checking if the node had previously been
+ * logically removed (this check is not atomic with setting the
+ * logical removal flag). Return -ENOENT if the node had
+ * previously been removed.
+ */
+ next = CMM_LOAD_SHARED(node->next); /* next is not dereferenced */
+ if (caa_unlikely(is_removed(next)))
+ return -ENOENT;
+ assert(!is_bucket(next));
+ /*
+ * The del operation semantic guarantees a full memory barrier
+ * before the uatomic_or atomic commit of the deletion flag.
+ */
+ cmm_smp_mb__before_uatomic_or();
+ /*
+ * We set the REMOVED_FLAG unconditionally. Note that there may
+ * be more than one concurrent thread setting this flag.
+ * Which one wins the race will be known after the garbage
+ * collection phase, stay tuned!
+ */
+ uatomic_or(&node->next, REMOVED_FLAG);
/* We performed the (logical) deletion. */
- flagged = 1;
/*
* Ensure that the node is not visible to readers anymore: lookup for
* the node, and remove it (along with any other logically removed node)
* if found.
*/
- hash = bit_reverse_ulong(node->p.reverse_hash);
- assert(size > 0);
- index = hash & (size - 1);
- order = get_count_order_ulong(index + 1);
- lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
- dummy = (struct cds_lfht_node *) lookup;
- _cds_lfht_gc_bucket(dummy, node);
-end:
+ bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
+ _cds_lfht_gc_bucket(bucket, node);
+
+ assert(is_removed(CMM_LOAD_SHARED(node->next)));
/*
- * Only the flagging action indicated that we (and no other)
- * removed the node from the hash.
+ * Last phase: atomically exchange node->next with a version
+ * having "REMOVAL_OWNER_FLAG" set. If the returned node->next
+ * pointer did _not_ have "REMOVAL_OWNER_FLAG" set, we now own
+ * the node and win the removal race.
+ * It is interesting to note that all "add" paths are forbidden
+ * to change the next pointer starting from the point where the
+ * REMOVED_FLAG is set, so here a read followed by an
+ * xchg() suffices to guarantee that the xchg() will only
+ * ever set the "REMOVAL_OWNER_FLAG" (or change nothing if
+ * the flag was already set).
*/
- if (flagged) {
- assert(is_removed(rcu_dereference(node->p.next)));
+ if (!is_removal_owner(uatomic_xchg(&node->next,
+ flag_removal_owner(node->next))))
return 0;
- } else {
+ else
return -ENOENT;
- }
}
static
{
struct partition_resize_work *work = arg;
- work->ht->cds_lfht_rcu_register_thread();
+ work->ht->flavor->register_thread();
work->fct(work->ht, work->i, work->start, work->len);
- work->ht->cds_lfht_rcu_unregister_thread();
+ work->ht->flavor->unregister_thread();
return NULL;
}
void (*fct)(struct cds_lfht *ht, unsigned long i,
unsigned long start, unsigned long len))
{
- unsigned long partition_len;
+ unsigned long partition_len, start = 0;
struct partition_resize_work *work;
int thread, ret;
unsigned long nr_threads;
- pthread_t *thread_id;
+
+ assert(nr_cpus_mask != -1);
+ if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD)
+ goto fallback;
/*
* Note: nr_cpus_mask + 1 is always power of 2.
} else {
nr_threads = 1;
}
- partition_len = len >> get_count_order_ulong(nr_threads);
+ partition_len = len >> cds_lfht_get_count_order_ulong(nr_threads);
work = calloc(nr_threads, sizeof(*work));
- thread_id = calloc(nr_threads, sizeof(*thread_id));
- assert(work);
+ if (!work) {
+ dbg_printf("error allocating for resize, single-threading\n");
+ goto fallback;
+ }
for (thread = 0; thread < nr_threads; thread++) {
work[thread].ht = ht;
work[thread].i = i;
work[thread].len = partition_len;
work[thread].start = thread * partition_len;
work[thread].fct = fct;
- ret = pthread_create(&thread_id[thread], ht->resize_attr,
+ ret = pthread_create(&(work[thread].thread_id), ht->resize_attr,
partition_resize_thread, &work[thread]);
+ if (ret == EAGAIN) {
+ /*
+ * Out of resources: wait and join the threads
+ * we've created, then handle leftovers.
+ */
+ dbg_printf("error spawning for resize, single-threading\n");
+ start = work[thread].start;
+ len -= start;
+ nr_threads = thread;
+ break;
+ }
assert(!ret);
}
for (thread = 0; thread < nr_threads; thread++) {
- ret = pthread_join(thread_id[thread], NULL);
+ ret = pthread_join(work[thread].thread_id, NULL);
assert(!ret);
}
free(work);
- free(thread_id);
+
+ /*
+ * A pthread_create failure above will either leave us with
+ * no threads to join, or have us start at a non-zero offset;
+ * fall back to single-threaded processing of leftovers.
+ */
+ if (start == 0 && nr_threads > 0)
+ return;
+fallback:
+ ht->flavor->thread_online();
+ fct(ht, i, start, len);
+ ht->flavor->thread_offline();
}
/*
* many worker threads, based on the number of CPUs available in the system.
* This should therefore take care of not having the expand lagging behind too
* many concurrent insertion threads by using the scheduler's ability to
- * schedule dummy node population fairly with insertions.
+ * schedule bucket node population fairly with insertions.
*/
static
void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
unsigned long start, unsigned long len)
{
- unsigned long j;
+ unsigned long j, size = 1UL << (i - 1);
- ht->cds_lfht_rcu_read_lock();
- for (j = start; j < start + len; j++) {
- struct cds_lfht_node *new_node =
- (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
+ assert(i > MIN_TABLE_ORDER);
+ ht->flavor->read_lock();
+ for (j = size + start; j < size + start + len; j++) {
+ struct cds_lfht_node *new_node = bucket_at(ht, j);
- dbg_printf("init populate: i %lu j %lu hash %lu\n",
- i, j, !i ? 0 : (1UL << (i - 1)) + j);
- new_node->p.reverse_hash =
- bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
- (void) _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)),
- new_node, ADD_DEFAULT, 1);
+ assert(j >= size && j < (size << 1));
+ dbg_printf("init populate: order %lu index %lu hash %lu\n",
+ i, j, j);
+ new_node->reverse_hash = bit_reverse_ulong(j);
+ _cds_lfht_add(ht, j, NULL, NULL, size, new_node, NULL, 1);
}
- ht->cds_lfht_rcu_read_unlock();
+ ht->flavor->read_unlock();
}
static
void init_table_populate(struct cds_lfht *ht, unsigned long i,
unsigned long len)
{
- assert(nr_cpus_mask != -1);
- if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
- ht->cds_lfht_rcu_thread_online();
- init_table_populate_partition(ht, i, 0, len);
- ht->cds_lfht_rcu_thread_offline();
- return;
- }
partition_resize_helper(ht, i, len, init_table_populate_partition);
}
static
void init_table(struct cds_lfht *ht,
- unsigned long first_order, unsigned long len_order)
+ unsigned long first_order, unsigned long last_order)
{
- unsigned long i, end_order;
+ unsigned long i;
- dbg_printf("init table: first_order %lu end_order %lu\n",
- first_order, first_order + len_order);
- end_order = first_order + len_order;
- for (i = first_order; i < end_order; i++) {
+ dbg_printf("init table: first_order %lu last_order %lu\n",
+ first_order, last_order);
+ assert(first_order > MIN_TABLE_ORDER);
+ for (i = first_order; i <= last_order; i++) {
unsigned long len;
- len = !i ? 1 : 1UL << (i - 1);
+ len = 1UL << (i - 1);
dbg_printf("init order %lu len: %lu\n", i, len);
/* Stop expand if the resize target changes under us */
- if (CMM_LOAD_SHARED(ht->t.resize_target) < (!i ? 1 : (1UL << i)))
+ if (CMM_LOAD_SHARED(ht->resize_target) < (1UL << i))
break;
- ht->t.tbl[i] = calloc(1, sizeof(struct rcu_level)
- + (len * sizeof(struct _cds_lfht_node)));
- assert(ht->t.tbl[i]);
+ cds_lfht_alloc_bucket_table(ht, i);
/*
- * Set all dummy nodes reverse hash values for a level and
- * link all dummy nodes into the table.
+ * Set all bucket nodes reverse hash values for a level and
+ * link all bucket nodes into the table.
*/
init_table_populate(ht, i, len);
* Update table size.
*/
cmm_smp_wmb(); /* populate data before RCU size */
- CMM_STORE_SHARED(ht->t.size, !i ? 1 : (1UL << i));
+ CMM_STORE_SHARED(ht->size, 1UL << i);
- dbg_printf("init new size: %lu\n", !i ? 1 : (1UL << i));
+ dbg_printf("init new size: %lu\n", 1UL << i);
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
break;
}
* Concurrent removal and add operations are helping us perform garbage
* collection of logically removed nodes. We guarantee that all logically
* removed nodes have been garbage-collected (unlinked) before call_rcu is
- * invoked to free a hole level of dummy nodes (after a grace period).
+ * invoked to free a whole level of bucket nodes (after a grace period).
*
- * Logical removal and garbage collection can therefore be done in batch or on a
- * node-per-node basis, as long as the guarantee above holds.
+ * Logical removal and garbage collection can therefore be done in batch
+ * or on a node-per-node basis, as long as the guarantee above holds.
*
* When we reach a certain length, we can split this removal over many worker
* threads, based on the number of CPUs available in the system. This should
void remove_table_partition(struct cds_lfht *ht, unsigned long i,
unsigned long start, unsigned long len)
{
- unsigned long j;
+ unsigned long j, size = 1UL << (i - 1);
- ht->cds_lfht_rcu_read_lock();
- for (j = start; j < start + len; j++) {
- struct cds_lfht_node *fini_node =
- (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
+ assert(i > MIN_TABLE_ORDER);
+ ht->flavor->read_lock();
+ for (j = size + start; j < size + start + len; j++) {
+ struct cds_lfht_node *fini_bucket = bucket_at(ht, j);
+ struct cds_lfht_node *parent_bucket = bucket_at(ht, j - size);
- dbg_printf("remove entry: i %lu j %lu hash %lu\n",
- i, j, !i ? 0 : (1UL << (i - 1)) + j);
- fini_node->p.reverse_hash =
- bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
- (void) _cds_lfht_del(ht, !i ? 0 : (1UL << (i - 1)),
- fini_node, 1);
+ assert(j >= size && j < (size << 1));
+ dbg_printf("remove entry: order %lu index %lu hash %lu\n",
+ i, j, j);
+ /* Set the REMOVED_FLAG to freeze the ->next for gc */
+ uatomic_or(&fini_bucket->next, REMOVED_FLAG);
+ _cds_lfht_gc_bucket(parent_bucket, fini_bucket);
}
- ht->cds_lfht_rcu_read_unlock();
+ ht->flavor->read_unlock();
}
static
void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
{
-
- assert(nr_cpus_mask != -1);
- if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD) {
- ht->cds_lfht_rcu_thread_online();
- remove_table_partition(ht, i, 0, len);
- ht->cds_lfht_rcu_thread_offline();
- return;
- }
partition_resize_helper(ht, i, len, remove_table_partition);
}
+/*
+ * fini_table() is never called for first_order == 0, which is why
+ * free_by_rcu_order == 0 can be used as a criterion to know whether
+ * free must be called.
+ */
static
void fini_table(struct cds_lfht *ht,
- unsigned long first_order, unsigned long len_order)
+ unsigned long first_order, unsigned long last_order)
{
- long i, end_order;
+ long i;
+ unsigned long free_by_rcu_order = 0;
- dbg_printf("fini table: first_order %lu end_order %lu\n",
- first_order, first_order + len_order);
- end_order = first_order + len_order;
- assert(first_order > 0);
- for (i = end_order - 1; i >= first_order; i--) {
+ dbg_printf("fini table: first_order %lu last_order %lu\n",
+ first_order, last_order);
+ assert(first_order > MIN_TABLE_ORDER);
+ for (i = last_order; i >= first_order; i--) {
unsigned long len;
- len = !i ? 1 : 1UL << (i - 1);
+ len = 1UL << (i - 1);
dbg_printf("fini order %lu len: %lu\n", i, len);
/* Stop shrink if the resize target changes under us */
- if (CMM_LOAD_SHARED(ht->t.resize_target) > (1UL << (i - 1)))
+ if (CMM_LOAD_SHARED(ht->resize_target) > (1UL << (i - 1)))
break;
cmm_smp_wmb(); /* populate data before RCU size */
- CMM_STORE_SHARED(ht->t.size, 1UL << (i - 1));
+ CMM_STORE_SHARED(ht->size, 1UL << (i - 1));
/*
* We need to wait for all add operations to reach Q.S. (and
* thus use the new table for lookups) before we can start
- * releasing the old dummy nodes. Otherwise their lookup will
+ * releasing the old bucket nodes. Otherwise their lookup will
* return a logically removed node as insert position.
*/
- ht->cds_lfht_synchronize_rcu();
+ ht->flavor->update_synchronize_rcu();
+ if (free_by_rcu_order)
+ cds_lfht_free_bucket_table(ht, free_by_rcu_order);
/*
- * Set "removed" flag in dummy nodes about to be removed.
- * Unlink all now-logically-removed dummy node pointers.
+ * Set "removed" flag in bucket nodes about to be removed.
+ * Unlink all now-logically-removed bucket node pointers.
* Concurrent add/remove operation are helping us doing
* the gc.
*/
remove_table(ht, i, len);
- ht->cds_lfht_call_rcu(&ht->t.tbl[i]->head, cds_lfht_free_level);
+ free_by_rcu_order = i;
dbg_printf("fini new size: %lu\n", 1UL << i);
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
break;
}
+
+ if (free_by_rcu_order) {
+ ht->flavor->update_synchronize_rcu();
+ cds_lfht_free_bucket_table(ht, free_by_rcu_order);
+ }
+}
+
+static
+void cds_lfht_create_bucket(struct cds_lfht *ht, unsigned long size)
+{
+ struct cds_lfht_node *prev, *node;
+ unsigned long order, len, i;
+
+ cds_lfht_alloc_bucket_table(ht, 0);
+
+ dbg_printf("create bucket: order 0 index 0 hash 0\n");
+ node = bucket_at(ht, 0);
+ node->next = flag_bucket(get_end());
+ node->reverse_hash = 0;
+
+ for (order = 1; order < cds_lfht_get_count_order_ulong(size) + 1; order++) {
+ len = 1UL << (order - 1);
+ cds_lfht_alloc_bucket_table(ht, order);
+
+ for (i = 0; i < len; i++) {
+ /*
+ * Now, we are trying to init the node with the
+ * hash=(len+i) (which is also a bucket with the
+ * index=(len+i)) and insert it into the hash table,
+ * so this node has to be inserted after the bucket
+ * with the index=(len+i)&(len-1)=i. Because no other
+ * non-bucket node nor bucket node with a larger
+ * index/hash has been inserted yet, the bucket node
+ * being inserted is linked directly after the bucket
+ * node with index=i.
+ */
+ prev = bucket_at(ht, i);
+ node = bucket_at(ht, len + i);
+
+ dbg_printf("create bucket: order %lu index %lu hash %lu\n",
+ order, len + i, len + i);
+ node->reverse_hash = bit_reverse_ulong(len + i);
+
+ /* insert after prev */
+ assert(is_bucket(prev->next));
+ node->next = prev->next;
+ prev->next = flag_bucket(node);
+ }
+ }
}
-struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
- cds_lfht_compare_fct compare_fct,
- unsigned long hash_seed,
- unsigned long init_size,
+struct cds_lfht *_cds_lfht_new(unsigned long init_size,
+ unsigned long min_nr_alloc_buckets,
+ unsigned long max_nr_buckets,
int flags,
- void (*cds_lfht_call_rcu)(struct rcu_head *head,
- void (*func)(struct rcu_head *head)),
- void (*cds_lfht_synchronize_rcu)(void),
- void (*cds_lfht_rcu_read_lock)(void),
- void (*cds_lfht_rcu_read_unlock)(void),
- void (*cds_lfht_rcu_thread_offline)(void),
- void (*cds_lfht_rcu_thread_online)(void),
- void (*cds_lfht_rcu_register_thread)(void),
- void (*cds_lfht_rcu_unregister_thread)(void),
+ const struct cds_lfht_mm_type *mm,
+ const struct rcu_flavor_struct *flavor,
pthread_attr_t *attr)
{
struct cds_lfht *ht;
unsigned long order;
+ /* min_nr_alloc_buckets must be power of two */
+ if (!min_nr_alloc_buckets || (min_nr_alloc_buckets & (min_nr_alloc_buckets - 1)))
+ return NULL;
+
/* init_size must be power of two */
- if (init_size && (init_size & (init_size - 1)))
+ if (!init_size || (init_size & (init_size - 1)))
+ return NULL;
+
+ /*
+ * Memory management plugin default.
+ */
+ if (!mm) {
+ if (CAA_BITS_PER_LONG > 32
+ && max_nr_buckets
+ && max_nr_buckets <= (1ULL << 32)) {
+ /*
+ * For 64-bit architectures, with max number of
+ * buckets small enough not to use the entire
+ * 64-bit memory mapping space (and allowing a
+ * fair number of hash table instances), use the
+ * mmap allocator, which is faster than the
+ * order allocator.
+ */
+ mm = &cds_lfht_mm_mmap;
+ } else {
+ /*
+ * The fallback is to use the order allocator.
+ */
+ mm = &cds_lfht_mm_order;
+ }
+ }
+
+	/* max_nr_buckets == 0 for the order-based mm means infinite */
+ if (mm == &cds_lfht_mm_order && !max_nr_buckets)
+ max_nr_buckets = 1UL << (MAX_TABLE_ORDER - 1);
+
+ /* max_nr_buckets must be power of two */
+ if (!max_nr_buckets || (max_nr_buckets & (max_nr_buckets - 1)))
return NULL;
- ht = calloc(1, sizeof(struct cds_lfht));
+
+ min_nr_alloc_buckets = max(min_nr_alloc_buckets, MIN_TABLE_SIZE);
+ init_size = max(init_size, MIN_TABLE_SIZE);
+ max_nr_buckets = max(max_nr_buckets, min_nr_alloc_buckets);
+ init_size = min(init_size, max_nr_buckets);
+
+ ht = mm->alloc_cds_lfht(min_nr_alloc_buckets, max_nr_buckets);
assert(ht);
- ht->hash_fct = hash_fct;
- ht->compare_fct = compare_fct;
- ht->hash_seed = hash_seed;
- ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
- ht->cds_lfht_synchronize_rcu = cds_lfht_synchronize_rcu;
- ht->cds_lfht_rcu_read_lock = cds_lfht_rcu_read_lock;
- ht->cds_lfht_rcu_read_unlock = cds_lfht_rcu_read_unlock;
- ht->cds_lfht_rcu_thread_offline = cds_lfht_rcu_thread_offline;
- ht->cds_lfht_rcu_thread_online = cds_lfht_rcu_thread_online;
- ht->cds_lfht_rcu_register_thread = cds_lfht_rcu_register_thread;
- ht->cds_lfht_rcu_unregister_thread = cds_lfht_rcu_unregister_thread;
+ assert(ht->mm == mm);
+ assert(ht->bucket_at == mm->bucket_at);
+
+ ht->flags = flags;
+ ht->flavor = flavor;
ht->resize_attr = attr;
- ht->percpu_count = alloc_per_cpu_items_count();
+ alloc_split_items_count(ht);
/* this mutex should not nest in read-side C.S. */
pthread_mutex_init(&ht->resize_mutex, NULL);
- order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE)) + 1;
- ht->flags = flags;
- ht->cds_lfht_rcu_thread_offline();
- pthread_mutex_lock(&ht->resize_mutex);
- ht->t.resize_target = 1UL << (order - 1);
- init_table(ht, 0, order);
- pthread_mutex_unlock(&ht->resize_mutex);
- ht->cds_lfht_rcu_thread_online();
+ order = cds_lfht_get_count_order_ulong(init_size);
+ ht->resize_target = 1UL << order;
+ cds_lfht_create_bucket(ht, 1UL << order);
+ ht->size = 1UL << order;
return ht;
}
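
Callers are not expected to invoke _cds_lfht_new() directly. A minimal
creation sketch, assuming the public cds_lfht_new() wrapper from
urcu/rculfhash.h, which fills in the default memory-management plugin and the
RCU flavor of the linked flavor library; create_table() is a hypothetical
helper:

#include <urcu.h>		/* default RCU flavor */
#include <urcu/rculfhash.h>

static struct cds_lfht *create_table(void)
{
	/*
	 * 64 buckets initially and as the minimum allocation;
	 * max_nr_buckets == 0 lets the order allocator grow the table
	 * without bound. Automatic resize and split-counter accounting
	 * are enabled.
	 */
	return cds_lfht_new(64, 64, 0,
			CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING,
			NULL);
}
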
-void cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len,
+void cds_lfht_lookup(struct cds_lfht *ht, unsigned long hash,
+ cds_lfht_match_fct match, const void *key,
struct cds_lfht_iter *iter)
{
- struct cds_lfht_node *node, *next, *dummy_node;
- struct _cds_lfht_node *lookup;
- unsigned long hash, reverse_hash, index, order, size;
+ struct cds_lfht_node *node, *next, *bucket;
+ unsigned long reverse_hash, size;
- hash = ht->hash_fct(key, key_len, ht->hash_seed);
reverse_hash = bit_reverse_ulong(hash);
- size = rcu_dereference(ht->t.size);
- index = hash & (size - 1);
- order = get_count_order_ulong(index + 1);
- lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1))) - 1)];
- dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
- hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));
- dummy_node = (struct cds_lfht_node *) lookup;
- /* We can always skip the dummy node initially */
- node = rcu_dereference(dummy_node->p.next);
+ size = rcu_dereference(ht->size);
+ bucket = lookup_bucket(ht, size, hash);
+ /* We can always skip the bucket node initially */
+ node = rcu_dereference(bucket->next);
node = clear_flag(node);
for (;;) {
- if (unlikely(is_end(node))) {
+ if (caa_unlikely(is_end(node))) {
node = next = NULL;
break;
}
- if (unlikely(node->p.reverse_hash > reverse_hash)) {
+ if (caa_unlikely(node->reverse_hash > reverse_hash)) {
node = next = NULL;
break;
}
- next = rcu_dereference(node->p.next);
- if (likely(!is_removed(next))
- && !is_dummy(next)
- && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
+ next = rcu_dereference(node->next);
+ assert(node == clear_flag(node));
+ if (caa_likely(!is_removed(next))
+ && !is_bucket(next)
+ && node->reverse_hash == reverse_hash
+ && caa_likely(match(node, key))) {
break;
}
node = clear_flag(next);
}
- assert(!node || !is_dummy(rcu_dereference(node->p.next)));
+ assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
iter->node = node;
iter->next = next;
}
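
Since the hash function, seed and compare function have moved out of the
table, the caller now supplies the hash and a match callback on each call. A
lookup sketch, assuming a caller-defined node type; hash_int() is an
arbitrary stand-in for the application's hash function:

#include <urcu.h>
#include <urcu/rculfhash.h>
#include <urcu/compiler.h>	/* caa_container_of */

struct mynode {
	int value;			/* key */
	struct cds_lfht_node node;	/* hash table chaining */
	struct rcu_head rcu_head;	/* deferred reclaim */
};

static unsigned long hash_int(int v)
{
	return (unsigned long) v * 2654435761UL;	/* Knuth multiplicative */
}

/* Key-equality callback: nonzero on match. */
static int match_int(struct cds_lfht_node *ht_node, const void *key)
{
	struct mynode *n = caa_container_of(ht_node, struct mynode, node);

	return n->value == *(const int *) key;
}

static struct mynode *lookup_int(struct cds_lfht *ht, int key)
{
	struct cds_lfht_iter iter;
	struct cds_lfht_node *found;

	/* Must run within an RCU read-side critical section. */
	cds_lfht_lookup(ht, hash_int(key), match_int, &key, &iter);
	found = cds_lfht_iter_get_node(&iter);
	return found ? caa_container_of(found, struct mynode, node) : NULL;
}
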
-void cds_lfht_next_duplicate(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+void cds_lfht_next_duplicate(struct cds_lfht *ht, cds_lfht_match_fct match,
+ const void *key, struct cds_lfht_iter *iter)
{
struct cds_lfht_node *node, *next;
unsigned long reverse_hash;
- void *key;
- size_t key_len;
node = iter->node;
- reverse_hash = node->p.reverse_hash;
- key = node->key;
- key_len = node->key_len;
+ reverse_hash = node->reverse_hash;
next = iter->next;
node = clear_flag(next);
for (;;) {
- if (unlikely(is_end(node))) {
+ if (caa_unlikely(is_end(node))) {
node = next = NULL;
break;
}
- if (unlikely(node->p.reverse_hash > reverse_hash)) {
+ if (caa_unlikely(node->reverse_hash > reverse_hash)) {
node = next = NULL;
break;
}
- next = rcu_dereference(node->p.next);
- if (likely(!is_removed(next))
- && !is_dummy(next)
- && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
+ next = rcu_dereference(node->next);
+ if (caa_likely(!is_removed(next))
+ && !is_bucket(next)
+ && caa_likely(match(node, key))) {
break;
}
node = clear_flag(next);
}
- assert(!node || !is_dummy(rcu_dereference(node->p.next)));
+ assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
iter->node = node;
iter->next = next;
}
node = clear_flag(iter->next);
for (;;) {
- if (unlikely(is_end(node))) {
+ if (caa_unlikely(is_end(node))) {
node = next = NULL;
break;
}
- next = rcu_dereference(node->p.next);
- if (likely(!is_removed(next))
- && !is_dummy(next)) {
+ next = rcu_dereference(node->next);
+ if (caa_likely(!is_removed(next))
+ && !is_bucket(next)) {
break;
}
node = clear_flag(next);
}
- assert(!node || !is_dummy(rcu_dereference(node->p.next)));
+ assert(!node || !is_bucket(CMM_LOAD_SHARED(node->next)));
iter->node = node;
iter->next = next;
}
void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
- struct _cds_lfht_node *lookup;
-
/*
- * Get next after first dummy node. The first dummy node is the
+ * Get next after first bucket node. The first bucket node is the
* first node of the linked list.
*/
- lookup = &ht->t.tbl[0]->nodes[0];
- iter->next = lookup->next;
+ iter->next = bucket_at(ht, 0)->next;
cds_lfht_next(ht, iter);
}
-void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
+void cds_lfht_add(struct cds_lfht *ht, unsigned long hash,
+ struct cds_lfht_node *node)
{
- unsigned long hash, size;
-
- hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
- node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
+ unsigned long size;
- size = rcu_dereference(ht->t.size);
- (void) _cds_lfht_add(ht, size, node, ADD_DEFAULT, 0);
- ht_count_add(ht, size);
+ node->reverse_hash = bit_reverse_ulong(hash);
+ size = rcu_dereference(ht->size);
+ _cds_lfht_add(ht, hash, NULL, NULL, size, node, NULL, 0);
+ ht_count_add(ht, size, hash);
}
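
A plain insertion sketch (duplicate keys permitted), reusing struct mynode
and hash_int() from the lookup sketch above; note the caller now computes the
hash and must hold the RCU read lock:

#include <stdlib.h>

static void insert_int(struct cds_lfht *ht, int value)
{
	struct mynode *n = malloc(sizeof(*n));

	if (!n)
		return;
	n->value = value;
	cds_lfht_node_init(&n->node);

	rcu_read_lock();
	cds_lfht_add(ht, hash_int(n->value), &n->node);
	rcu_read_unlock();
}
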
struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
+ unsigned long hash,
+ cds_lfht_match_fct match,
+ const void *key,
struct cds_lfht_node *node)
{
- unsigned long hash, size;
- struct cds_lfht_node *ret;
-
- hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
- node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
+ unsigned long size;
+ struct cds_lfht_iter iter;
- size = rcu_dereference(ht->t.size);
- ret = _cds_lfht_add(ht, size, node, ADD_UNIQUE, 0);
- if (ret == node)
- ht_count_add(ht, size);
- return ret;
+ node->reverse_hash = bit_reverse_ulong(hash);
+ size = rcu_dereference(ht->size);
+ _cds_lfht_add(ht, hash, match, key, size, node, &iter, 0);
+ if (iter.node == node)
+ ht_count_add(ht, size, hash);
+ return iter.node;
}
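
cds_lfht_add_unique() returns the node it inserted on success, or the
pre-existing node carrying the same key on failure, so the caller can detect
duplicates. A sketch reusing the helpers above:

#include <errno.h>

static int insert_int_unique(struct cds_lfht *ht, struct mynode *n)
{
	struct cds_lfht_node *ret;

	cds_lfht_node_init(&n->node);
	rcu_read_lock();
	ret = cds_lfht_add_unique(ht, hash_int(n->value), match_int,
			&n->value, &n->node);
	rcu_read_unlock();
	if (ret != &n->node) {
		/* Key already present: n was never published, plain free. */
		free(n);
		return -EEXIST;
	}
	return 0;
}
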
struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
+ unsigned long hash,
+ cds_lfht_match_fct match,
+ const void *key,
struct cds_lfht_node *node)
{
- unsigned long hash, size;
- struct cds_lfht_node *ret;
+ unsigned long size;
+ struct cds_lfht_iter iter;
- hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
- node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
+ node->reverse_hash = bit_reverse_ulong(hash);
+ size = rcu_dereference(ht->size);
+ for (;;) {
+ _cds_lfht_add(ht, hash, match, key, size, node, &iter, 0);
+ if (iter.node == node) {
+ ht_count_add(ht, size, hash);
+ return NULL;
+ }
- size = rcu_dereference(ht->t.size);
- ret = _cds_lfht_add(ht, size, node, ADD_REPLACE, 0);
- if (ret == NULL)
- ht_count_add(ht, size);
- return ret;
+ if (!_cds_lfht_replace(ht, size, iter.node, iter.next, node))
+ return iter.node;
+ }
}
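
cds_lfht_add_replace() returns NULL when the node was simply added, or the
node it atomically replaced; the replaced node may only be reclaimed after a
grace period. A sketch reusing the helpers above; free_node_rcu() is a
hypothetical reclaim callback:

static void free_node_rcu(struct rcu_head *head)
{
	free(caa_container_of(head, struct mynode, rcu_head));
}

static void upsert_int(struct cds_lfht *ht, struct mynode *n)
{
	struct cds_lfht_node *old;

	cds_lfht_node_init(&n->node);
	rcu_read_lock();
	old = cds_lfht_add_replace(ht, hash_int(n->value), match_int,
			&n->value, &n->node);
	rcu_read_unlock();
	if (old) {
		/* Replaced node: reclaim only after a grace period. */
		call_rcu(&caa_container_of(old, struct mynode, node)->rcu_head,
			free_node_rcu);
	}
}
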
-int cds_lfht_replace(struct cds_lfht *ht, struct cds_lfht_iter *old_iter,
+int cds_lfht_replace(struct cds_lfht *ht,
+ struct cds_lfht_iter *old_iter,
+ unsigned long hash,
+ cds_lfht_match_fct match,
+ const void *key,
struct cds_lfht_node *new_node)
{
unsigned long size;
- size = rcu_dereference(ht->t.size);
+ new_node->reverse_hash = bit_reverse_ulong(hash);
+ if (!old_iter->node)
+ return -ENOENT;
+ if (caa_unlikely(old_iter->node->reverse_hash != new_node->reverse_hash))
+ return -EINVAL;
+ if (caa_unlikely(!match(old_iter->node, key)))
+ return -EINVAL;
+ size = rcu_dereference(ht->size);
return _cds_lfht_replace(ht, size, old_iter->node, old_iter->next,
new_node);
}
-int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_node *node)
{
- unsigned long size;
+ unsigned long size, hash;
int ret;
- size = rcu_dereference(ht->t.size);
- ret = _cds_lfht_del(ht, size, iter->node, 0);
- if (!ret)
- ht_count_del(ht, size);
+ size = rcu_dereference(ht->size);
+ ret = _cds_lfht_del(ht, size, node);
+ if (!ret) {
+ hash = bit_reverse_ulong(node->reverse_hash);
+ ht_count_del(ht, size, hash);
+ }
return ret;
}
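
cds_lfht_del() now takes the node itself rather than an iterator, returning 0
on success and a negative value if the node was already logically removed. A
removal sketch reusing the helpers above; only the successful deleter owns
reclaim of the node:

static int remove_int(struct cds_lfht *ht, int key)
{
	struct cds_lfht_iter iter;
	struct cds_lfht_node *found;
	int ret = -ENOENT;

	rcu_read_lock();
	cds_lfht_lookup(ht, hash_int(key), match_int, &key, &iter);
	found = cds_lfht_iter_get_node(&iter);
	if (found) {
		ret = cds_lfht_del(ht, found);
		if (!ret) {
			/* Successful del: defer reclaim to a grace period. */
			call_rcu(&caa_container_of(found, struct mynode,
					node)->rcu_head,
				free_node_rcu);
		}
	}
	rcu_read_unlock();
	return ret;
}
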
+int cds_lfht_is_node_deleted(struct cds_lfht_node *node)
+{
+ return is_removed(CMM_LOAD_SHARED(node->next));
+}
+
static
-int cds_lfht_delete_dummy(struct cds_lfht *ht)
+int cds_lfht_delete_bucket(struct cds_lfht *ht)
{
struct cds_lfht_node *node;
- struct _cds_lfht_node *lookup;
unsigned long order, i, size;
/* Check that the table is empty */
- lookup = &ht->t.tbl[0]->nodes[0];
- node = (struct cds_lfht_node *) lookup;
+ node = bucket_at(ht, 0);
do {
- node = clear_flag(node)->p.next;
- if (!is_dummy(node))
+ node = clear_flag(node)->next;
+ if (!is_bucket(node))
return -EPERM;
assert(!is_removed(node));
+ assert(!is_removal_owner(node));
} while (!is_end(node));
/*
* size accessed without rcu_dereference because hash table is
* being destroyed.
*/
- size = ht->t.size;
- /* Internal sanity check: all nodes left should be dummy */
- for (order = 0; order < get_count_order_ulong(size) + 1; order++) {
- unsigned long len;
-
- len = !order ? 1 : 1UL << (order - 1);
- for (i = 0; i < len; i++) {
- dbg_printf("delete order %lu i %lu hash %lu\n",
- order, i,
- bit_reverse_ulong(ht->t.tbl[order]->nodes[i].reverse_hash));
- assert(is_dummy(ht->t.tbl[order]->nodes[i].next));
- }
- poison_free(ht->t.tbl[order]);
+ size = ht->size;
+ /* Internal sanity check: all nodes left should be buckets */
+ for (i = 0; i < size; i++) {
+ node = bucket_at(ht, i);
+ dbg_printf("delete bucket: index %lu expected hash %lu hash %lu\n",
+ i, i, bit_reverse_ulong(node->reverse_hash));
+ assert(is_bucket(node->next));
}
+
+ for (order = cds_lfht_get_count_order_ulong(size); (long)order >= 0; order--)
+ cds_lfht_free_bucket_table(ht, order);
+
return 0;
}
/* Wait for in-flight resize operations to complete */
_CMM_STORE_SHARED(ht->in_progress_destroy, 1);
cmm_smp_mb(); /* Store destroy before load resize */
+ ht->flavor->thread_offline();
while (uatomic_read(&ht->in_progress_resize))
poll(NULL, 0, 100); /* wait for 100ms */
- ret = cds_lfht_delete_dummy(ht);
+ ht->flavor->thread_online();
+ ret = cds_lfht_delete_bucket(ht);
if (ret)
return ret;
- free_per_cpu_items_count(ht->percpu_count);
+ free_split_items_count(ht);
if (attr)
*attr = ht->resize_attr;
poison_free(ht);
void cds_lfht_count_nodes(struct cds_lfht *ht,
long *approx_before,
unsigned long *count,
- unsigned long *removed,
long *approx_after)
{
struct cds_lfht_node *node, *next;
- struct _cds_lfht_node *lookup;
- unsigned long nr_dummy = 0;
+ unsigned long nr_bucket = 0, nr_removed = 0;
*approx_before = 0;
- if (nr_cpus_mask >= 0) {
+ if (ht->split_count) {
int i;
- for (i = 0; i < nr_cpus_mask + 1; i++) {
- *approx_before += uatomic_read(&ht->percpu_count[i].add);
- *approx_before -= uatomic_read(&ht->percpu_count[i].del);
+ for (i = 0; i < split_count_mask + 1; i++) {
+ *approx_before += uatomic_read(&ht->split_count[i].add);
+ *approx_before -= uatomic_read(&ht->split_count[i].del);
}
}
*count = 0;
- *removed = 0;
- /* Count non-dummy nodes in the table */
- lookup = &ht->t.tbl[0]->nodes[0];
- node = (struct cds_lfht_node *) lookup;
+ /* Count non-bucket nodes in the table */
+ node = bucket_at(ht, 0);
do {
- next = rcu_dereference(node->p.next);
+ next = rcu_dereference(node->next);
if (is_removed(next)) {
- if (!is_dummy(next))
- (*removed)++;
+ if (!is_bucket(next))
+ (nr_removed)++;
else
- (nr_dummy)++;
- } else if (!is_dummy(next))
+ (nr_bucket)++;
+ } else if (!is_bucket(next))
(*count)++;
else
- (nr_dummy)++;
+ (nr_bucket)++;
node = clear_flag(next);
} while (!is_end(node));
- dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
+ dbg_printf("number of logically removed nodes: %lu\n", nr_removed);
+ dbg_printf("number of bucket nodes: %lu\n", nr_bucket);
*approx_after = 0;
- if (nr_cpus_mask >= 0) {
+ if (ht->split_count) {
int i;
- for (i = 0; i < nr_cpus_mask + 1; i++) {
- *approx_after += uatomic_read(&ht->percpu_count[i].add);
- *approx_after -= uatomic_read(&ht->percpu_count[i].del);
+ for (i = 0; i < split_count_mask + 1; i++) {
+ *approx_after += uatomic_read(&ht->split_count[i].add);
+ *approx_after -= uatomic_read(&ht->split_count[i].del);
}
}
}
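
A sketch of reading the statistics, assuming the setup from the earlier
sketches; the two split-counter approximations are sampled around the list
traversal, so a large spread between them signals heavy concurrent mutation,
and the traversal itself must run under the RCU read lock:

#include <stdio.h>

static void report_size(struct cds_lfht *ht)
{
	long approx_before, approx_after;
	unsigned long count;

	rcu_read_lock();
	cds_lfht_count_nodes(ht, &approx_before, &count, &approx_after);
	rcu_read_unlock();
	printf("split-counter approx before/after: %ld/%ld, "
		"traversal count: %lu\n",
		approx_before, approx_after, count);
}
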
{
unsigned long old_order, new_order;
- old_order = get_count_order_ulong(old_size) + 1;
- new_order = get_count_order_ulong(new_size) + 1;
- printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
- old_size, old_order, new_size, new_order);
+ old_order = cds_lfht_get_count_order_ulong(old_size);
+ new_order = cds_lfht_get_count_order_ulong(new_size);
+ dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
+ old_size, old_order, new_size, new_order);
assert(new_size > old_size);
- init_table(ht, old_order, new_order - old_order);
+ init_table(ht, old_order + 1, new_order);
}
/* called with resize mutex held */
unsigned long old_order, new_order;
new_size = max(new_size, MIN_TABLE_SIZE);
- old_order = get_count_order_ulong(old_size) + 1;
- new_order = get_count_order_ulong(new_size) + 1;
- printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
- old_size, old_order, new_size, new_order);
+ old_order = cds_lfht_get_count_order_ulong(old_size);
+ new_order = cds_lfht_get_count_order_ulong(new_size);
+ dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
+ old_size, old_order, new_size, new_order);
assert(new_size < old_size);
- /* Remove and unlink all dummy nodes to remove. */
- fini_table(ht, new_order, old_order - new_order);
+ /* Remove and unlink all bucket nodes to remove. */
+ fini_table(ht, new_order + 1, old_order);
}
assert(uatomic_read(&ht->in_progress_resize));
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
break;
- ht->t.resize_initiated = 1;
- old_size = ht->t.size;
- new_size = CMM_LOAD_SHARED(ht->t.resize_target);
+ ht->resize_initiated = 1;
+ old_size = ht->size;
+ new_size = CMM_LOAD_SHARED(ht->resize_target);
if (old_size < new_size)
_do_cds_lfht_grow(ht, old_size, new_size);
else if (old_size > new_size)
_do_cds_lfht_shrink(ht, old_size, new_size);
- ht->t.resize_initiated = 0;
+ ht->resize_initiated = 0;
/* write resize_initiated before read resize_target */
cmm_smp_mb();
- } while (ht->t.size != CMM_LOAD_SHARED(ht->t.resize_target));
+ } while (ht->size != CMM_LOAD_SHARED(ht->resize_target));
}
static
-unsigned long resize_target_update(struct cds_lfht *ht, unsigned long size,
- int growth_order)
+unsigned long resize_target_grow(struct cds_lfht *ht, unsigned long new_size)
{
- return _uatomic_max(&ht->t.resize_target,
- size << growth_order);
+ return _uatomic_xchg_monotonic_increase(&ht->resize_target, new_size);
}
static
unsigned long count)
{
count = max(count, MIN_TABLE_SIZE);
- uatomic_set(&ht->t.resize_target, count);
+ count = min(count, ht->max_nr_buckets);
+ uatomic_set(&ht->resize_target, count);
}
void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
{
resize_target_update_count(ht, new_size);
- CMM_STORE_SHARED(ht->t.resize_initiated, 1);
- ht->cds_lfht_rcu_thread_offline();
+ CMM_STORE_SHARED(ht->resize_initiated, 1);
+ ht->flavor->thread_offline();
pthread_mutex_lock(&ht->resize_mutex);
_do_cds_lfht_resize(ht);
pthread_mutex_unlock(&ht->resize_mutex);
- ht->cds_lfht_rcu_thread_online();
+ ht->flavor->thread_online();
}
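
Unlike the lazy paths below, cds_lfht_resize() performs the resize
synchronously in the caller's context, taking the resize mutex with the
thread put offline. A usage sketch, assuming a registered thread outside any
read-side critical section; force_resize() is a hypothetical helper:

static void force_resize(struct cds_lfht *ht)
{
	/* Synchronously resize the table toward 4096 buckets. */
	cds_lfht_resize(ht, 4096);
}
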
static
caa_container_of(head, struct rcu_resize_work, head);
struct cds_lfht *ht = work->ht;
- ht->cds_lfht_rcu_thread_offline();
+ ht->flavor->thread_offline();
pthread_mutex_lock(&ht->resize_mutex);
_do_cds_lfht_resize(ht);
pthread_mutex_unlock(&ht->resize_mutex);
- ht->cds_lfht_rcu_thread_online();
+ ht->flavor->thread_online();
poison_free(work);
cmm_smp_mb(); /* finish resize before decrement */
uatomic_dec(&ht->in_progress_resize);
}
static
-void cds_lfht_resize_lazy(struct cds_lfht *ht, unsigned long size, int growth)
+void __cds_lfht_resize_lazy_launch(struct cds_lfht *ht)
{
struct rcu_resize_work *work;
- unsigned long target_size;
- target_size = resize_target_update(ht, size, growth);
/* Store resize_target before read resize_initiated */
cmm_smp_mb();
- if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) {
+ if (!CMM_LOAD_SHARED(ht->resize_initiated)) {
uatomic_inc(&ht->in_progress_resize);
cmm_smp_mb(); /* increment resize count before load destroy */
if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
return;
}
work = malloc(sizeof(*work));
+ if (work == NULL) {
+ dbg_printf("error allocating resize work, bailing out\n");
+ uatomic_dec(&ht->in_progress_resize);
+ return;
+ }
work->ht = ht;
- ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
- CMM_STORE_SHARED(ht->t.resize_initiated, 1);
+ ht->flavor->update_call_rcu(&work->head, do_resize_cb);
+ CMM_STORE_SHARED(ht->resize_initiated, 1);
}
}
-#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+static
+void cds_lfht_resize_lazy_grow(struct cds_lfht *ht, unsigned long size, int growth)
+{
+ unsigned long target_size = size << growth;
+ target_size = min(target_size, ht->max_nr_buckets);
+ if (resize_target_grow(ht, target_size) >= target_size)
+ return;
+
+ __cds_lfht_resize_lazy_launch(ht);
+}
+
+/*
+ * We favor grow operations over shrink. A shrink operation never occurs
+ * if a grow operation is queued for lazy execution. A grow operation
+ * cancels any lazily queued shrink operation.
+ */
static
void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
unsigned long count)
{
- struct rcu_resize_work *work;
-
if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
return;
- resize_target_update_count(ht, count);
- /* Store resize_target before read resize_initiated */
- cmm_smp_mb();
- if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
- uatomic_inc(&ht->in_progress_resize);
- cmm_smp_mb(); /* increment resize count before load destroy */
- if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
- uatomic_dec(&ht->in_progress_resize);
+ count = max(count, MIN_TABLE_SIZE);
+ count = min(count, ht->max_nr_buckets);
+ if (count == size)
+ return; /* Already the right size, no resize needed */
+ if (count > size) { /* lazy grow */
+ if (resize_target_grow(ht, count) >= count)
return;
+ } else { /* lazy shrink */
+ for (;;) {
+ unsigned long s;
+
+ s = uatomic_cmpxchg(&ht->resize_target, size, count);
+ if (s == size)
+				break;	/* shrink target installed; launch resize */
+ if (s > size)
+				return;	/* a grow is in progress or was just requested */
+ if (s <= count)
+				return;	/* another thread already requested this shrink */
+ size = s;
}
- work = malloc(sizeof(*work));
- work->ht = ht;
- ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
- CMM_STORE_SHARED(ht->t.resize_initiated, 1);
}
+ __cds_lfht_resize_lazy_launch(ht);
}
-
-#endif