test rculfhash: add/remove/random support (with SIGUSR1)
[urcu.git] / rculfhash.c
index 3120d4585995c030a999903b0667d803ebf5b877..20d389d38517d786477e4acff9f8a87fe5b6b1ae 100644 (file)
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
+/*
+ * Based on the following articles:
+ * - Ori Shalev and Nir Shavit. Split-ordered lists: Lock-free
+ *   extensible hash tables. J. ACM 53, 3 (May 2006), 379-405.
+ * - Michael, M. M. High performance dynamic lock-free hash tables
+ *   and list-based sets. In Proceedings of the fourteenth annual ACM
+ *   symposium on Parallel algorithms and architectures, ACM Press,
+ *   (2002), 73-82.
+ *
+ * Some characteristics of this Lock-Free Expandable RCU Hash Table
+ * implementation:
+ *
+ * - RCU read-side critical sections allow readers to perform hash
+ *   table lookups and use the returned objects safely, by delaying
+ *   memory reclamation for a grace period.
+ * - Add and remove operations are lock-free, and do not need to
+ *   allocate memory. They need to be executed within RCU read-side
+ *   critical section to ensure the objects they read are valid and to
+ *   deal with the cmpxchg ABA problem.
+ * - add and add_unique operations are supported. add_unique checks if
+ *   the node key already exists in the hash table. It ensures that no
+ *   duplicate key exists.
+ * - The resize operation executes concurrently with add/remove/lookup.
+ * - Hash table nodes are contained within a split-ordered list. This
+ *   list is ordered by incrementing reversed-bits-hash value.
+ * - An index of dummy nodes is kept. These dummy nodes are the hash
+ *   table "buckets", and they are also chained together in the
+ *   split-ordered list, which allows recursive expansion.
+ * - The resize operation only allows expanding the hash table.
+ *   It is triggered either through an API call or automatically by
+ *   detecting long chains in the add operation.
+ * - A resize operation initiated by long chain detection is executed
+ *   by a call_rcu worker thread, which keeps the add and remove
+ *   operations lock-free.
+ * - Resize operations are protected by a mutex.
+ * - The removal operation is split in two parts: first, a "removed"
+ *   flag is set in the next pointer within the node to remove. Then,
+ *   a "garbage collection" is performed in the bucket containing the
+ *   removed node (from the start of the bucket up to the removed node).
+ *   All encountered nodes with "removed" flag set in their next
+ *   pointers are removed from the linked-list. If the cmpxchg used for
+ *   removal fails (due to concurrent garbage-collection or concurrent
+ *   add), we retry from the beginning of the bucket. This ensures that
+ *   the node with "removed" flag set is removed from the hash table
+ *   (not visible to lookups anymore) before the RCU read-side critical
+ *   section held across removal ends. Furthermore, this ensures that
+ *   the node with "removed" flag set is removed from the linked-list
+ *   before its memory is reclaimed. Only the thread that successfully
+ *   set the "removed" flag (with a cmpxchg) in a node's next pointer
+ *   is considered to have succeeded in removing the node (and thus
+ *   owns it for reclamation). Because we garbage-collect starting from
+ *   an invariant node (the start-of-bucket dummy node) up to the
+ *   "removed" node (or find a reverse-hash that is higher), we are sure
+ *   that a successful traversal of the chain leads to a chain that is
+ *   present in the linked-list (the start node is never removed) and
+ *   that it no longer contains the "removed" node, even if concurrent
+ *   delete/add operations are changing the structure of the list at
+ *   the same time.
+ * - The add operation performs garbage collection of buckets if it
+ *   encounters nodes with the removed flag set in the bucket where it
+ *   wants to add its new node. This ensures lock-freedom of the add
+ *   operation by helping the remover unlink nodes from the list rather
+ *   than waiting for it to do so.
+ * - An RCU "order table" indexed by log2(hash index) is copied and
+ *   expanded by the resize operation. This order table allows finding
+ *   the "dummy node" tables.
+ * - There is one dummy node table per hash index order. The size of
+ *   each dummy node table is half the number of hashes contained in
+ *   this order.
+ * - call_rcu is used to garbage-collect the old order table.
+ * - The per-order dummy node tables contain a compact version of the
+ *   hash table nodes. These tables are invariant after they are
+ *   populated into the hash table.
+ */
+
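For orientation, here is a minimal usage sketch of the public API as renamed by this commit (cds_lfht_new/add/add_unique/lookup/remove/destroy). It is an editor's illustration, not part of the commit: the mydata struct, the toy hash/compare callbacks, the key scheme, and the header name are assumptions inferred from how this file uses the API.

    #include <stddef.h>
    #include <urcu.h>               /* rcu_register_thread(), rcu_read_lock() */
    #include <urcu-call-rcu.h>      /* call_rcu() */
    #include <rculfhash.h>          /* assumed public header for this file */

    struct mydata {
            struct cds_lfht_node node;      /* node embedded in the user object */
            int payload;
    };

    /* Toy callbacks; signatures inferred from the call sites in this file. */
    static unsigned long
    my_hash(void *key, size_t key_len, unsigned long seed)
    {
            return (unsigned long) key ^ seed;
    }

    static unsigned long
    my_compare(void *key1, size_t key1_len, void *key2, size_t key2_len)
    {
            return key1 != key2;    /* 0 means "keys are equal" */
    }

    int main(void)
    {
            struct cds_lfht *ht;
            struct cds_lfht_node *found;
            struct mydata d = { .payload = 42 };

            rcu_register_thread();
            ht = cds_lfht_new(my_hash, my_compare, 0x42UL, 1, call_rcu);

            d.node.key = (void *) 0x1;      /* key/key_len are read by compare_fct */
            d.node.key_len = sizeof(void *);

            rcu_read_lock();        /* add/lookup/remove need an RCU read-side C.S. */
            cds_lfht_add(ht, &d.node);
            found = cds_lfht_lookup(ht, (void *) 0x1, sizeof(void *));
            if (found)
                    (void) cds_lfht_remove(ht, found);
            rcu_read_unlock();

            rcu_unregister_thread();
            /* destroy only once no readers nor writers can access the table */
            return cds_lfht_destroy(ht);
    }

Note that add, lookup, and remove must run inside an RCU read-side critical section (see the header comment above), and memory for removed nodes may only be reclaimed after a grace period.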
 #define _LGPL_SOURCE
 #include <stdlib.h>
 #include <errno.h>
 #include <stdint.h>
 #include <string.h>
 
+#include "config.h"
 #include <urcu.h>
 #include <urcu-call-rcu.h>
 #include <urcu/arch.h>
 #include <stdio.h>
 #include <pthread.h>
 
-#define DEBUG          /* Test */
-
 #ifdef DEBUG
-#define dbg_printf(args...)     printf(args)
+#define dbg_printf(fmt, args...)     printf("[debug rculfhash] " fmt, ## args)
 #else
-#define dbg_printf(args...)
+#define dbg_printf(fmt, args...)
 #endif
 
+/*
+ * Per-CPU split-counters lazily update the global counter every 1024
+ * additions/removals. They automatically keep track of when a resize
+ * is required. We use the bucket length as the expand indicator for
+ * small tables and for machines lacking per-cpu data support.
+ */
+#define COUNT_COMMIT_ORDER             10
 #define CHAIN_LEN_TARGET               1
-#define CHAIN_LEN_RESIZE_THRESHOLD     2
+#define CHAIN_LEN_RESIZE_THRESHOLD     3
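Editor's worked example (not part of the commit) of the cadence these constants produce:

    /*
     * With COUNT_COMMIT_ORDER = 10, each per-cpu counter folds 1024
     * local operations into the global count at once.  When the global
     * count crosses a power of two, e.g. 65536, ht_count_add() checks
     * (65536 >> CHAIN_LEN_RESIZE_THRESHOLD) = 8192 against t->size and,
     * if 8192 >= t->size, requests a lazy resize toward
     * 65536 >> (CHAIN_LEN_TARGET - 1) = 65536 buckets.
     */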
 
 #ifndef max
 #define max(a, b)      ((a) > (b) ? (a) : (b))
 #endif
 
+/*
+ * The removed flag needs to be updated atomically with the pointer.
+ * The dummy flag does not need to be updated atomically with the
+ * pointer, but it is added as a pointer low-bit flag to save space.
+ */
+#define REMOVED_FLAG           (1UL << 0)
+#define DUMMY_FLAG             (1UL << 1)
+#define FLAGS_MASK             ((1UL << 2) - 1)
+
+struct ht_items_count {
+       unsigned long add, remove;
+} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
+
 struct rcu_table {
        unsigned long size;     /* always a power of 2 */
        unsigned long resize_target;
        int resize_initiated;
        struct rcu_head head;
-       struct rcu_ht_node *tbl[0];
+       struct _cds_lfht_node *tbl[0];
 };
 
-struct rcu_ht {
+struct cds_lfht {
        struct rcu_table *t;            /* shared */
-       ht_hash_fct hash_fct;
-       ht_compare_fct compare_fct;
+       cds_lfht_hash_fct hash_fct;
+       cds_lfht_compare_fct compare_fct;
        unsigned long hash_seed;
        pthread_mutex_t resize_mutex;   /* resize mutex: add/del mutex */
-       unsigned int in_progress_resize;
-       void (*ht_call_rcu)(struct rcu_head *head,
+       unsigned int in_progress_resize, in_progress_destroy;
+       void (*cds_lfht_call_rcu)(struct rcu_head *head,
                      void (*func)(struct rcu_head *head));
+       unsigned long count;            /* global approximate item count */
+       struct ht_items_count *percpu_count;    /* per-cpu item count */
 };
 
 struct rcu_resize_work {
        struct rcu_head head;
-       struct rcu_ht *ht;
+       struct cds_lfht *ht;
 };
 
 /*
@@ -135,62 +230,345 @@ unsigned long bit_reverse_ulong(unsigned long v)
 }
 
 /*
- * Algorithm to find the log2 of a 32-bit unsigned integer.
- * source: http://graphics.stanford.edu/~seander/bithacks.html#IntegerLogLookup
- * Originally from Public Domain.
+ * fls: returns the position of the most significant bit.
+ * Returns 0 if no bit is set, else returns the position of the most
+ * significant bit (1 to 32 for fls_u32, 1 to 64 for fls_u64).
  */
-static const char LogTable256[256] = 
+#if defined(__i386) || defined(__x86_64)
+static inline
+unsigned int fls_u32(uint32_t x)
 {
-#define LT(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n
-       -1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
-       LT(4), LT(5), LT(5), LT(6), LT(6), LT(6), LT(6),
-       LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7)
-};
+       int r;
+
+       asm("bsrl %1,%0\n\t"
+           "jnz 1f\n\t"
+           "movl $-1,%0\n\t"
+           "1:\n\t"
+           : "=r" (r) : "rm" (x));
+       return r + 1;
+}
+#define HAS_FLS_U32
+#endif
+
+#if defined(__x86_64)
+static inline
+unsigned int fls_u64(uint64_t x)
+{
+       long r;
+
+       asm("bsrq %1,%0\n\t"
+           "jnz 1f\n\t"
+           "movq $-1,%0\n\t"
+           "1:\n\t"
+           : "=r" (r) : "rm" (x));
+       return r + 1;
+}
+#define HAS_FLS_U64
+#endif
+
+#ifndef HAS_FLS_U64
+static __attribute__((unused))
+unsigned int fls_u64(uint64_t x)
+{
+       unsigned int r = 64;
+
+       if (!x)
+               return 0;
+
+       if (!(x & 0xFFFFFFFF00000000ULL)) {
+               x <<= 32;
+               r -= 32;
+       }
+       if (!(x & 0xFFFF000000000000ULL)) {
+               x <<= 16;
+               r -= 16;
+       }
+       if (!(x & 0xFF00000000000000ULL)) {
+               x <<= 8;
+               r -= 8;
+       }
+       if (!(x & 0xF000000000000000ULL)) {
+               x <<= 4;
+               r -= 4;
+       }
+       if (!(x & 0xC000000000000000ULL)) {
+               x <<= 2;
+               r -= 2;
+       }
+       if (!(x & 0x8000000000000000ULL)) {
+               x <<= 1;
+               r -= 1;
+       }
+       return r;
+}
+#endif
+
+#ifndef HAS_FLS_U32
+static __attribute__((unused))
+unsigned int fls_u32(uint32_t x)
+{
+       unsigned int r = 32;
+
+       if (!x)
+               return 0;
+       if (!(x & 0xFFFF0000U)) {
+               x <<= 16;
+               r -= 16;
+       }
+       if (!(x & 0xFF000000U)) {
+               x <<= 8;
+               r -= 8;
+       }
+       if (!(x & 0xF0000000U)) {
+               x <<= 4;
+               r -= 4;
+       }
+       if (!(x & 0xC0000000U)) {
+               x <<= 2;
+               r -= 2;
+       }
+       if (!(x & 0x80000000U)) {
+               x <<= 1;
+               r -= 1;
+       }
+       return r;
+}
+#endif
+
+unsigned int fls_ulong(unsigned long x)
+{
+#if (CAA_BITS_PER_LONG == 32)
+       return fls_u32(x);
+#else
+       return fls_u64(x);
+#endif
+}
 
-uint32_t log2_u32(uint32_t v)
+int get_count_order_u32(uint32_t x)
 {
-       uint32_t t, tt;
+       int order;
+
+       order = fls_u32(x) - 1;
+       if (x & (x - 1))
+               order++;
+       return order;
+}
+
+int get_count_order_ulong(unsigned long x)
+{
+       int order;
+
+       order = fls_ulong(x) - 1;
+       if (x & (x - 1))
+               order++;
+       return order;
+}
+
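The order/offset decomposition below recurs at every lookup/add/remove site in this patch. An editor's sketch (an assumed helper, not in the commit) of the equivalent computation, with worked values:

    /* Editor's sketch: equivalent of the open-coded bucket lookup. */
    static inline struct _cds_lfht_node *
    bucket_at(struct rcu_table *t, unsigned long index)
    {
            unsigned long order = get_count_order_ulong(index + 1);

            /*
             * Order 0 holds bucket 0; order k >= 1 holds buckets
             * [2^(k-1), 2^k).  For order 0 the mask is irrelevant,
             * since index is then always 0.
             */
            return &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
    }

    /*
     * Worked values:
     *   index 0 -> order 0 -> tbl[0][0]
     *   index 1 -> order 1 -> tbl[1][0]
     *   index 2 -> order 2 -> tbl[2][0]
     *   index 3 -> order 2 -> tbl[2][1]
     *   index 6 -> order 3 -> tbl[3][2]
     */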
+static
+void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth);
+
+/*
+ * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
+ * available, then we support hash table item accounting.
+ * In the unfortunate event that the number of CPUs reported is
+ * inaccurate, we use modulo arithmetic on the number of CPUs we got.
+ */
+#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+
+static
+void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
+                               unsigned long count);
+
+static long nr_cpus_mask = -1;
+
+static
+struct ht_items_count *alloc_per_cpu_items_count(void)
+{
+       struct ht_items_count *count;
+
+       switch (nr_cpus_mask) {
+       case -2:
+               return NULL;
+       case -1:
+       {
+               long maxcpus;
+
+               maxcpus = sysconf(_SC_NPROCESSORS_CONF);
+               if (maxcpus <= 0) {
+                       nr_cpus_mask = -2;
+                       return NULL;
+               }
+               /*
+                * round up number of CPUs to next power of two, so we
+                * can use & for modulo.
+                */
+               maxcpus = 1UL << get_count_order_ulong(maxcpus);
+               nr_cpus_mask = maxcpus - 1;
+       }
+               /* Fall-through */
+       default:
+               return calloc(nr_cpus_mask + 1, sizeof(*count));
+       }
+}
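A quick editor's example of the rounding above:

    /*
     * Example: sysconf() reporting 6 CPUs rounds up to maxcpus = 8, so
     * nr_cpus_mask = 7 and "cpu & nr_cpus_mask" stands in for the more
     * expensive "cpu % maxcpus".
     */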
 
-       if ((tt = (v >> 16)))
-               return (t = (tt >> 8))
-                               ? 24 + LogTable256[t]
-                               : 16 + LogTable256[tt];
+static
+void free_per_cpu_items_count(struct ht_items_count *count)
+{
+       free(count);
+}
+
+static
+int ht_get_cpu(void)
+{
+       int cpu;
+
+       assert(nr_cpus_mask >= 0);
+       cpu = sched_getcpu();
+       if (unlikely(cpu < 0))
+               return cpu;
        else
-               return (t = (v >> 8))
-                               ? 8 + LogTable256[t]
-                               : LogTable256[v];
+               return cpu & nr_cpus_mask;
 }
 
 static
-void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth);
+void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
+{
+       unsigned long percpu_count;
+       int cpu;
+
+       if (unlikely(!ht->percpu_count))
+               return;
+       cpu = ht_get_cpu();
+       if (unlikely(cpu < 0))
+               return;
+       percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1);
+       if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
+               unsigned long count;
+
+               dbg_printf("add percpu %lu\n", percpu_count);
+               count = uatomic_add_return(&ht->count,
+                                          1UL << COUNT_COMMIT_ORDER);
+               /* If the global count just crossed a power of 2 */
+               if (!(count & (count - 1))) {
+                       if ((count >> CHAIN_LEN_RESIZE_THRESHOLD)
+                                       < t->size)
+                               return;
+                       dbg_printf("add set global %lu\n", count);
+                       cds_lfht_resize_lazy_count(ht, t,
+                               count >> (CHAIN_LEN_TARGET - 1));
+               }
+       }
+}
 
 static
-void check_resize(struct rcu_ht *ht, struct rcu_table *t,
+void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
+{
+       unsigned long percpu_count;
+       int cpu;
+
+       if (unlikely(!ht->percpu_count))
+               return;
+       cpu = ht_get_cpu();
+       if (unlikely(cpu < 0))
+               return;
+       percpu_count = uatomic_add_return(&ht->percpu_count[cpu].remove, -1);
+       if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
+               unsigned long count;
+
+               dbg_printf("remove percpu %lu\n", percpu_count);
+               count = uatomic_add_return(&ht->count,
+                                          -(1UL << COUNT_COMMIT_ORDER));
+               /* If the global count just crossed a power of 2 */
+               if (!(count & (count - 1))) {
+                       if ((count >> CHAIN_LEN_RESIZE_THRESHOLD)
+                                       >= t->size)
+                               return;
+                       dbg_printf("remove set global %lu\n", count);
+                       cds_lfht_resize_lazy_count(ht, t,
+                               count >> (CHAIN_LEN_TARGET - 1));
+               }
+       }
+}
+
+#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+
+static const long nr_cpus_mask = -1;
+
+static
+struct ht_items_count *alloc_per_cpu_items_count(void)
+{
+       return NULL;
+}
+
+static
+void free_per_cpu_items_count(struct ht_items_count *count)
+{
+}
+
+static
+void ht_count_add(struct cds_lfht *ht, struct rcu_table *t)
+{
+}
+
+static
+void ht_count_remove(struct cds_lfht *ht, struct rcu_table *t)
+{
+}
+
+#endif /* #else #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
+
+
+static
+void check_resize(struct cds_lfht *ht, struct rcu_table *t,
                  uint32_t chain_len)
 {
+       unsigned long count;
+
+       count = uatomic_read(&ht->count);
+       /*
+        * Use the bucket-local chain length to expand small tables and
+        * to handle environments lacking per-cpu data support.
+        */
+       if (count >= (1UL << COUNT_COMMIT_ORDER))
+               return;
+       if (chain_len > 100)
+               dbg_printf("WARNING: large chain length: %u.\n",
+                          chain_len);
        if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
-               ht_resize_lazy(ht, t,
-                       log2_u32(chain_len - CHAIN_LEN_TARGET - 1));
+               cds_lfht_resize_lazy(ht, t,
+                       get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
 }
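An editor's worked example of the growth order computed above:

    /*
     * With CHAIN_LEN_TARGET = 1, a chain length of 3 yields
     * get_count_order_u32(3 - (1 - 1)) = 2, so the lazy resize grows
     * the table by a factor of 2^2 = 4 (resize_target_update() shifts
     * t->size left by the growth order).
     */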
 
 static
-struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
+struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
 {
-       return (struct rcu_ht_node *) (((unsigned long) node) & ~0x1);
+       return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
 }
 
 static
-int is_removed(struct rcu_ht_node *node)
+int is_removed(struct cds_lfht_node *node)
 {
-       return ((unsigned long) node) & 0x1;
+       return ((unsigned long) node) & REMOVED_FLAG;
 }
 
 static
-struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
+struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
 {
-       return (struct rcu_ht_node *) (((unsigned long) node) | 0x1);
+       return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
 }
 
+static
+int is_dummy(struct cds_lfht_node *node)
+{
+       return ((unsigned long) node) & DUMMY_FLAG;
+}
+
+static
+struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node)
+{
+       return (struct cds_lfht_node *) (((unsigned long) node) | DUMMY_FLAG);
+}
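Editor's note on the flag encoding used by the helpers above, derived from their call sites below:

    /*
     * Both flags live in a node's own next pointer and describe that
     * node: REMOVED_FLAG marks it logically deleted, DUMMY_FLAG marks
     * it as a bucket dummy.  Nodes are at least 4-byte aligned, which
     * frees the two low bits:
     *
     *   next pointer low bits    meaning
     *   00                       live, regular node
     *   01                       logically removed node
     *   10                       live dummy (bucket) node
     */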
 static
 unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
 {
@@ -209,9 +587,9 @@ unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
  * Remove all logically deleted nodes from a bucket up to a certain node key.
  */
 static
-void _ht_gc_bucket(struct rcu_ht_node *dummy, struct rcu_ht_node *node)
+void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node)
 {
-       struct rcu_ht_node *iter_prev, *iter, *next;
+       struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
 
        for (;;) {
                iter_prev = dummy;
@@ -219,30 +597,37 @@ void _ht_gc_bucket(struct rcu_ht_node *dummy, struct rcu_ht_node *node)
                iter = rcu_dereference(iter_prev->p.next);
                assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
                for (;;) {
-                       if (unlikely(!iter))
+                       if (unlikely(!clear_flag(iter)))
                                return;
-                       if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
+                       if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
                                return;
                        next = rcu_dereference(clear_flag(iter)->p.next);
-                       if (is_removed(next))
+                       if (likely(is_removed(next)))
                                break;
-                       iter_prev = iter;
+                       iter_prev = clear_flag(iter);
                        iter = next;
                }
                assert(!is_removed(iter));
-               (void) uatomic_cmpxchg(&iter_prev->p.next, iter, clear_flag(next));
+               if (is_dummy(iter))
+                       new_next = flag_dummy(clear_flag(next));
+               else
+                       new_next = clear_flag(next);
+               (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
        }
 }
 
 static
-struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
-                           struct rcu_ht_node *node, int unique)
+struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht, struct rcu_table *t,
+                               struct cds_lfht_node *node, int unique, int dummy)
 {
-       struct rcu_ht_node *iter_prev, *dummy, *iter, *next;
-       unsigned long hash;
+       struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
+                       *dummy_node;
+       struct _cds_lfht_node *lookup;
+       unsigned long hash, index, order;
 
        if (!t->size) {
-               assert(node->p.dummy);
+               assert(dummy);
+               node->p.next = flag_dummy(NULL);
                return node;    /* Initial first add (head) */
        }
        hash = bit_reverse_ulong(node->p.reverse_hash);
@@ -253,26 +638,30 @@ struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
                 * iter_prev points to the non-removed node prior to the
                 * insert location.
                 */
-               iter_prev = rcu_dereference(t->tbl[hash & (t->size - 1)]);
+               index = hash & (t->size - 1);
+               order = get_count_order_ulong(index + 1);
+               lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
+               iter_prev = (struct cds_lfht_node *) lookup;
                /* We can always skip the dummy node initially */
                iter = rcu_dereference(iter_prev->p.next);
                assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
                for (;;) {
-                       if (unlikely(!iter))
+                       if (unlikely(!clear_flag(iter)))
                                goto insert;
-                       if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
+                       if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
                                goto insert;
                        next = rcu_dereference(clear_flag(iter)->p.next);
-                       if (is_removed(next))
+                       if (unlikely(is_removed(next)))
                                goto gc_node;
                        if (unique
-                           && !clear_flag(iter)->p.dummy
+                           && !is_dummy(next)
                            && !ht->compare_fct(node->key, node->key_len,
                                                clear_flag(iter)->key,
                                                clear_flag(iter)->key_len))
                                return clear_flag(iter);
                        /* Only account for identical reverse hash once */
-                       if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash)
+                       if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
+                           && !is_dummy(next))
                                check_resize(ht, t, ++chain_len);
                        iter_prev = clear_flag(iter);
                        iter = next;
@@ -281,38 +670,54 @@ struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
                assert(node != clear_flag(iter));
                assert(!is_removed(iter_prev));
                assert(iter_prev != node);
-               node->p.next = iter;
+               if (!dummy)
+                       node->p.next = clear_flag(iter);
+               else
+                       node->p.next = flag_dummy(clear_flag(iter));
+               if (is_dummy(iter))
+                       new_node = flag_dummy(node);
+               else
+                       new_node = node;
                if (uatomic_cmpxchg(&iter_prev->p.next, iter,
-                                   node) != iter)
+                                   new_node) != iter)
                        continue;       /* retry */
                else
                        goto gc_end;
        gc_node:
                assert(!is_removed(iter));
-               (void) uatomic_cmpxchg(&iter_prev->p.next, iter, clear_flag(next));
+               if (is_dummy(iter))
+                       new_next = flag_dummy(clear_flag(next));
+               else
+                       new_next = clear_flag(next);
+               (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
                /* retry */
        }
 gc_end:
        /* Garbage collect logically removed nodes in the bucket */
-       dummy = rcu_dereference(t->tbl[hash & (t->size - 1)]);
-       _ht_gc_bucket(dummy, node);
+       index = hash & (t->size - 1);
+       order = get_count_order_ulong(index + 1);
+       lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
+       dummy_node = (struct cds_lfht_node *) lookup;
+       _cds_lfht_gc_bucket(dummy_node, node);
        return node;
 }
 
 static
-int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
+int _cds_lfht_remove(struct cds_lfht *ht, struct rcu_table *t,
+               struct cds_lfht_node *node)
 {
-       struct rcu_ht_node *dummy, *next, *old;
+       struct cds_lfht_node *dummy, *next, *old;
+       struct _cds_lfht_node *lookup;
        int flagged = 0;
-       unsigned long hash;
+       unsigned long hash, index, order;
 
        /* logically delete the node */
        old = rcu_dereference(node->p.next);
        do {
                next = old;
-               if (is_removed(next))
+               if (unlikely(is_removed(next)))
                        goto end;
-               assert(!node->p.dummy);
+               assert(!is_dummy(next));
                old = uatomic_cmpxchg(&node->p.next, next,
                                      flag_removed(next));
        } while (old != next);
@@ -326,8 +731,11 @@ int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
         * if found.
         */
        hash = bit_reverse_ulong(node->p.reverse_hash);
-       dummy = rcu_dereference(t->tbl[hash & (t->size - 1)]);
-       _ht_gc_bucket(dummy, node);
+       index = hash & (t->size - 1);
+       order = get_count_order_ulong(index + 1);
+       lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
+       dummy = (struct cds_lfht_node *) lookup;
+       _cds_lfht_gc_bucket(dummy, node);
 end:
        /*
         * Only the flagging action indicated that we (and no other)
@@ -341,62 +749,91 @@ end:
 }
 
 static
-void init_table(struct rcu_ht *ht, struct rcu_table *t,
-               unsigned long first, unsigned long len)
+void init_table(struct cds_lfht *ht, struct rcu_table *t,
+               unsigned long first_order, unsigned long len_order)
 {
-       unsigned long i, end;
-
-       end = first + len;
-       for (i = first; i < end; i++) {
-               /* Update table size when power of two */
-               if (i != 0 && !(i & (i - 1)))
-                       t->size = i;
-               t->tbl[i] = calloc(1, sizeof(struct _rcu_ht_node));
-               t->tbl[i]->p.dummy = 1;
-               t->tbl[i]->p.reverse_hash = bit_reverse_ulong(i);
-               (void) _ht_add(ht, t, t->tbl[i], 0);
+       unsigned long i, end_order;
+
+       dbg_printf("init table: first_order %lu end_order %lu\n",
+                  first_order, first_order + len_order);
+       end_order = first_order + len_order;
+       t->size = !first_order ? 0 : (1UL << (first_order - 1));
+       for (i = first_order; i < end_order; i++) {
+               unsigned long j, len;
+
+               len = !i ? 1 : 1UL << (i - 1);
+               dbg_printf("init order %lu len: %lu\n", i, len);
+               t->tbl[i] = calloc(len, sizeof(struct _cds_lfht_node));
+               for (j = 0; j < len; j++) {
+                       dbg_printf("init entry: i %lu j %lu hash %lu\n",
+                                  i, j, !i ? 0 : (1UL << (i - 1)) + j);
+                       struct cds_lfht_node *new_node =
+                               (struct cds_lfht_node *) &t->tbl[i][j];
+                       new_node->p.reverse_hash =
+                               bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
+                       (void) _cds_lfht_add(ht, t, new_node, 0, 1);
+                       if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+                               break;
+               }
+               /* Update table size */
+               t->size = !i ? 1 : (1UL << i);
+               dbg_printf("init new size: %lu\n", t->size);
+               if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+                       break;
        }
-       t->resize_target = t->size = end;
+       t->resize_target = t->size;
        t->resize_initiated = 0;
 }
 
-struct rcu_ht *ht_new(ht_hash_fct hash_fct,
-                     ht_compare_fct compare_fct,
-                     unsigned long hash_seed,
-                     unsigned long init_size,
-                     void (*ht_call_rcu)(struct rcu_head *head,
-                               void (*func)(struct rcu_head *head)))
+struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
+                       cds_lfht_compare_fct compare_fct,
+                       unsigned long hash_seed,
+                       unsigned long init_size,
+                       void (*cds_lfht_call_rcu)(struct rcu_head *head,
+                                       void (*func)(struct rcu_head *head)))
 {
-       struct rcu_ht *ht;
+       struct cds_lfht *ht;
+       unsigned long order;
 
-       ht = calloc(1, sizeof(struct rcu_ht));
+       /* init_size must be power of two */
+       if (init_size && (init_size & (init_size - 1)))
+               return NULL;
+       ht = calloc(1, sizeof(struct cds_lfht));
        ht->hash_fct = hash_fct;
        ht->compare_fct = compare_fct;
        ht->hash_seed = hash_seed;
-       ht->ht_call_rcu = ht_call_rcu;
+       ht->cds_lfht_call_rcu = cds_lfht_call_rcu;
        ht->in_progress_resize = 0;
+       ht->percpu_count = alloc_per_cpu_items_count();
        /* this mutex should not nest in read-side C.S. */
        pthread_mutex_init(&ht->resize_mutex, NULL);
-       ht->t = calloc(1, sizeof(struct rcu_table)
-                      + (max(init_size, 1) * sizeof(struct rcu_ht_node *)));
+       order = get_count_order_ulong(max(init_size, 1)) + 1;
+       ht->t = calloc(1, sizeof(struct rcu_table)
+                      + (order * sizeof(struct _cds_lfht_node *)));
        ht->t->size = 0;
        pthread_mutex_lock(&ht->resize_mutex);
-       init_table(ht, ht->t, 0, max(init_size, 1));
+       init_table(ht, ht->t, 0, order);
        pthread_mutex_unlock(&ht->resize_mutex);
        return ht;
 }
 
-struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
+struct cds_lfht_node *cds_lfht_lookup(struct cds_lfht *ht, void *key, size_t key_len)
 {
        struct rcu_table *t;
-       struct rcu_ht_node *node;
-       unsigned long hash, reverse_hash;
+       struct cds_lfht_node *node, *next;
+       struct _cds_lfht_node *lookup;
+       unsigned long hash, reverse_hash, index, order;
 
        hash = ht->hash_fct(key, key_len, ht->hash_seed);
        reverse_hash = bit_reverse_ulong(hash);
 
        t = rcu_dereference(ht->t);
-       node = rcu_dereference(t->tbl[hash & (t->size - 1)]);
+       index = hash & (t->size - 1);
+       order = get_count_order_ulong(index + 1);
+       lookup = &t->tbl[order][index & ((1UL << (order - 1)) - 1)];
+       dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
+                  hash, index, order, index & ((1UL << (order - 1)) - 1));
+       node = (struct cds_lfht_node *) lookup;
        for (;;) {
                if (unlikely(!node))
                        break;
@@ -404,18 +841,52 @@ struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
                        node = NULL;
                        break;
                }
-               if (likely(!is_removed(rcu_dereference(node->p.next)))
-                   && !node->p.dummy
+               next = rcu_dereference(node->p.next);
+               if (likely(!is_removed(next))
+                   && !is_dummy(next)
                    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
                                break;
                }
-               node = clear_flag(rcu_dereference(node->p.next));
+               node = clear_flag(next);
        }
-       assert(!node || !node->p.dummy);
+       assert(!node || !is_dummy(rcu_dereference(node->p.next)));
        return node;
 }
 
-void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
+struct cds_lfht_node *cds_lfht_next(struct cds_lfht *ht,
+                               struct cds_lfht_node *node)
+{
+       struct cds_lfht_node *next;
+       unsigned long reverse_hash;
+       void *key;
+       size_t key_len;
+
+       reverse_hash = node->p.reverse_hash;
+       key = node->key;
+       key_len = node->key_len;
+       next = rcu_dereference(node->p.next);
+       node = clear_flag(next);
+
+       for (;;) {
+               if (unlikely(!node))
+                       break;
+               if (unlikely(node->p.reverse_hash > reverse_hash)) {
+                       node = NULL;
+                       break;
+               }
+               next = rcu_dereference(node->p.next);
+               if (likely(!is_removed(next))
+                   && !is_dummy(next)
+                   && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
+                               break;
+               }
+               node = clear_flag(next);
+       }
+       assert(!node || !is_dummy(rcu_dereference(node->p.next)));
+       return node;
+}
+
+void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
 {
        struct rcu_table *t;
        unsigned long hash;
@@ -424,49 +895,69 @@ void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
        node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
 
        t = rcu_dereference(ht->t);
-       (void) _ht_add(ht, t, node, 0);
+       (void) _cds_lfht_add(ht, t, node, 0, 0);
+       ht_count_add(ht, t);
 }
 
-struct rcu_ht_node *ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
+struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
+                                       struct cds_lfht_node *node)
 {
        struct rcu_table *t;
        unsigned long hash;
+       struct cds_lfht_node *ret;
 
        hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
        node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
 
        t = rcu_dereference(ht->t);
-       return _ht_add(ht, t, node, 1);
+       ret = _cds_lfht_add(ht, t, node, 1, 0);
+       if (ret != node)
+               ht_count_add(ht, t);
+       return ret;
 }
 
-int ht_remove(struct rcu_ht *ht, struct rcu_ht_node *node)
+int cds_lfht_remove(struct cds_lfht *ht, struct cds_lfht_node *node)
 {
        struct rcu_table *t;
+       int ret;
 
        t = rcu_dereference(ht->t);
-       return _ht_remove(ht, t, node);
+       ret = _cds_lfht_remove(ht, t, node);
+       if (!ret)
+               ht_count_remove(ht, t);
+       return ret;
 }
 
 static
-int ht_delete_dummy(struct rcu_ht *ht)
+int cds_lfht_delete_dummy(struct cds_lfht *ht)
 {
        struct rcu_table *t;
-       struct rcu_ht_node *node;
-       unsigned long i;
+       struct cds_lfht_node *node;
+       struct _cds_lfht_node *lookup;
+       unsigned long order, i;
 
        t = ht->t;
        /* Check that the table is empty */
-       node = t->tbl[0];
+       lookup = &t->tbl[0][0];
+       node = (struct cds_lfht_node *) lookup;
        do {
-               if (!node->p.dummy)
+               node = clear_flag(node)->p.next;
+               if (!is_dummy(node))
                        return -EPERM;
-               node = node->p.next;
                assert(!is_removed(node));
-       } while (node);
+       } while (clear_flag(node));
        /* Internal sanity check: all nodes left should be dummy */
-       for (i = 0; i < t->size; i++) {
-               assert(t->tbl[i]->p.dummy);
-               free(t->tbl[i]);
+       for (order = 0; order < get_count_order_ulong(t->size) + 1; order++) {
+               unsigned long len;
+
+               len = !order ? 1 : 1UL << (order - 1);
+               for (i = 0; i < len; i++) {
+                       dbg_printf("delete order %lu i %lu hash %lu\n",
+                               order, i,
+                               bit_reverse_ulong(t->tbl[order][i].reverse_hash));
+                       assert(is_dummy(t->tbl[order][i].next));
+               }
+               free(t->tbl[order]);
        }
        return 0;
 }
@@ -475,47 +966,55 @@ int ht_delete_dummy(struct rcu_ht *ht)
  * Should only be called when no more concurrent readers nor writers can
  * possibly access the table.
  */
-int ht_destroy(struct rcu_ht *ht)
+int cds_lfht_destroy(struct cds_lfht *ht)
 {
        int ret;
 
        /* Wait for in-flight resize operations to complete */
+       CMM_STORE_SHARED(ht->in_progress_destroy, 1);
        while (uatomic_read(&ht->in_progress_resize))
                poll(NULL, 0, 100);     /* wait for 100ms */
-       ret = ht_delete_dummy(ht);
+       ret = cds_lfht_delete_dummy(ht);
        if (ret)
                return ret;
        free(ht->t);
+       free_per_cpu_items_count(ht->percpu_count);
        free(ht);
        return ret;
 }
 
-void ht_count_nodes(struct rcu_ht *ht,
+void cds_lfht_count_nodes(struct cds_lfht *ht,
                unsigned long *count,
                unsigned long *removed)
 {
        struct rcu_table *t;
-       struct rcu_ht_node *node, *next;
+       struct cds_lfht_node *node, *next;
+       struct _cds_lfht_node *lookup;
+       unsigned long nr_dummy = 0;
 
        *count = 0;
        *removed = 0;
 
        t = rcu_dereference(ht->t);
-       /* Check that the table is empty */
-       node = rcu_dereference(t->tbl[0]);
+       /* Count non-dummy nodes in the table */
+       lookup = &t->tbl[0][0];
+       node = (struct cds_lfht_node *) lookup;
        do {
                next = rcu_dereference(node->p.next);
                if (is_removed(next)) {
-                       assert(!node->p.dummy);
+                       assert(!is_dummy(next));
                        (*removed)++;
-               } else if (!node->p.dummy)
+               } else if (!is_dummy(next))
                        (*count)++;
+               else
+                       (nr_dummy)++;
                node = clear_flag(next);
        } while (node);
+       dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
 }
 
 static
-void ht_free_table_cb(struct rcu_head *head)
+void cds_lfht_free_table_cb(struct rcu_head *head)
 {
        struct rcu_table *t =
                caa_container_of(head, struct rcu_table, head);
@@ -524,28 +1023,30 @@ void ht_free_table_cb(struct rcu_head *head)
 
 /* called with resize mutex held */
 static
-void _do_ht_resize(struct rcu_ht *ht)
+void _do_cds_lfht_resize(struct cds_lfht *ht)
 {
-       unsigned long new_size, old_size;
+       unsigned long new_size, old_size, old_order, new_order;
        struct rcu_table *new_t, *old_t;
 
        old_t = ht->t;
        old_size = old_t->size;
+       old_order = get_count_order_ulong(old_size) + 1;
 
        new_size = CMM_LOAD_SHARED(old_t->resize_target);
-       dbg_printf("rculfhash: resize from %lu to %lu buckets\n",
-                  old_size, new_size);
        if (old_size == new_size)
                return;
-       new_t = malloc(sizeof(struct rcu_table)
-                       + (new_size * sizeof(struct rcu_ht_node *)));
+       new_order = get_count_order_ulong(new_size) + 1;
+       printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
+              old_size, old_order, new_size, new_order);
+       new_t = malloc(sizeof(struct rcu_table)
+                       + (new_order * sizeof(struct _cds_lfht_node *)));
        assert(new_size > old_size);
        memcpy(&new_t->tbl, &old_t->tbl,
-              old_size * sizeof(struct rcu_ht_node *));
-       init_table(ht, new_t, old_size, new_size - old_size);
+              old_order * sizeof(struct _cds_lfht_node *));
+       init_table(ht, new_t, old_order, new_order - old_order);
        /* Changing table and size atomically wrt lookups */
        rcu_assign_pointer(ht->t, new_t);
-       ht->ht_call_rcu(&old_t->head, ht_free_table_cb);
+       ht->cds_lfht_call_rcu(&old_t->head, cds_lfht_free_table_cb);
 }
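Editor's note on the publication protocol in _do_cds_lfht_resize():

    /*
     * rcu_assign_pointer() publishes the fully initialized new table
     * before the old one is handed to call_rcu().  Readers still using
     * old_t under rcu_read_lock() keep it valid until their grace
     * period ends, after which cds_lfht_free_table_cb() frees it.
     */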
 
 static
@@ -556,16 +1057,24 @@ unsigned long resize_target_update(struct rcu_table *t,
                            t->size << growth_order);
 }
 
-void ht_resize(struct rcu_ht *ht, int growth)
+void cds_lfht_resize(struct cds_lfht *ht, int growth)
 {
        struct rcu_table *t = rcu_dereference(ht->t);
        unsigned long target_size;
 
+       if (growth < 0) {
+               /*
+                * Silently refuse to shrink the hash table (not supported).
+                */
+               dbg_printf("shrinking hash table not supported.\n");
+               return;
+       }
+
        target_size = resize_target_update(t, growth);
        if (t->size < target_size) {
                CMM_STORE_SHARED(t->resize_initiated, 1);
                pthread_mutex_lock(&ht->resize_mutex);
-               _do_ht_resize(ht);
+               _do_cds_lfht_resize(ht);
                pthread_mutex_unlock(&ht->resize_mutex);
        }
 }
@@ -575,10 +1084,10 @@ void do_resize_cb(struct rcu_head *head)
 {
        struct rcu_resize_work *work =
                caa_container_of(head, struct rcu_resize_work, head);
-       struct rcu_ht *ht = work->ht;
+       struct cds_lfht *ht = work->ht;
 
        pthread_mutex_lock(&ht->resize_mutex);
-       _do_ht_resize(ht);
+       _do_cds_lfht_resize(ht);
        pthread_mutex_unlock(&ht->resize_mutex);
        free(work);
        cmm_smp_mb();   /* finish resize before decrement */
@@ -586,7 +1095,7 @@ void do_resize_cb(struct rcu_head *head)
 }
 
 static
-void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
+void cds_lfht_resize_lazy(struct cds_lfht *ht, struct rcu_table *t, int growth)
 {
        struct rcu_resize_work *work;
        unsigned long target_size;
@@ -597,7 +1106,36 @@ void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
                cmm_smp_mb();   /* increment resize count before calling it */
                work = malloc(sizeof(*work));
                work->ht = ht;
-               ht->ht_call_rcu(&work->head, do_resize_cb);
+               ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
+               CMM_STORE_SHARED(t->resize_initiated, 1);
+       }
+}
+
+#if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF)
+
+static
+unsigned long resize_target_update_count(struct rcu_table *t,
+                                  unsigned long count)
+{
+       return uatomic_set(&t->resize_target, count);
+}
+
+static
+void cds_lfht_resize_lazy_count(struct cds_lfht *ht, struct rcu_table *t,
+                               unsigned long count)
+{
+       struct rcu_resize_work *work;
+       unsigned long target_size;
+
+       target_size = resize_target_update_count(t, count);
+       if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
+               uatomic_inc(&ht->in_progress_resize);
+               cmm_smp_mb();   /* increment resize count before calling it */
+               work = malloc(sizeof(*work));
+               work->ht = ht;
+               ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
                CMM_STORE_SHARED(t->resize_initiated, 1);
        }
 }
+
+#endif