#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
-#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
#include <stdio.h>
/*
* The removed flag needs to be updated atomically with the pointer.
* It indicates that no node must attach to the node scheduled for
- * removal. The gc flag also needs to be updated atomically with the
- * pointer. It indicates that node garbage collection must be performed.
+ * removal, and that node garbage collection must be performed.
* The dummy flag does not need to be updated atomically with the
* pointer, but it is added as a pointer low bit flag to save space.
*/
#define REMOVED_FLAG (1UL << 0)
-#define GC_FLAG (1UL << 1)
-#define DUMMY_FLAG (1UL << 2)
-#define FLAGS_MASK ((1UL << 3) - 1)
+#define DUMMY_FLAG (1UL << 1)
+#define FLAGS_MASK ((1UL << 2) - 1)
/* Value of the end pointer. Should not interact with flags. */
#define END_VALUE NULL
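/*
 * Illustration of the low-bit flag encoding (a minimal sketch; the
 * in-tree clear_flag()/is_removed()/is_dummy() helpers remain the
 * definitions actually used, these example_* variants only restate the
 * encoding). Nodes are aligned, so the two low-order bits of a next
 * pointer are free to carry the REMOVED and DUMMY flags.
 */
static inline
struct cds_lfht_node *example_clear_flag(struct cds_lfht_node *node)
{
	/* Strip the flag bits to recover the real next pointer. */
	return (struct cds_lfht_node *) (((unsigned long) node) & ~FLAGS_MASK);
}

static inline
int example_is_removed(struct cds_lfht_node *node)
{
	/* Test the removal flag without dereferencing the pointer. */
	return ((unsigned long) node) & REMOVED_FLAG;
}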
void (*cds_lfht_rcu_register_thread)(void);
void (*cds_lfht_rcu_unregister_thread)(void);
pthread_attr_t *resize_attr; /* Resize threads attributes */
- unsigned long count; /* global approximate item count */
+ long count; /* global approximate item count */
struct ht_items_count *percpu_count; /* per-cpu item count */
};
struct cds_lfht_node *node,
enum add_mode mode, int dummy);
-static
-int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
- struct cds_lfht_node *node,
- int dummy_removal, int do_gc);
-
/*
* Algorithm to reverse bits in a word by lookup table, extended to
* 64-bit words.
return;
percpu_count = uatomic_add_return(&ht->percpu_count[cpu].add, 1);
if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
- unsigned long count;
+ long count;
dbg_printf("add percpu %lu\n", percpu_count);
count = uatomic_add_return(&ht->count,
if (!(count & (count - 1))) {
if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
return;
- dbg_printf("add set global %lu\n", count);
+ dbg_printf("add set global %ld\n", count);
cds_lfht_resize_lazy_count(ht, size,
count >> (CHAIN_LEN_TARGET - 1));
}
cpu = ht_get_cpu();
if (unlikely(cpu < 0))
return;
- percpu_count = uatomic_add_return(&ht->percpu_count[cpu].del, -1);
+ percpu_count = uatomic_add_return(&ht->percpu_count[cpu].del, 1);
if (unlikely(!(percpu_count & ((1UL << COUNT_COMMIT_ORDER) - 1)))) {
- unsigned long count;
+ long count;
dbg_printf("del percpu %lu\n", percpu_count);
count = uatomic_add_return(&ht->count,
if (!(count & (count - 1))) {
if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) >= size)
return;
- dbg_printf("del set global %lu\n", count);
+ dbg_printf("del set global %ld\n", count);
+ /*
+ * Don't shrink table if the number of nodes is below a
+ * certain threshold.
+ */
+ if (count < (1UL << COUNT_COMMIT_ORDER) * (nr_cpus_mask + 1))
+ return;
cds_lfht_resize_lazy_count(ht, size,
count >> (CHAIN_LEN_TARGET - 1));
}
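/*
 * Worked example for the shrink threshold above, with illustrative
 * values (COUNT_COMMIT_ORDER is assumed to be 10 for this sketch; a
 * 4-CPU machine gives nr_cpus_mask == 3):
 *
 *	(1UL << 10) * (3 + 1) == 1024 * 4 == 4096
 *
 * so a lazy shrink is only requested once the approximate global count
 * drops below 4096 nodes, which avoids resize churn on small tables.
 */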
#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
-static const long nr_cpus_mask = -1;
+static const long nr_cpus_mask = -2;
static
struct ht_items_count *alloc_per_cpu_items_count(void)
return (struct cds_lfht_node *) (((unsigned long) node) | REMOVED_FLAG);
}
-static
-int is_gc(struct cds_lfht_node *node)
-{
- return ((unsigned long) node) & GC_FLAG;
-}
-
-static
-struct cds_lfht_node *flag_gc(struct cds_lfht_node *node)
-{
- return (struct cds_lfht_node *) (((unsigned long) node) | GC_FLAG);
-}
-
static
int is_dummy(struct cds_lfht_node *node)
{
struct cds_lfht_node *iter_prev, *iter, *next, *new_next;
assert(!is_dummy(dummy));
- assert(!is_gc(dummy));
assert(!is_removed(dummy));
assert(!is_dummy(node));
- assert(!is_gc(node));
assert(!is_removed(node));
for (;;) {
iter_prev = dummy;
if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
return;
next = rcu_dereference(clear_flag(iter)->p.next);
- if (likely(is_gc(next)))
+ if (likely(is_removed(next)))
break;
iter_prev = clear_flag(iter);
iter = next;
}
- assert(!is_gc(iter));
+ assert(!is_removed(iter));
if (is_dummy(iter))
new_next = flag_dummy(clear_flag(next));
else
return;
}
+static
+int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
+ struct cds_lfht_node *old_node,
+ struct cds_lfht_node *ret_next,
+ struct cds_lfht_node *new_node)
+{
+ struct cds_lfht_node *dummy, *old_next;
+ struct _cds_lfht_node *lookup;
+ int flagged = 0;
+ unsigned long hash, index, order;
+
+ if (!old_node) /* Return -ENOENT if asked to replace NULL node */
+ goto end;
+
+ assert(!is_removed(old_node));
+ assert(!is_dummy(old_node));
+ assert(!is_removed(new_node));
+ assert(!is_dummy(new_node));
+ assert(new_node != old_node);
+ do {
+ /* Insert after node to be replaced */
+ old_next = ret_next;
+ if (is_removed(old_next)) {
+ /*
+ * Too late, the old node has been removed under us
+ * between lookup and replace. Fail.
+ */
+ goto end;
+ }
+ assert(!is_dummy(old_next));
+ assert(new_node != clear_flag(old_next));
+ new_node->p.next = clear_flag(old_next);
+ /*
+ * Here is the whole trick for lock-free replace: we add
+ * the replacement node _after_ the node we want to
+ * replace by atomically setting its next pointer at the
+ * same time we set its removal flag. Given that
+ * the lookups/get next use an iterator aware of the
+ * next pointer, they will either skip the old node due
+ * to the removal flag and see the new node, or use
+ * the old node, but will not see the new one.
+ */
+ ret_next = uatomic_cmpxchg(&old_node->p.next,
+ old_next, flag_removed(new_node));
+ } while (ret_next != old_next);
+
+ /* We performed the replacement. */
+ flagged = 1;
+
+ /*
+ * Ensure that the old node is not visible to readers anymore:
+ * lookup for the node, and remove it (along with any other
+ * logically removed node) if found.
+ */
+ hash = bit_reverse_ulong(old_node->p.reverse_hash);
+ assert(size > 0);
+ index = hash & (size - 1);
+ order = get_count_order_ulong(index + 1);
+ lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
+ dummy = (struct cds_lfht_node *) lookup;
+ _cds_lfht_gc_bucket(dummy, new_node);
+end:
+ /*
+ * Only the flagging action indicates that we (and no other)
+ * replaced the node in the hash table.
+ */
+ if (flagged) {
+ assert(is_removed(rcu_dereference(old_node->p.next)));
+ return 0;
+ } else {
+ return -ENOENT;
+ }
+}
+
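/*
 * Caller-side sketch of the replace path (illustrative only: struct
 * mynode and example_replace_first() are assumptions, not part of this
 * file; rcu_read_lock()/rcu_read_unlock() come from whichever urcu
 * flavour the application links against). The iterator is obtained and
 * used within a single RCU read-side critical section, and the new
 * node is expected to carry the same key as the node it replaces.
 */
struct mynode {
	int value;			/* hypothetical payload */
	struct rcu_head rcu_head;	/* for deferred reclaim */
	struct cds_lfht_node node;	/* chaining in the hash table */
};

static
int example_replace_first(struct cds_lfht *ht, struct mynode *new)
{
	struct cds_lfht_iter iter;
	int ret;

	rcu_read_lock();
	cds_lfht_first(ht, &iter);	/* or any lookup filling the iterator */
	ret = cds_lfht_replace(ht, &iter, &new->node);
	rcu_read_unlock();
	/*
	 * On success (ret == 0) readers now see @new; the displaced node
	 * (iter.node) may only be freed after a grace period
	 * (call_rcu/synchronize_rcu). On -ENOENT the node was removed
	 * under us, or the table was empty, and @new was not inserted.
	 */
	return ret;
}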
static
struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
unsigned long size,
unsigned long hash, index, order;
assert(!is_dummy(node));
- assert(!is_gc(node));
assert(!is_removed(node));
if (!size) {
assert(dummy);
if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
goto insert;
next = rcu_dereference(clear_flag(iter)->p.next);
- if (unlikely(is_gc(next)))
+ if (unlikely(is_removed(next)))
goto gc_node;
- assert(!is_removed(next));
if ((mode == ADD_UNIQUE || mode == ADD_REPLACE)
&& !is_dummy(next)
&& !ht->compare_fct(node->key, node->key_len,
assert(node != clear_flag(iter));
assert(!is_removed(iter_prev));
assert(!is_removed(iter));
- assert(!is_gc(iter_prev));
- assert(!is_gc(iter));
assert(iter_prev != node);
if (!dummy)
node->p.next = clear_flag(iter);
}
replace:
- /* Insert after node to be replaced */
- iter_prev = clear_flag(iter);
- iter = next;
- assert(node != clear_flag(iter));
- assert(!is_removed(iter_prev));
- assert(!is_removed(iter));
- assert(!is_gc(iter_prev));
- assert(!is_gc(iter));
- assert(iter_prev != node);
- assert(!dummy);
- node->p.next = clear_flag(iter);
- if (is_dummy(iter))
- new_node = flag_dummy(node);
- else
- new_node = node;
- /*
- * Here is the whole trick for lock-free replace: we add
- * the replacement node _after_ the node we want to
- * replace by atomically setting its next pointer at the
- * same time we set its removal and gc flags. Given that
- * the lookups/get next use an iterator aware of the
- * next pointer, they will either skip the old node due
- * to the removal/gc flag and see the new node, or use
- * the old new, but will not see the new one.
- */
- new_node = flag_removed(new_node);
- new_node = flag_gc(new_node);
- if (uatomic_cmpxchg(&iter_prev->p.next,
- iter, new_node) != iter) {
- continue; /* retry */
+
+ if (!_cds_lfht_replace(ht, size, clear_flag(iter), next,
+ node)) {
+ return_node = clear_flag(iter);
+ goto end; /* gc already done */
} else {
- return_node = iter_prev;
- goto gc_end;
+ continue; /* retry */
}
gc_node:
assert(!is_removed(iter));
- assert(!is_gc(iter));
if (is_dummy(iter))
new_next = flag_dummy(clear_flag(next));
else
lookup = &ht->t.tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
dummy_node = (struct cds_lfht_node *) lookup;
_cds_lfht_gc_bucket(dummy_node, node);
+end:
return return_node;
}
static
int _cds_lfht_del(struct cds_lfht *ht, unsigned long size,
struct cds_lfht_node *node,
- int dummy_removal, int do_gc)
+ int dummy_removal)
{
struct cds_lfht_node *dummy, *next, *old;
struct _cds_lfht_node *lookup;
int flagged = 0;
unsigned long hash, index, order;
+ if (!node) /* Return -ENOENT if asked to delete NULL node */
+ goto end;
+
/* logically delete the node */
assert(!is_dummy(node));
- assert(!is_gc(node));
assert(!is_removed(node));
old = rcu_dereference(node->p.next);
do {
else
assert(!is_dummy(next));
new_next = flag_removed(next);
- if (do_gc)
- new_next = flag_gc(new_next);
old = uatomic_cmpxchg(&node->p.next, next, new_next);
} while (old != next);
/* We performed the (logical) deletion. */
flagged = 1;
- if (!do_gc)
- goto end;
-
/*
* Ensure that the node is not visible to readers anymore: lookup for
* the node, and remove it (along with any other logically removed node)
if (flagged) {
assert(is_removed(rcu_dereference(node->p.next)));
return 0;
- } else
+ } else {
return -ENOENT;
+ }
}
static
* We spawn just the number of threads we need to satisfy the minimum
* partition size, up to the number of CPUs in the system.
*/
- nr_threads = min(nr_cpus_mask + 1,
- len >> MIN_PARTITION_PER_THREAD_ORDER);
+ if (nr_cpus_mask > 0) {
+ nr_threads = min(nr_cpus_mask + 1,
+ len >> MIN_PARTITION_PER_THREAD_ORDER);
+ } else {
+ nr_threads = 1;
+ }
partition_len = len >> get_count_order_ulong(nr_threads);
work = calloc(nr_threads, sizeof(*work));
thread_id = calloc(nr_threads, sizeof(*thread_id));
bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
(void) _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)),
new_node, ADD_DEFAULT, 1);
- if (CMM_LOAD_SHARED(ht->in_progress_destroy))
- break;
}
ht->cds_lfht_rcu_read_unlock();
}
fini_node->p.reverse_hash =
bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
(void) _cds_lfht_del(ht, !i ? 0 : (1UL << (i - 1)),
- fini_node, 1, 1);
- if (CMM_LOAD_SHARED(ht->in_progress_destroy))
- break;
+ fini_node, 1);
}
ht->cds_lfht_rcu_read_unlock();
}
node = clear_flag(node);
for (;;) {
if (unlikely(is_end(node))) {
- node = NULL;
+ node = next = NULL;
break;
}
if (unlikely(node->p.reverse_hash > reverse_hash)) {
- node = NULL;
+ node = next = NULL;
break;
}
next = rcu_dereference(node->p.next);
iter->next = next;
}
-void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+void cds_lfht_next_duplicate(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
struct cds_lfht_node *node, *next;
unsigned long reverse_hash;
for (;;) {
if (unlikely(is_end(node))) {
- node = NULL;
+ node = next = NULL;
break;
}
if (unlikely(node->p.reverse_hash > reverse_hash)) {
- node = NULL;
+ node = next = NULL;
break;
}
next = rcu_dereference(node->p.next);
iter->next = next;
}
+void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+{
+ struct cds_lfht_node *node, *next;
+
+ node = clear_flag(iter->next);
+ for (;;) {
+ if (unlikely(is_end(node))) {
+ node = next = NULL;
+ break;
+ }
+ next = rcu_dereference(node->p.next);
+ if (likely(!is_removed(next))
+ && !is_dummy(next)) {
+ break;
+ }
+ node = clear_flag(next);
+ }
+ assert(!node || !is_dummy(rcu_dereference(node->p.next)));
+ iter->node = node;
+ iter->next = next;
+}
+
+void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+{
+ struct _cds_lfht_node *lookup;
+
+ /*
+ * Get next after first dummy node. The first dummy node is the
+ * first node of the linked list.
+ */
+ lookup = &ht->t.tbl[0]->nodes[0];
+ iter->next = lookup->next;
+ cds_lfht_next(ht, iter);
+}
+
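/*
 * Caller-side sketch of a whole-table traversal with the iteration API
 * added above (struct mynode is the illustrative node type from the
 * previous sketch; caa_container_of() is the urcu/compiler.h helper).
 * The whole walk stays inside one RCU read-side critical section.
 */
static
unsigned long example_count_entries(struct cds_lfht *ht)
{
	struct cds_lfht_iter iter;
	struct mynode *entry;
	unsigned long nr = 0;

	rcu_read_lock();
	for (cds_lfht_first(ht, &iter); iter.node != NULL;
			cds_lfht_next(ht, &iter)) {
		entry = caa_container_of(iter.node, struct mynode, node);
		(void) entry;	/* inspect the payload here */
		nr++;
	}
	rcu_read_unlock();
	return nr;
}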
void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
{
unsigned long hash, size;
return ret;
}
-struct cds_lfht_node *cds_lfht_replace(struct cds_lfht *ht,
+struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
struct cds_lfht_node *node)
{
unsigned long hash, size;
return ret;
}
-int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_node *node)
+int cds_lfht_replace(struct cds_lfht *ht, struct cds_lfht_iter *old_iter,
+ struct cds_lfht_node *new_node)
+{
+ unsigned long size;
+
+ size = rcu_dereference(ht->t.size);
+ return _cds_lfht_replace(ht, size, old_iter->node, old_iter->next,
+ new_node);
+}
+
+int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
unsigned long size;
int ret;
size = rcu_dereference(ht->t.size);
- ret = _cds_lfht_del(ht, size, node, 0, 1);
+ ret = _cds_lfht_del(ht, size, iter->node, 0);
if (!ret)
ht_count_del(ht, size);
return ret;
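/*
 * Caller-side sketch of the new iterator-based removal (illustrative:
 * example_free_node_rcu() and struct mynode come from the earlier
 * sketches and assume <stdlib.h> for free(); they are not part of this
 * file). The iterator is filled by a lookup done under the same RCU
 * read-side critical section; memory is reclaimed only after a grace
 * period.
 */
static
void example_free_node_rcu(struct rcu_head *head)
{
	free(caa_container_of(head, struct mynode, rcu_head));
}

static
void example_del(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
	struct mynode *entry;

	/* a NULL iter->node (lookup miss) simply yields -ENOENT */
	if (cds_lfht_del(ht, iter) == 0) {
		entry = caa_container_of(iter->node, struct mynode, node);
		/* readers may still hold the node: defer the free */
		call_rcu(&entry->rcu_head, example_free_node_rcu);
	}
}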
if (!is_dummy(node))
return -EPERM;
assert(!is_removed(node));
- assert(!is_gc(node));
} while (!is_end(node));
/*
* size accessed without rcu_dereference because hash table is
int ret;
/* Wait for in-flight resize operations to complete */
- CMM_STORE_SHARED(ht->in_progress_destroy, 1);
+ _CMM_STORE_SHARED(ht->in_progress_destroy, 1);
+ cmm_smp_mb(); /* Store destroy before load resize */
while (uatomic_read(&ht->in_progress_resize))
poll(NULL, 0, 100); /* wait for 100ms */
ret = cds_lfht_delete_dummy(ht);
}
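/*
 * Minimal standalone model (C11 atomics, illustrative only) of the
 * destroy vs. lazy-resize handshake: each side publishes its own flag,
 * issues a full barrier, then reads the other side's flag, so at least
 * one side is guaranteed to observe the other. This mirrors the pairing
 * of "store destroy before load resize" above with "increment resize
 * count before load destroy" in the resize-launch paths further down.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int model_in_progress_resize;
static atomic_bool model_in_progress_destroy;

static
bool model_try_launch_resize(void)
{
	atomic_fetch_add(&model_in_progress_resize, 1);
	atomic_thread_fence(memory_order_seq_cst);	/* inc resize before load destroy */
	if (atomic_load(&model_in_progress_destroy)) {
		atomic_fetch_sub(&model_in_progress_resize, 1);
		return false;	/* destroy in progress: do not resize */
	}
	return true;		/* caller performs the resize, then decrements */
}

static
void model_begin_destroy(void)
{
	atomic_store(&model_in_progress_destroy, true);
	atomic_thread_fence(memory_order_seq_cst);	/* store destroy before load resize */
	while (atomic_load(&model_in_progress_resize))
		;		/* wait for in-flight resizes to drain */
}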
void cds_lfht_count_nodes(struct cds_lfht *ht,
+ long *approx_before,
unsigned long *count,
- unsigned long *removed)
+ unsigned long *removed,
+ long *approx_after)
{
struct cds_lfht_node *node, *next;
struct _cds_lfht_node *lookup;
unsigned long nr_dummy = 0;
+ *approx_before = 0;
+ if (nr_cpus_mask >= 0) {
+ int i;
+
+ for (i = 0; i < nr_cpus_mask + 1; i++) {
+ *approx_before += uatomic_read(&ht->percpu_count[i].add);
+ *approx_before -= uatomic_read(&ht->percpu_count[i].del);
+ }
+ }
+
*count = 0;
*removed = 0;
node = (struct cds_lfht_node *) lookup;
do {
next = rcu_dereference(node->p.next);
- if (is_removed(next) || is_gc(next)) {
- assert(!is_dummy(next));
- (*removed)++;
+ if (is_removed(next)) {
+ if (!is_dummy(next))
+ (*removed)++;
+ else
+ (nr_dummy)++;
} else if (!is_dummy(next))
(*count)++;
else
node = clear_flag(next);
} while (!is_end(node));
dbg_printf("number of dummy nodes: %lu\n", nr_dummy);
+ *approx_after = 0;
+ if (nr_cpus_mask >= 0) {
+ int i;
+
+ for (i = 0; i < nr_cpus_mask + 1; i++) {
+ *approx_after += uatomic_read(&ht->percpu_count[i].add);
+ *approx_after -= uatomic_read(&ht->percpu_count[i].del);
+ }
+ }
}
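/*
 * Caller-side sketch of the extended counting API (example_report_counts()
 * is illustrative). The two approx_* values are signed split-counter
 * snapshots taken before and after the exact-but-racy traversal, so the
 * node count at traversal time lies roughly between them. The call is
 * expected to run within an RCU read-side critical section.
 */
static
void example_report_counts(struct cds_lfht *ht)
{
	long approx_before, approx_after;
	unsigned long count, removed;

	rcu_read_lock();
	cds_lfht_count_nodes(ht, &approx_before, &count, &removed,
			&approx_after);
	rcu_read_unlock();
	printf("approx before %ld, traversed %lu (+%lu logically removed), "
		"approx after %ld\n",
		approx_before, count, removed, approx_after);
}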
/* called with resize mutex held */
* Resize table, re-do if the target size has changed under us.
*/
do {
+ assert(uatomic_read(&ht->in_progress_resize));
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+ break;
ht->t.resize_initiated = 1;
old_size = ht->t.size;
new_size = CMM_LOAD_SHARED(ht->t.resize_target);
cmm_smp_mb();
if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) {
uatomic_inc(&ht->in_progress_resize);
- cmm_smp_mb(); /* increment resize count before calling it */
+ cmm_smp_mb(); /* increment resize count before load destroy */
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
+ uatomic_dec(&ht->in_progress_resize);
+ return;
+ }
work = malloc(sizeof(*work));
work->ht = ht;
ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
cmm_smp_mb();
if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
uatomic_inc(&ht->in_progress_resize);
- cmm_smp_mb(); /* increment resize count before calling it */
+ cmm_smp_mb(); /* increment resize count before load destroy */
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
+ uatomic_dec(&ht->in_progress_resize);
+ return;
+ }
work = malloc(sizeof(*work));
work->ht = ht;
ht->cds_lfht_call_rcu(&work->head, do_resize_cb);