assert(!is_dummy(node));
assert(!is_removed(node));
- if (!size) {
- assert(dummy);
- assert(!unique_ret);
- node->p.next = flag_dummy(get_end());
- return; /* Initial first add (head) */
- }
lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
for (;;) {
uint32_t chain_len = 0;
			if (unlikely(is_end(iter)))
				goto insert;
if (likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
goto insert;
+
/* dummy node is the first node of the identical-hash-value chain */
if (dummy && clear_flag(iter)->p.reverse_hash == node->p.reverse_hash)
goto insert;
+
next = rcu_dereference(clear_flag(iter)->p.next);
if (unlikely(is_removed(next)))
goto gc_node;
+
+ /* uniquely add */
if (unique_ret
&& !is_dummy(next)
- && clear_flag(iter)->p.reverse_hash == node->p.reverse_hash
- && !ht->compare_fct(node->key, node->key_len,
- clear_flag(iter)->key,
- clear_flag(iter)->key_len)) {
- unique_ret->node = clear_flag(iter);
- unique_ret->next = next;
+ && clear_flag(iter)->p.reverse_hash == node->p.reverse_hash) {
+ struct cds_lfht_iter d_iter = { .node = node, .next = iter, };
+
+			/*
+			 * A unique add inserts the node as the first
+			 * node of the identical-hash-value node chain.
+			 *
+			 * This semantic ensures that no duplicated keys
+			 * can ever be observed in the table, even when
+			 * walking it one node at a time with forward
+			 * iteration.
+			 */
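+			/*
+			 * For example, in the chain
+			 *   ... -> A(rh=5) -> B(rh=7, k1) -> C(rh=7, k2) -> D(rh=9) -> ...
+			 * a unique add of a node with rh=7 targets the head
+			 * of the rh=7 run, so cds_lfht_next_duplicate() below
+			 * visits B and C, comparing keys, before the insert
+			 * may proceed.
+			 */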
+ cds_lfht_next_duplicate(ht, &d_iter);
+ if (!d_iter.node)
+ goto insert;
+
+ *unique_ret = d_iter;
return;
}
+
/* Only account for identical reverse hash once */
if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
&& !is_dummy(next))
{
unsigned long j;
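+	/* order 0 is created up front by cds_lfht_create_dummy() */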
+ assert(i > 0);
ht->cds_lfht_rcu_read_lock();
for (j = start; j < start + len; j++) {
struct cds_lfht_node *new_node =
(struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
dbg_printf("init populate: i %lu j %lu hash %lu\n",
- i, j, !i ? 0 : (1UL << (i - 1)) + j);
+ i, j, (1UL << (i - 1)) + j);
new_node->p.reverse_hash =
- bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
- _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)),
+ bit_reverse_ulong((1UL << (i - 1)) + j);
+ _cds_lfht_add(ht, 1UL << (i - 1),
new_node, NULL, 1);
}
ht->cds_lfht_rcu_read_unlock();
dbg_printf("init table: first_order %lu last_order %lu\n",
first_order, last_order);
+ assert(first_order > 0);
for (i = first_order; i <= last_order; i++) {
unsigned long len;
- len = !i ? 1 : 1UL << (i - 1);
+ len = 1UL << (i - 1);
dbg_printf("init order %lu len: %lu\n", i, len);
/* Stop expand if the resize target changes under us */
- if (CMM_LOAD_SHARED(ht->t.resize_target) < (!i ? 1 : (1UL << i)))
+ if (CMM_LOAD_SHARED(ht->t.resize_target) < (1UL << i))
break;
ht->t.tbl[i] = calloc(1, len * sizeof(struct _cds_lfht_node));
		/*
		 * Update table size.
		 */
cmm_smp_wmb(); /* populate data before RCU size */
- CMM_STORE_SHARED(ht->t.size, !i ? 1 : (1UL << i));
+ CMM_STORE_SHARED(ht->t.size, 1UL << i);
- dbg_printf("init new size: %lu\n", !i ? 1 : (1UL << i));
+ dbg_printf("init new size: %lu\n", 1UL << i);
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
break;
}
{
unsigned long j;
+ assert(i > 0);
ht->cds_lfht_rcu_read_lock();
for (j = start; j < start + len; j++) {
struct cds_lfht_node *fini_node =
(struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
dbg_printf("remove entry: i %lu j %lu hash %lu\n",
- i, j, !i ? 0 : (1UL << (i - 1)) + j);
+ i, j, (1UL << (i - 1)) + j);
fini_node->p.reverse_hash =
- bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
- (void) _cds_lfht_del(ht, !i ? 0 : (1UL << (i - 1)),
- fini_node, 1);
+ bit_reverse_ulong((1UL << (i - 1)) + j);
+ (void) _cds_lfht_del(ht, 1UL << (i - 1), fini_node, 1);
}
ht->cds_lfht_rcu_read_unlock();
}
for (i = last_order; i >= first_order; i--) {
unsigned long len;
- len = !i ? 1 : 1UL << (i - 1);
+ len = 1UL << (i - 1);
dbg_printf("fini order %lu len: %lu\n", i, len);
/* Stop shrink if the resize target changes under us */
}
}
+static
+void cds_lfht_create_dummy(struct cds_lfht *ht, unsigned long size)
+{
+ struct _cds_lfht_node *prev, *node;
+ unsigned long order, len, i, j;
+
+ ht->t.tbl[0] = calloc(1, sizeof(struct _cds_lfht_node));
+ assert(ht->t.tbl[0]);
+
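+	/* the hash-0 dummy heads the list; it initially points at the end marker */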
+	dbg_printf("create dummy: order %lu index %lu hash %lu\n", 0UL, 0UL, 0UL);
+ ht->t.tbl[0]->nodes[0].next = flag_dummy(get_end());
+ ht->t.tbl[0]->nodes[0].reverse_hash = 0;
+
+ for (order = 1; order < get_count_order_ulong(size) + 1; order++) {
+ len = 1UL << (order - 1);
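+		/* this order holds the dummy nodes for hash values [len, 2*len) */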
+ ht->t.tbl[order] = calloc(1, len * sizeof(struct _cds_lfht_node));
+ assert(ht->t.tbl[order]);
+
+ i = 0;
+ prev = ht->t.tbl[i]->nodes;
+ for (j = 0; j < len; j++) {
+			if (j & (j - 1)) {	/* j between two powers of 2 */
+				prev++;
+			} else if (j) {		/* j is a power of 2 */
+ i++;
+ prev = ht->t.tbl[i]->nodes;
+ }
+
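+			/* prev is now the dummy node with hash value j */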
+ node = &ht->t.tbl[order]->nodes[j];
+ dbg_printf("create dummy: order %lu index %lu hash %lu\n",
+ order, j, j + len);
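+			/* plain stores suffice: the table is not yet published,
+			 * so no concurrent reader or updater can see this splice */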
+ node->next = prev->next;
+ assert(is_dummy(node->next));
+ node->reverse_hash = bit_reverse_ulong(j + len);
+ prev->next = flag_dummy((struct cds_lfht_node *)node);
+ }
+ }
+}
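
For example, with size = 4 the loop above runs for orders 1 and 2, and
each new dummy node is spliced right after the existing dummy whose hash
value is smaller by exactly len:

	order 0: hash 0                    list: 0 -> end
	order 1: hash 1 spliced after 0    list: 0 -> 1 -> end
	order 2: hash 2 spliced after 0    list: 0 -> 2 -> 1 -> end
	         hash 3 spliced after 1    list: 0 -> 2 -> 1 -> 3 -> end

Keeping only the two low-order bits for brevity, bit_reverse_ulong()
maps 0, 2, 1, 3 to 00, 01, 10, 11: the final list is exactly the
ascending reverse-hash order that _cds_lfht_add() expects.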
+
struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
cds_lfht_compare_fct compare_fct,
unsigned long hash_seed,
ht->percpu_count = alloc_per_cpu_items_count();
/* this mutex should not nest in read-side C.S. */
pthread_mutex_init(&ht->resize_mutex, NULL);
- order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE));
ht->flags = flags;
- ht->cds_lfht_rcu_thread_offline();
- pthread_mutex_lock(&ht->resize_mutex);
+ order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE));
ht->t.resize_target = 1UL << order;
- init_table(ht, 0, order);
- pthread_mutex_unlock(&ht->resize_mutex);
- ht->cds_lfht_rcu_thread_online();
+ cds_lfht_create_dummy(ht, 1UL << order);
+ ht->t.size = 1UL << order;
return ht;
}
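
Note that _cds_lfht_new() no longer takes the resize mutex nor puts the
RCU thread offline around table initialization: the new table is not yet
visible to any other thread, so cds_lfht_create_dummy() can link the
dummy nodes with plain stores. The table size itself remains a power of
two throughout. A stand-alone sketch of the sizing rule (hypothetical
helper name, not part of the patch):

	/*
	 * Round the requested initial size up to a power of two, with
	 * min_size as a floor -- mirrors the get_count_order_ulong() /
	 * 1UL << order logic used by _cds_lfht_new() above.
	 */
	static unsigned long lfht_round_up_size(unsigned long init_size,
						unsigned long min_size)
	{
		unsigned long v = init_size > min_size ? init_size : min_size;
		unsigned long order = 0;

		while ((1UL << order) < v)
			order++;	/* order = ceil(log2(v)) */
		return 1UL << order;	/* e.g. v = 5 -> 8 */
	}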