* order bits reverse
* 0 0 000 000
* |
- * 1 | 1 001 100 <-
- * | | |
- * 2 | | 2 010 010 |
- * | | | 3 011 110 <- |
- * | | | | | |
+ * 1 | 1 001 100 <- <-
+ * | | | |
+ * 2 | | 2 010 010 | |
+ * | | | 3 011 110 | <- |
+ * | | | | | | |
* 3 -> | | | 4 100 001 | |
* -> | | 5 101 101 |
* -> | 6 110 011
#define CHAIN_LEN_TARGET 1
#define CHAIN_LEN_RESIZE_THRESHOLD 3
+/*
+ * Define the minimum table size. Protects against hash table resize overload
+ * when too many entries are added quickly before the resize can complete.
+ * This is especially the case if the table could be shrunk to a size of 1.
+ * TODO: we might want to make the add/remove operations help the resize to
+ * add or remove dummy nodes when a resize is ongoing to ensure upper-bound on
+ * chain length.
+ */
+#define MIN_TABLE_SIZE 128
+
#ifndef max
#define max(a, b) ((a) > (b) ? (a) : (b))
#endif
*/
index = hash & (t->size - 1);
order = get_count_order_ulong(index + 1);
- lookup = &t->tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)];
+ lookup = &t->tbl[order]->nodes[index & ((!order ? 0 : (1UL << (order - 1))) - 1)];
iter_prev = (struct cds_lfht_node *) lookup;
/* We can always skip the dummy node initially */
iter = rcu_dereference(iter_prev->p.next);
/* Garbage collect logically removed nodes in the bucket */
index = hash & (t->size - 1);
order = get_count_order_ulong(index + 1);
- lookup = &t->tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)];
+ lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
dummy_node = (struct cds_lfht_node *) lookup;
_cds_lfht_gc_bucket(dummy_node, node);
return node;
* if found.
*/
hash = bit_reverse_ulong(node->p.reverse_hash);
- /*
- * When removing a dummy node, we need to consider the lower
- * order table, so we don't end up looking up the dummy nodes we
- * are currently removing.
- */
-
- if (dummy_removal)
- index = hash & ((t->size == 1) ? 0 : (t->size >> 1) - 1);
- else
- index = hash & (t->size - 1);
+ assert(t->size > 0);
+ index = hash & (t->size - 1);
order = get_count_order_ulong(index + 1);
- lookup = &t->tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)];
+ lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1)) - 1))];
dummy = (struct cds_lfht_node *) lookup;
_cds_lfht_gc_bucket(dummy, node);
end:
len = !i ? 1 : 1UL << (i - 1);
dbg_printf("fini order %lu len: %lu\n", i, len);
+ /* Update table size */
+ t->size = 1UL << (i - 1);
/* Unlink */
for (j = 0; j < len; j++) {
struct cds_lfht_node *new_node =
break;
}
ht->cds_lfht_call_rcu(&t->tbl[i]->head, cds_lfht_free_level);
- /* Update table size */
- t->size = (i == 1) ? 0 : 1UL << (i - 2);
dbg_printf("fini new size: %lu\n", t->size);
if (CMM_LOAD_SHARED(ht->in_progress_destroy))
break;
ht->percpu_count = alloc_per_cpu_items_count();
/* this mutex should not nest in read-side C.S. */
pthread_mutex_init(&ht->resize_mutex, NULL);
- order = get_count_order_ulong(max(init_size, 1)) + 1;
+ order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE)) + 1;
ht->t = calloc(1, sizeof(struct cds_lfht)
+ (order * sizeof(struct rcu_level *)));
ht->t->size = 0;
t = rcu_dereference(ht->t);
index = hash & (t->size - 1);
order = get_count_order_ulong(index + 1);
- lookup = &t->tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)];
+ lookup = &t->tbl[order]->nodes[index & (!order ? 0 : ((1UL << (order - 1))) - 1)];
dbg_printf("lookup hash %lu index %lu order %lu aridx %lu\n",
- hash, index, order, index & ((1UL << (order - 1)) - 1));
+ hash, index, order, index & (!order ? 0 : ((1UL << (order - 1)) - 1)));
node = (struct cds_lfht_node *) lookup;
for (;;) {
if (unlikely(!node))
unsigned long old_order, new_order;
struct rcu_table *new_t;
- new_size = max(new_size, 1);
+ new_size = max(new_size, MIN_TABLE_SIZE);
old_order = get_count_order_ulong(old_size) + 1;
new_order = get_count_order_ulong(new_size) + 1;
printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
unsigned long resize_target_update_count(struct rcu_table *t,
unsigned long count)
{
- count = max(count, 1);
+ count = max(count, MIN_TABLE_SIZE);
return uatomic_set(&t->resize_target, count);
}