* order bits reverse
* 0 0 000 000
* |
- * 1 | 1 001 100 <-
- * | | |
- * 2 | | 2 010 010 |
- * | | | 3 011 110 <- |
- * | | | | | |
+ * 1 | 1 001 100 <- <-
+ * | | | |
+ * 2 | | 2 010 010 | |
+ * | | | 3 011 110 | <- |
+ * | | | | | | |
* 3 -> | | | 4 100 001 | |
* -> | | 5 101 101 |
* -> | 6 110 011
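The split-ordered layout above keys each dummy node by the bit-reverse of its index, which is what keeps the linked list sorted as the table doubles. A minimal sketch of the reversal behind the "reverse" column, assuming 3-bit indices as in the diagram (the real code reverses a full unsigned long):

/* Reverse the low 3 bits of v, matching the diagram's "reverse"
 * column: 1 (001) -> 100, 2 (010) -> 010, 3 (011) -> 110, ... */
static unsigned long reverse3(unsigned long v)
{
	return ((v & 1) << 2) | (v & 2) | ((v >> 2) & 1);
}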
#define CHAIN_LEN_TARGET 1
#define CHAIN_LEN_RESIZE_THRESHOLD 3
+/*
+ * Define the minimum table size. Protects against hash table resize overload
+ * when too many entries are added quickly before the resize can complete.
+ * This is especially the case if the table could be shrunk to a size of 1.
+ * TODO: we might want to make the add/remove operations help the resize to
+ * add or remove dummy nodes when a resize is ongoing, to ensure an upper
+ * bound on chain length.
+ */
+#define MIN_TABLE_SIZE 128
+
#ifndef max
#define max(a, b) ((a) > (b) ? (a) : (b))
#endif
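To make the effect of the new floor concrete: clamping init_size to MIN_TABLE_SIZE changes how many index-level pointers get allocated below. A sketch with a stand-in for get_count_order_ulong(), assumed here to return ceil(log2(x)):

/* Stand-in for get_count_order_ulong(); an assumption, the real helper
 * is urcu-internal: smallest order with (1UL << order) >= x. */
static unsigned long count_order(unsigned long x)
{
	unsigned long order = 0;

	while ((1UL << order) < x)
		order++;
	return order;
}

/* init_size = 1 before this patch: max(1, 1) = 1, order 0 + 1 = 1 level.
 * init_size = 1 after this patch:  max(1, 128) = 128, order 7 + 1 = 8
 * levels, so a burst of early inserts cannot outrun the first resize. */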
cds_lfht_hash_fct hash_fct;
cds_lfht_compare_fct compare_fct;
unsigned long hash_seed;
+ int flags;
pthread_mutex_t resize_mutex; /* resize mutex: add/del mutex */
unsigned int in_progress_resize, in_progress_destroy;
void (*cds_lfht_call_rcu)(struct rcu_head *head,
{
unsigned long count;
+ if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
+ return;
count = uatomic_read(&ht->count);
/*
* Use bucket-local length for small table expand and for
cds_lfht_compare_fct compare_fct,
unsigned long hash_seed,
unsigned long init_size,
+ int flags,
void (*cds_lfht_call_rcu)(struct rcu_head *head,
void (*func)(struct rcu_head *head)),
void (*cds_lfht_synchronize_rcu)(void))
ht->percpu_count = alloc_per_cpu_items_count();
/* this mutex should not nest in read-side C.S. */
pthread_mutex_init(&ht->resize_mutex, NULL);
- order = get_count_order_ulong(max(init_size, 1)) + 1;
+ order = get_count_order_ulong(max(init_size, MIN_TABLE_SIZE)) + 1;
ht->t = calloc(1, sizeof(struct cds_lfht)
+ (order * sizeof(struct rcu_level *)));
ht->t->size = 0;
+ ht->flags = flags;
pthread_mutex_lock(&ht->resize_mutex);
init_table(ht, ht->t, 0, order);
pthread_mutex_unlock(&ht->resize_mutex);
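On the caller side, the new flags argument is what selects automatic resize. A hedged usage sketch, assuming the public cds_lfht_new() wrapper forwards its arguments to _cds_lfht_new() in the order shown above (my_hash and my_compare are placeholder callbacks, 0x42UL a placeholder seed):

struct cds_lfht *ht;

/* Opt in to automatic resize; pass 0 for flags to pin the table at its
 * initial (clamped) size unless cds_lfht_resize() is called explicitly. */
ht = cds_lfht_new(my_hash, my_compare, 0x42UL, MIN_TABLE_SIZE,
		  CDS_LFHT_AUTO_RESIZE, call_rcu, synchronize_rcu);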
unsigned long old_order, new_order;
struct rcu_table *new_t;
- new_size = max(new_size, 1);
+ new_size = max(new_size, MIN_TABLE_SIZE);
old_order = get_count_order_ulong(old_size) + 1;
new_order = get_count_order_ulong(new_size) + 1;
printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
}
static
-unsigned long resize_target_update_count(struct rcu_table *t,
- unsigned long count)
+void resize_target_update_count(struct rcu_table *t,
+ unsigned long count)
{
- count = max(count, 1);
- return uatomic_set(&t->resize_target, count);
+ count = max(count, MIN_TABLE_SIZE);
+ uatomic_set(&t->resize_target, count);
}
void cds_lfht_resize(struct cds_lfht *ht, unsigned long new_size)
{
struct rcu_table *t = rcu_dereference(ht->t);
- unsigned long target_size;
- target_size = resize_target_update_count(t, new_size);
+ resize_target_update_count(t, new_size);
CMM_STORE_SHARED(t->resize_initiated, 1);
pthread_mutex_lock(&ht->resize_mutex);
_do_cds_lfht_resize(ht);
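With CDS_LFHT_AUTO_RESIZE off, this synchronous entry point is the only way the table changes size. A sketch of explicit use, on the assumption (suggested by resize_mutex and the RCU machinery above) that it is called from outside any read-side critical section:

/* From an updater thread, outside rcu_read_lock()/rcu_read_unlock(): */
cds_lfht_resize(ht, 4096);	/* grow toward 4096 buckets; requests below
				 * MIN_TABLE_SIZE are clamped by
				 * resize_target_update_count() */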
unsigned long count)
{
struct rcu_resize_work *work;
- unsigned long target_size;
- target_size = resize_target_update_count(t, count);
+ if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
+ return;
+ resize_target_update_count(t, count);
if (!CMM_LOAD_SHARED(t->resize_initiated)) {
uatomic_inc(&ht->in_progress_resize);
cmm_smp_mb(); /* increment resize count before calling it */
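The lazy path hands the actual resize to the call_rcu worker rather than doing it inline. A sketch of the assumed continuation of the branch above, with do_resize_cb standing for the callback that takes resize_mutex and runs _do_cds_lfht_resize():

		/* Assumed shape of the deferred dispatch: */
		work = malloc(sizeof(*work));
		work->ht = ht;
		ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
		CMM_STORE_SHARED(t->resize_initiated, 1);
	}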