/*
* Define the minimum table size.
*/
-#define MIN_TABLE_SIZE 1
+#define MIN_TABLE_ORDER 0
+#define MIN_TABLE_SIZE (1UL << MIN_TABLE_ORDER)
#if (CAA_BITS_PER_LONG == 32)
#define MAX_TABLE_ORDER 32
unsigned long add, del;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));
-/*
- * rcu_level: Contains the per order-index-level bucket node table. The
- * size of each bucket node table is half the number of hashes contained
- * in this order (except for order 0). The minimum allocation size
- * parameter allows combining the bucket node arrays of the lowermost
- * levels to improve cache locality for small index orders.
- */
-struct rcu_level {
- /* Note: manually update allocation length when adding a field */
- struct cds_lfht_node nodes[0];
-};
-
/*
* rcu_table: Contains the size and desired new size if a resize
* operation is in progress, as well as the statically-sized array of
- * rcu_level pointers.
+ * bucket table pointers.
*/
struct rcu_table {
unsigned long size; /* always a power of 2, shared (RCU) */
unsigned long resize_target;
int resize_initiated;
- struct rcu_level *tbl[MAX_TABLE_ORDER];
+
+	/*
+	 * Contains the per order-index-level bucket node table. The size
+	 * of each bucket node table is half the number of hashes contained
+	 * in this order (except for order 0). The minimum number of
+	 * allocated buckets (min_nr_alloc_buckets) allows combining the
+	 * bucket node arrays of the lowermost levels to improve cache
+	 * locality for small index orders.
+	 */
+ struct cds_lfht_node *tbl[MAX_TABLE_ORDER];
};
/*
*/
struct cds_lfht {
struct rcu_table t;
- unsigned long min_alloc_order;
- unsigned long min_alloc_size;
+ unsigned long min_alloc_buckets_order;
+ unsigned long min_nr_alloc_buckets;
+ unsigned long max_nr_buckets;
int flags;
/*
* We need to put the work threads offline (QSBR) when taking this
void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
{
if (order == 0) {
- ht->t.tbl[0] = calloc(ht->min_alloc_size,
+ ht->t.tbl[0] = calloc(ht->min_nr_alloc_buckets,
sizeof(struct cds_lfht_node));
assert(ht->t.tbl[0]);
- } else if (order > ht->min_alloc_order) {
+ } else if (order > ht->min_alloc_buckets_order) {
ht->t.tbl[order] = calloc(1UL << (order -1),
sizeof(struct cds_lfht_node));
assert(ht->t.tbl[order]);
}
- /* Nothing to do for 0 < order && order <= ht->min_alloc_order */
+ /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
}
/*
{
if (order == 0)
poison_free(ht->t.tbl[0]);
- else if (order > ht->min_alloc_order)
+ else if (order > ht->min_alloc_buckets_order)
poison_free(ht->t.tbl[order]);
- /* Nothing to do for 0 < order && order <= ht->min_alloc_order */
+ /* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
}
static inline
unsigned long order;
if ((__builtin_constant_p(index) && index == 0)
- || index < ht->min_alloc_size) {
+ || index < ht->min_nr_alloc_buckets) {
dbg_printf("bucket index %lu order 0 aridx 0\n", index);
- return &ht->t.tbl[0]->nodes[index];
+ return &ht->t.tbl[0][index];
}
/*
* equivalent to get_count_order_ulong(index + 1), but optimizes
order = fls_ulong(index);
dbg_printf("bucket index %lu order %lu aridx %lu\n",
index, order, index & ((1UL << (order - 1)) - 1));
- return &ht->t.tbl[order]->nodes[index & ((1UL << (order - 1)) - 1)];
+ return &ht->t.tbl[order][index & ((1UL << (order - 1)) - 1)];
}
static inline
{
unsigned long j, size = 1UL << (i - 1);
- assert(i > ht->min_alloc_order);
+ assert(i > MIN_TABLE_ORDER);
ht->cds_lfht_rcu_read_lock();
for (j = size + start; j < size + start + len; j++) {
struct cds_lfht_node *new_node = bucket_at(ht, j);
dbg_printf("init table: first_order %lu last_order %lu\n",
first_order, last_order);
- assert(first_order > ht->min_alloc_order);
+ assert(first_order > MIN_TABLE_ORDER);
for (i = first_order; i <= last_order; i++) {
unsigned long len;
{
unsigned long j, size = 1UL << (i - 1);
- assert(i > ht->min_alloc_order);
+ assert(i > MIN_TABLE_ORDER);
ht->cds_lfht_rcu_read_lock();
for (j = size + start; j < size + start + len; j++) {
struct cds_lfht_node *fini_node = bucket_at(ht, j);
dbg_printf("fini table: first_order %lu last_order %lu\n",
first_order, last_order);
- assert(first_order > ht->min_alloc_order);
+ assert(first_order > MIN_TABLE_ORDER);
for (i = last_order; i >= first_order; i--) {
unsigned long len;
}
struct cds_lfht *_cds_lfht_new(unsigned long init_size,
- unsigned long min_alloc_size,
+ unsigned long min_nr_alloc_buckets,
+ unsigned long max_nr_buckets,
int flags,
void (*cds_lfht_call_rcu)(struct rcu_head *head,
void (*func)(struct rcu_head *head)),
struct cds_lfht *ht;
unsigned long order;
- /* min_alloc_size must be power of two */
- if (!min_alloc_size || (min_alloc_size & (min_alloc_size - 1)))
+	/* min_nr_alloc_buckets must be a power of two */
+ if (!min_nr_alloc_buckets || (min_nr_alloc_buckets & (min_nr_alloc_buckets - 1)))
return NULL;
+
/* init_size must be power of two */
if (!init_size || (init_size & (init_size - 1)))
return NULL;
- min_alloc_size = max(min_alloc_size, MIN_TABLE_SIZE);
- init_size = max(init_size, min_alloc_size);
+
+ if (!max_nr_buckets)
+ max_nr_buckets = 1UL << (MAX_TABLE_ORDER - 1);
+
+	/* max_nr_buckets must be a power of two */
+ if (!max_nr_buckets || (max_nr_buckets & (max_nr_buckets - 1)))
+ return NULL;
+
+ min_nr_alloc_buckets = max(min_nr_alloc_buckets, MIN_TABLE_SIZE);
+ init_size = max(init_size, MIN_TABLE_SIZE);
+ max_nr_buckets = max(max_nr_buckets, min_nr_alloc_buckets);
+ init_size = min(init_size, max_nr_buckets);
ht = calloc(1, sizeof(struct cds_lfht));
assert(ht);
ht->flags = flags;
pthread_mutex_init(&ht->resize_mutex, NULL);
order = get_count_order_ulong(init_size);
ht->t.resize_target = 1UL << order;
- ht->min_alloc_size = min_alloc_size;
- ht->min_alloc_order = get_count_order_ulong(min_alloc_size);
+ ht->min_nr_alloc_buckets = min_nr_alloc_buckets;
+ ht->min_alloc_buckets_order = get_count_order_ulong(min_nr_alloc_buckets);
+ ht->max_nr_buckets = max_nr_buckets;
cds_lfht_create_bucket(ht, 1UL << order);
ht->t.size = 1UL << order;
return ht;
{
unsigned long old_order, new_order;
- new_size = max(new_size, ht->min_alloc_size);
+ new_size = max(new_size, MIN_TABLE_SIZE);
old_order = get_count_order_ulong(old_size);
new_order = get_count_order_ulong(new_size);
dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
void resize_target_update_count(struct cds_lfht *ht,
unsigned long count)
{
- count = max(count, ht->min_alloc_size);
+ count = max(count, MIN_TABLE_SIZE);
+ count = min(count, ht->max_nr_buckets);
uatomic_set(&ht->t.resize_target, count);
}
{
unsigned long target_size = size << growth;
+ target_size = min(target_size, ht->max_nr_buckets);
if (resize_target_grow(ht, target_size) >= target_size)
return;
{
if (!(ht->flags & CDS_LFHT_AUTO_RESIZE))
return;
- count = max(count, ht->min_alloc_size);
+ count = max(count, MIN_TABLE_SIZE);
+ count = min(count, ht->max_nr_buckets);
if (count == size)
return; /* Already the right size, no resize needed */
if (count > size) { /* lazy grow */