Prefix these symbols with "cds_lfht_", as they are no longer static (they are now used by plugins).
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
-extern unsigned int fls_ulong(unsigned long x);
-extern int get_count_order_ulong(unsigned long x);
+extern unsigned int cds_lfht_fls_ulong(unsigned long x);
+extern int cds_lfht_get_count_order_ulong(unsigned long x);
#ifdef POISON_FREE
#define poison_free(ptr) \
#ifdef POISON_FREE
#define poison_free(ptr) \
ht->bucket_at = mm->bucket_at;
ht->min_nr_alloc_buckets = min_nr_alloc_buckets;
ht->min_alloc_buckets_order =
ht->bucket_at = mm->bucket_at;
ht->min_nr_alloc_buckets = min_nr_alloc_buckets;
ht->min_alloc_buckets_order =
- get_count_order_ulong(min_nr_alloc_buckets);
+ cds_lfht_get_count_order_ulong(min_nr_alloc_buckets);
ht->max_nr_buckets = max_nr_buckets;
return ht;
ht->max_nr_buckets = max_nr_buckets;
return ht;
return &ht->tbl_order[0][index];
}
/*
return &ht->tbl_order[0][index];
}
/*
- * equivalent to get_count_order_ulong(index + 1), but optimizes
- * away the non-existing 0 special-case for
- * get_count_order_ulong.
+ * equivalent to cds_lfht_get_count_order_ulong(index + 1), but
+ * optimizes away the non-existing 0 special-case for
+ * cds_lfht_get_count_order_ulong.
- order = fls_ulong(index);
+ order = cds_lfht_fls_ulong(index);
dbg_printf("bucket index %lu order %lu aridx %lu\n",
index, order, index & ((1UL << (order - 1)) - 1));
return &ht->tbl_order[order][index & ((1UL << (order - 1)) - 1)];
dbg_printf("bucket index %lu order %lu aridx %lu\n",
index, order, index & ((1UL << (order - 1)) - 1));
return &ht->tbl_order[order][index & ((1UL << (order - 1)) - 1)];
-unsigned int fls_ulong(unsigned long x)
+unsigned int cds_lfht_fls_ulong(unsigned long x)
{
#if (CAA_BITS_PER_LONG == 32)
return fls_u32(x);
{
#if (CAA_BITS_PER_LONG == 32)
return fls_u32(x);
* Return the minimum order for which x <= (1UL << order).
* Return -1 if x is 0.
*/
* Return the minimum order for which x <= (1UL << order).
* Return -1 if x is 0.
*/
-int get_count_order_u32(uint32_t x)
+int cds_lfht_get_count_order_u32(uint32_t x)
* Return the minimum order for which x <= (1UL << order).
* Return -1 if x is 0.
*/
* Return the minimum order for which x <= (1UL << order).
* Return -1 if x is 0.
*/
-int get_count_order_ulong(unsigned long x)
+int cds_lfht_get_count_order_ulong(unsigned long x)
- return fls_ulong(x - 1);
+ return cds_lfht_fls_ulong(x - 1);
* round up number of CPUs to next power of two, so we
* can use & for modulo.
*/
* round up number of CPUs to next power of two, so we
* can use & for modulo.
*/
- maxcpus = 1UL << get_count_order_ulong(maxcpus);
+ maxcpus = 1UL << cds_lfht_get_count_order_ulong(maxcpus);
nr_cpus_mask = maxcpus - 1;
}
#else /* #if defined(HAVE_SYSCONF) */
nr_cpus_mask = maxcpus - 1;
}
#else /* #if defined(HAVE_SYSCONF) */
chain_len);
if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
cds_lfht_resize_lazy_grow(ht, size,
chain_len);
if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
cds_lfht_resize_lazy_grow(ht, size,
- get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
+ cds_lfht_get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
} else {
nr_threads = 1;
}
} else {
nr_threads = 1;
}
- partition_len = len >> get_count_order_ulong(nr_threads);
+ partition_len = len >> cds_lfht_get_count_order_ulong(nr_threads);
work = calloc(nr_threads, sizeof(*work));
assert(work);
for (thread = 0; thread < nr_threads; thread++) {
work = calloc(nr_threads, sizeof(*work));
assert(work);
for (thread = 0; thread < nr_threads; thread++) {
node->next = flag_bucket(get_end());
node->reverse_hash = 0;
node->next = flag_bucket(get_end());
node->reverse_hash = 0;
- for (order = 1; order < get_count_order_ulong(size) + 1; order++) {
+ for (order = 1; order < cds_lfht_get_count_order_ulong(size) + 1; order++) {
len = 1UL << (order - 1);
cds_lfht_alloc_bucket_table(ht, order);
len = 1UL << (order - 1);
cds_lfht_alloc_bucket_table(ht, order);
alloc_split_items_count(ht);
/* this mutex should not nest in read-side C.S. */
pthread_mutex_init(&ht->resize_mutex, NULL);
alloc_split_items_count(ht);
/* this mutex should not nest in read-side C.S. */
pthread_mutex_init(&ht->resize_mutex, NULL);
- order = get_count_order_ulong(init_size);
+ order = cds_lfht_get_count_order_ulong(init_size);
ht->resize_target = 1UL << order;
cds_lfht_create_bucket(ht, 1UL << order);
ht->size = 1UL << order;
ht->resize_target = 1UL << order;
cds_lfht_create_bucket(ht, 1UL << order);
ht->size = 1UL << order;
assert(is_bucket(node->next));
}
assert(is_bucket(node->next));
}
- for (order = get_count_order_ulong(size); (long)order >= 0; order--)
+ for (order = cds_lfht_get_count_order_ulong(size); (long)order >= 0; order--)
cds_lfht_free_bucket_table(ht, order);
return 0;
cds_lfht_free_bucket_table(ht, order);
return 0;
{
unsigned long old_order, new_order;
{
unsigned long old_order, new_order;
- old_order = get_count_order_ulong(old_size);
- new_order = get_count_order_ulong(new_size);
+ old_order = cds_lfht_get_count_order_ulong(old_size);
+ new_order = cds_lfht_get_count_order_ulong(new_size);
dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
old_size, old_order, new_size, new_order);
assert(new_size > old_size);
dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
old_size, old_order, new_size, new_order);
assert(new_size > old_size);
unsigned long old_order, new_order;
new_size = max(new_size, MIN_TABLE_SIZE);
unsigned long old_order, new_order;
new_size = max(new_size, MIN_TABLE_SIZE);
- old_order = get_count_order_ulong(old_size);
- new_order = get_count_order_ulong(new_size);
+ old_order = cds_lfht_get_count_order_ulong(old_size);
+ new_order = cds_lfht_get_count_order_ulong(new_size);
dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
old_size, old_order, new_size, new_order);
assert(new_size < old_size);
dbg_printf("resize from %lu (order %lu) to %lu (order %lu) buckets\n",
old_size, old_order, new_size, new_order);
assert(new_size < old_size);