#endif
};
+/*
+ * cds_lfht_alloc: Callbacks if we want to use custom memory allocator.
+ *
+ * Each callback takes the user-provided opaque 'state' pointer (the
+ * 'state' member below) as its first argument, followed by the usual
+ * libc-style allocator parameters. If a custom allocator is used, the
+ * whole interface must be implemented (no callback may be left NULL).
+ */
+struct cds_lfht_alloc {
+ void *(*malloc)(void *state, size_t size);
+ void *(*calloc)(void *state, size_t nmemb, size_t size);
+ void *(*realloc)(void *state, void *ptr, size_t size);
+ void *(*aligned_alloc)(void *state, size_t alignment, size_t size);
+ void (*free)(void *state, void *ptr);
+ void *state;
+};
+
static inline
struct cds_lfht_node *cds_lfht_iter_get_node(struct cds_lfht_iter *iter)
{
struct cds_lfht_mm_type {
struct cds_lfht *(*alloc_cds_lfht)(unsigned long min_nr_alloc_buckets,
- unsigned long max_nr_buckets);
+ unsigned long max_nr_buckets, const struct cds_lfht_alloc *alloc);
void (*alloc_bucket_table)(struct cds_lfht *ht, unsigned long order);
void (*free_bucket_table)(struct cds_lfht *ht, unsigned long order);
struct cds_lfht_node *(*bucket_at)(struct cds_lfht *ht,
const struct rcu_flavor_struct *flavor,
pthread_attr_t *attr);
+/*
+ * _cds_lfht_new_with_alloc - API used by cds_lfht_new_with_flavor_alloc.
+ *
+ * Internal constructor: same as _cds_lfht_new() but additionally takes
+ * an optional custom memory allocator 'alloc' (NULL selects the default
+ * libc-based allocator) used for all hash table memory management.
+ */
+extern
+struct cds_lfht *_cds_lfht_new_with_alloc(unsigned long init_size,
+ unsigned long min_nr_alloc_buckets,
+ unsigned long max_nr_buckets,
+ int flags,
+ const struct cds_lfht_mm_type *mm,
+ const struct cds_lfht_alloc *alloc,
+ const struct rcu_flavor_struct *flavor,
+ pthread_attr_t *attr);
+
/*
* cds_lfht_new_flavor - allocate a hash table tied to a RCU flavor.
* @init_size: number of buckets to allocate initially. Must be power of two.
flags, NULL, flavor, attr);
}
+/*
+ * cds_lfht_new_with_flavor_alloc - allocate a hash table tied to a RCU flavor.
+ * @init_size: number of buckets to allocate initially. Must be power of two.
+ * @min_nr_alloc_buckets: the minimum number of allocated buckets.
+ * (must be power of two)
+ * @max_nr_buckets: the maximum number of hash table buckets allowed.
+ * (must be power of two, 0 is accepted, means
+ * "infinite")
+ * @flags: hash table creation flags (can be combined with bitwise or: '|').
+ * 0: no flags.
+ * CDS_LFHT_AUTO_RESIZE: automatically resize hash table.
+ * CDS_LFHT_ACCOUNTING: count the number of node addition
+ * and removal in the table
+ * @flavor: flavor of liburcu to use to synchronize the hash table
+ * @alloc: Custom memory allocator for hash table memory management.
+ * NULL for default. If a custom allocator is used, then
+ * the whole interface of struct cds_lfht_alloc must be implemented.
+ * @attr: optional resize worker thread attributes. NULL for default.
+ *
+ * Return NULL on error.
+ * Note: the RCU flavor must be already included before the hash table header.
+ *
+ * The programmer is responsible for ensuring that resize operation has a
+ * priority equal to hash table updater threads. It should be performed by
+ * specifying the appropriate priority in the pthread "attr" argument, and,
+ * for CDS_LFHT_AUTO_RESIZE, by ensuring that call_rcu worker threads also have
+ * this priority level. Having lower priority for call_rcu and resize threads
+ * does not pose any correctness issue, but the resize operations could be
+ * starved by updates, thus leading to long hash table bucket chains.
+ * Threads calling cds_lfht_new_with_flavor_alloc are NOT required to be
+ * registered RCU read-side threads. It can be called very early. (e.g. before
+ * RCU is initialized)
+ */
+static inline
+struct cds_lfht *cds_lfht_new_with_flavor_alloc(unsigned long init_size,
+ unsigned long min_nr_alloc_buckets,
+ unsigned long max_nr_buckets,
+ int flags,
+ const struct rcu_flavor_struct *flavor,
+ const struct cds_lfht_alloc *alloc,
+ pthread_attr_t *attr)
+{
+ return _cds_lfht_new_with_alloc(init_size, min_nr_alloc_buckets, max_nr_buckets,
+ flags, NULL, alloc, flavor, attr);
+}
+
#ifdef URCU_API_MAP
/*
/* Initial configuration items */
unsigned long max_nr_buckets;
const struct cds_lfht_mm_type *mm; /* memory management plugin */
+ const struct cds_lfht_alloc *alloc; /* memory allocator for mm */
const struct rcu_flavor_struct *flavor; /* RCU flavor */
long count; /* global approximate item count */
extern int cds_lfht_get_count_order_ulong(unsigned long x);
#ifdef POISON_FREE
-#define poison_free(ptr) \
+#define poison_free(alloc, ptr) \
do { \
if (ptr) { \
memset(ptr, 0x42, sizeof(*(ptr))); \
- free(ptr); \
+ alloc->free(alloc->state, ptr); \
} \
} while (0)
#else
-#define poison_free(ptr) free(ptr)
+#define poison_free(alloc, ptr) alloc->free(alloc->state, ptr)
#endif
static inline
struct cds_lfht *__default_alloc_cds_lfht(
const struct cds_lfht_mm_type *mm,
+ const struct cds_lfht_alloc *alloc,
unsigned long cds_lfht_size,
unsigned long min_nr_alloc_buckets,
unsigned long max_nr_buckets)
{
struct cds_lfht *ht;
- ht = calloc(1, cds_lfht_size);
+ ht = alloc->calloc(alloc->state, 1, cds_lfht_size);
urcu_posix_assert(ht);
ht->mm = mm;
+ ht->alloc = alloc;
ht->bucket_at = mm->bucket_at;
ht->min_nr_alloc_buckets = min_nr_alloc_buckets;
ht->min_alloc_buckets_order =
void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
{
if (order == 0) {
- ht->tbl_chunk[0] = calloc(ht->min_nr_alloc_buckets,
- sizeof(struct cds_lfht_node));
+ ht->tbl_chunk[0] = ht->alloc->calloc(ht->alloc->state,
+ ht->min_nr_alloc_buckets, sizeof(struct cds_lfht_node));
urcu_posix_assert(ht->tbl_chunk[0]);
} else if (order > ht->min_alloc_buckets_order) {
unsigned long i, len = 1UL << (order - 1 - ht->min_alloc_buckets_order);
for (i = len; i < 2 * len; i++) {
- ht->tbl_chunk[i] = calloc(ht->min_nr_alloc_buckets,
- sizeof(struct cds_lfht_node));
+ ht->tbl_chunk[i] = ht->alloc->calloc(ht->alloc->state,
+ ht->min_nr_alloc_buckets, sizeof(struct cds_lfht_node));
urcu_posix_assert(ht->tbl_chunk[i]);
}
}
void cds_lfht_free_bucket_table(struct cds_lfht *ht, unsigned long order)
{
if (order == 0)
- poison_free(ht->tbl_chunk[0]);
+ poison_free(ht->alloc, ht->tbl_chunk[0]);
else if (order > ht->min_alloc_buckets_order) {
unsigned long i, len = 1UL << (order - 1 - ht->min_alloc_buckets_order);
for (i = len; i < 2 * len; i++)
- poison_free(ht->tbl_chunk[i]);
+ poison_free(ht->alloc, ht->tbl_chunk[i]);
}
/* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
}
static
struct cds_lfht *alloc_cds_lfht(unsigned long min_nr_alloc_buckets,
- unsigned long max_nr_buckets)
+ unsigned long max_nr_buckets, const struct cds_lfht_alloc *alloc)
{
unsigned long nr_chunks, cds_lfht_size;
cds_lfht_size = max(cds_lfht_size, sizeof(struct cds_lfht));
return __default_alloc_cds_lfht(
- &cds_lfht_mm_chunk, cds_lfht_size,
+ &cds_lfht_mm_chunk, alloc, cds_lfht_size,
min_nr_alloc_buckets, max_nr_buckets);
}
if (order == 0) {
if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
/* small table */
- ht->tbl_mmap = calloc(ht->max_nr_buckets,
- sizeof(*ht->tbl_mmap));
+ ht->tbl_mmap = ht->alloc->calloc(ht->alloc->state,
+ ht->max_nr_buckets, sizeof(*ht->tbl_mmap));
urcu_posix_assert(ht->tbl_mmap);
return;
}
if (order == 0) {
if (ht->min_nr_alloc_buckets == ht->max_nr_buckets) {
/* small table */
- poison_free(ht->tbl_mmap);
+ poison_free(ht->alloc, ht->tbl_mmap);
return;
}
/* large table */
static
struct cds_lfht *alloc_cds_lfht(unsigned long min_nr_alloc_buckets,
- unsigned long max_nr_buckets)
+ unsigned long max_nr_buckets, const struct cds_lfht_alloc *alloc)
{
unsigned long page_bucket_size;
}
return __default_alloc_cds_lfht(
- &cds_lfht_mm_mmap, sizeof(struct cds_lfht),
+ &cds_lfht_mm_mmap, alloc, sizeof(struct cds_lfht),
min_nr_alloc_buckets, max_nr_buckets);
}
void cds_lfht_alloc_bucket_table(struct cds_lfht *ht, unsigned long order)
{
if (order == 0) {
- ht->tbl_order[0] = calloc(ht->min_nr_alloc_buckets,
- sizeof(struct cds_lfht_node));
+ ht->tbl_order[0] = ht->alloc->calloc(ht->alloc->state,
+ ht->min_nr_alloc_buckets, sizeof(struct cds_lfht_node));
urcu_posix_assert(ht->tbl_order[0]);
} else if (order > ht->min_alloc_buckets_order) {
- ht->tbl_order[order] = calloc(1UL << (order -1),
- sizeof(struct cds_lfht_node));
+ ht->tbl_order[order] = ht->alloc->calloc(ht->alloc->state,
+ 1UL << (order -1), sizeof(struct cds_lfht_node));
urcu_posix_assert(ht->tbl_order[order]);
}
/* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
void cds_lfht_free_bucket_table(struct cds_lfht *ht, unsigned long order)
{
if (order == 0)
- poison_free(ht->tbl_order[0]);
+ poison_free(ht->alloc, ht->tbl_order[0]);
else if (order > ht->min_alloc_buckets_order)
- poison_free(ht->tbl_order[order]);
+ poison_free(ht->alloc, ht->tbl_order[order]);
/* Nothing to do for 0 < order && order <= ht->min_alloc_buckets_order */
}
static
struct cds_lfht *alloc_cds_lfht(unsigned long min_nr_alloc_buckets,
- unsigned long max_nr_buckets)
+ unsigned long max_nr_buckets, const struct cds_lfht_alloc *alloc)
{
return __default_alloc_cds_lfht(
- &cds_lfht_mm_order, sizeof(struct cds_lfht),
+ &cds_lfht_mm_order, alloc, sizeof(struct cds_lfht),
min_nr_alloc_buckets, max_nr_buckets);
}
#include <string.h>
#include <sched.h>
#include <unistd.h>
+#include <stdlib.h>
#include "compat-getcpu.h"
#include <urcu/assert.h>
#endif
}
+/*
+ * Default allocator callbacks: thin trampolines to the libc allocator.
+ * The opaque 'state' argument is unused by the default allocator.
+ */
+static void *cds_lfht_malloc(void *state __attribute__((unused)),
+ size_t size)
+{
+ return malloc(size);
+}
+
+static void *cds_lfht_calloc(void *state __attribute__((unused)),
+ size_t nmemb, size_t size)
+{
+ return calloc(nmemb, size);
+}
+
+static void *cds_lfht_realloc(void *state __attribute__((unused)),
+ void *ptr, size_t size)
+{
+ return realloc(ptr, size);
+}
+
+/*
+ * Implemented with posix_memalign rather than C11 aligned_alloc for
+ * wider portability. Returns NULL on failure instead of setting errno
+ * semantics of malloc. NOTE(review): posix_memalign requires 'alignment'
+ * to be a power of two multiple of sizeof(void *) — assumed to hold for
+ * all callers; confirm.
+ */
+static void *cds_lfht_aligned_alloc(void *state __attribute__((unused)),
+ size_t alignment, size_t size)
+{
+ void *ptr;
+
+ if (posix_memalign(&ptr, alignment, size))
+ return NULL;
+ return ptr;
+}
+
+static void cds_lfht_free(void *state __attribute__((unused)), void *ptr)
+{
+ free(ptr);
+}
+
+
+/*
+ * Default memory allocator, used whenever the caller passes alloc == NULL.
+ * NOTE(review): only ever referenced through a const struct cds_lfht_alloc
+ * pointer and never mutated — could be declared 'static const'.
+ */
+static struct cds_lfht_alloc cds_lfht_default_alloc = {
+ .malloc = cds_lfht_malloc,
+ .calloc = cds_lfht_calloc,
+ .realloc = cds_lfht_realloc,
+ .aligned_alloc = cds_lfht_aligned_alloc,
+ .free = cds_lfht_free,
+ .state = NULL,
+};
+
/*
* Return the minimum order for which x <= (1UL << order).
* Return -1 if x is 0.
urcu_posix_assert(split_count_mask >= 0);
if (ht->flags & CDS_LFHT_ACCOUNTING) {
- ht->split_count = calloc(split_count_mask + 1,
+ ht->split_count = ht->alloc->calloc(ht->alloc->state, split_count_mask + 1,
sizeof(struct ht_items_count));
urcu_posix_assert(ht->split_count);
} else {
static
void free_split_items_count(struct cds_lfht *ht)
{
- poison_free(ht->split_count);
+ poison_free(ht->alloc, ht->split_count);
}
static
nr_threads = 1;
}
partition_len = len >> cds_lfht_get_count_order_ulong(nr_threads);
- work = calloc(nr_threads, sizeof(*work));
+ work = ht->alloc->calloc(ht->alloc->state, nr_threads, sizeof(*work));
if (!work) {
dbg_printf("error allocating for resize, single-threading\n");
goto fallback;
ret = pthread_join(work[thread].thread_id, NULL);
urcu_posix_assert(!ret);
}
- free(work);
+ ht->alloc->free(ht->alloc->state, work);
/*
* A pthread_create failure above will either lead in us having
node->next = flag_removed(NULL);
}
-struct cds_lfht *_cds_lfht_new(unsigned long init_size,
+struct cds_lfht *_cds_lfht_new_with_alloc(unsigned long init_size,
unsigned long min_nr_alloc_buckets,
unsigned long max_nr_buckets,
int flags,
const struct cds_lfht_mm_type *mm,
+ const struct cds_lfht_alloc *alloc,
const struct rcu_flavor_struct *flavor,
pthread_attr_t *attr)
{
max_nr_buckets = max(max_nr_buckets, min_nr_alloc_buckets);
init_size = min(init_size, max_nr_buckets);
- ht = mm->alloc_cds_lfht(min_nr_alloc_buckets, max_nr_buckets);
+ ht = mm->alloc_cds_lfht(min_nr_alloc_buckets, max_nr_buckets, alloc ? : &cds_lfht_default_alloc);
+
urcu_posix_assert(ht);
urcu_posix_assert(ht->mm == mm);
urcu_posix_assert(ht->bucket_at == mm->bucket_at);
return ht;
}
+/*
+ * _cds_lfht_new: kept for backward compatibility with existing callers.
+ * Delegates to _cds_lfht_new_with_alloc() with alloc == NULL, which
+ * selects the default libc-based allocator.
+ */
+struct cds_lfht *_cds_lfht_new(unsigned long init_size,
+ unsigned long min_nr_alloc_buckets,
+ unsigned long max_nr_buckets,
+ int flags,
+ const struct cds_lfht_mm_type *mm,
+ const struct rcu_flavor_struct *flavor,
+ pthread_attr_t *attr)
+{
+ return _cds_lfht_new_with_alloc(init_size,
+ min_nr_alloc_buckets, max_nr_buckets,
+ flags, mm, NULL, flavor, attr);
+}
+
void cds_lfht_lookup(struct cds_lfht *ht, unsigned long hash,
cds_lfht_match_fct match, const void *key,
struct cds_lfht_iter *iter)
if (ret)
urcu_die(ret);
ht->flavor->unregister_thread();
- poison_free(ht);
+ poison_free(ht->alloc, ht);
}
/*
ret = pthread_mutex_destroy(&ht->resize_mutex);
if (ret)
ret = -EBUSY;
- poison_free(ht);
+ poison_free(ht->alloc, ht);
return ret;
}
_do_cds_lfht_resize(ht);
mutex_unlock(&ht->resize_mutex);
ht->flavor->unregister_thread();
- poison_free(work);
+ poison_free(ht->alloc, work);
}
static
if (uatomic_load(&ht->in_progress_destroy, CMM_RELAXED)) {
return;
}
- work = malloc(sizeof(*work));
+ work = ht->alloc->malloc(ht->alloc->state, sizeof(*work));
if (work == NULL) {
dbg_printf("error allocating resize work, bailing out\n");
return;