#include <string.h>
#include <sched.h>
#include <unistd.h>
+#include <stdlib.h>
#include "compat-getcpu.h"
#include <urcu/assert.h>
#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
-#include <urcu/static/urcu-signal-nr.h>
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
unsigned long start, unsigned long len);
};
+/*
+ * Named sentinel states for nr_cpus_mask, replacing the bare -1/-2
+ * magic values used before initialization succeeds.
+ */
+enum nr_cpus_mask_state {
+	NR_CPUS_MASK_INIT_FAILED = -2,
+	NR_CPUS_MASK_UNINITIALIZED = -1,
+};
+
static struct urcu_workqueue *cds_lfht_workqueue;
/*
#endif
}
+/*
+ * Default malloc hook: forwards to the C library; the opaque 'state'
+ * pointer of struct cds_lfht_alloc is unused.
+ */
+static void *cds_lfht_malloc(void *state __attribute__((unused)),
+		size_t size)
+{
+	return malloc(size);
+}
+
+/* Default calloc hook: zero-initialized array allocation; 'state' unused. */
+static void *cds_lfht_calloc(void *state __attribute__((unused)),
+		size_t nmemb, size_t size)
+{
+	return calloc(nmemb, size);
+}
+
+/* Default realloc hook: forwards to the C library; 'state' unused. */
+static void *cds_lfht_realloc(void *state __attribute__((unused)),
+		void *ptr, size_t size)
+{
+	return realloc(ptr, size);
+}
+
+/*
+ * Default aligned-allocation hook, implemented with posix_memalign().
+ * Returns NULL on failure (the posix_memalign error code is discarded).
+ * NOTE(review): posix_memalign requires 'alignment' to be a power of two
+ * and a multiple of sizeof(void *) — callers are presumed to honor this.
+ */
+static void *cds_lfht_aligned_alloc(void *state __attribute__((unused)),
+		size_t alignment, size_t size)
+{
+	void *ptr;
+
+	if (posix_memalign(&ptr, alignment, size))
+		return NULL;
+	return ptr;
+}
+
+/* Default free hook: forwards to the C library; 'state' unused. */
+static void cds_lfht_free(void *state __attribute__((unused)), void *ptr)
+{
+	free(ptr);
+}
+
+
+/*
+ * Default memory allocator: C-library-backed hooks with no private state.
+ * Selected when a NULL allocator is passed to _cds_lfht_new_with_alloc().
+ */
+static struct cds_lfht_alloc cds_lfht_default_alloc = {
+	.malloc = cds_lfht_malloc,
+	.calloc = cds_lfht_calloc,
+	.realloc = cds_lfht_realloc,
+	.aligned_alloc = cds_lfht_aligned_alloc,
+	.free = cds_lfht_free,
+	.state = NULL,
+};
+
/*
* Return the minimum order for which x <= (1UL << order).
* Return -1 if x is 0.
urcu_die(ret);
}
-static long nr_cpus_mask = -1;
+static long nr_cpus_mask = NR_CPUS_MASK_UNINITIALIZED;
static long split_count_mask = -1;
static int split_count_order = -1;
maxcpus = get_possible_cpus_array_len();
if (maxcpus <= 0) {
- nr_cpus_mask = -2;
+ nr_cpus_mask = NR_CPUS_MASK_INIT_FAILED;
return;
}
/*
static
void alloc_split_items_count(struct cds_lfht *ht)
{
- if (nr_cpus_mask == -1) {
+ if (nr_cpus_mask == NR_CPUS_MASK_UNINITIALIZED) {
ht_init_nr_cpus_mask();
if (nr_cpus_mask < 0)
split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
urcu_posix_assert(split_count_mask >= 0);
if (ht->flags & CDS_LFHT_ACCOUNTING) {
- ht->split_count = calloc(split_count_mask + 1,
+ ht->split_count = ht->alloc->calloc(ht->alloc->state, split_count_mask + 1,
sizeof(struct ht_items_count));
urcu_posix_assert(ht->split_count);
} else {
static
void free_split_items_count(struct cds_lfht *ht)
{
-	poison_free(ht->split_count);
+	/* Release through the table's allocator so alloc/free hooks stay paired. */
+	poison_free(ht->alloc, ht->split_count);
}
static
struct cds_lfht_node *node)
{
struct cds_lfht_node *bucket, *next;
- struct cds_lfht_node **node_next;
+ uintptr_t *node_next;
if (!node) /* Return -ENOENT if asked to delete NULL node */
return -ENOENT;
* NOTE: The node_next variable is present to avoid breaking
* strict-aliasing rules.
*/
- node_next = &node->next;
+ node_next = (uintptr_t*)&node->next;
uatomic_or_mo(node_next, REMOVED_FLAG, CMM_RELEASE);
/* We performed the (logical) deletion. */
unsigned long thread, nr_threads;
sigset_t newmask, oldmask;
- urcu_posix_assert(nr_cpus_mask != -1);
+ urcu_posix_assert(nr_cpus_mask != NR_CPUS_MASK_UNINITIALIZED);
if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD)
goto fallback;
nr_threads = 1;
}
partition_len = len >> cds_lfht_get_count_order_ulong(nr_threads);
- work = calloc(nr_threads, sizeof(*work));
+ work = ht->alloc->calloc(ht->alloc->state, nr_threads, sizeof(*work));
if (!work) {
dbg_printf("error allocating for resize, single-threading\n");
goto fallback;
ret = pthread_join(work[thread].thread_id, NULL);
urcu_posix_assert(!ret);
}
- free(work);
+ ht->alloc->free(ht->alloc->state, work);
/*
* A pthread_create failure above will either lead in us having
for (j = size + start; j < size + start + len; j++) {
struct cds_lfht_node *fini_bucket = bucket_at(ht, j);
struct cds_lfht_node *parent_bucket = bucket_at(ht, j - size);
- struct cds_lfht_node **fini_bucket_next;
+ uintptr_t *fini_bucket_next;
urcu_posix_assert(j >= size && j < (size << 1));
dbg_printf("remove entry: order %lu index %lu hash %lu\n",
* NOTE: The fini_bucket_next variable is present to
* avoid breaking strict-aliasing rules.
*/
- fini_bucket_next = &fini_bucket->next;
+ fini_bucket_next = (uintptr_t*)&fini_bucket->next;
uatomic_or(fini_bucket_next, REMOVED_FLAG);
_cds_lfht_gc_bucket(parent_bucket, fini_bucket);
}
node->next = flag_removed(NULL);
}
-struct cds_lfht *_cds_lfht_new(unsigned long init_size,
+struct cds_lfht *_cds_lfht_new_with_alloc(unsigned long init_size,
unsigned long min_nr_alloc_buckets,
unsigned long max_nr_buckets,
int flags,
const struct cds_lfht_mm_type *mm,
const struct rcu_flavor_struct *flavor,
+ const struct cds_lfht_alloc *alloc,
pthread_attr_t *attr)
{
struct cds_lfht *ht;
max_nr_buckets = max(max_nr_buckets, min_nr_alloc_buckets);
init_size = min(init_size, max_nr_buckets);
- ht = mm->alloc_cds_lfht(min_nr_alloc_buckets, max_nr_buckets);
+ ht = mm->alloc_cds_lfht(min_nr_alloc_buckets, max_nr_buckets, alloc ? : &cds_lfht_default_alloc);
+
urcu_posix_assert(ht);
urcu_posix_assert(ht->mm == mm);
urcu_posix_assert(ht->bucket_at == mm->bucket_at);
return ht;
}
+/*
+ * Backward-compatible entry point: identical to _cds_lfht_new_with_alloc()
+ * with a NULL allocator, i.e. using cds_lfht_default_alloc.
+ */
+struct cds_lfht *_cds_lfht_new(unsigned long init_size,
+			unsigned long min_nr_alloc_buckets,
+			unsigned long max_nr_buckets,
+			int flags,
+			const struct cds_lfht_mm_type *mm,
+			const struct rcu_flavor_struct *flavor,
+			pthread_attr_t *attr)
+{
+	return _cds_lfht_new_with_alloc(init_size,
+			min_nr_alloc_buckets, max_nr_buckets,
+			flags, mm, flavor, NULL, attr);
+}
+
void cds_lfht_lookup(struct cds_lfht *ht, unsigned long hash,
cds_lfht_match_fct match, const void *key,
struct cds_lfht_iter *iter)
if (ret)
urcu_die(ret);
ht->flavor->unregister_thread();
- poison_free(ht);
+ poison_free(ht->alloc, ht);
}
/*
ret = pthread_mutex_destroy(&ht->resize_mutex);
if (ret)
ret = -EBUSY;
- poison_free(ht);
+ poison_free(ht->alloc, ht);
return ret;
}
_do_cds_lfht_resize(ht);
mutex_unlock(&ht->resize_mutex);
ht->flavor->unregister_thread();
- poison_free(work);
+ poison_free(ht->alloc, work);
}
static
if (uatomic_load(&ht->in_progress_destroy, CMM_RELAXED)) {
return;
}
- work = malloc(sizeof(*work));
+ work = ht->alloc->malloc(ht->alloc->state, sizeof(*work));
if (work == NULL) {
dbg_printf("error allocating resize work, bailing out\n");
return;