#include <urcu/uatomic.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
+#include <urcu/static/urcu-signal-nr.h>
#include <rculfhash-internal.h>
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include "workqueue.h"
#include "urcu-die.h"
+#include "urcu-utils.h"
/*
 * Split-counters lazily update the global counter each 1024
 * addition/removal. It automatically keeps track of resize required.
 */

/*
 * Returns 0 if no bit is set, else returns the position of the most
 * significant bit (from 1 to 32 on 32-bit, from 1 to 64 on 64-bit).
 */
-#if defined(__i386) || defined(__x86_64)
+#if defined(URCU_ARCH_X86)
static inline
unsigned int fls_u32(uint32_t x)
{
	int r;

	__asm__ ("bsrl %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movl $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U32
#endif
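
/*
 * For reference, a portable fallback honouring the same contract
 * (return 0 when no bit is set, else the 1-based position of the
 * most significant bit). This is only an illustrative sketch; the
 * tree provides its own generic implementation when HAS_FLS_U32 is
 * not defined.
 */
#ifndef HAS_FLS_U32
static inline
unsigned int fls_u32(uint32_t x)
{
	unsigned int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}
#endif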
-#if defined(__x86_64)
+#if defined(URCU_ARCH_AMD64)
static inline
unsigned int fls_u64(uint64_t x)
{
	long r;

	__asm__ ("bsrq %1,%0\n\t"
	    "jnz 1f\n\t"
	    "movq $-1,%0\n\t"
	    "1:\n\t"
	    : "=r" (r) : "rm" (x));
	return r + 1;
}
#define HAS_FLS_U64
#endif

/*
 * Return the minimum order for which x <= (1UL << order).
 * Return -1 if x is 0.
 */
+static
int cds_lfht_get_count_order_u32(uint32_t x)
{
	if (!x)
		return -1;

	return fls_u32(x - 1);
}
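
/*
 * Worked examples of the contract above:
 *   cds_lfht_get_count_order_u32(0) == -1
 *   cds_lfht_get_count_order_u32(1) == fls_u32(0) == 0  (1 <= 1UL << 0)
 *   cds_lfht_get_count_order_u32(5) == fls_u32(4) == 3  (5 <= 1UL << 3)
 *   cds_lfht_get_count_order_u32(8) == fls_u32(7) == 3  (8 <= 1UL << 3)
 */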
static
void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
- unsigned long split_count;
+ unsigned long split_count, count;
int index;
- long count;
if (caa_unlikely(!ht->split_count))
return;
if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
return;
- dbg_printf("add set global %ld\n", count);
+ dbg_printf("add set global %lu\n", count);
cds_lfht_resize_lazy_count(ht, size,
count >> (CHAIN_LEN_TARGET - 1));
}
static
void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
{
- unsigned long split_count;
+ unsigned long split_count, count;
int index;
- long count;
if (caa_unlikely(!ht->split_count))
return;
}
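
/*
 * Illustrative sketch of the lazy split-counter pattern implemented by
 * ht_count_add()/ht_count_del() above: each per-index counter commits
 * to the global counter only once every 2^COUNT_COMMIT_ORDER local
 * events, keeping the global counter approximate but cheap to
 * maintain. The example_* names below are hypothetical, not part of
 * the library.
 */
#define EXAMPLE_COUNT_COMMIT_ORDER	10	/* commit every 1024 events */

struct example_split_counter {
	unsigned long add;	/* local (e.g. per-cpu) event counter */
};

static
void example_count_add(struct example_split_counter *sc,
		unsigned long *global_count)
{
	unsigned long split_count;

	split_count = uatomic_add_return(&sc->add, 1);
	/* Only commit when the low-order bits wrap around to zero. */
	if (!(split_count & ((1UL << EXAMPLE_COUNT_COMMIT_ORDER) - 1)))
		uatomic_add(global_count, 1UL << EXAMPLE_COUNT_COMMIT_ORDER);
}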
static
-int is_removed(struct cds_lfht_node *node)
+int is_removed(const struct cds_lfht_node *node)
{
return ((unsigned long) node) & REMOVED_FLAG;
}
static
void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
		unsigned long len,
		void (*fct)(struct cds_lfht *ht, unsigned long i,
			unsigned long start, unsigned long len))
{
unsigned long partition_len, start = 0;
struct partition_resize_work *work;
- int thread, ret;
- unsigned long nr_threads;
+ int ret;
+ unsigned long thread, nr_threads;
assert(nr_cpus_mask != -1);
if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD)
		goto fallback;

	/*
	 * We spawn just the number of threads we need to satisfy the minimum
	 * partition size, up to the number of CPUs in the system.
	 */
if (nr_cpus_mask > 0) {
- nr_threads = min(nr_cpus_mask + 1,
+ nr_threads = min_t(unsigned long, nr_cpus_mask + 1,
len >> MIN_PARTITION_PER_THREAD_ORDER);
} else {
nr_threads = 1;
void fini_table(struct cds_lfht *ht,
unsigned long first_order, unsigned long last_order)
{
- long i;
- unsigned long free_by_rcu_order = 0;
+ unsigned long free_by_rcu_order = 0, i;
dbg_printf("fini table: first_order %lu last_order %lu\n",
first_order, last_order);
}
}
+/*
+ * Never called with size < 1.
+ */
static
void cds_lfht_create_bucket(struct cds_lfht *ht, unsigned long size)
{
struct cds_lfht_node *prev, *node;
unsigned long order, len, i;
+ int bucket_order;
	cds_lfht_alloc_bucket_table(ht, 0);
	node = bucket_at(ht, 0);	/* First level table's node 0 */
node->next = flag_bucket(get_end());
node->reverse_hash = 0;
- for (order = 1; order < cds_lfht_get_count_order_ulong(size) + 1; order++) {
+ bucket_order = cds_lfht_get_count_order_ulong(size);
+ assert(bucket_order >= 0);
+
+ for (order = 1; order < (unsigned long) bucket_order + 1; order++) {
len = 1UL << (order - 1);
cds_lfht_alloc_bucket_table(ht, order);
}
}
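
/*
 * Example of the bucket creation loop above: for size == 8,
 * bucket_order == 3, so orders 1..3 are visited with len == 1, 2 and 4
 * respectively, doubling the bucket table at each step.
 */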
+#if (CAA_BITS_PER_LONG > 32)
+/*
+ * For 64-bit architectures, with max number of buckets small enough not to
+ * use the entire 64-bit memory mapping space (and allowing a fair number of
+ * hash table instances), use the mmap allocator, which is faster. Otherwise,
+ * fallback to the order allocator.
+ */
+static
+const struct cds_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
+{
+ if (max_nr_buckets && max_nr_buckets <= (1ULL << 32))
+ return &cds_lfht_mm_mmap;
+ else
+ return &cds_lfht_mm_order;
+}
+#else
+/*
+ * For 32-bit architectures, use the order allocator.
+ */
+static
+const struct cds_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
+{
+ return &cds_lfht_mm_order;
+}
+#endif
+
struct cds_lfht *_cds_lfht_new(unsigned long init_size,
unsigned long min_nr_alloc_buckets,
unsigned long max_nr_buckets,
/*
* Memory management plugin default.
*/
- if (!mm) {
- if (CAA_BITS_PER_LONG > 32
- && max_nr_buckets
- && max_nr_buckets <= (1ULL << 32)) {
- /*
- * For 64-bit architectures, with max number of
- * buckets small enough not to use the entire
- * 64-bit memory mapping space (and allowing a
- * fair number of hash table instances), use the
- * mmap allocator, which is faster than the
- * order allocator.
- */
- mm = &cds_lfht_mm_mmap;
- } else {
- /*
- * The fallback is to use the order allocator.
- */
- mm = &cds_lfht_mm_order;
- }
- }
+ if (!mm)
+ mm = get_mm_type(max_nr_buckets);
/* max_nr_buckets == 0 for order based mm means infinite */
	if (mm == &cds_lfht_mm_order && !max_nr_buckets)
		max_nr_buckets = 1UL << (MAX_TABLE_ORDER - 1);
return ret;
}
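
/*
 * Caller-side sketch of how the default plugin selection above plays
 * out through the public cds_lfht_new() wrapper (values illustrative):
 * a non-zero max_nr_buckets within 2^32 selects the mmap allocator on
 * 64-bit, while max_nr_buckets == 0 means "unbounded" and selects the
 * order allocator.
 */
static
struct cds_lfht *example_new_bounded_table(void)
{
	/* Bounded to 2^20 buckets: 64-bit builds pick cds_lfht_mm_mmap. */
	return cds_lfht_new(1024, 1024, 1UL << 20,
			CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
}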
-int cds_lfht_is_node_deleted(struct cds_lfht_node *node)
+int cds_lfht_is_node_deleted(const struct cds_lfht_node *node)
{
return is_removed(CMM_LOAD_SHARED(node->next));
}
.after_fork_child = cds_lfht_after_fork_child,
};
-/* Block all signals to ensure we don't disturb the application. */
+/*
+ * Block all signals for the workqueue worker thread to ensure we don't
+ * disturb the application. The SIGRCU signal needs to be unblocked for
+ * the urcu-signal flavor.
+ */
static void cds_lfht_worker_init(struct urcu_workqueue *workqueue,
void *priv)
{
int ret;
sigset_t mask;
- /* Block signal for entire process, so only our thread processes it. */
ret = sigfillset(&mask);
if (ret)
urcu_die(errno);
- ret = pthread_sigmask(SIG_BLOCK, &mask, NULL);
+ ret = sigdelset(&mask, SIGRCU);
+ if (ret)
+ urcu_die(errno);
+ ret = pthread_sigmask(SIG_SETMASK, &mask, NULL);
if (ret)
urcu_die(ret);
}