X-Git-Url: https://git.lttng.org/?p=urcu.git;a=blobdiff_plain;f=src%2Frculfhash.c;h=51972c8d1b318c618776a8d0b4b26e5373b9dbae;hp=8942c80ba8c6e8eb5f155cd49e10c22d8455ee23;hb=afa5940dbe80a259cf8bc4a99403554a3c2c9e32;hpb=d7c76f85442125bcfef40f58b1c6fc1bd5ce4ffd

diff --git a/src/rculfhash.c b/src/rculfhash.c
index 8942c80..51972c8 100644
--- a/src/rculfhash.c
+++ b/src/rculfhash.c
@@ -273,12 +273,14 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
 #include 
 #include "workqueue.h"
 #include "urcu-die.h"
+#include "urcu-utils.h"
 
 /*
  * Split-counters lazily update the global counter each 1024
@@ -711,9 +713,8 @@ int ht_get_split_count_index(unsigned long hash)
 static
 void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
 {
-	unsigned long split_count;
+	unsigned long split_count, count;
 	int index;
-	long count;
 
 	if (caa_unlikely(!ht->split_count))
 		return;
@@ -732,7 +733,7 @@ void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
 	if ((count >> CHAIN_LEN_RESIZE_THRESHOLD) < size)
 		return;
-	dbg_printf("add set global %ld\n", count);
+	dbg_printf("add set global %lu\n", count);
 	cds_lfht_resize_lazy_count(ht, size,
 			count >> (CHAIN_LEN_TARGET - 1));
 }
 
@@ -740,9 +741,8 @@ void ht_count_add(struct cds_lfht *ht, unsigned long size, unsigned long hash)
 static
 void ht_count_del(struct cds_lfht *ht, unsigned long size, unsigned long hash)
 {
-	unsigned long split_count;
+	unsigned long split_count, count;
 	int index;
-	long count;
 
 	if (caa_unlikely(!ht->split_count))
 		return;
@@ -826,7 +826,7 @@ struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
 }
 
 static
-int is_removed(struct cds_lfht_node *node)
+int is_removed(const struct cds_lfht_node *node)
 {
 	return ((unsigned long) node) & REMOVED_FLAG;
 }
@@ -1247,8 +1247,8 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
 {
 	unsigned long partition_len, start = 0;
 	struct partition_resize_work *work;
-	int thread, ret;
-	unsigned long nr_threads;
+	int ret;
+	unsigned long thread, nr_threads;
 
 	assert(nr_cpus_mask != -1);
 	if (nr_cpus_mask < 0 || len < 2 * MIN_PARTITION_PER_THREAD)
@@ -1260,7 +1260,7 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
 	 * partition size, up to the number of CPUs in the system.
 	 */
 	if (nr_cpus_mask > 0) {
-		nr_threads = min(nr_cpus_mask + 1,
+		nr_threads = min_t(unsigned long, nr_cpus_mask + 1,
 				len >> MIN_PARTITION_PER_THREAD_ORDER);
 	} else {
 		nr_threads = 1;
@@ -1449,8 +1449,7 @@ static
 void fini_table(struct cds_lfht *ht,
 		unsigned long first_order, unsigned long last_order)
 {
-	long i;
-	unsigned long free_by_rcu_order = 0;
+	unsigned long free_by_rcu_order = 0, i;
 
 	dbg_printf("fini table: first_order %lu last_order %lu\n",
 			first_order, last_order);
@@ -1499,11 +1498,15 @@ void fini_table(struct cds_lfht *ht,
 	}
 }
 
+/*
+ * Never called with size < 1.
+ */
 static
 void cds_lfht_create_bucket(struct cds_lfht *ht, unsigned long size)
 {
 	struct cds_lfht_node *prev, *node;
 	unsigned long order, len, i;
+	int bucket_order;
 
 	cds_lfht_alloc_bucket_table(ht, 0);
 
@@ -1512,7 +1515,10 @@ void cds_lfht_create_bucket(struct cds_lfht *ht, unsigned long size)
 	node->next = flag_bucket(get_end());
 	node->reverse_hash = 0;
 
-	for (order = 1; order < cds_lfht_get_count_order_ulong(size) + 1; order++) {
+	bucket_order = cds_lfht_get_count_order_ulong(size);
+	assert(bucket_order >= 0);
+
+	for (order = 1; order < (unsigned long) bucket_order + 1; order++) {
 		len = 1UL << (order - 1);
 		cds_lfht_alloc_bucket_table(ht, order);
 
@@ -1543,6 +1549,32 @@ void cds_lfht_create_bucket(struct cds_lfht *ht, unsigned long size)
 	}
 }
 
+#if (CAA_BITS_PER_LONG > 32)
+/*
+ * For 64-bit architectures, with max number of buckets small enough not to
+ * use the entire 64-bit memory mapping space (and allowing a fair number of
+ * hash table instances), use the mmap allocator, which is faster. Otherwise,
+ * fallback to the order allocator.
+ */
+static
+const struct cds_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
+{
+	if (max_nr_buckets && max_nr_buckets <= (1ULL << 32))
+		return &cds_lfht_mm_mmap;
+	else
+		return &cds_lfht_mm_order;
+}
+#else
+/*
+ * For 32-bit architectures, use the order allocator.
+ */
+static
+const struct cds_lfht_mm_type *get_mm_type(unsigned long max_nr_buckets)
+{
+	return &cds_lfht_mm_order;
+}
+#endif
+
 struct cds_lfht *_cds_lfht_new(unsigned long init_size,
 			unsigned long min_nr_alloc_buckets,
 			unsigned long max_nr_buckets,
@@ -1565,26 +1597,8 @@ struct cds_lfht *_cds_lfht_new(unsigned long init_size,
 	/*
 	 * Memory management plugin default.
 	 */
-	if (!mm) {
-		if (CAA_BITS_PER_LONG > 32
-				&& max_nr_buckets
-				&& max_nr_buckets <= (1ULL << 32)) {
-			/*
-			 * For 64-bit architectures, with max number of
-			 * buckets small enough not to use the entire
-			 * 64-bit memory mapping space (and allowing a
-			 * fair number of hash table instances), use the
-			 * mmap allocator, which is faster than the
-			 * order allocator.
-			 */
-			mm = &cds_lfht_mm_mmap;
-		} else {
-			/*
-			 * The fallback is to use the order allocator.
-			 */
-			mm = &cds_lfht_mm_order;
-		}
-	}
+	if (!mm)
+		mm = get_mm_type(max_nr_buckets);
 
 	/* max_nr_buckets == 0 for order based mm means infinite */
 	if (mm == &cds_lfht_mm_order && !max_nr_buckets)
@@ -1816,7 +1830,7 @@ int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_node *node)
 	return ret;
 }
 
-int cds_lfht_is_node_deleted(struct cds_lfht_node *node)
+int cds_lfht_is_node_deleted(const struct cds_lfht_node *node)
 {
 	return is_removed(CMM_LOAD_SHARED(node->next));
 }
@@ -2138,18 +2152,24 @@ static struct urcu_atfork cds_lfht_atfork = {
 	.after_fork_child = cds_lfht_after_fork_child,
 };
 
-/* Block all signals to ensure we don't disturb the application. */
+/*
+ * Block all signals for the workqueue worker thread to ensure we don't
+ * disturb the application. The SIGRCU signal needs to be unblocked for
+ * the urcu-signal flavor.
+ */
 static void cds_lfht_worker_init(struct urcu_workqueue *workqueue, void *priv)
 {
 	int ret;
 	sigset_t mask;
 
-	/* Block signal for entire process, so only our thread processes it. */
 	ret = sigfillset(&mask);
 	if (ret)
 		urcu_die(errno);
-	ret = pthread_sigmask(SIG_BLOCK, &mask, NULL);
+	ret = sigdelset(&mask, SIGRCU);
+	if (ret)
+		urcu_die(errno);
+	ret = pthread_sigmask(SIG_SETMASK, &mask, NULL);
 	if (ret)
 		urcu_die(ret);
 }
 
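Note on the last hunk: the old code blocked every signal in the workqueue worker via SIG_BLOCK on a full signal set, which also left SIGRCU blocked and so could prevent the urcu-signal flavor from interrupting the worker; the fix installs a full mask minus SIGRCU using SIG_SETMASK. The standalone sketch below illustrates that masking pattern outside liburcu. It is an illustration, not the library's code: worker() is a hypothetical thread function, and the SIGRCU fallback definition here is an assumption for the sketch only (liburcu defines the real value in its own headers).

#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef SIGRCU
#define SIGRCU SIGUSR1	/* illustrative assumption; liburcu provides the real definition */
#endif

/* Hypothetical worker: mask all signals except SIGRCU, then do work. */
static void *worker(void *arg)
{
	sigset_t mask;
	int ret;

	(void) arg;
	/* Start from "all signals blocked"... */
	if (sigfillset(&mask))
		abort();
	/* ...then remove SIGRCU, so the urcu-signal flavor could still
	 * interrupt this thread. */
	if (sigdelset(&mask, SIGRCU))
		abort();
	/*
	 * SIG_SETMASK installs the mask wholesale. SIG_BLOCK, as in the
	 * old code, only adds to the inherited mask and can never
	 * unblock a signal.
	 */
	ret = pthread_sigmask(SIG_SETMASK, &mask, NULL);
	if (ret)
		abort();
	/* ... workqueue processing would happen here ... */
	return NULL;
}

int main(void)
{
	pthread_t tid;

	if (pthread_create(&tid, NULL, worker, NULL))
		abort();
	if (pthread_join(tid, NULL))
		abort();
	puts("worker ran with all signals but SIGRCU blocked");
	return 0;
}

Build with "cc -pthread". The same SIG_SETMASK-minus-SIGRCU idiom is what the patched cds_lfht_worker_init() applies to the resize workqueue thread.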