X-Git-Url: https://git.lttng.org/?a=blobdiff_plain;f=src%2Fcommon%2Fhashtable%2Frculfhash.c;h=fb44640bdc4946c8ca7c1f8a540be38f40346e12;hb=df5b86c84d896eb2d74a8757c234492c1d1fc3be;hp=ebdc4ffaa8ee84842eb18c4c0588042fade9c66c;hpb=9d8ad8e29d1f50e6a33c0e9a644c50b5e90364d2;p=lttng-tools.git

diff --git a/src/common/hashtable/rculfhash.c b/src/common/hashtable/rculfhash.c
index ebdc4ffaa..fb44640bd 100644
--- a/src/common/hashtable/rculfhash.c
+++ b/src/common/hashtable/rculfhash.c
@@ -278,6 +278,8 @@
 #include "rculfhash-internal.h"
 #include "urcu-flavor.h"
 
+#include
+
 /*
  * We need to lock pthread exit, which deadlocks __nptl_setxid in the runas
  * clone. This work-around will be allowed to be removed when runas.c gets
@@ -570,6 +572,7 @@ void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
 
 static long nr_cpus_mask = -1;
 static long split_count_mask = -1;
+static int split_count_order = -1;
 
 #if defined(HAVE_SYSCONF)
 static void ht_init_nr_cpus_mask(void)
@@ -606,6 +609,8 @@ void alloc_split_items_count(struct cds_lfht *ht)
 			split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
 		else
 			split_count_mask = nr_cpus_mask;
+		split_count_order =
+			cds_lfht_get_count_order_ulong(split_count_mask + 1);
 	}
 
 	assert(split_count_mask >= 0);
@@ -624,7 +629,7 @@ void free_split_items_count(struct cds_lfht *ht)
 	poison_free(ht->split_count);
 }
 
-#if defined(HAVE_SCHED_GETCPU)
+#if defined(HAVE_SCHED_GETCPU) && !defined(VALGRIND)
 static
 int ht_get_split_count_index(unsigned long hash)
 {
@@ -721,7 +726,7 @@ void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
 	 * Use bucket-local length for small table expand and for
 	 * environments lacking per-cpu data support.
 	 */
-	if (count >= (1UL << COUNT_COMMIT_ORDER))
+	if (count >= (1UL << (COUNT_COMMIT_ORDER + split_count_order)))
 		return;
 	if (chain_len > 100)
 		dbg_printf("WARNING: large chain length: %u.\n",
@@ -735,7 +740,9 @@ void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
 		growth = cds_lfht_get_count_order_u32(chain_len
 				- (CHAIN_LEN_TARGET - 1));
 		if ((ht->flags & CDS_LFHT_ACCOUNTING)
-				&& (size << growth) >= (1UL << COUNT_COMMIT_ORDER)) {
+				&& (size << growth)
+					>= (1UL << (COUNT_COMMIT_ORDER
+						+ split_count_order))) {
 			/*
 			 * If ideal growth expands the hash table size
 			 * beyond the "small hash table" sizes, use the
@@ -745,8 +752,8 @@
 			 * the chain length is used to expand the hash
 			 * table in every case.
 			 */
-			growth = COUNT_COMMIT_ORDER
-				- cds_lfht_get_count_order_u32(size);
+			growth = COUNT_COMMIT_ORDER + split_count_order
+				- cds_lfht_get_count_order_ulong(size);
 			if (growth <= 0)
 				return;
 		}
@@ -1776,7 +1783,7 @@ int cds_lfht_destroy(struct cds_lfht *ht, pthread_attr_t **attr)
 	}
 #endif
 	while (uatomic_read(&ht->in_progress_resize))
-		poll(NULL, 0, 100);	/* wait for 100ms */
+		(void) poll(NULL, 0, 100);	/* wait for 100ms */
 	ret = cds_lfht_delete_bucket(ht);
 	if (ret)
 		return ret;
@@ -1976,7 +1983,7 @@ void __cds_lfht_resize_lazy_launch(struct cds_lfht *ht)
 			uatomic_dec(&ht->in_progress_resize);
 			return;
 		}
-		work = malloc(sizeof(*work));
+		work = zmalloc(sizeof(*work));
 		if (work == NULL) {
 			dbg_printf("error allocating resize work, bailing out\n");
 			uatomic_dec(&ht->in_progress_resize);
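
Note on the COUNT_COMMIT_ORDER + split_count_order hunks: each per-cpu split
counter commits to the global ht->count in batches of 2^COUNT_COMMIT_ORDER, so
the global count can lag behind the true count by up to
(split_count_mask + 1) * 2^COUNT_COMMIT_ORDER. Widening the "small table"
threshold by split_count_order keeps the bucket-local chain-length heuristic in
charge across that whole imprecision window. A minimal standalone sketch of the
arithmetic (COUNT_COMMIT_ORDER = 10 is assumed to match rculfhash.c; the 8-cpu
value, the simplified get_count_order_ulong() helper, and the main() driver are
illustrative, not project code):

	#include <stdio.h>

	#define COUNT_COMMIT_ORDER	10	/* assumed value, per rculfhash.c */

	/* Simplified stand-in for cds_lfht_get_count_order_ulong():
	 * ceil(log2(x)) for x >= 1. */
	static int get_count_order_ulong(unsigned long x)
	{
		int order = 0;

		while ((1UL << order) < x)
			order++;
		return order;
	}

	int main(void)
	{
		long split_count_mask = 8 - 1;	/* e.g. 8 cpus -> mask 7 */
		int split_count_order =
			get_count_order_ulong(split_count_mask + 1);	/* 3 */

		/* Old threshold: fixed, regardless of how many counters exist. */
		printf("old: %lu\n", 1UL << COUNT_COMMIT_ORDER);
		/* New threshold: scales with the number of split counters. */
		printf("new: %lu\n",
			1UL << (COUNT_COMMIT_ORDER + split_count_order));
		return 0;
	}

With 8 cpus this prints 1024 and 8192: below 8192 entries the global count is
too imprecise to be trusted, so check_resize() keeps relying on chain length,
and the growth clamp in the last check_resize() hunk caps expansion at that
same boundary.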
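
Note on the HAVE_SCHED_GETCPU hunk: adding && !defined(VALGRIND) lets builds
intended to run under Valgrind avoid sched_getcpu() and take the #else
definition of ht_get_split_count_index() instead (the #else branch is outside
this diff's context). Assuming it matches the usual rculfhash.c fallback, that
branch simply derives the counter index from the hash:

	/* Fallback used when sched_getcpu() is unavailable or unwanted;
	 * sketch of the #else branch, assumed from rculfhash.c. */
	static
	int ht_get_split_count_index(unsigned long hash)
	{
		return hash & split_count_mask;
	}

The split counters only need to be spread across indexes to limit cache-line
contention, so a hash-derived index is functionally equivalent, just less
cache-friendly than a cpu-derived one.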
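
Note on the last hunk: switching malloc() to zmalloc() makes the resize work
item start out zero-initialized, so no field carries stale bytes if the enqueue
path does not explicitly set it. zmalloc() is lttng-tools' allocation wrapper,
defined elsewhere in the tree; a minimal sketch, assuming the conventional
calloc-based definition:

	#include <stdlib.h>

	/* Zero-initializing allocator in the style of lttng-tools' zmalloc().
	 * Assumed definition, for illustration only. */
	static inline void *zmalloc(size_t len)
	{
		return calloc(1, len);
	}

The (void) cast on poll() in the cds_lfht_destroy() hunk is a similar tidy-up:
the call is used purely as a 100ms sleep, so its return value is deliberately
discarded.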