X-Git-Url: https://git.lttng.org/?p=urcu.git;a=blobdiff_plain;f=rculfhash.c;h=2a450459f5cfa50e94bce03e00be0ad1be7b2b4f;hp=423609d6c188dd79f6392763ad7b8082f2180def;hb=7c75d49894833089f34b7e71196e60b2ae5979b1;hpb=10e68472b50ab90c3a2848cc89b5b5439b5999ce

diff --git a/rculfhash.c b/rculfhash.c
index 423609d..2a45045 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -563,6 +563,7 @@ void cds_lfht_resize_lazy_count(struct cds_lfht *ht, unsigned long size,
 
 static long nr_cpus_mask = -1;
 static long split_count_mask = -1;
+static int split_count_order = -1;
 
 #if defined(HAVE_SYSCONF)
 static void ht_init_nr_cpus_mask(void)
@@ -597,6 +598,8 @@ void alloc_split_items_count(struct cds_lfht *ht)
 			split_count_mask = DEFAULT_SPLIT_COUNT_MASK;
 		else
 			split_count_mask = nr_cpus_mask;
+		split_count_order =
+			cds_lfht_get_count_order_ulong(split_count_mask + 1);
 	}
 
 	assert(split_count_mask >= 0);
@@ -713,14 +716,39 @@ void check_resize(struct cds_lfht *ht, unsigned long size, uint32_t chain_len)
 	 * Use bucket-local length for small table expand and for
 	 * environments lacking per-cpu data support.
 	 */
-	if (count >= (1UL << COUNT_COMMIT_ORDER))
+	if (count >= (1UL << (COUNT_COMMIT_ORDER + split_count_order)))
 		return;
 	if (chain_len > 100)
 		dbg_printf("WARNING: large chain length: %u.\n",
 			   chain_len);
-	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD)
-		cds_lfht_resize_lazy_grow(ht, size,
-			cds_lfht_get_count_order_u32(chain_len - (CHAIN_LEN_TARGET - 1)));
+	if (chain_len >= CHAIN_LEN_RESIZE_THRESHOLD) {
+		int growth;
+
+		/*
+		 * Ideal growth calculated based on chain length.
+		 */
+		growth = cds_lfht_get_count_order_u32(chain_len
+				- (CHAIN_LEN_TARGET - 1));
+		if ((ht->flags & CDS_LFHT_ACCOUNTING)
+				&& (size << growth)
+					>= (1UL << (COUNT_COMMIT_ORDER
+						+ split_count_order))) {
+			/*
+			 * If ideal growth expands the hash table size
+			 * beyond the "small hash table" sizes, use the
+			 * maximum small hash table size to attempt
+			 * expanding the hash table. This only applies
+			 * when node accounting is available, otherwise
+			 * the chain length is used to expand the hash
+			 * table in every case.
+			 */
+			growth = COUNT_COMMIT_ORDER + split_count_order
+				- cds_lfht_get_count_order_ulong(size);
+			if (growth <= 0)
+				return;
+		}
+		cds_lfht_resize_lazy_grow(ht, size, growth);
+	}
 }
 
 static
@@ -1161,7 +1189,10 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
 	}
 	partition_len = len >> cds_lfht_get_count_order_ulong(nr_threads);
 	work = calloc(nr_threads, sizeof(*work));
-	assert(work);
+	if (!work) {
+		dbg_printf("error allocating for resize, single-threading\n");
+		goto fallback;
+	}
 	for (thread = 0; thread < nr_threads; thread++) {
 		work[thread].ht = ht;
 		work[thread].i = i;
@@ -1177,6 +1208,11 @@ void partition_resize_helper(struct cds_lfht *ht, unsigned long i,
 		assert(!ret);
 	}
 	free(work);
+	return;
+fallback:
+	ht->flavor->thread_online();
+	fct(ht, i, 0, len);
+	ht->flavor->thread_offline();
 }
 
 /*
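
Note on the check_resize() hunk above: for "small" tables, chain-length-driven expansion is now capped at 2^(COUNT_COMMIT_ORDER + split_count_order) buckets when node accounting is enabled, leaving any further growth to the per-CPU counters. The following is a minimal standalone sketch of that clamping arithmetic only, not the library code: the constant values and the get_count_order() helper are simplified stand-ins for the rculfhash internals (COUNT_COMMIT_ORDER, CHAIN_LEN_TARGET, cds_lfht_get_count_order_*()).

/*
 * Sketch of the capped growth computation; constants are illustrative.
 */
#include <stdio.h>

#define COUNT_COMMIT_ORDER	10
#define CHAIN_LEN_TARGET	1

/* Smallest order such that (1UL << order) >= v (v >= 1 assumed). */
static int get_count_order(unsigned long v)
{
	int order = 0;

	while ((1UL << order) < v)
		order++;
	return order;
}

/* Mirror of the new policy: cap growth at the largest "small table" size. */
static int capped_growth(unsigned long size, unsigned int chain_len,
			 int split_count_order)
{
	int growth;

	/* Ideal growth, derived from the observed chain length. */
	growth = get_count_order(chain_len - (CHAIN_LEN_TARGET - 1));
	if ((size << growth) >= (1UL << (COUNT_COMMIT_ORDER + split_count_order)))
		growth = COUNT_COMMIT_ORDER + split_count_order
			- get_count_order(size);
	return growth;	/* <= 0 means: leave the resize to node accounting */
}

int main(void)
{
	/* e.g. 4 CPUs -> split_count_order = 2, small-table ceiling 2^12. */
	printf("size 8, chain 8    -> growth %d\n", capped_growth(8, 8, 2));
	printf("size 2048, chain 8 -> growth %d\n", capped_growth(2048, 8, 2));
	printf("size 4096, chain 8 -> growth %d\n", capped_growth(4096, 8, 2));
	return 0;
}

With split_count_order = 2, a 2048-bucket table is grown by one order to exactly the 4096-bucket ceiling, and a table already at 4096 buckets yields growth <= 0, which corresponds to the early return in the patched check_resize().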
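
The partition_resize_helper() hunks replace a hard assert() on calloc() failure with graceful degradation: when the per-thread work array cannot be allocated, the whole index range is processed by the calling thread, bracketed by thread_online()/thread_offline() in place of the registration the worker threads would normally perform for themselves. Below is a rough sketch of the same try-parallel-then-fall-back pattern under stated assumptions; the names (do_range, parallel_or_fallback) are hypothetical and unrelated to the library's API, and nr_threads is assumed to divide len evenly.

/*
 * Sketch: fan work out across threads, fall back to the calling thread
 * if the bookkeeping arrays cannot be allocated.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct work {
	unsigned long start, len;
};

/* Hypothetical per-partition job, standing in for the fct callback. */
static void do_range(unsigned long start, unsigned long len)
{
	printf("processing [%lu, %lu)\n", start, start + len);
}

static void *worker(void *arg)
{
	struct work *w = arg;

	do_range(w->start, w->len);
	return NULL;
}

static void parallel_or_fallback(unsigned long len, unsigned long nr_threads)
{
	pthread_t *tid;
	struct work *work;
	unsigned long t, partition_len = len / nr_threads;

	work = calloc(nr_threads, sizeof(*work));
	tid = calloc(nr_threads, sizeof(*tid));
	if (!work || !tid) {
		/* Allocation failed: degrade to single-threaded processing. */
		free(work);
		free(tid);
		do_range(0, len);
		return;
	}
	for (t = 0; t < nr_threads; t++) {
		work[t].start = t * partition_len;
		work[t].len = partition_len;
		pthread_create(&tid[t], NULL, worker, &work[t]);
	}
	for (t = 0; t < nr_threads; t++)
		pthread_join(tid[t], NULL);
	free(tid);
	free(work);
}

int main(void)
{
	parallel_or_fallback(1024, 4);
	return 0;
}

Compile with -lpthread; pthread_create() error handling is omitted for brevity.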