X-Git-Url: https://git.lttng.org/?a=blobdiff_plain;f=rculfhash.c;h=ce1cbb79a55f4cc69ed729dddb170773a46d17d8;hb=cf77d1fac55a2b5c711d26b6fff22c68cf52dc72;hp=08d024c10996833462c94e1ea96162f609502195;hpb=f9c8034108dccf1434c99da6b5768aad0b8c10f1;p=urcu.git

diff --git a/rculfhash.c b/rculfhash.c
index 08d024c..ce1cbb7 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -813,7 +813,6 @@ struct cds_lfht_node *_cds_lfht_add(struct cds_lfht *ht,
 			new_next = flag_dummy(clear_flag(next));
 		else
 			new_next = clear_flag(next);
-		assert(new_next != NULL);
 		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
 		/* retry */
 	}
@@ -849,7 +848,6 @@ int _cds_lfht_remove(struct cds_lfht *ht, unsigned long size,
 			assert(is_dummy(next));
 		else
 			assert(!is_dummy(next));
-		assert(next != NULL);
 		old = uatomic_cmpxchg(&node->p.next, next,
 				flag_removed(next));
 	} while (old != next);
@@ -881,32 +879,19 @@ end:
 	return -ENOENT;
 }
 
-static
-void init_table_hash(struct cds_lfht *ht, unsigned long i,
-		unsigned long len)
-{
-	unsigned long j;
-
-	for (j = 0; j < len; j++) {
-		struct cds_lfht_node *new_node =
-			(struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
-
-		dbg_printf("init hash entry: i %lu j %lu hash %lu\n",
-			   i, j, !i ? 0 : (1UL << (i - 1)) + j);
-		new_node->p.reverse_hash =
-			bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
-		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
-			break;
-	}
-}
-
 /*
  * Holding RCU read lock to protect _cds_lfht_add against memory
  * reclaim that could be performed by other call_rcu worker threads (ABA
  * problem).
+ *
+ * TODO: when we reach a certain length, we can split this population phase over
+ * many worker threads, based on the number of CPUs available in the system.
+ * This should therefore take care of not having the expand lagging behind too
+ * many concurrent insertion threads by using the scheduler's ability to
+ * schedule dummy node population fairly with insertions.
  */
 static
-void init_table_link(struct cds_lfht *ht, unsigned long i, unsigned long len)
+void init_table_populate(struct cds_lfht *ht, unsigned long i, unsigned long len)
 {
 	unsigned long j;
 
@@ -916,8 +901,10 @@ void init_table_link(struct cds_lfht *ht, unsigned long i, unsigned long len)
 		struct cds_lfht_node *new_node =
 			(struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
 
-		dbg_printf("init link: i %lu j %lu hash %lu\n",
+		dbg_printf("init populate: i %lu j %lu hash %lu\n",
 			   i, j, !i ? 0 : (1UL << (i - 1)) + j);
+		new_node->p.reverse_hash =
+			bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
 		(void) _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)),
 				new_node, 0, 1);
 		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
@@ -949,14 +936,11 @@ void init_table(struct cds_lfht *ht,
 		ht->t.tbl[i] = calloc(1, sizeof(struct rcu_level)
 				+ (len * sizeof(struct _cds_lfht_node)));
 
-		/* Set all dummy nodes reverse hash values for a level */
-		init_table_hash(ht, i, len);
-
 		/*
-		 * Link all dummy nodes into the table. Concurrent
-		 * add/remove are helping us.
+		 * Set all dummy nodes reverse hash values for a level and
+		 * link all dummy nodes into the table.
 		 */
-		init_table_link(ht, i, len);
+		init_table_populate(ht, i, len);
 
 		/*
 		 * Update table size.
@@ -989,6 +973,11 @@ void init_table(struct cds_lfht *ht,
  *
  * Logical removal and garbage collection can therefore be done in batch or on a
  * node-per-node basis, as long as the guarantee above holds.
+ *
+ * TODO: when we reach a certain length, we can split this removal over many
+ * worker threads, based on the number of CPUs available in the system. This
+ * should take care of not letting resize process lag behind too many concurrent
+ * updater threads actively inserting into the hash table.
  */
 static
 void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
@@ -1061,7 +1050,7 @@ void fini_table(struct cds_lfht *ht,
 	}
 }
 
-struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
+struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
 			cds_lfht_compare_fct compare_fct,
 			unsigned long hash_seed,
 			unsigned long init_size,
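
---
As a reading aid, not part of the patch itself: this is how the merged helper reads once the hunks above are applied, reconstructed from this diff alone. The surrounding definitions (struct cds_lfht, struct cds_lfht_node, dbg_printf(), bit_reverse_ulong(), _cds_lfht_add(), CMM_LOAD_SHARED()) are assumed to come from rculfhash.c and the urcu headers, exactly as in the hunks.

static
void init_table_populate(struct cds_lfht *ht, unsigned long i, unsigned long len)
{
	unsigned long j;

	for (j = 0; j < len; j++) {
		struct cds_lfht_node *new_node =
			(struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];

		dbg_printf("init populate: i %lu j %lu hash %lu\n",
			   i, j, !i ? 0 : (1UL << (i - 1)) + j);
		/*
		 * One pass per dummy node: set its reverse hash, then
		 * link it into the table. The pre-patch code did this
		 * in two loops (init_table_hash then init_table_link).
		 */
		new_node->p.reverse_hash =
			bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
		(void) _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)),
				new_node, 0, 1);
		if (CMM_LOAD_SHARED(ht->in_progress_destroy))
			break;
	}
}

The expression !i ? 0 : (1UL << (i - 1)) + j is the global dummy-node (bucket) index: level 0 holds bucket 0, and level i > 0 holds buckets (1UL << (i - 1)) through (1UL << i) - 1. For instance, level i = 2, slot j = 1 is bucket 3, whose reverse_hash is bit_reverse_ulong(3); ordering dummy nodes by bit-reversed index is what keeps them correctly placed in the split-ordered list.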