X-Git-Url: https://git.lttng.org/?a=blobdiff_plain;f=rculfhash.c;h=ce1cbb79a55f4cc69ed729dddb170773a46d17d8;hb=cf77d1fac55a2b5c711d26b6fff22c68cf52dc72;hp=2ed570c7f5d1f6856bea7d23312c95a7e6ad1354;hpb=1e4bfa28de3120feedc018145f47318ca6cf9f63;p=urcu.git

diff --git a/rculfhash.c b/rculfhash.c
index 2ed570c..ce1cbb7 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -883,6 +883,12 @@ end:
  * Holding RCU read lock to protect _cds_lfht_add against memory
  * reclaim that could be performed by other call_rcu worker threads (ABA
  * problem).
+ *
+ * TODO: when we reach a certain length, we can split this population phase over
+ * many worker threads, based on the number of CPUs available in the system.
+ * This should therefore take care of not having the expand lagging behind too
+ * many concurrent insertion threads by using the scheduler's ability to
+ * schedule dummy node population fairly with insertions.
  */
 static
 void init_table_populate(struct cds_lfht *ht, unsigned long i, unsigned long len)
@@ -967,6 +973,11 @@ void init_table(struct cds_lfht *ht,
  *
  * Logical removal and garbage collection can therefore be done in batch or on a
  * node-per-node basis, as long as the guarantee above holds.
+ *
+ * TODO: when we reach a certain length, we can split this removal over many
+ * worker threads, based on the number of CPUs available in the system. This
+ * should take care of not letting resize process lag behind too many concurrent
+ * updater threads actively inserting into the hash table.
  */
 static
 void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
@@ -1039,7 +1050,7 @@ void fini_table(struct cds_lfht *ht,
 	}
 }
 
-struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
+struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
 		cds_lfht_compare_fct compare_fct,
 		unsigned long hash_seed,
 		unsigned long init_size,
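
The two TODO comments added above describe the same idea: once the table grows past a certain length, partition the dummy-node population (or removal) range across worker threads according to the number of CPUs available, so that a resize does not lag behind many concurrent updater threads. Below is a minimal, self-contained sketch of such a partitioning scheme, assuming plain POSIX threads; the names partition_and_run, partition_thread and populate_range are hypothetical illustrations and are not part of the urcu/rculfhash API touched by this diff.

/*
 * Hypothetical sketch: split a range of dummy-node indexes across worker
 * threads based on the number of online CPUs.  Not urcu code; the work
 * callback merely stands in for dummy node population or removal.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

typedef void (*work_fct)(unsigned long start, unsigned long len);

struct partition_work {
	unsigned long start;	/* first index handled by this worker */
	unsigned long len;	/* number of indexes handled by this worker */
	work_fct fct;		/* per-range callback (populate or remove) */
	pthread_t tid;
	int launched;		/* nonzero if a thread was actually created */
};

static void *partition_thread(void *arg)
{
	struct partition_work *work = arg;

	work->fct(work->start, work->len);
	return NULL;
}

/*
 * Split [start, start + len) into roughly equal chunks, one per online CPU,
 * and run fct on each chunk in its own thread.  Falls back to running the
 * work inline if allocation or thread creation fails.
 */
static void partition_and_run(unsigned long start, unsigned long len,
		work_fct fct)
{
	long nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
	unsigned long nr_threads, chunk, i;
	struct partition_work *work;

	if (nr_cpus < 1)
		nr_cpus = 1;
	nr_threads = len < (unsigned long) nr_cpus ? 1 : (unsigned long) nr_cpus;
	chunk = len / nr_threads;

	work = calloc(nr_threads, sizeof(*work));
	if (!work) {
		fct(start, len);	/* fallback: single-threaded */
		return;
	}
	for (i = 0; i < nr_threads; i++) {
		work[i].start = start + i * chunk;
		/* the last worker also takes the remainder of the division */
		work[i].len = (i == nr_threads - 1) ? len - i * chunk : chunk;
		work[i].fct = fct;
		work[i].launched = !pthread_create(&work[i].tid, NULL,
				partition_thread, &work[i]);
		if (!work[i].launched)
			partition_thread(&work[i]);	/* fallback: run inline */
	}
	for (i = 0; i < nr_threads; i++) {
		if (work[i].launched)
			pthread_join(work[i].tid, NULL);
	}
	free(work);
}

/* Example work callback: stands in for dummy node population or removal. */
static void populate_range(unsigned long start, unsigned long len)
{
	printf("populating dummy node indexes [%lu, %lu)\n", start, start + len);
}

int main(void)
{
	/* e.g. populate 1024 dummy nodes, spread over the online CPUs */
	partition_and_run(0, 1UL << 10, populate_range);
	return 0;
}

In this sketch the last worker absorbs the division remainder, and the whole range falls back to a single synchronous call when allocation or thread creation fails, reflecting the idea that partitioning the work is an optimization rather than a correctness requirement.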