new_next = flag_dummy(clear_flag(next));
else
new_next = clear_flag(next);
- assert(new_next != NULL);
(void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
/* retry */
}
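The clear_flag()/flag_dummy()/is_dummy() calls above (and flag_removed() in the
next hunk) manipulate marker bits stored in the low-order bits of the next
pointer. A minimal sketch of how such helpers can be defined; the exact bit
assignments are an assumption, not shown in this patch, and the scheme relies
on cds_lfht_node being at least 4-byte aligned so the two low bits are free:

	#define REMOVED_FLAG	(1UL << 0)	/* assumed: logically removed */
	#define DUMMY_FLAG	(1UL << 1)	/* assumed: dummy (bucket) node */
	#define FLAGS_MASK	((1UL << 2) - 1)

	/* Strip marker bits to recover the aligned node pointer. */
	static inline
	struct cds_lfht_node *clear_flag(struct cds_lfht_node *node)
	{
		return (struct cds_lfht_node *) ((unsigned long) node & ~FLAGS_MASK);
	}

	static inline
	struct cds_lfht_node *flag_dummy(struct cds_lfht_node *node)
	{
		return (struct cds_lfht_node *) ((unsigned long) node | DUMMY_FLAG);
	}

	static inline
	int is_dummy(struct cds_lfht_node *node)
	{
		return !!((unsigned long) node & DUMMY_FLAG);
	}

	static inline
	struct cds_lfht_node *flag_removed(struct cds_lfht_node *node)
	{
		return (struct cds_lfht_node *) ((unsigned long) node | REMOVED_FLAG);
	}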
assert(is_dummy(next));
else
assert(!is_dummy(next));
- assert(next != NULL);
old = uatomic_cmpxchg(&node->p.next, next,
flag_removed(next));
} while (old != next);
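The do/while loop above is the logical-removal step: it publishes the REMOVED
flag on node->p.next with a cmpxchg and retries whenever a concurrent updater
changed the pointer in the meantime. The same idiom in isolation, as a hedged
sketch (uatomic_read()/uatomic_cmpxchg() are liburcu primitives; the helper
name is hypothetical):

	/* Sketch: flag *node* as logically removed, retrying on races. */
	static
	void mark_removed(struct cds_lfht_node *node)
	{
		struct cds_lfht_node *next, *old;

		old = uatomic_read(&node->p.next);
		do {
			next = old;
			/* Publish REMOVED; the cmpxchg fails if a concurrent
			 * update changed node->p.next first. */
			old = uatomic_cmpxchg(&node->p.next, next,
					flag_removed(next));
		} while (old != next);
	}

Once the flag is set, the node counts as deleted even though it is still
physically linked; the actual unlink is the garbage-collection cmpxchg shown
in the first hunk.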
* Holding RCU read lock to protect _cds_lfht_add against memory
* reclaim that could be performed by other call_rcu worker threads (ABA
* problem).
+ *
+ * TODO: when we reach a certain length, we can split this population phase over
+ * many worker threads, based on the number of CPUs available in the system.
+ * This should therefore keep the expand from lagging behind too many
+ * concurrent insertion threads, relying on the scheduler's ability to
+ * schedule dummy node population fairly with insertions.
*/
static
void init_table_populate(struct cds_lfht *ht, unsigned long i, unsigned long len)
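The comment above init_table_populate() describes taking the RCU read-side
lock around the whole population phase so that nodes traversed by
_cds_lfht_add() cannot be reclaimed concurrently (the ABA problem). A minimal
sketch of that pattern; the loop body is elided, and
rcu_read_lock()/rcu_read_unlock() stand in for whichever read-side primitives
the enclosing flavor provides:

	static
	void init_table_populate_sketch(struct cds_lfht *ht, unsigned long i,
			unsigned long len)
	{
		unsigned long j;

		rcu_read_lock();	/* protect _cds_lfht_add traversal (ABA) */
		for (j = 0; j < len; j++) {
			/* ... initialize dummy node j of the order-i table
			 * and publish it with _cds_lfht_add() ... */
		}
		rcu_read_unlock();
	}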
*
* Logical removal and garbage collection can therefore be done in batch or on a
* node-per-node basis, as long as the guarantee above holds.
+ *
+ * TODO: when we reach a certain length, we can split this removal over many
+ * worker threads, based on the number of CPUs available in the system. This
+ * should take care of not letting the resize process lag behind too many
+ * concurrent updater threads actively inserting into the hash table.
*/
static
void remove_table(struct cds_lfht *ht, unsigned long i, unsigned long len)
}
}
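Per the comment above remove_table(), logical removal and garbage collection
may proceed in batch or node by node, as long as the guarantee stated above it
holds. A schematic of the node-per-node variant; everything not shown in this
patch (the removal call, the bucket arithmetic) is assumed, not quoted:

	static
	void remove_table_sketch(struct cds_lfht *ht, unsigned long i,
			unsigned long len)
	{
		unsigned long j;

		rcu_read_lock();	/* assumed flavor read-side lock */
		for (j = 0; j < len; j++) {
			/* Logically remove the dummy node of bucket j in the
			 * order-i table; gc can unlink it right away or in a
			 * later batch, per the guarantee above. */
			/* ... logically remove + gc dummy node j ... */
		}
		rcu_read_unlock();
	}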
-struct cds_lfht *cds_lfht_new(cds_lfht_hash_fct hash_fct,
+struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
cds_lfht_compare_fct compare_fct,
unsigned long hash_seed,
unsigned long init_size,