+ if (nr_cpus_mask > 0) {
+ nr_threads = min(nr_cpus_mask + 1,
+ len >> MIN_PARTITION_PER_THREAD_ORDER);
+ } else {
+ nr_threads = 1;
+ }
+ partition_len = len >> get_count_order_ulong(nr_threads);
+ work = calloc(nr_threads, sizeof(*work));
+ assert(work);
+ for (thread = 0; thread < nr_threads; thread++) {
+ work[thread].ht = ht;
+ work[thread].i = i;
+ work[thread].len = partition_len;
+ work[thread].start = thread * partition_len;
+ work[thread].fct = fct;
+ ret = pthread_create(&(work[thread].thread_id), ht->resize_attr,
+ partition_resize_thread, &work[thread]);
+ assert(!ret);
+ }
+ for (thread = 0; thread < nr_threads; thread++) {
+ ret = pthread_join(work[thread].thread_id, NULL);
+ assert(!ret);
+ }
+ free(work);
+}
+
+/*
+ * Holding RCU read lock to protect _cds_lfht_add against memory
+ * reclaim that could be performed by other call_rcu worker threads (ABA
+ * problem).
+ *
+ * When we reach a certain length, we can split this population phase over
+ * many worker threads, based on the number of CPUs available in the system.
+ * This should therefore ensure that the expansion does not lag too far
+ * behind concurrent insertion threads, by relying on the scheduler's
+ * ability to schedule dummy node population fairly with insertions.
+ */
+static
+void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
+ unsigned long start, unsigned long len)
+{
+ unsigned long j;
+
+ assert(i > ht->min_alloc_order);
+ ht->cds_lfht_rcu_read_lock();
+ for (j = start; j < start + len; j++) {
+ struct cds_lfht_node *new_node =
+ (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
+
+ dbg_printf("init populate: i %lu j %lu hash %lu\n",
+ i, j, (1UL << (i - 1)) + j);
+ new_node->p.reverse_hash =
+ bit_reverse_ulong((1UL << (i - 1)) + j);
+ _cds_lfht_add(ht, 1UL << (i - 1),
+ new_node, NULL, 1);
+ }
+ ht->cds_lfht_rcu_read_unlock();