+ long i;
+ void *free_by_rcu = NULL;
+
+ dbg_printf("fini table: first_order %lu last_order %lu\n",
+ first_order, last_order);
+ assert(first_order > ht->min_alloc_order);
+ for (i = last_order; i >= first_order; i--) {
+ unsigned long len;
+
+ len = 1UL << (i - 1);
+ dbg_printf("fini order %lu len: %lu\n", i, len);
+
+ /* Stop shrink if the resize target changes under us */
+ if (CMM_LOAD_SHARED(ht->t.resize_target) > (1UL << (i - 1)))
+ break;
+
+ cmm_smp_wmb(); /* populate data before RCU size */
+ CMM_STORE_SHARED(ht->t.size, 1UL << (i - 1));
+
+ /*
+ * We need to wait for all add operations to reach Q.S. (and
+ * thus use the new table for lookups) before we can start
+ * releasing the old dummy nodes. Otherwise their lookup will
+ * return a logically removed node as insert position.
+ */
+ ht->cds_lfht_synchronize_rcu();
+ if (free_by_rcu)
+ free(free_by_rcu);
+
+ /*
+ * Set "removed" flag in dummy nodes about to be removed.
+ * Unlink all now-logically-removed dummy node pointers.
+ * Concurrent add/remove operation are helping us doing
+ * the gc.
+ */
+ remove_table(ht, i, len);
+
+ free_by_rcu = ht->t.tbl[i];
+
+ dbg_printf("fini new size: %lu\n", 1UL << i);
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+ break;
+ }
+
+ if (free_by_rcu) {
+ ht->cds_lfht_synchronize_rcu();
+ free(free_by_rcu);
+ }
+}
+
+/*
+ * Populate the dummy (bucket) node tables for an initial table of @size
+ * buckets. Dummy nodes are linked into the split-ordered list: each new
+ * order's nodes are interleaved between the nodes of all lower orders,
+ * keyed by bit-reversed hash. Called at table creation time, before any
+ * concurrent access is possible (no RCU protection needed here).
+ */
+static
+void cds_lfht_create_dummy(struct cds_lfht *ht, unsigned long size)
+{
+	struct _cds_lfht_node *prev, *node;
+	unsigned long order, len, i, j;
+
+	/*
+	 * Order-0 allocation is sized min_alloc_size so it also provides
+	 * backing storage for all orders up to min_alloc_order (see the
+	 * pointer-into-tbl[0] case below).
+	 */
+	ht->t.tbl[0] = calloc(1, ht->min_alloc_size * sizeof(struct _cds_lfht_node));
+	assert(ht->t.tbl[0]);
+
+	/*
+	 * %lu expects unsigned long: pass 0UL, not int 0 — passing int
+	 * through varargs for %lu is undefined behavior on LP64 ABIs.
+	 */
+	dbg_printf("create dummy: order %lu index %lu hash %lu\n", 0UL, 0UL, 0UL);
+	ht->t.tbl[0]->nodes[0].next = flag_dummy(get_end());
+	ht->t.tbl[0]->nodes[0].reverse_hash = 0;
+
+	for (order = 1; order < get_count_order_ulong(size) + 1; order++) {
+		len = 1UL << (order - 1);	/* nodes added by this order */
+		if (order <= ht->min_alloc_order) {
+			/* Small orders share the order-0 allocation. */
+			ht->t.tbl[order] = (struct rcu_level *) (ht->t.tbl[0]->nodes + len);
+		} else {
+			ht->t.tbl[order] = calloc(1, len * sizeof(struct _cds_lfht_node));
+			assert(ht->t.tbl[order]);
+		}
+
+		/*
+		 * Link each new dummy node right after its "parent" dummy
+		 * node of lower order: new node j (hash j + len) follows the
+		 * existing node whose bit-reversed hash immediately precedes
+		 * it. prev walks the lower-order tables in hash order.
+		 */
+		i = 0;
+		prev = ht->t.tbl[i]->nodes;
+		for (j = 0; j < len; j++) {
+			if (j & (j - 1)) {	/* Between power of 2 */
+				prev++;
+			} else if (j) {		/* At each power of 2 */
+				i++;
+				prev = ht->t.tbl[i]->nodes;
+			}
+
+			node = &ht->t.tbl[order]->nodes[j];
+			dbg_printf("create dummy: order %lu index %lu hash %lu\n",
+				   order, j, j + len);
+			node->next = prev->next;
+			assert(is_dummy(node->next));
+			node->reverse_hash = bit_reverse_ulong(j + len);
+			prev->next = flag_dummy((struct cds_lfht_node *)node);
+		}
+	}
+}
+
+struct cds_lfht *_cds_lfht_new(cds_lfht_hash_fct hash_fct,
+ cds_lfht_compare_fct compare_fct,
+ unsigned long hash_seed,
+ unsigned long init_size,
+ unsigned long min_alloc_size,
+ int flags,
+ void (*cds_lfht_call_rcu)(struct rcu_head *head,
+ void (*func)(struct rcu_head *head)),
+ void (*cds_lfht_synchronize_rcu)(void),
+ void (*cds_lfht_rcu_read_lock)(void),
+ void (*cds_lfht_rcu_read_unlock)(void),
+ void (*cds_lfht_rcu_thread_offline)(void),
+ void (*cds_lfht_rcu_thread_online)(void),
+ void (*cds_lfht_rcu_register_thread)(void),
+ void (*cds_lfht_rcu_unregister_thread)(void),
+ pthread_attr_t *attr)
+{
+ struct cds_lfht *ht;