#include <urcu-call-rcu.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
-#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <urcu/rculfhash.h>
#include <stdio.h>
#else /* #if defined(HAVE_SCHED_GETCPU) && defined(HAVE_SYSCONF) */
-static const long nr_cpus_mask = -1;
+static const long nr_cpus_mask = -2;
static
struct ht_items_count *alloc_per_cpu_items_count(void)
* We spawn just the number of threads we need to satisfy the minimum
* partition size, up to the number of CPUs in the system.
*/
- nr_threads = min(nr_cpus_mask + 1,
- len >> MIN_PARTITION_PER_THREAD_ORDER);
+ if (nr_cpus_mask > 0) {
+ nr_threads = min(nr_cpus_mask + 1,
+ len >> MIN_PARTITION_PER_THREAD_ORDER);
+ } else {
+ nr_threads = 1;
+ }
partition_len = len >> get_count_order_ulong(nr_threads);
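/*
 * Worked example (assuming MIN_PARTITION_PER_THREAD_ORDER is 12, i.e.
 * at least 4096 index nodes handled per thread): with len = 65536 and
 * 8 CPUs (nr_cpus_mask == 7), len >> 12 = 16, so nr_threads =
 * min(8, 16) = 8 and partition_len = 65536 >>
 * get_count_order_ulong(8) = 8192.  When the CPU count is unknown
 * (nr_cpus_mask < 0, e.g. the -2 fallback above), a single thread
 * covers the whole range.
 */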
work = calloc(nr_threads, sizeof(*work));
thread_id = calloc(nr_threads, sizeof(*thread_id));
bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
(void) _cds_lfht_add(ht, !i ? 0 : (1UL << (i - 1)),
new_node, ADD_DEFAULT, 1);
- if (CMM_LOAD_SHARED(ht->in_progress_destroy))
- break;
}
ht->cds_lfht_rcu_read_unlock();
}
bit_reverse_ulong(!i ? 0 : (1UL << (i - 1)) + j);
(void) _cds_lfht_del(ht, !i ? 0 : (1UL << (i - 1)),
fini_node, 1);
- if (CMM_LOAD_SHARED(ht->in_progress_destroy))
- break;
}
ht->cds_lfht_rcu_read_unlock();
}
iter->next = next;
}
-void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+void cds_lfht_next_duplicate(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
struct cds_lfht_node *node, *next;
unsigned long reverse_hash;
iter->next = next;
}
+void cds_lfht_next(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+{
+ struct cds_lfht_node *node, *next;
+
+ node = clear_flag(iter->next);
+ for (;;) {
+ if (unlikely(is_end(node))) {
+ node = next = NULL;
+ break;
+ }
+ next = rcu_dereference(node->p.next);
+ if (likely(!is_removed(next))
+ && !is_dummy(next)) {
+ break;
+ }
+ node = clear_flag(next);
+ }
+ assert(!node || !is_dummy(rcu_dereference(node->p.next)));
+ iter->node = node;
+ iter->next = next;
+}
+
+void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
+{
+ struct _cds_lfht_node *lookup;
+
+ /*
+ * Get next after first dummy node. The first dummy node is the
+ * first node of the linked list.
+ */
+ lookup = &ht->t.tbl[0]->nodes[0];
+ iter->next = lookup->next;
+ cds_lfht_next(ht, iter);
+}
+
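
/*
 * A minimal caller-side sketch of the new iterator API (not part of
 * the patch): walk every node in the table.  Assumptions: the caller
 * includes <urcu.h> and is registered with the RCU flavour backing
 * "ht"; "struct mynode" and its embedded "lfht_node" member are
 * hypothetical; direct access to iter.node mirrors how this patch
 * fills the iterator.
 */
struct mynode {				/* hypothetical caller-side type */
	int value;
	struct cds_lfht_node lfht_node;	/* chaining node for the hash table */
};

static
void example_walk_all(struct cds_lfht *ht)
{
	struct cds_lfht_iter iter;

	rcu_read_lock();
	for (cds_lfht_first(ht, &iter); iter.node != NULL;
			cds_lfht_next(ht, &iter)) {
		struct mynode *m;

		m = caa_container_of(iter.node, struct mynode, lfht_node);
		(void) m;	/* read-only use of *m is valid until rcu_read_unlock() */
	}
	rcu_read_unlock();
}
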
void cds_lfht_add(struct cds_lfht *ht, struct cds_lfht_node *node)
{
unsigned long hash, size;
int ret;
/* Wait for in-flight resize operations to complete */
- CMM_STORE_SHARED(ht->in_progress_destroy, 1);
+ _CMM_STORE_SHARED(ht->in_progress_destroy, 1);
+ cmm_smp_mb(); /* Store destroy before load resize */
while (uatomic_read(&ht->in_progress_resize))
poll(NULL, 0, 100); /* wait for 100ms */
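/*
 * Ordering rationale, following the paired barrier comments: this path
 * stores in_progress_destroy, issues cmm_smp_mb(), then loads
 * in_progress_resize; the lazy-resize paths increment
 * in_progress_resize, issue cmm_smp_mb(), then load
 * in_progress_destroy.  At least one side therefore observes the
 * other's store: either the wait loop above sees the queued resize and
 * waits for it, or the resize path sees the destroy flag, backs out
 * and decrements the count.
 */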
ret = cds_lfht_delete_dummy(ht);
* Resize table, re-do if the target size has changed under us.
*/
do {
+ assert(uatomic_read(&ht->in_progress_resize));
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy))
+ break;
ht->t.resize_initiated = 1;
old_size = ht->t.size;
new_size = CMM_LOAD_SHARED(ht->t.resize_target);
cmm_smp_mb();
if (!CMM_LOAD_SHARED(ht->t.resize_initiated) && size < target_size) {
uatomic_inc(&ht->in_progress_resize);
- cmm_smp_mb(); /* increment resize count before calling it */
+ cmm_smp_mb(); /* increment resize count before load destroy */
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
+ uatomic_dec(&ht->in_progress_resize);
+ return;
+ }
work = malloc(sizeof(*work));
work->ht = ht;
ht->cds_lfht_call_rcu(&work->head, do_resize_cb);
cmm_smp_mb();
if (!CMM_LOAD_SHARED(ht->t.resize_initiated)) {
uatomic_inc(&ht->in_progress_resize);
- cmm_smp_mb(); /* increment resize count before calling it */
+ cmm_smp_mb(); /* increment resize count before load destroy */
+ if (CMM_LOAD_SHARED(ht->in_progress_destroy)) {
+ uatomic_dec(&ht->in_progress_resize);
+ return;
+ }
work = malloc(sizeof(*work));
work->ht = ht;
ht->cds_lfht_call_rcu(&work->head, do_resize_cb);