X-Git-Url: https://git.lttng.org/?a=blobdiff_plain;f=rculfhash.c;h=3120d4585995c030a999903b0667d803ebf5b877;hb=cc4fcb1069347d2b5fbf71a61e7c1712df9a177a;hp=5baa2d415b34483486ece296cb9c511ae2cf0714;hpb=479c8a32f3a1c319e9e1a18692b47992d393ee38;p=urcu.git

diff --git a/rculfhash.c b/rculfhash.c
index 5baa2d4..3120d45 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -67,6 +67,7 @@ struct rcu_ht {
 	ht_compare_fct compare_fct;
 	unsigned long hash_seed;
 	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
+	unsigned int in_progress_resize;
 	void (*ht_call_rcu)(struct rcu_head *head,
 		      void (*func)(struct rcu_head *head));
 };
@@ -215,32 +216,36 @@ void _ht_gc_bucket(struct rcu_ht_node *dummy, struct rcu_ht_node *node)
 	for (;;) {
 		iter_prev = dummy;
 		/* We can always skip the dummy node initially */
-		iter = rcu_dereference(iter_prev->next);
-		assert(iter_prev->reverse_hash <= node->reverse_hash);
+		iter = rcu_dereference(iter_prev->p.next);
+		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
 		for (;;) {
 			if (unlikely(!iter))
 				return;
-			if (clear_flag(iter)->reverse_hash > node->reverse_hash)
+			if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
 				return;
-			next = rcu_dereference(clear_flag(iter)->next);
+			next = rcu_dereference(clear_flag(iter)->p.next);
 			if (is_removed(next))
 				break;
 			iter_prev = iter;
 			iter = next;
 		}
 		assert(!is_removed(iter));
-		(void) uatomic_cmpxchg(&iter_prev->next, iter, clear_flag(next));
+		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, clear_flag(next));
 	}
 }
 
 static
-int _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node,
-	    int unique)
+struct rcu_ht_node *_ht_add(struct rcu_ht *ht, struct rcu_table *t,
+			    struct rcu_ht_node *node, int unique)
 {
 	struct rcu_ht_node *iter_prev, *dummy, *iter, *next;
+	unsigned long hash;
 
-	if (!t->size)
-		return 0;
+	if (!t->size) {
+		assert(node->p.dummy);
+		return node;	/* Initial first add (head) */
+	}
+	hash = bit_reverse_ulong(node->p.reverse_hash);
 	for (;;) {
 		uint32_t chain_len = 0;
 
@@ -248,20 +253,26 @@ int _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node,
 		 * iter_prev points to the non-removed node prior to the
 		 * insert location.
 		 */
-		iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
+		iter_prev = rcu_dereference(t->tbl[hash & (t->size - 1)]);
 		/* We can always skip the dummy node initially */
-		iter = rcu_dereference(iter_prev->next);
-		assert(iter_prev->reverse_hash <= node->reverse_hash);
+		iter = rcu_dereference(iter_prev->p.next);
+		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
 		for (;;) {
 			if (unlikely(!iter))
 				goto insert;
-			if (clear_flag(iter)->reverse_hash > node->reverse_hash)
+			if (clear_flag(iter)->p.reverse_hash > node->p.reverse_hash)
 				goto insert;
-			next = rcu_dereference(clear_flag(iter)->next);
+			next = rcu_dereference(clear_flag(iter)->p.next);
 			if (is_removed(next))
-				goto gc;
+				goto gc_node;
+			if (unique
+			    && !clear_flag(iter)->p.dummy
+			    && !ht->compare_fct(node->key, node->key_len,
+						clear_flag(iter)->key,
+						clear_flag(iter)->key_len))
+				return clear_flag(iter);
 			/* Only account for identical reverse hash once */
-			if (iter_prev->reverse_hash != clear_flag(iter)->reverse_hash)
+			if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash)
 				check_resize(ht, t, ++chain_len);
 			iter_prev = clear_flag(iter);
 			iter = next;
 		}
@@ -270,23 +281,22 @@ int _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node,
 		assert(node != clear_flag(iter));
 		assert(!is_removed(iter_prev));
 		assert(iter_prev != node);
-		node->next = iter;
-		if (uatomic_cmpxchg(&iter_prev->next, iter,
+		node->p.next = iter;
+		if (uatomic_cmpxchg(&iter_prev->p.next, iter,
 				    node) != iter)
 			continue;	/* retry */
 		else
 			goto gc_end;
-	gc:
-		/* Garbage collect logically removed nodes in the bucket */
-		dummy = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
-		_ht_gc_bucket(dummy, node);
+	gc_node:
+		assert(!is_removed(iter));
+		(void) uatomic_cmpxchg(&iter_prev->p.next, iter, clear_flag(next));
 		/* retry */
 	}
 gc_end:
 	/* Garbage collect logically removed nodes in the bucket */
-	dummy = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
+	dummy = rcu_dereference(t->tbl[hash & (t->size - 1)]);
 	_ht_gc_bucket(dummy, node);
-	return 0;
+	return node;
 }
 
 static
@@ -294,15 +304,16 @@ int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
 {
 	struct rcu_ht_node *dummy, *next, *old;
 	int flagged = 0;
+	unsigned long hash;
 
 	/* logically delete the node */
-	old = rcu_dereference(node->next);
+	old = rcu_dereference(node->p.next);
 	do {
 		next = old;
 		if (is_removed(next))
 			goto end;
-		assert(!node->dummy);
-		old = uatomic_cmpxchg(&node->next, next,
+		assert(!node->p.dummy);
+		old = uatomic_cmpxchg(&node->p.next, next,
 				      flag_removed(next));
 	} while (old != next);
@@ -314,7 +325,8 @@ int _ht_remove(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
 	 * the node, and remove it (along with any other logically removed node)
 	 * if found.
 	 */
-	dummy = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
+	hash = bit_reverse_ulong(node->p.reverse_hash);
+	dummy = rcu_dereference(t->tbl[hash & (t->size - 1)]);
 	_ht_gc_bucket(dummy, node);
 end:
 	/*
@@ -322,7 +334,7 @@ end:
 	 * removed the node from the hash.
 	 */
 	if (flagged) {
-		assert(is_removed(rcu_dereference(node->next)));
+		assert(is_removed(rcu_dereference(node->p.next)));
 		return 0;
 	} else
 		return -ENOENT;
@@ -339,10 +351,9 @@ void init_table(struct rcu_ht *ht, struct rcu_table *t,
 		/* Update table size when power of two */
 		if (i != 0 && !(i & (i - 1)))
 			t->size = i;
-		t->tbl[i] = calloc(1, sizeof(struct rcu_ht_node));
-		t->tbl[i]->dummy = 1;
-		t->tbl[i]->hash = i;
-		t->tbl[i]->reverse_hash = bit_reverse_ulong(i);
+		t->tbl[i] = calloc(1, sizeof(struct _rcu_ht_node));
+		t->tbl[i]->p.dummy = 1;
+		t->tbl[i]->p.reverse_hash = bit_reverse_ulong(i);
 		(void) _ht_add(ht, t, t->tbl[i], 0);
 	}
 	t->resize_target = t->size = end;
@@ -363,6 +374,7 @@ struct rcu_ht *ht_new(ht_hash_fct hash_fct,
 	ht->compare_fct = compare_fct;
 	ht->hash_seed = hash_seed;
 	ht->ht_call_rcu = ht_call_rcu;
+	ht->in_progress_resize = 0;
 	/* this mutex should not nest in read-side C.S. */
 	pthread_mutex_init(&ht->resize_mutex, NULL);
 	ht->t = calloc(1, sizeof(struct rcu_table)
@@ -388,38 +400,40 @@ struct rcu_ht_node *ht_lookup(struct rcu_ht *ht, void *key, size_t key_len)
 	for (;;) {
 		if (unlikely(!node))
 			break;
-		if (unlikely(node->reverse_hash > reverse_hash)) {
+		if (unlikely(node->p.reverse_hash > reverse_hash)) {
 			node = NULL;
 			break;
 		}
-		if (!ht->compare_fct(node->key, node->key_len, key, key_len)) {
-			if (likely(!is_removed(rcu_dereference(node->next)))
-			    && likely(!node->dummy))
+		if (likely(!is_removed(rcu_dereference(node->p.next)))
+		    && !node->p.dummy
+		    && likely(!ht->compare_fct(node->key, node->key_len, key, key_len))) {
 				break;
 		}
-		node = clear_flag(rcu_dereference(node->next));
+		node = clear_flag(rcu_dereference(node->p.next));
 	}
-	assert(!node || !node->dummy);
+	assert(!node || !node->p.dummy);
 	return node;
 }
 
 void ht_add(struct rcu_ht *ht, struct rcu_ht_node *node)
 {
 	struct rcu_table *t;
+	unsigned long hash;
 
-	node->hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
-	node->reverse_hash = bit_reverse_ulong((unsigned long) node->hash);
+	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
+	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
 
 	t = rcu_dereference(ht->t);
 	(void) _ht_add(ht, t, node, 0);
 }
 
-int ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
+struct rcu_ht_node *ht_add_unique(struct rcu_ht *ht, struct rcu_ht_node *node)
 {
 	struct rcu_table *t;
+	unsigned long hash;
 
-	node->hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
-	node->reverse_hash = bit_reverse_ulong((unsigned long) node->hash);
+	hash = ht->hash_fct(node->key, node->key_len, ht->hash_seed);
+	node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
 
 	t = rcu_dereference(ht->t);
 	return _ht_add(ht, t, node, 1);
@@ -444,14 +458,14 @@ int ht_delete_dummy(struct rcu_ht *ht)
 	/* Check that the table is empty */
 	node = t->tbl[0];
 	do {
-		if (!node->dummy)
+		if (!node->p.dummy)
 			return -EPERM;
-		node = node->next;
+		node = node->p.next;
 		assert(!is_removed(node));
 	} while (node);
 	/* Internal sanity check: all nodes left should be dummy */
 	for (i = 0; i < t->size; i++) {
-		assert(t->tbl[i]->dummy);
+		assert(t->tbl[i]->p.dummy);
 		free(t->tbl[i]);
 	}
 	return 0;
@@ -465,6 +479,9 @@ int ht_destroy(struct rcu_ht *ht)
 {
 	int ret;
 
+	/* Wait for in-flight resize operations to complete */
+	while (uatomic_read(&ht->in_progress_resize))
+		poll(NULL, 0, 100);	/* wait for 100ms */
 	ret = ht_delete_dummy(ht);
 	if (ret)
 		return ret;
@@ -487,11 +504,11 @@ void ht_count_nodes(struct rcu_ht *ht,
 	/* Check that the table is empty */
 	node = rcu_dereference(t->tbl[0]);
 	do {
-		next = rcu_dereference(node->next);
+		next = rcu_dereference(node->p.next);
 		if (is_removed(next)) {
-			assert(!node->dummy);
+			assert(!node->p.dummy);
 			(*removed)++;
-		} else if (!node->dummy)
+		} else if (!node->p.dummy)
 			(*count)++;
 		node = clear_flag(next);
 	} while (node);
@@ -564,6 +581,8 @@ void do_resize_cb(struct rcu_head *head)
 	_do_ht_resize(ht);
 	pthread_mutex_unlock(&ht->resize_mutex);
 	free(work);
+	cmm_smp_mb();	/* finish resize before decrement */
+	uatomic_dec(&ht->in_progress_resize);
 }
 
 static
@@ -574,6 +593,8 @@ void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth)
 
 	target_size = resize_target_update(t, growth);
 	if (!CMM_LOAD_SHARED(t->resize_initiated) && t->size < target_size) {
+		uatomic_inc(&ht->in_progress_resize);
+		cmm_smp_mb();	/* increment resize count before calling it */
 		work = malloc(sizeof(*work));
 		work->ht = ht;
 		ht->ht_call_rcu(&work->head, do_resize_cb);
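
Note on the resize lifetime changes above: ht_destroy() must not tear the
table down while a do_resize_cb() callback scheduled through ht_call_rcu()
is still queued or running. The commit closes that window with the
in_progress_resize counter: ht_resize_lazy() increments it, with a
cmm_smp_mb() so the increment is published before the callback is queued;
do_resize_cb() decrements it after a cmm_smp_mb() once the resize work is
done; and ht_destroy() spins on poll() until the counter drains. Below is a
minimal self-contained sketch of the same pattern, using C11 atomics and a
plain thread in place of the urcu uatomic_* primitives and ht_call_rcu();
the names (struct ht, schedule_resize, resize_cb, ht_destroy_wait) are
illustrative only, not the library's API.

/*
 * Sketch: count in-flight deferred work so destroy can wait for it.
 * Assumes C11 <stdatomic.h>; link with -lpthread.
 */
#include <poll.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct ht {
	atomic_uint in_progress_resize;	/* queued + running resizes */
};

static void *resize_cb(void *arg)
{
	struct ht *ht = arg;

	/* ... the actual resize work would happen here ... */

	/* Release: resize effects must be visible before the decrement,
	 * mirroring the cmm_smp_mb() before uatomic_dec() in the diff. */
	atomic_fetch_sub_explicit(&ht->in_progress_resize, 1,
				  memory_order_release);
	return NULL;
}

static void schedule_resize(struct ht *ht)
{
	pthread_t tid;

	/* Count the resize before queuing it, so a concurrent destroy
	 * cannot miss it (cmm_smp_mb() after uatomic_inc() in the diff). */
	atomic_fetch_add_explicit(&ht->in_progress_resize, 1,
				  memory_order_acq_rel);
	pthread_create(&tid, NULL, resize_cb, ht);
	pthread_detach(tid);
}

static void ht_destroy_wait(struct ht *ht)
{
	/* Same busy-wait as ht_destroy() in the diff: recheck every 100ms. */
	while (atomic_load_explicit(&ht->in_progress_resize,
				    memory_order_acquire))
		poll(NULL, 0, 100);
	/* Only now is it safe to free the table. */
}

int main(void)
{
	struct ht ht = { .in_progress_resize = 0 };

	schedule_resize(&ht);
	ht_destroy_wait(&ht);
	printf("all resizes drained\n");
	return 0;
}

The increment-before-queue ordering is what makes the counter sound: once
the increment is published, a destroyer either observes a non-zero counter
and waits, or the callback has already completed and decremented, so the
table is never freed under a live resize.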
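
A second detail worth calling out: the commit drops the cached node->hash
field, and _ht_add()/_ht_remove() instead recompute
hash = bit_reverse_ulong(node->p.reverse_hash). This works because bit
reversal is an involution: reversing the reversed hash returns the original
value, so the bucket index hash & (t->size - 1) is always recoverable from
the stored reverse_hash. A small stand-alone check of that property follows;
this bit_reverse_ulong is a naive loop written for illustration, not the
library's implementation.

#include <assert.h>
#include <limits.h>
#include <stdio.h>

/* Reverse all bits of an unsigned long (illustrative, O(bits) loop). */
static unsigned long bit_reverse_ulong(unsigned long v)
{
	unsigned long r = 0;
	unsigned int i;

	for (i = 0; i < sizeof(v) * CHAR_BIT; i++) {
		r = (r << 1) | (v & 1);
		v >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned long hash = 0xdeadbeefUL;
	unsigned long reverse_hash = bit_reverse_ulong(hash);

	/* Involution: reversing twice yields the original hash, which is
	 * what hash = bit_reverse_ulong(node->p.reverse_hash) relies on. */
	assert(bit_reverse_ulong(reverse_hash) == hash);

	/* The recovered hash indexes the bucket array as before. */
	printf("bucket for size 4096: %lu\n",
	       bit_reverse_ulong(reverse_hash) & (4096 - 1));
	return 0;
}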