From dc0e9798320a244262c504c47f34af058bc1ebb3 Mon Sep 17 00:00:00 2001
From: Mathieu Desnoyers
Date: Thu, 6 Jun 2013 19:43:37 -0400
Subject: [PATCH] rcuja: document that destroy free_node_cb does not need to
 wait for Q.S.

Signed-off-by: Mathieu Desnoyers
---
 rcuja/rcuja-internal.h     |  4 +-
 rcuja/rcuja-shadow-nodes.c | 86 ++++++++++++++++++++------------------
 rcuja/rcuja.c              |  9 ++--
 urcu/rcuja.h               |  8 ++--
 4 files changed, 58 insertions(+), 49 deletions(-)

diff --git a/rcuja/rcuja-internal.h b/rcuja/rcuja-internal.h
index d4bc32e..2d3f131 100644
--- a/rcuja/rcuja-internal.h
+++ b/rcuja/rcuja-internal.h
@@ -174,7 +174,7 @@ unsigned long ja_node_type(struct cds_ja_inode_flag *node);
 __attribute__((visibility("protected")))
 void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node,
 		struct cds_ja_inode_flag *node_flag,
-		void (*rcu_free_node)(struct cds_ja_node *node));
+		void (*free_node_cb)(struct cds_ja_node *node));
 
 __attribute__((visibility("protected")))
 struct cds_ja_shadow_node *rcuja_shadow_lookup_lock(struct cds_lfht *ht,
@@ -204,7 +204,7 @@ int rcuja_shadow_clear(struct cds_lfht *ht,
 __attribute__((visibility("protected")))
 void rcuja_shadow_prune(struct cds_lfht *ht,
 		unsigned int flags,
-		void (*rcu_free_node)(struct cds_ja_node *node));
+		void (*free_node_cb)(struct cds_ja_node *node));
 
 __attribute__((visibility("protected")))
 struct cds_lfht *rcuja_create_ht(const struct rcu_flavor_struct *flavor);
diff --git a/rcuja/rcuja-shadow-nodes.c b/rcuja/rcuja-shadow-nodes.c
index 917c6a2..718c082 100644
--- a/rcuja/rcuja-shadow-nodes.c
+++ b/rcuja/rcuja-shadow-nodes.c
@@ -340,26 +340,27 @@ int rcuja_shadow_clear(struct cds_lfht *ht,
 	 * don't let RCU JA use a node being removed.
 	 */
 	ret = cds_lfht_del(ht, lookup_node);
-	if (!ret) {
-		if ((flags & RCUJA_SHADOW_CLEAR_FREE_NODE)
-				&& shadow_node->level) {
-			if (flags & RCUJA_SHADOW_CLEAR_FREE_LOCK) {
-				flavor->update_call_rcu(&shadow_node->head,
-					free_shadow_node_and_node_and_lock);
-			} else {
-				flavor->update_call_rcu(&shadow_node->head,
-					free_shadow_node_and_node);
-			}
+	if (ret)
+		goto unlock;
+	if ((flags & RCUJA_SHADOW_CLEAR_FREE_NODE)
+			&& shadow_node->level) {
+		if (flags & RCUJA_SHADOW_CLEAR_FREE_LOCK) {
+			flavor->update_call_rcu(&shadow_node->head,
+				free_shadow_node_and_node_and_lock);
 		} else {
-			if (flags & RCUJA_SHADOW_CLEAR_FREE_LOCK) {
-				flavor->update_call_rcu(&shadow_node->head,
-					free_shadow_node_and_lock);
-			} else {
-				flavor->update_call_rcu(&shadow_node->head,
-					free_shadow_node);
-			}
+			flavor->update_call_rcu(&shadow_node->head,
+				free_shadow_node_and_node);
+		}
+	} else {
+		if (flags & RCUJA_SHADOW_CLEAR_FREE_LOCK) {
+			flavor->update_call_rcu(&shadow_node->head,
+				free_shadow_node_and_lock);
+		} else {
+			flavor->update_call_rcu(&shadow_node->head,
+				free_shadow_node);
 		}
 	}
+unlock:
 	if (lookup_shadow) {
 		lockret = pthread_mutex_unlock(shadow_node->lock);
 		assert(!lockret);
@@ -377,7 +378,7 @@ rcu_unlock:
 __attribute__((visibility("protected")))
 void rcuja_shadow_prune(struct cds_lfht *ht,
 		unsigned int flags,
-		void (*rcu_free_node)(struct cds_ja_node *node))
+		void (*free_node_cb)(struct cds_ja_node *node))
 {
 	const struct rcu_flavor_struct *flavor;
 	struct cds_ja_shadow_node *shadow_node;
@@ -385,37 +386,42 @@ void rcuja_shadow_prune(struct cds_lfht *ht,
 	int ret, lockret;
 
 	flavor = cds_lfht_rcu_flavor(ht);
+	/*
+	 * Read-side lock is needed to ensure hash table node existence
+	 * vs concurrent resize.
+	 */
 	flavor->read_lock();
 	cds_lfht_for_each_entry(ht, &iter, shadow_node, ht_node) {
 		lockret = pthread_mutex_lock(shadow_node->lock);
 		assert(!lockret);
 
 		ret = cds_lfht_del(ht, &shadow_node->ht_node);
-		if (!ret) {
-			if ((flags & RCUJA_SHADOW_CLEAR_FREE_NODE)
-					&& shadow_node->level) {
-				if (shadow_node->level == shadow_node->ja->tree_depth - 1) {
-					rcuja_free_all_children(shadow_node,
-							shadow_node->node_flag,
-							rcu_free_node);
-				}
-				if (flags & RCUJA_SHADOW_CLEAR_FREE_LOCK) {
-					flavor->update_call_rcu(&shadow_node->head,
-						free_shadow_node_and_node_and_lock);
-				} else {
-					flavor->update_call_rcu(&shadow_node->head,
-						free_shadow_node_and_node);
-				}
+		if (ret)
+			goto unlock;
+		if ((flags & RCUJA_SHADOW_CLEAR_FREE_NODE)
+				&& shadow_node->level) {
+			if (shadow_node->level == shadow_node->ja->tree_depth - 1) {
+				rcuja_free_all_children(shadow_node,
+						shadow_node->node_flag,
+						free_node_cb);
+			}
+			if (flags & RCUJA_SHADOW_CLEAR_FREE_LOCK) {
+				flavor->update_call_rcu(&shadow_node->head,
+					free_shadow_node_and_node_and_lock);
+			} else {
+				flavor->update_call_rcu(&shadow_node->head,
+					free_shadow_node_and_node);
+			}
+		} else {
+			if (flags & RCUJA_SHADOW_CLEAR_FREE_LOCK) {
+				flavor->update_call_rcu(&shadow_node->head,
+					free_shadow_node_and_lock);
 			} else {
-				if (flags & RCUJA_SHADOW_CLEAR_FREE_LOCK) {
-					flavor->update_call_rcu(&shadow_node->head,
-						free_shadow_node_and_lock);
-				} else {
-					flavor->update_call_rcu(&shadow_node->head,
-						free_shadow_node);
-				}
+				flavor->update_call_rcu(&shadow_node->head,
+					free_shadow_node);
 			}
 		}
+	unlock:
 		lockret = pthread_mutex_unlock(shadow_node->lock);
 		assert(!lockret);
 	}
diff --git a/rcuja/rcuja.c b/rcuja/rcuja.c
index ca72e04..81ec0a8 100644
--- a/rcuja/rcuja.c
+++ b/rcuja/rcuja.c
@@ -2630,11 +2630,12 @@ int ja_final_checks(struct cds_ja *ja)
 }
 
 /*
- * There should be no more concurrent add to the judy array while it is
- * being destroyed (ensured by the caller).
+ * There should be no more concurrent add, delete, nor look-up performed
+ * on the Judy array while it is being destroyed (ensured by the
+ * caller).
  */
 int cds_ja_destroy(struct cds_ja *ja,
-		void (*rcu_free_node)(struct cds_ja_node *node))
+		void (*free_node_cb)(struct cds_ja_node *node))
 {
 	const struct rcu_flavor_struct *flavor;
 	int ret;
@@ -2642,7 +2643,7 @@ int cds_ja_destroy(struct cds_ja *ja,
 	flavor = cds_lfht_rcu_flavor(ja->ht);
 	rcuja_shadow_prune(ja->ht,
 		RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK,
-		rcu_free_node);
+		free_node_cb);
 	flavor->thread_offline();
 	ret = rcuja_delete_ht(ja->ht);
 	if (ret)
diff --git a/urcu/rcuja.h b/urcu/rcuja.h
index 2a0de28..421e838 100644
--- a/urcu/rcuja.h
+++ b/urcu/rcuja.h
@@ -143,11 +143,13 @@ struct cds_ja *cds_ja_new(unsigned int key_bits)
  * @rcu_free_node_cb: callback performing memory free of leftover nodes.
  *
  * Returns 0 on success, negative error value on error.
- * The @rcu_free_node_cb callback should internally wait for a grace
- * period before freeing the node.
+ * There should be no more concurrent add, delete, nor look-up performed
+ * on the Judy array while it is being destroyed (ensured by the caller).
+ * There is no need for the @rcu_free_node_cb callback to wait for grace
+ * periods, since there are no more concurrent users of the Judy array.
  */
 int cds_ja_destroy(struct cds_ja *ja,
-		void (*rcu_free_node_cb)(struct cds_ja_node *node));
+		void (*free_node_cb)(struct cds_ja_node *node));
 
 /*
  * Iterate through duplicates returned by cds_ja_lookup*()
-- 
2.34.1
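
For illustration, here is a minimal, hypothetical usage sketch of the
cds_ja_destroy() semantics documented by this patch. It is not part of the
patch; it assumes the usual liburcu pattern of embedding a struct cds_ja_node
in a user-defined structure (my_item and free_item_cb are made-up names) and
recovering the enclosing structure with caa_container_of(). Because the
caller guarantees there are no remaining concurrent users of the Judy array
at destroy time, the callback may free leftover nodes immediately instead of
deferring reclamation through call_rcu().

	#include <stdlib.h>
	#include <urcu/rcuja.h>
	#include <urcu/compiler.h>	/* caa_container_of() */

	/* Assumed user node type embedding a struct cds_ja_node. */
	struct my_item {
		struct cds_ja_node ja_node;
		int payload;
	};

	/*
	 * Destroy-time callback: no more concurrent add/delete/lookup on
	 * the Judy array, so leftover nodes can be freed right away,
	 * without waiting for a grace period.
	 */
	static void free_item_cb(struct cds_ja_node *node)
	{
		struct my_item *item;

		item = caa_container_of(node, struct my_item, ja_node);
		free(item);
	}

	static int teardown(struct cds_ja *ja)
	{
		/* Caller ensures all readers/updaters are done with @ja. */
		return cds_ja_destroy(ja, free_item_cb);
	}

By contrast, nodes removed from a live Judy array that readers may still
reference must, as for any RCU data structure, only be reclaimed after a
grace period; the relaxed rule above applies only to the destroy path.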