projects
/
userspace-rcu.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
tree
raw
|
inline
| side by side
rculfhash: add assertion in path compression
[userspace-rcu.git]
/
rculfhash.c
diff --git
a/rculfhash.c
b/rculfhash.c
index 4b781f7b9012273b24dfb93e99afa0d664032daf..2565099d2c12b633dce919e927363b10ff63912c 100644
(file)
--- a/
rculfhash.c
+++ b/
rculfhash.c
@@
-672,6
+672,13
@@
void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
/* We can always skip the dummy node initially */
iter = rcu_dereference(iter_prev->p.next);
assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+ /*
+ * We should never be called with dummy (start of chain)
+ * and logically removed node (end of path compression
+ * marker) being the actual same node. This would be a
+ * bug in the algorithm implementation.
+ */
+ assert(dummy != node);
for (;;) {
if (unlikely(!clear_flag(iter)))
return;
@@
-890,8
+897,12
@@
void fini_table(struct cds_lfht *ht, struct rcu_table *t,
len = !i ? 1 : 1UL << (i - 1);
dbg_printf("fini order %lu len: %lu\n", i, len);
- /* Update table size */
- t->size = 1UL << (i - 1);
+ /*
+ * Update table size. Need to shrink this table prior to
+ * removal so gc lookups use non-logically-removed dummy
+ * nodes.
+ */
+ t->size = (i == 1) ? 0 : 1UL << (i - 2);
/* Unlink */
for (j = 0; j < len; j++) {
struct cds_lfht_node *fini_node =
This page took
0.023074 seconds
and
4
git commands to generate.