X-Git-Url: https://git.lttng.org/?a=blobdiff_plain;ds=sidebyside;f=rculfhash.c;h=2565099d2c12b633dce919e927363b10ff63912c;hb=bd4db1530ed788ea97c8a9e4c0d04ef757181ad6;hp=4b781f7b9012273b24dfb93e99afa0d664032daf;hpb=c90201ac0cbb9b552a2618eaa00de1f80d4248f1;p=userspace-rcu.git

diff --git a/rculfhash.c b/rculfhash.c
index 4b781f7..2565099 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -672,6 +672,13 @@ void _cds_lfht_gc_bucket(struct cds_lfht_node *dummy, struct cds_lfht_node *node
 		/* We can always skip the dummy node initially */
 		iter = rcu_dereference(iter_prev->p.next);
 		assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+		/*
+		 * We should never be called with dummy (start of chain)
+		 * and logically removed node (end of path compression
+		 * marker) being the actual same node. This would be a
+		 * bug in the algorithm implementation.
+		 */
+		assert(dummy != node);
 		for (;;) {
 			if (unlikely(!clear_flag(iter)))
 				return;
@@ -890,8 +897,12 @@ void fini_table(struct cds_lfht *ht, struct rcu_table *t,
 		len = !i ? 1 : 1UL << (i - 1);
 		dbg_printf("fini order %lu len: %lu\n", i, len);
-		/* Update table size */
-		t->size = 1UL << (i - 1);
+		/*
+		 * Update table size. Need to shrink this table prior to
+		 * removal so gc lookups use non-logically-removed dummy
+		 * nodes.
+		 */
+		t->size = (i == 1) ? 0 : 1UL << (i - 2);
 		/* Unlink */
 		for (j = 0; j < len; j++) {
 			struct cds_lfht_node *fini_node =
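
The second hunk shrinks t->size to half of the current order's size (or to zero when order 1 is being torn down) before that order's dummy nodes are unlinked; the design intent, per the new comment, is that gc lookups running during the unlink resolve to dummy nodes that are not themselves logically removed. The sketch below is not part of the patch: it is a standalone, hypothetical illustration of the size arithmetic only. The loop bounds, variable names, and output format are invented for this example; only the two size expressions are taken from the diff above.

/*
 * Illustration only, not part of the patch: for a few orders, print
 * the size the old expression left in place during unlink versus the
 * size the patched expression shrinks the table to beforehand.
 */
#include <stdio.h>

int main(void)
{
	unsigned long i;

	for (i = 1; i <= 5; i++) {
		/* old behaviour: t->size = 1UL << (i - 1) */
		unsigned long before = 1UL << (i - 1);
		/* patched behaviour: shrink before unlinking order i */
		unsigned long after = (i == 1) ? 0 : 1UL << (i - 2);

		printf("order %lu: old size %lu, shrunk size %lu\n",
		       i, before, after);
	}
	return 0;
}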