Make struct lfht_test_node contain only basic fields.
Let the user take responsibility for managing and computing the
<key> (hash set) or <key, value> (hash map).
[ Edit by Mathieu Desnoyers: rebased on top of commit
0422d92c2d658f6093b8209f75808efd2109a110 ]
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*/
struct rcu_level {
/* Note: manually update allocation length when adding a field */
*/
struct rcu_level {
/* Note: manually update allocation length when adding a field */
- struct _cds_lfht_node nodes[0];
+ struct cds_lfht_node nodes[0];
static
void _cds_lfht_add(struct cds_lfht *ht,
cds_lfht_match_fct match,
static
void _cds_lfht_add(struct cds_lfht *ht,
cds_lfht_match_fct match,
unsigned long size,
struct cds_lfht_node *node,
struct cds_lfht_iter *unique_ret,
unsigned long size,
struct cds_lfht_node *node,
struct cds_lfht_iter *unique_ret,
-struct _cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
+struct cds_lfht_node *lookup_bucket(struct cds_lfht *ht, unsigned long size,
unsigned long hash)
{
unsigned long index, order;
unsigned long hash)
{
unsigned long index, order;
for (;;) {
iter_prev = dummy;
/* We can always skip the dummy node initially */
for (;;) {
iter_prev = dummy;
/* We can always skip the dummy node initially */
- iter = rcu_dereference(iter_prev->p.next);
+ iter = rcu_dereference(iter_prev->next);
assert(!is_removed(iter));
assert(!is_removed(iter));
- assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+ assert(iter_prev->reverse_hash <= node->reverse_hash);
/*
* We should never be called with dummy (start of chain)
* and logically removed node (end of path compression
/*
* We should never be called with dummy (start of chain)
* and logically removed node (end of path compression
for (;;) {
if (caa_unlikely(is_end(iter)))
return;
for (;;) {
if (caa_unlikely(is_end(iter)))
return;
- if (caa_likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
+ if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
- next = rcu_dereference(clear_flag(iter)->p.next);
+ next = rcu_dereference(clear_flag(iter)->next);
if (caa_likely(is_removed(next)))
break;
iter_prev = clear_flag(iter);
if (caa_likely(is_removed(next)))
break;
iter_prev = clear_flag(iter);
new_next = flag_dummy(clear_flag(next));
else
new_next = clear_flag(next);
new_next = flag_dummy(clear_flag(next));
else
new_next = clear_flag(next);
- (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
+ (void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
struct cds_lfht_node *old_next,
struct cds_lfht_node *new_node)
{
struct cds_lfht_node *old_next,
struct cds_lfht_node *new_node)
{
- struct cds_lfht_node *dummy, *ret_next;
- struct _cds_lfht_node *lookup;
+ struct cds_lfht_node *bucket, *ret_next;
if (!old_node) /* Return -ENOENT if asked to replace NULL node */
return -ENOENT;
if (!old_node) /* Return -ENOENT if asked to replace NULL node */
return -ENOENT;
}
assert(!is_dummy(old_next));
assert(new_node != clear_flag(old_next));
}
assert(!is_dummy(old_next));
assert(new_node != clear_flag(old_next));
- new_node->p.next = clear_flag(old_next);
+ new_node->next = clear_flag(old_next);
/*
* Here is the whole trick for lock-free replace: we add
* the replacement node _after_ the node we want to
/*
* Here is the whole trick for lock-free replace: we add
* the replacement node _after_ the node we want to
* to the removal flag and see the new node, or use
* the old node, but will not see the new one.
*/
* to the removal flag and see the new node, or use
* the old node, but will not see the new one.
*/
- ret_next = uatomic_cmpxchg(&old_node->p.next,
+ ret_next = uatomic_cmpxchg(&old_node->next,
old_next, flag_removed(new_node));
if (ret_next == old_next)
break; /* We performed the replacement. */
old_next, flag_removed(new_node));
if (ret_next == old_next)
break; /* We performed the replacement. */
* lookup for the node, and remove it (along with any other
* logically removed node) if found.
*/
* lookup for the node, and remove it (along with any other
* logically removed node) if found.
*/
- lookup = lookup_bucket(ht, size, bit_reverse_ulong(old_node->p.reverse_hash));
- dummy = (struct cds_lfht_node *) lookup;
- _cds_lfht_gc_bucket(dummy, new_node);
+ bucket = lookup_bucket(ht, size, bit_reverse_ulong(old_node->reverse_hash));
+ _cds_lfht_gc_bucket(bucket, new_node);
- assert(is_removed(rcu_dereference(old_node->p.next)));
+ assert(is_removed(rcu_dereference(old_node->next)));
static
void _cds_lfht_add(struct cds_lfht *ht,
cds_lfht_match_fct match,
static
void _cds_lfht_add(struct cds_lfht *ht,
cds_lfht_match_fct match,
unsigned long size,
struct cds_lfht_node *node,
struct cds_lfht_iter *unique_ret,
unsigned long size,
struct cds_lfht_node *node,
struct cds_lfht_iter *unique_ret,
{
struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
*return_node;
{
struct cds_lfht_node *iter_prev, *iter, *next, *new_node, *new_next,
*return_node;
- struct _cds_lfht_node *lookup;
+ struct cds_lfht_node *bucket;
assert(!is_dummy(node));
assert(!is_removed(node));
assert(!is_dummy(node));
assert(!is_removed(node));
- lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
+ bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
for (;;) {
uint32_t chain_len = 0;
for (;;) {
uint32_t chain_len = 0;
* iter_prev points to the non-removed node prior to the
* insert location.
*/
* iter_prev points to the non-removed node prior to the
* insert location.
*/
- iter_prev = (struct cds_lfht_node *) lookup;
/* We can always skip the dummy node initially */
/* We can always skip the dummy node initially */
- iter = rcu_dereference(iter_prev->p.next);
- assert(iter_prev->p.reverse_hash <= node->p.reverse_hash);
+ iter = rcu_dereference(iter_prev->next);
+ assert(iter_prev->reverse_hash <= node->reverse_hash);
for (;;) {
if (caa_unlikely(is_end(iter)))
goto insert;
for (;;) {
if (caa_unlikely(is_end(iter)))
goto insert;
- if (caa_likely(clear_flag(iter)->p.reverse_hash > node->p.reverse_hash))
+ if (caa_likely(clear_flag(iter)->reverse_hash > node->reverse_hash))
goto insert;
/* dummy node is the first node of the identical-hash-value chain */
goto insert;
/* dummy node is the first node of the identical-hash-value chain */
- if (dummy && clear_flag(iter)->p.reverse_hash == node->p.reverse_hash)
+ if (dummy && clear_flag(iter)->reverse_hash == node->reverse_hash)
- next = rcu_dereference(clear_flag(iter)->p.next);
+ next = rcu_dereference(clear_flag(iter)->next);
if (caa_unlikely(is_removed(next)))
goto gc_node;
/* uniquely add */
if (unique_ret
&& !is_dummy(next)
if (caa_unlikely(is_removed(next)))
goto gc_node;
/* uniquely add */
if (unique_ret
&& !is_dummy(next)
- && clear_flag(iter)->p.reverse_hash == node->p.reverse_hash) {
+ && clear_flag(iter)->reverse_hash == node->reverse_hash) {
struct cds_lfht_iter d_iter = { .node = node, .next = iter, };
/*
struct cds_lfht_iter d_iter = { .node = node, .next = iter, };
/*
* (including observe one node by one node
* by forward iterations)
*/
* (including observe one node by one node
* by forward iterations)
*/
- cds_lfht_next_duplicate(ht, match, &d_iter);
+ cds_lfht_next_duplicate(ht, match, key, &d_iter);
if (!d_iter.node)
goto insert;
if (!d_iter.node)
goto insert;
}
/* Only account for identical reverse hash once */
}
/* Only account for identical reverse hash once */
- if (iter_prev->p.reverse_hash != clear_flag(iter)->p.reverse_hash
+ if (iter_prev->reverse_hash != clear_flag(iter)->reverse_hash
&& !is_dummy(next))
check_resize(ht, size, ++chain_len);
iter_prev = clear_flag(iter);
&& !is_dummy(next))
check_resize(ht, size, ++chain_len);
iter_prev = clear_flag(iter);
assert(!is_removed(iter));
assert(iter_prev != node);
if (!dummy)
assert(!is_removed(iter));
assert(iter_prev != node);
if (!dummy)
- node->p.next = clear_flag(iter);
+ node->next = clear_flag(iter);
- node->p.next = flag_dummy(clear_flag(iter));
+ node->next = flag_dummy(clear_flag(iter));
if (is_dummy(iter))
new_node = flag_dummy(node);
else
new_node = node;
if (is_dummy(iter))
new_node = flag_dummy(node);
else
new_node = node;
- if (uatomic_cmpxchg(&iter_prev->p.next, iter,
+ if (uatomic_cmpxchg(&iter_prev->next, iter,
new_node) != iter) {
continue; /* retry */
} else {
new_node) != iter) {
continue; /* retry */
} else {
new_next = flag_dummy(clear_flag(next));
else
new_next = clear_flag(next);
new_next = flag_dummy(clear_flag(next));
else
new_next = clear_flag(next);
- (void) uatomic_cmpxchg(&iter_prev->p.next, iter, new_next);
+ (void) uatomic_cmpxchg(&iter_prev->next, iter, new_next);
struct cds_lfht_node *node,
int dummy_removal)
{
struct cds_lfht_node *node,
int dummy_removal)
{
- struct cds_lfht_node *dummy, *next, *old;
- struct _cds_lfht_node *lookup;
+ struct cds_lfht_node *bucket, *next, *old;
if (!node) /* Return -ENOENT if asked to delete NULL node */
return -ENOENT;
if (!node) /* Return -ENOENT if asked to delete NULL node */
return -ENOENT;
/* logically delete the node */
assert(!is_dummy(node));
assert(!is_removed(node));
/* logically delete the node */
assert(!is_dummy(node));
assert(!is_removed(node));
- old = rcu_dereference(node->p.next);
+ old = rcu_dereference(node->next);
do {
struct cds_lfht_node *new_next;
do {
struct cds_lfht_node *new_next;
else
assert(!is_dummy(next));
new_next = flag_removed(next);
else
assert(!is_dummy(next));
new_next = flag_removed(next);
- old = uatomic_cmpxchg(&node->p.next, next, new_next);
+ old = uatomic_cmpxchg(&node->next, next, new_next);
} while (old != next);
/* We performed the (logical) deletion. */
} while (old != next);
/* We performed the (logical) deletion. */
* the node, and remove it (along with any other logically removed node)
* if found.
*/
* the node, and remove it (along with any other logically removed node)
* if found.
*/
- lookup = lookup_bucket(ht, size, bit_reverse_ulong(node->p.reverse_hash));
- dummy = (struct cds_lfht_node *) lookup;
- _cds_lfht_gc_bucket(dummy, node);
+ bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
+ _cds_lfht_gc_bucket(bucket, node);
- assert(is_removed(rcu_dereference(node->p.next)));
+ assert(is_removed(rcu_dereference(node->next)));
assert(i > ht->min_alloc_order);
ht->cds_lfht_rcu_read_lock();
for (j = start; j < start + len; j++) {
assert(i > ht->min_alloc_order);
ht->cds_lfht_rcu_read_lock();
for (j = start; j < start + len; j++) {
- struct cds_lfht_node *new_node =
- (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
+ struct cds_lfht_node *new_node = &ht->t.tbl[i]->nodes[j];
dbg_printf("init populate: i %lu j %lu hash %lu\n",
i, j, (1UL << (i - 1)) + j);
dbg_printf("init populate: i %lu j %lu hash %lu\n",
i, j, (1UL << (i - 1)) + j);
- new_node->p.reverse_hash =
+ new_node->reverse_hash =
bit_reverse_ulong((1UL << (i - 1)) + j);
bit_reverse_ulong((1UL << (i - 1)) + j);
- _cds_lfht_add(ht, NULL, 1UL << (i - 1),
+ _cds_lfht_add(ht, NULL, NULL, 1UL << (i - 1),
new_node, NULL, 1);
}
ht->cds_lfht_rcu_read_unlock();
new_node, NULL, 1);
}
ht->cds_lfht_rcu_read_unlock();
if (CMM_LOAD_SHARED(ht->t.resize_target) < (1UL << i))
break;
if (CMM_LOAD_SHARED(ht->t.resize_target) < (1UL << i))
break;
- ht->t.tbl[i] = calloc(1, len * sizeof(struct _cds_lfht_node));
+ ht->t.tbl[i] = calloc(1, len * sizeof(struct cds_lfht_node));
assert(i > ht->min_alloc_order);
ht->cds_lfht_rcu_read_lock();
for (j = start; j < start + len; j++) {
assert(i > ht->min_alloc_order);
ht->cds_lfht_rcu_read_lock();
for (j = start; j < start + len; j++) {
- struct cds_lfht_node *fini_node =
- (struct cds_lfht_node *) &ht->t.tbl[i]->nodes[j];
+ struct cds_lfht_node *fini_node = &ht->t.tbl[i]->nodes[j];
dbg_printf("remove entry: i %lu j %lu hash %lu\n",
i, j, (1UL << (i - 1)) + j);
dbg_printf("remove entry: i %lu j %lu hash %lu\n",
i, j, (1UL << (i - 1)) + j);
- fini_node->p.reverse_hash =
+ fini_node->reverse_hash =
bit_reverse_ulong((1UL << (i - 1)) + j);
(void) _cds_lfht_del(ht, 1UL << (i - 1), fini_node, 1);
}
bit_reverse_ulong((1UL << (i - 1)) + j);
(void) _cds_lfht_del(ht, 1UL << (i - 1), fini_node, 1);
}
static
void cds_lfht_create_dummy(struct cds_lfht *ht, unsigned long size)
{
static
void cds_lfht_create_dummy(struct cds_lfht *ht, unsigned long size)
{
- struct _cds_lfht_node *prev, *node;
+ struct cds_lfht_node *prev, *node;
unsigned long order, len, i, j;
unsigned long order, len, i, j;
- ht->t.tbl[0] = calloc(1, ht->min_alloc_size * sizeof(struct _cds_lfht_node));
+ ht->t.tbl[0] = calloc(1, ht->min_alloc_size * sizeof(struct cds_lfht_node));
assert(ht->t.tbl[0]);
dbg_printf("create dummy: order %lu index %lu hash %lu\n", 0, 0, 0);
assert(ht->t.tbl[0]);
dbg_printf("create dummy: order %lu index %lu hash %lu\n", 0, 0, 0);
if (order <= ht->min_alloc_order) {
ht->t.tbl[order] = (struct rcu_level *) (ht->t.tbl[0]->nodes + len);
} else {
if (order <= ht->min_alloc_order) {
ht->t.tbl[order] = (struct rcu_level *) (ht->t.tbl[0]->nodes + len);
} else {
- ht->t.tbl[order] = calloc(1, len * sizeof(struct _cds_lfht_node));
+ ht->t.tbl[order] = calloc(1, len * sizeof(struct cds_lfht_node));
assert(ht->t.tbl[order]);
}
assert(ht->t.tbl[order]);
}
node->next = prev->next;
assert(is_dummy(node->next));
node->reverse_hash = bit_reverse_ulong(j + len);
node->next = prev->next;
assert(is_dummy(node->next));
node->reverse_hash = bit_reverse_ulong(j + len);
- prev->next = flag_dummy((struct cds_lfht_node *)node);
+ prev->next = flag_dummy(node);
void cds_lfht_lookup(struct cds_lfht *ht, cds_lfht_match_fct match,
unsigned long hash, void *key, struct cds_lfht_iter *iter)
{
void cds_lfht_lookup(struct cds_lfht *ht, cds_lfht_match_fct match,
unsigned long hash, void *key, struct cds_lfht_iter *iter)
{
- struct cds_lfht_node *node, *next, *dummy_node;
- struct _cds_lfht_node *lookup;
+ struct cds_lfht_node *node, *next, *bucket;
unsigned long reverse_hash, size;
reverse_hash = bit_reverse_ulong(hash);
size = rcu_dereference(ht->t.size);
unsigned long reverse_hash, size;
reverse_hash = bit_reverse_ulong(hash);
size = rcu_dereference(ht->t.size);
- lookup = lookup_bucket(ht, size, hash);
- dummy_node = (struct cds_lfht_node *) lookup;
+ bucket = lookup_bucket(ht, size, hash);
/* We can always skip the dummy node initially */
/* We can always skip the dummy node initially */
- node = rcu_dereference(dummy_node->p.next);
+ node = rcu_dereference(bucket->next);
node = clear_flag(node);
for (;;) {
if (caa_unlikely(is_end(node))) {
node = next = NULL;
break;
}
node = clear_flag(node);
for (;;) {
if (caa_unlikely(is_end(node))) {
node = next = NULL;
break;
}
- if (caa_unlikely(node->p.reverse_hash > reverse_hash)) {
+ if (caa_unlikely(node->reverse_hash > reverse_hash)) {
node = next = NULL;
break;
}
node = next = NULL;
break;
}
- next = rcu_dereference(node->p.next);
+ next = rcu_dereference(node->next);
assert(node == clear_flag(node));
if (caa_likely(!is_removed(next))
&& !is_dummy(next)
assert(node == clear_flag(node));
if (caa_likely(!is_removed(next))
&& !is_dummy(next)
- && node->p.reverse_hash == reverse_hash
+ && node->reverse_hash == reverse_hash
&& caa_likely(match(node, key))) {
break;
}
node = clear_flag(next);
}
&& caa_likely(match(node, key))) {
break;
}
node = clear_flag(next);
}
- assert(!node || !is_dummy(rcu_dereference(node->p.next)));
+ assert(!node || !is_dummy(rcu_dereference(node->next)));
iter->node = node;
iter->next = next;
}
void cds_lfht_next_duplicate(struct cds_lfht *ht, cds_lfht_match_fct match,
iter->node = node;
iter->next = next;
}
void cds_lfht_next_duplicate(struct cds_lfht *ht, cds_lfht_match_fct match,
- struct cds_lfht_iter *iter)
+ void *key, struct cds_lfht_iter *iter)
{
struct cds_lfht_node *node, *next;
unsigned long reverse_hash;
{
struct cds_lfht_node *node, *next;
unsigned long reverse_hash;
- reverse_hash = node->p.reverse_hash;
- key = node->key;
+ reverse_hash = node->reverse_hash;
next = iter->next;
node = clear_flag(next);
next = iter->next;
node = clear_flag(next);
node = next = NULL;
break;
}
node = next = NULL;
break;
}
- if (caa_unlikely(node->p.reverse_hash > reverse_hash)) {
+ if (caa_unlikely(node->reverse_hash > reverse_hash)) {
node = next = NULL;
break;
}
node = next = NULL;
break;
}
- next = rcu_dereference(node->p.next);
+ next = rcu_dereference(node->next);
if (caa_likely(!is_removed(next))
&& !is_dummy(next)
if (caa_likely(!is_removed(next))
&& !is_dummy(next)
- && caa_likely(match(node->key, key))) {
+ && caa_likely(match(node, key))) {
break;
}
node = clear_flag(next);
}
break;
}
node = clear_flag(next);
}
- assert(!node || !is_dummy(rcu_dereference(node->p.next)));
+ assert(!node || !is_dummy(rcu_dereference(node->next)));
iter->node = node;
iter->next = next;
}
iter->node = node;
iter->next = next;
}
node = next = NULL;
break;
}
node = next = NULL;
break;
}
- next = rcu_dereference(node->p.next);
+ next = rcu_dereference(node->next);
if (caa_likely(!is_removed(next))
&& !is_dummy(next)) {
break;
}
node = clear_flag(next);
}
if (caa_likely(!is_removed(next))
&& !is_dummy(next)) {
break;
}
node = clear_flag(next);
}
- assert(!node || !is_dummy(rcu_dereference(node->p.next)));
+ assert(!node || !is_dummy(rcu_dereference(node->next)));
iter->node = node;
iter->next = next;
}
void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
iter->node = node;
iter->next = next;
}
void cds_lfht_first(struct cds_lfht *ht, struct cds_lfht_iter *iter)
{
- struct _cds_lfht_node *lookup;
+ struct cds_lfht_node *lookup;
/*
* Get next after first dummy node. The first dummy node is the
/*
* Get next after first dummy node. The first dummy node is the
- node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
+ node->reverse_hash = bit_reverse_ulong((unsigned long) hash);
size = rcu_dereference(ht->t.size);
size = rcu_dereference(ht->t.size);
- _cds_lfht_add(ht, NULL, size, node, NULL, 0);
+ _cds_lfht_add(ht, NULL, NULL, size, node, NULL, 0);
ht_count_add(ht, size, hash);
}
struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
cds_lfht_match_fct match,
ht_count_add(ht, size, hash);
}
struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
cds_lfht_match_fct match,
unsigned long hash,
struct cds_lfht_node *node)
{
unsigned long size;
struct cds_lfht_iter iter;
unsigned long hash,
struct cds_lfht_node *node)
{
unsigned long size;
struct cds_lfht_iter iter;
- node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
+ node->reverse_hash = bit_reverse_ulong((unsigned long) hash);
size = rcu_dereference(ht->t.size);
size = rcu_dereference(ht->t.size);
- _cds_lfht_add(ht, match, size, node, &iter, 0);
+ _cds_lfht_add(ht, match, key, size, node, &iter, 0);
if (iter.node == node)
ht_count_add(ht, size, hash);
return iter.node;
if (iter.node == node)
ht_count_add(ht, size, hash);
return iter.node;
struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
cds_lfht_match_fct match,
struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
cds_lfht_match_fct match,
unsigned long hash,
struct cds_lfht_node *node)
{
unsigned long size;
struct cds_lfht_iter iter;
unsigned long hash,
struct cds_lfht_node *node)
{
unsigned long size;
struct cds_lfht_iter iter;
- node->p.reverse_hash = bit_reverse_ulong((unsigned long) hash);
+ node->reverse_hash = bit_reverse_ulong((unsigned long) hash);
size = rcu_dereference(ht->t.size);
for (;;) {
size = rcu_dereference(ht->t.size);
for (;;) {
- _cds_lfht_add(ht, match, size, node, &iter, 0);
+ _cds_lfht_add(ht, match, key, size, node, &iter, 0);
if (iter.node == node) {
ht_count_add(ht, size, hash);
return NULL;
if (iter.node == node) {
ht_count_add(ht, size, hash);
return NULL;
size = rcu_dereference(ht->t.size);
ret = _cds_lfht_del(ht, size, iter->node, 0);
if (!ret) {
size = rcu_dereference(ht->t.size);
ret = _cds_lfht_del(ht, size, iter->node, 0);
if (!ret) {
- hash = bit_reverse_ulong(iter->node->p.reverse_hash);
+ hash = bit_reverse_ulong(iter->node->reverse_hash);
ht_count_del(ht, size, hash);
}
return ret;
ht_count_del(ht, size, hash);
}
return ret;
int cds_lfht_delete_dummy(struct cds_lfht *ht)
{
struct cds_lfht_node *node;
int cds_lfht_delete_dummy(struct cds_lfht *ht)
{
struct cds_lfht_node *node;
- struct _cds_lfht_node *lookup;
unsigned long order, i, size;
/* Check that the table is empty */
unsigned long order, i, size;
/* Check that the table is empty */
- lookup = &ht->t.tbl[0]->nodes[0];
- node = (struct cds_lfht_node *) lookup;
+ node = &ht->t.tbl[0]->nodes[0];
- node = clear_flag(node)->p.next;
+ node = clear_flag(node)->next;
if (!is_dummy(node))
return -EPERM;
assert(!is_removed(node));
if (!is_dummy(node))
return -EPERM;
assert(!is_removed(node));
long *approx_after)
{
struct cds_lfht_node *node, *next;
long *approx_after)
{
struct cds_lfht_node *node, *next;
- struct _cds_lfht_node *lookup;
unsigned long nr_dummy = 0;
*approx_before = 0;
unsigned long nr_dummy = 0;
*approx_before = 0;
*removed = 0;
/* Count non-dummy nodes in the table */
*removed = 0;
/* Count non-dummy nodes in the table */
- lookup = &ht->t.tbl[0]->nodes[0];
- node = (struct cds_lfht_node *) lookup;
+ node = &ht->t.tbl[0]->nodes[0];
- next = rcu_dereference(node->p.next);
+ next = rcu_dereference(node->next);
if (is_removed(next)) {
if (!is_dummy(next))
(*removed)++;
if (is_removed(next)) {
if (!is_dummy(next))
(*removed)++;
struct lfht_test_node {
struct cds_lfht_node node;
struct lfht_test_node {
struct cds_lfht_node node;
+ void *key;
+ unsigned int key_len;
/* cache-cold for iteration */
struct rcu_head head;
};
/* cache-cold for iteration */
struct rcu_head head;
};
void lfht_test_node_init(struct lfht_test_node *node, void *key,
size_t key_len)
{
void lfht_test_node_init(struct lfht_test_node *node, void *key,
size_t key_len)
{
- cds_lfht_node_init(&node->node, key, key_len);
+ cds_lfht_node_init(&node->node);
+ node->key = key;
+ node->key_len = key_len;
}
static inline struct lfht_test_node *
}
static inline struct lfht_test_node *
-int test_match(struct cds_lfht_node *node, void *arg)
+int test_match(struct cds_lfht_node *node, void *key)
- return !test_compare(node->key, node->key_len,
- arg, sizeof(unsigned long));
+ struct lfht_test_node *test_node = to_test_node(node);
+
+ return !test_compare(test_node->key, test_node->key_len,
+ key, sizeof(unsigned long));
sizeof(void *));
rcu_read_lock();
if (add_unique) {
sizeof(void *));
rcu_read_lock();
if (add_unique) {
- ret_node = cds_lfht_add_unique(test_ht, test_match,
- test_hash(node->node.key, node->node.key_len, TEST_HASH_SEED),
+ ret_node = cds_lfht_add_unique(test_ht, test_match, node->key,
+ test_hash(node->key, node->key_len, TEST_HASH_SEED),
&node->node);
} else {
if (add_replace)
&node->node);
} else {
if (add_replace)
- ret_node = cds_lfht_add_replace(test_ht, test_match,
- test_hash(node->node.key, node->node.key_len, TEST_HASH_SEED),
+ ret_node = cds_lfht_add_replace(test_ht, test_match, node->key,
+ test_hash(node->key, node->key_len, TEST_HASH_SEED),
&node->node);
else
cds_lfht_add(test_ht,
&node->node);
else
cds_lfht_add(test_ht,
- test_hash(node->node.key, node->node.key_len, TEST_HASH_SEED),
+ test_hash(node->key, node->key_len, TEST_HASH_SEED),
&node->node);
}
rcu_read_unlock();
&node->node);
}
rcu_read_unlock();
sizeof(void *));
rcu_read_lock();
if (add_unique) {
sizeof(void *));
rcu_read_lock();
if (add_unique) {
- ret_node = cds_lfht_add_unique(test_ht, test_match,
- test_hash(node->node.key, node->node.key_len, TEST_HASH_SEED),
+ ret_node = cds_lfht_add_unique(test_ht, test_match, node->key,
+ test_hash(node->key, node->key_len, TEST_HASH_SEED),
&node->node);
} else {
if (add_replace)
&node->node);
} else {
if (add_replace)
- ret_node = cds_lfht_add_replace(test_ht, test_match,
- test_hash(node->node.key, node->node.key_len, TEST_HASH_SEED),
+ ret_node = cds_lfht_add_replace(test_ht, test_match, node->key,
+ test_hash(node->key, node->key_len, TEST_HASH_SEED),
&node->node);
else
cds_lfht_add(test_ht,
&node->node);
else
cds_lfht_add(test_ht,
- test_hash(node->node.key, node->node.key_len, TEST_HASH_SEED),
+ test_hash(node->key, node->key_len, TEST_HASH_SEED),
&node->node);
}
rcu_read_unlock();
&node->node);
}
rcu_read_unlock();
-/*
- * struct cds_lfht_node and struct _cds_lfht_node should be aligned on
- * 4-bytes boundaries because the two lower bits are used as flags.
- */
- * _cds_lfht_node: Contains the internal pointers and reverse-hash
+ * cds_lfht_node: Contains the next pointers and reverse-hash
* value required for lookup and traversal of the hash table.
* value required for lookup and traversal of the hash table.
- */
-struct _cds_lfht_node {
- struct cds_lfht_node *next; /* ptr | DUMMY_FLAG | REMOVED_FLAG */
- unsigned long reverse_hash;
-} __attribute__((aligned(4)));
-
-/*
- * cds_lfht_node: Contains the full key and length required to check for
- * an actual match, and also contains an rcu_head structure that is used
- * by RCU to track a node through a given RCU grace period. There is an
- * instance of _cds_lfht_node enclosed as a field within each
- * _cds_lfht_node structure.
+ *
+ * struct cds_lfht_node should be aligned on 4-bytes boundaries because
+ * the two lower bits are used as flags.
*
* struct cds_lfht_node can be embedded into a structure (as a field).
* caa_container_of() can be used to get the structure from the struct
* cds_lfht_node after a lookup.
*
* struct cds_lfht_node can be embedded into a structure (as a field).
* caa_container_of() can be used to get the structure from the struct
* cds_lfht_node after a lookup.
+ *
+ * The structure which embeds it typically holds the key (or key-value
+ * pair) of the object. The caller code is responsible for calculation
+ * of the hash value for cds_lfht APIs.
*/
struct cds_lfht_node {
*/
struct cds_lfht_node {
- /* cache-hot for iteration */
- struct _cds_lfht_node p; /* needs to be first field */
- void *key;
- unsigned int key_len;
-};
+ struct cds_lfht_node *next; /* ptr | DUMMY_FLAG | REMOVED_FLAG */
+ unsigned long reverse_hash;
+} __attribute__((aligned(4)));
/* cds_lfht_iter: Used to track state while traversing a hash chain. */
struct cds_lfht_iter {
/* cds_lfht_iter: Used to track state while traversing a hash chain. */
struct cds_lfht_iter {
/*
* cds_lfht_node_init - initialize a hash table node
* @node: the node to initialize.
/*
* cds_lfht_node_init - initialize a hash table node
* @node: the node to initialize.
- * @key: pointer to the key to use.
- * @key_len: the length of the key, in bytes.
+ *
+ * This function is kept to be eventually used for debugging purposes
+ * (detection of memory corruption).
-void cds_lfht_node_init(struct cds_lfht_node *node, void *key,
- size_t key_len)
+void cds_lfht_node_init(struct cds_lfht_node *node)
- node->key = key;
- node->key_len = key_len;
* cds_lfht_next_duplicate - get the next item with same key (after a lookup).
* @ht: the hash table.
* @match: the key match function.
* cds_lfht_next_duplicate - get the next item with same key (after a lookup).
* @ht: the hash table.
* @match: the key match function.
+ * @key: the current node key.
* @iter: Node, if found (output). *iter->node set to NULL if not found.
*
* Uses an iterator initialized by a lookup.
* @iter: Node, if found (output). *iter->node set to NULL if not found.
*
* Uses an iterator initialized by a lookup.
* Threads calling this API need to be registered RCU read-side threads.
*/
void cds_lfht_next_duplicate(struct cds_lfht *ht,
* Threads calling this API need to be registered RCU read-side threads.
*/
void cds_lfht_next_duplicate(struct cds_lfht *ht,
- cds_lfht_match_fct match, struct cds_lfht_iter *iter);
+ cds_lfht_match_fct match, void *key,
+ struct cds_lfht_iter *iter);
/*
* cds_lfht_first - get the first node in the table.
/*
* cds_lfht_first - get the first node in the table.
* cds_lfht_add_unique - add a node to hash table, if key is not present.
* @ht: the hash table.
* @match: the key match function.
* cds_lfht_add_unique - add a node to hash table, if key is not present.
* @ht: the hash table.
* @match: the key match function.
+ * @key: the node's key.
+ * @hash: the node's hash.
* @node: the node to try adding.
*
* Return the node added upon success.
* @node: the node to try adding.
*
* Return the node added upon success.
*/
struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
cds_lfht_match_fct match,
*/
struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
cds_lfht_match_fct match,
unsigned long hash,
struct cds_lfht_node *node);
unsigned long hash,
struct cds_lfht_node *node);
* cds_lfht_add_replace - replace or add a node within hash table.
* @ht: the hash table.
* @match: the key match function.
* cds_lfht_add_replace - replace or add a node within hash table.
* @ht: the hash table.
* @match: the key match function.
+ * @key: the node's key.
+ * @hash: the node's hash.
* @node: the node to add.
*
* Return the node replaced upon success. If no node matching the key
* @node: the node to add.
*
* Return the node replaced upon success. If no node matching the key
*/
struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
cds_lfht_match_fct match,
*/
struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
cds_lfht_match_fct match,
unsigned long hash,
struct cds_lfht_node *node);
unsigned long hash,
struct cds_lfht_node *node);