/* Snapshot of liburcu commit b4c832699565e29ea791c9c3835fa2d9041d8087 */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#include <arch_atomic.h>
#include <urcu-defer.h>
#include <urcu/jhash.h>
/*
 * One hash-table entry: a singly-linked bucket-chain node.
 * NOTE(review): only the "next" member survived the scrape; "key" and
 * "data" are reconstructed from their uses in ht_lookup/ht_add/ht_steal —
 * verify member order against the upstream revision.
 */
struct rcu_ht_node {
	struct rcu_ht_node *next;	/* next node in this bucket's chain */
	void *key;			/* opaque key, compared by pointer identity */
	void *data;			/* opaque payload owned by the caller */
};
23 struct rcu_ht_node
**tbl
;
25 void (*free_fct
)(void *data
); /* fct to free data */
32 struct rcu_ht
*ht_new(ht_hash_fct hash_fct
, void (*free_fct
)(void *data
),
33 unsigned long init_size
)
37 ht
= calloc(1, sizeof(struct rcu_ht
));
38 ht
->hash_fct
= hash_fct
;
39 ht
->free_fct
= free_fct
;
40 ht
->size
.add
= init_size
;
41 ht
->size
.lookup
= init_size
;
42 ht
->tbl
= calloc(init_size
, sizeof(struct rcu_ht_node
*));
46 void *ht_lookup(struct rcu_ht
*ht
, void *key
)
49 struct rcu_ht_node
*node
;
52 hash
= ht
->hash_fct(key
) % ht
->size
.lookup
;
55 node
= rcu_dereference(*ht
->tbl
[hash
]);
61 if (node
->key
== key
) {
65 node
= rcu_dereference(node
->next
);
73 * Will re-try until either:
74 * - The key is already there (-EEXIST)
75 * - We successfully add the key at the head of a table bucket.
77 int ht_add(struct rcu_ht
*ht
, void *key
, void *data
)
79 struct rcu_ht_node
*node
, *old_head
, *new_head
;
83 new_head
= calloc(1, sizeof(struct rcu_ht_node
));
85 new_head
->data
= data
;
86 hash
= ht
->hash_fct(key
) % ht
->size
.add
;
88 /* here comes the fun and tricky part.
89 * Add at the beginning with a cmpxchg.
90 * Hold a read lock between the moment the first element is read
91 * and the nodes traversal (to find duplicates). This ensures
92 * the head pointer has not been reclaimed when cmpxchg is done.
93 * Always adding at the head ensures that we would have to
94 * re-try if a new item has been added concurrently. So we ensure that
95 * we never add duplicates. */
99 old_head
= node
= rcu_dereference(*ht
->tbl
[hash
]);
104 if (node
->key
== key
) {
108 node
= rcu_dereference(node
->next
);
110 if (rcu_cmpxchg_pointer(ht
->tbl
[hash
], old_head
, new_head
) != old_head
)
117 /* restart loop, release and re-take the read lock to be kind to GP */
124 * Restart until we successfully remove the entry, or no entry is left
125 * ((void *)(unsigned long)-ENOENT).
127 void *ht_steal(struct rcu_ht
*ht
, void *key
)
129 struct rcu_ht_node
**prev
, *node
;
133 hash
= ht
->hash_fct(key
) % ht
->size
.lookup
;
138 prev
= ht
->tbl
[hash
];
139 node
= rcu_dereference(*prev
);
142 data
= (void *)(unsigned long)-ENOENT
;
145 if (node
->key
== key
) {
149 node
= rcu_dereference(*prev
);
151 /* Found it ! pointer to object is in "prev" */
152 if (rcu_cmpxchg_pointer(prev
, node
, node
->next
) != node
)
158 call_rcu(free
, node
);
162 /* restart loop, release and re-take the read lock to be kind to GP */
168 int ht_delete(struct rcu_ht
*ht
, void *key
)
172 data
= ht_steal(ht
, key
);
174 if (ht
->free_fct
&& data
)
175 call_rcu(ht
->free_fct
, data
);
182 /* Delete all old elements. Allow concurrent writer accesses. */
183 void ht_delete_all(struct rcu_ht
*ht
)
186 struct rcu_ht_node
**prev
, *node
, *inext
;
188 for (i
= 0; i
< ht
->size
.lookup
; i
++) {
194 * Cut the head. After that, we own the first element.
196 node
= rcu_xchg_pointer(prev
, NULL
);
202 * We manage a list shared with concurrent writers and readers.
203 * Note that a concurrent add may or may not be deleted by us,
204 * depending if it arrives before or after the head is cut.
205 * "node" points to our first node. Remove first elements
210 inext
= rcu_xchg_pointer(prev
, NULL
);
212 * "node" is the first element of the list we have cut.
213 * We therefore own it, no concurrent writer may delete
214 * it. There can only be concurrent lookups. Concurrent
215 * add can only be done on a bucket head, but we've cut
216 * it already. inext is also owned by us, because we
217 * have exchanged it for "NULL". It will therefore be
218 * safe to use it after a G.P.
222 call_rcu(ht
->free_fct
, node
->data
);
223 call_rcu(free
, node
);
233 * Should only be called when no more concurrent readers nor writers can
234 * possibly access the table.
236 void ht_destroy(struct rcu_ht
*ht
)
/*
 * Hash function exported for table users: thin wrapper around the
 * Bob Jenkins jhash() over @length bytes of @key, seeded with @initval.
 */
uint32_t ht_jhash(void *key, uint32_t length, uint32_t initval)
{
	uint32_t hval;

	hval = jhash(key, length, initval);
	return hval;
}
/* cgit footer: This page took 0.043067 seconds and 4 git commands to generate. */