Update urcu-ht
[urcu.git] / urcu-ht.c
/*
 * TODO: keys are currently assumed <= sizeof(void *). Key target never freed.
 */

#define _LGPL_SOURCE
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>

#include <urcu.h>
#include <urcu-defer.h>
#include <arch.h>
#include <arch_atomic.h>
#include <compiler.h>
#include <urcu/jhash.h>
#include <pthread.h>
#include <urcu-ht.h>

/*
 * Maximum number of hash table buckets: 256M on 64-bit.
 * Should take about 512MB max if we assume 1 node per 4 buckets.
 */
#define MAX_HT_BUCKETS ((256 << 10) / sizeof(void *))

/* node flags */
#define NODE_STOLEN (1 << 0)

struct rcu_ht_node;

struct rcu_ht_node {
	struct rcu_ht_node *next;
	void *key;
	void *data;
	unsigned int flags;
};

struct rcu_ht {
	struct rcu_ht_node **tbl;
	ht_hash_fct hash_fct;
	void (*free_fct)(void *data);	/* fct to free data */
	unsigned long size;
	uint32_t keylen;
	uint32_t hashseed;
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	int resize_ongoing;		/* fast-path resize check */
};

struct rcu_ht *ht_new(ht_hash_fct hash_fct, void (*free_fct)(void *data),
		      unsigned long init_size, uint32_t keylen,
		      uint32_t hashseed)
{
	struct rcu_ht *ht;

	ht = calloc(1, sizeof(struct rcu_ht));
	ht->hash_fct = hash_fct;
	ht->free_fct = free_fct;
	ht->size = init_size;	/* shared */
	ht->keylen = keylen;
	ht->hashseed = hashseed;
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	ht->resize_ongoing = 0;	/* shared */
	ht->tbl = calloc(init_size, sizeof(struct rcu_ht_node *));
	return ht;
}

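/*
 * Usage sketch (illustrative, not part of the original file): creating a
 * table with the ht_jhash helper defined at the end of this file. The
 * initial size, key length and hash seed below are arbitrary assumptions;
 * passing free() as free_fct assumes the stored data pointers are
 * heap-allocated.
 */
#if 0	/* usage sketch */
static struct rcu_ht *example_create(void)
{
	/* 16 buckets, pointer-sized keys, arbitrary hash seed */
	return ht_new(ht_jhash, free, 16, sizeof(void *), 0x12345678);
}
#endif
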
void *ht_lookup(struct rcu_ht *ht, void *key)
{
	unsigned long hash;
	struct rcu_ht_node *node;
	void *ret;

	hash = ht->hash_fct(key, ht->keylen, ht->hashseed) % ht->size;
	smp_read_barrier_depends();	/* read size before links */

	rcu_read_lock();
	node = rcu_dereference(ht->tbl[hash]);
	for (;;) {
		if (likely(!node)) {
			ret = NULL;
			break;
		}
		if (node->key == key) {
			ret = node->data;
			break;
		}
		node = rcu_dereference(node->next);
	}
	rcu_read_unlock();

	return ret;
}

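/*
 * Usage sketch (illustrative, not part of the original file): a reader
 * looking up a key. Keys are compared by pointer value, so a small key is
 * passed encoded in the pointer itself. The calling thread is assumed to be
 * registered with the RCU library; if the data can be reclaimed by a
 * concurrent ht_delete(), the caller presumably needs its own
 * rcu_read_lock() spanning both the lookup and the use of the result.
 */
#if 0	/* usage sketch */
static void example_lookup(struct rcu_ht *ht)
{
	void *data;

	data = ht_lookup(ht, (void *)(unsigned long)42);
	if (data)
		printf("found data %p\n", data);
}
#endif
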
/*
 * Will re-try until either:
 * - The key is already there (-EEXIST)
 * - We successfully add the key at the head of a table bucket.
 */
int ht_add(struct rcu_ht *ht, void *key, void *data)
{
	struct rcu_ht_node *node, *old_head, *new_head;
	unsigned long hash;
	int ret = 0;

	new_head = calloc(1, sizeof(struct rcu_ht_node));
	new_head->key = key;
	new_head->data = data;
	new_head->flags = 0;
	/*
	 * Here comes the fun and tricky part.
	 * Add at the beginning with a cmpxchg.
	 * Hold a read lock between the moment the first element is read
	 * and the node traversal (to find duplicates). This ensures
	 * the head pointer has not been reclaimed when cmpxchg is done.
	 * Always adding at the head ensures that we would have to
	 * re-try if a new item has been added concurrently. So we ensure
	 * that we never add duplicates.
	 */
retry:
	rcu_read_lock();

	if (unlikely(LOAD_SHARED(ht->resize_ongoing))) {
		rcu_read_unlock();
		/*
		 * Wait for resize to complete before continuing.
		 */
		ret = pthread_mutex_lock(&ht->resize_mutex);
		assert(!ret);
		ret = pthread_mutex_unlock(&ht->resize_mutex);
		assert(!ret);
		goto retry;
	}

	hash = ht->hash_fct(key, ht->keylen, ht->hashseed)
		% LOAD_SHARED(ht->size);

	old_head = node = rcu_dereference(ht->tbl[hash]);
	for (;;) {
		if (likely(!node)) {
			break;
		}
		if (node->key == key) {
			/* free the never-published node to avoid leaking it */
			free(new_head);
			ret = -EEXIST;
			goto end;
		}
		node = rcu_dereference(node->next);
	}
	new_head->next = old_head;
	if (rcu_cmpxchg_pointer(&ht->tbl[hash], old_head, new_head) != old_head)
		goto restart;
end:
	rcu_read_unlock();
	return ret;

	/* restart loop, release and re-take the read lock to be kind to GP */
restart:
	rcu_read_unlock();
	goto retry;
}

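/*
 * Usage sketch (illustrative, not part of the original file): inserting an
 * entry and handling the duplicate-key case. ht_add() never overwrites an
 * existing entry; it returns -EEXIST when the key is already present, so any
 * "update" semantics would have to be built by the caller, e.g. from
 * ht_steal() followed by ht_add().
 */
#if 0	/* usage sketch */
static int example_add(struct rcu_ht *ht, unsigned long key, void *data)
{
	int ret;

	ret = ht_add(ht, (void *)key, data);
	if (ret == -EEXIST)
		printf("key %lu already present\n", key);
	return ret;
}
#endif
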
/*
 * Restart until we successfully remove the entry, or no entry is left
 * ((void *)(unsigned long)-ENOENT).
 * Deal with concurrent stealers by doing an extra verification pass to check
 * that no element in the list is still pointing to the stolen element.
 * This could happen if two concurrent steals of consecutive objects are
 * executed. A pointer to an object being stolen could be saved by the
 * concurrent stealer for the previous object.
 * Also, in this precise scenario, another stealer may want to delete the
 * doubly-referenced object as well; use a "stolen" flag to let only one
 * stealer delete the object.
 */
void *ht_steal(struct rcu_ht *ht, void *key)
{
	struct rcu_ht_node **prev, *node, *del_node = NULL;
	unsigned long hash;
	void *data;
	int ret;

retry:
	rcu_read_lock();

	if (unlikely(LOAD_SHARED(ht->resize_ongoing))) {
		rcu_read_unlock();
		/*
		 * Wait for resize to complete before continuing.
		 */
		ret = pthread_mutex_lock(&ht->resize_mutex);
		assert(!ret);
		ret = pthread_mutex_unlock(&ht->resize_mutex);
		assert(!ret);
		goto retry;
	}

	hash = ht->hash_fct(key, ht->keylen, ht->hashseed)
		% LOAD_SHARED(ht->size);

	prev = &ht->tbl[hash];
	node = rcu_dereference(*prev);
	for (;;) {
		if (likely(!node)) {
			if (del_node) {
				goto end;
			} else {
				goto error;
			}
		}
		if (node->key == key) {
			break;
		}
		prev = &node->next;
		node = rcu_dereference(*prev);
	}

	if (!del_node) {
		/*
		 * Another concurrent thread stole it ? If so, let it deal with
		 * this. Assume NODE_STOLEN is the only flag. If this changes,
		 * read flags before cmpxchg.
		 */
		if (cmpxchg(&node->flags, 0, NODE_STOLEN) != 0)
			goto error;
	}

	/* Found it ! pointer to object is in "prev" */
	if (rcu_cmpxchg_pointer(prev, node, node->next) == node)
		del_node = node;
	goto restart;

end:
	/*
	 * From that point, we own node. Note that there can still be
	 * concurrent RCU readers using it. We can free it outside of the
	 * read lock after a GP.
	 */
	rcu_read_unlock();

	data = del_node->data;
	call_rcu(free, del_node);
	return data;

error:
	data = (void *)(unsigned long)-ENOENT;
	rcu_read_unlock();
	return data;

	/* restart loop, release and re-take the read lock to be kind to GP */
restart:
	rcu_read_unlock();
	goto retry;
}

int ht_delete(struct rcu_ht *ht, void *key)
{
	void *data;

	data = ht_steal(ht, key);
	if (data && data != (void *)(unsigned long)-ENOENT) {
		if (ht->free_fct)
			call_rcu(ht->free_fct, data);
		return 0;
	} else {
		return -ENOENT;
	}
}

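/*
 * Usage sketch (illustrative, not part of the original file): the difference
 * between ht_steal() and ht_delete(). Stealing hands the data pointer back to
 * the caller, who becomes responsible for reclaiming it; ht_delete() instead
 * queues the data for free_fct through call_rcu(). The -ENOENT pointer
 * encoding and the deferred free pattern are taken from the functions above.
 */
#if 0	/* usage sketch */
static void example_remove(struct rcu_ht *ht, unsigned long key)
{
	void *data;

	data = ht_steal(ht, (void *)key);
	if (data != (void *)(unsigned long)-ENOENT) {
		/* we own the data now; defer the free past a grace period */
		call_rcu(free, data);
	}

	/* alternatively, let the table reclaim it with its own free_fct */
	if (ht_delete(ht, (void *)key) == -ENOENT)
		printf("key %lu not found\n", key);
}
#endif
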
/* Delete all old elements. Allow concurrent writer accesses. */
int ht_delete_all(struct rcu_ht *ht)
{
	unsigned long i;
	struct rcu_ht_node **prev, *node, *inext;
	int cnt = 0;
	int ret;

	/*
	 * Mutual exclusion with resize operations, but let add/steal execute
	 * concurrently. This is OK because we operate only on the heads.
	 */
	ret = pthread_mutex_lock(&ht->resize_mutex);
	assert(!ret);

	for (i = 0; i < ht->size; i++) {
		rcu_read_lock();
		prev = &ht->tbl[i];
		/*
		 * Cut the head. After that, we own the first element.
		 */
		node = rcu_xchg_pointer(prev, NULL);
		if (!node) {
			rcu_read_unlock();
			continue;
		}
		/*
		 * We manage a list shared with concurrent writers and readers.
		 * Note that a concurrent add may or may not be deleted by us,
		 * depending on whether it arrives before or after the head is
		 * cut. "node" points to our first node. Remove first elements
		 * iteratively.
		 */
		for (;;) {
			inext = NULL;
			prev = &node->next;
			if (prev)
				inext = rcu_xchg_pointer(prev, NULL);
			/*
			 * "node" is the first element of the list we have cut.
			 * We therefore own it, no concurrent writer may delete
			 * it. There can only be concurrent lookups. Concurrent
			 * add can only be done on a bucket head, but we've cut
			 * it already. inext is also owned by us, because we
			 * have exchanged it for "NULL". It will therefore be
			 * safe to use it after a G.P.
			 */
			rcu_read_unlock();
			if (node->data)
				call_rcu(ht->free_fct, node->data);
			call_rcu(free, node);
			cnt++;
			if (likely(!inext))
				break;
			rcu_read_lock();
			node = inext;
		}
	}

	ret = pthread_mutex_unlock(&ht->resize_mutex);
	assert(!ret);
	return cnt;
}

/*
 * Should only be called once no concurrent readers or writers can still
 * access the table.
 */
int ht_destroy(struct rcu_ht *ht)
{
	int ret;

	ret = ht_delete_all(ht);
	free(ht->tbl);
	free(ht);
	return ret;
}

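/*
 * Usage sketch (illustrative, not part of the original file): tearing the
 * table down. Per the comment above, ht_destroy() must only run once no
 * reader or writer can access the table any more; quiescing those threads is
 * the caller's responsibility and only hinted at here.
 */
#if 0	/* usage sketch */
static void example_teardown(struct rcu_ht *ht)
{
	int cnt;

	/* ... stop/join every thread still using "ht" ... */
	cnt = ht_destroy(ht);	/* runs ht_delete_all() internally */
	printf("freed %d remaining entries\n", cnt);
}
#endif
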
static void ht_resize_grow(struct rcu_ht *ht)
{
	unsigned long i, new_size, old_size;
	struct rcu_ht_node **new_tbl, **old_tbl;
	struct rcu_ht_node *node, *new_node, *tmp;
	unsigned long hash;

	old_size = ht->size;

	if (old_size == MAX_HT_BUCKETS)
		return;

	old_tbl = ht->tbl;
	new_size = old_size << 1;
	new_tbl = calloc(new_size, sizeof(struct rcu_ht_node *));

	for (i = 0; i < old_size; i++) {
		/*
		 * Re-hash each entry, insert in new table.
		 * It's important that a reader looking for a key _will_ find
		 * it if it's in the table.
		 * Copy each node. (just the node, not ->data)
		 */
		node = old_tbl[i];
		while (node) {
			hash = ht->hash_fct(node->key, ht->keylen, ht->hashseed)
				% new_size;
			new_node = malloc(sizeof(struct rcu_ht_node));
			new_node->key = node->key;
			new_node->data = node->data;
			/* carry flags over so e.g. NODE_STOLEN is preserved */
			new_node->flags = node->flags;
			new_node->next = new_tbl[hash];	/* add to head */
			new_tbl[hash] = new_node;
			node = node->next;
		}
	}

	ht->tbl = new_tbl;
	smp_wmb();	/* write links and table before changing size */
	STORE_SHARED(ht->size, new_size);

	/* Ensure all concurrent lookups use new size and table */
	synchronize_rcu();

	for (i = 0; i < old_size; i++) {
		node = old_tbl[i];
		while (node) {
			tmp = node->next;
			free(node);
			node = tmp;
		}
	}
	free(old_tbl);
}

static void ht_resize_shrink(struct rcu_ht *ht)
{
	unsigned long i, new_size;
	struct rcu_ht_node **new_tbl;
	struct rcu_ht_node **prev, *node;

	if (ht->size == 1)
		return;

	new_size = ht->size >> 1;

	for (i = 0; i < new_size; i++) {
		/* Link end with first entry of i + new_size */
		prev = &ht->tbl[i];
		node = *prev;
		while (node) {
			prev = &node->next;
			node = *prev;
		}
		*prev = ht->tbl[i + new_size];
	}
	smp_wmb();	/* write links before changing size */
	STORE_SHARED(ht->size, new_size);

	/* Ensure all concurrent lookups use new size */
	synchronize_rcu();

	new_tbl = realloc(ht->tbl, new_size * sizeof(struct rcu_ht_node *));
	/* shrinking, pointers should not move */
	assert(new_tbl == ht->tbl);
}

/*
 * growth: >0: *2, <0: /2
 */
void ht_resize(struct rcu_ht *ht, int growth)
{
	int ret;

	ret = pthread_mutex_lock(&ht->resize_mutex);
	assert(!ret);
	STORE_SHARED(ht->resize_ongoing, 1);
	synchronize_rcu();
	/* All add/remove are waiting on the mutex. */
	if (growth > 0)
		ht_resize_grow(ht);
	else if (growth < 0)
		ht_resize_shrink(ht);
	smp_mb();
	STORE_SHARED(ht->resize_ongoing, 0);
	ret = pthread_mutex_unlock(&ht->resize_mutex);
	assert(!ret);
}

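/*
 * Usage sketch (illustrative, not part of the original file): the growth
 * argument only selects a direction, so each call doubles (growth > 0) or
 * halves (growth < 0) the bucket array once. A caller wanting a 4x table
 * would therefore call it twice, as assumed below.
 */
#if 0	/* usage sketch */
static void example_resize(struct rcu_ht *ht)
{
	ht_resize(ht, 1);	/* double the number of buckets */
	ht_resize(ht, 1);	/* double again: 4x the original size */
	ht_resize(ht, -1);	/* halve it back */
}
#endif
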
/*
 * Expects keys smaller than or equal to the pointer size to be encoded in
 * the pointer itself.
 */
uint32_t ht_jhash(void *key, uint32_t length, uint32_t initval)
{
	uint32_t ret;
	void *vkey;

	if (length <= sizeof(void *))
		vkey = &key;
	else
		vkey = key;
	ret = jhash(vkey, length, initval);
	return ret;
}
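
/*
 * Usage sketch (illustrative, not part of the original file): how the
 * pointer-encoded key convention above plays out. For a key no larger than a
 * pointer, the key value itself is passed as the "key" argument and ht_jhash
 * hashes the bytes of that pointer value; larger keys would be passed by
 * address, although the rest of this file still compares keys by pointer
 * value only.
 */
#if 0	/* usage sketch */
static uint32_t example_hash_small_key(uint32_t seed)
{
	unsigned long key = 42;

	/* key encoded in the pointer: hash the pointer bits themselves */
	return ht_jhash((void *)key, sizeof(void *), seed);
}
#endif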