Rename urcu-ht to rculfhash
[urcu.git] / rculfhash.c
/*
 * TODO: keys are currently assumed <= sizeof(void *). Key target never freed.
 */

#define _LGPL_SOURCE
#include <stdlib.h>
#include <errno.h>
#include <assert.h>
#include <stdio.h>

#include <urcu.h>
#include <urcu-defer.h>
#include <urcu/arch.h>
#include <urcu/uatomic.h>
#include <urcu/jhash.h>
#include <urcu/compiler.h>
#include <pthread.h>
#include <urcu/rculfhash.h>

/*
 * Maximum number of hash table buckets: a 256KB bucket array (32K buckets on
 * 64-bit). Should take about 512KB max if we assume 1 node per 4 buckets.
 */
#define MAX_HT_BUCKETS ((256 << 10) / sizeof(void *))

/* node flags */
#define NODE_STOLEN (1 << 0)

struct rcu_ht_node;

struct rcu_ht_node {
	struct rcu_ht_node *next;
	void *key;
	void *data;
	unsigned int flags;
};

struct rcu_table {
	unsigned long size;
	struct rcu_ht_node *tbl[0];
};

struct rcu_ht {
	struct rcu_table *t;		/* shared */
	ht_hash_fct hash_fct;
	void (*free_fct)(void *data);	/* fct to free data */
	uint32_t keylen;
	uint32_t hashseed;
	pthread_mutex_t resize_mutex;	/* resize mutex: add/del mutex */
	int resize_ongoing;		/* fast-path resize check */
};

struct rcu_ht *ht_new(ht_hash_fct hash_fct, void (*free_fct)(void *data),
		      unsigned long init_size, uint32_t keylen,
		      uint32_t hashseed)
{
	struct rcu_ht *ht;

	ht = calloc(1, sizeof(struct rcu_ht));
	ht->hash_fct = hash_fct;
	ht->free_fct = free_fct;
	ht->keylen = keylen;
	ht->hashseed = hashseed;
	/* this mutex should not nest in read-side C.S. */
	pthread_mutex_init(&ht->resize_mutex, NULL);
	ht->resize_ongoing = 0;	/* shared */
	ht->t = calloc(1, sizeof(struct rcu_table)
		       + (init_size * sizeof(struct rcu_ht_node *)));
	ht->t->size = init_size;
	return ht;
}
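
/*
 * Creation/teardown sketch (hypothetical caller, kept under #if 0 so it does
 * not affect the build). Assumes the prototypes from <urcu/rculfhash.h>; the
 * seed, initial size and use of free() as free_fct are arbitrary choices.
 * Every thread touching the table must be a registered RCU reader.
 */
#if 0
static struct rcu_ht *example_create(void)
{
	struct rcu_ht *ht;

	rcu_register_thread();
	/* 64 initial buckets, pointer-sized keys, hash seed 0x42 */
	ht = ht_new(ht_jhash, free, 64, sizeof(void *), 0x42);
	return ht;
}

static void example_teardown(struct rcu_ht *ht)
{
	ht_destroy(ht);	/* only once no readers nor writers remain */
	rcu_unregister_thread();
}
#endif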

void *ht_lookup(struct rcu_ht *ht, void *key)
{
	struct rcu_table *t;
	unsigned long hash;
	struct rcu_ht_node *node;
	void *ret;

	rcu_read_lock();
	t = rcu_dereference(ht->t);
	smp_read_barrier_depends();	/* read t before size and table */
	hash = ht->hash_fct(key, ht->keylen, ht->hashseed) % t->size;
	smp_read_barrier_depends();	/* read size before links */
	node = rcu_dereference(t->tbl[hash]);
	for (;;) {
		if (likely(!node)) {
			ret = NULL;
			break;
		}
		if (node->key == key) {
			ret = node->data;
			break;
		}
		node = rcu_dereference(node->next);
	}
	rcu_read_unlock();

	return ret;
}
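
/*
 * Lookup usage sketch (hypothetical caller; keys are encoded in the pointer
 * as per the TODO at the top of this file). The returned data pointer may be
 * reclaimed after a grace period by a concurrent delete, so the caller should
 * wrap the lookup and the use of the data in its own read-side critical
 * section. A miss is reported as NULL.
 */
#if 0
static void example_lookup(struct rcu_ht *ht)
{
	void *data;

	rcu_read_lock();
	data = ht_lookup(ht, (void *)0x2aUL);
	if (data)
		printf("found: %s\n", (char *)data);
	else
		printf("key not present (or mapped to NULL)\n");
	rcu_read_unlock();
}
#endif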

/*
 * Will re-try until either:
 * - The key is already there (-EEXIST)
 * - We successfully add the key at the head of a table bucket.
 */
int ht_add(struct rcu_ht *ht, void *key, void *data)
{
	struct rcu_ht_node *node, *old_head, *new_head;
	struct rcu_table *t;
	unsigned long hash;
	int ret = 0;

	new_head = calloc(1, sizeof(struct rcu_ht_node));
	new_head->key = key;
	new_head->data = data;
	new_head->flags = 0;
	/*
	 * Here comes the fun and tricky part: add at the beginning with a
	 * cmpxchg. Hold a read lock between the moment the first element is
	 * read and the node traversal (to find duplicates). This ensures the
	 * head pointer has not been reclaimed when the cmpxchg is done.
	 * Always adding at the head ensures that we have to re-try if a new
	 * item has been added concurrently, so we never add duplicates.
	 */
retry:
	rcu_read_lock();

	if (unlikely(LOAD_SHARED(ht->resize_ongoing))) {
		rcu_read_unlock();
		/*
		 * Wait for resize to complete before continuing.
		 */
		ret = pthread_mutex_lock(&ht->resize_mutex);
		assert(!ret);
		ret = pthread_mutex_unlock(&ht->resize_mutex);
		assert(!ret);
		goto retry;
	}

	t = rcu_dereference(ht->t);
	/* no read barrier needed, because no concurrency with resize */
	hash = ht->hash_fct(key, ht->keylen, ht->hashseed) % t->size;

	old_head = node = rcu_dereference(t->tbl[hash]);
	for (;;) {
		if (likely(!node)) {
			break;
		}
		if (node->key == key) {
			ret = -EEXIST;
			free(new_head);	/* never published, plain free is safe */
			goto end;
		}
		node = rcu_dereference(node->next);
	}
	new_head->next = old_head;
	if (rcu_cmpxchg_pointer(&t->tbl[hash], old_head, new_head) != old_head)
		goto restart;
end:
	rcu_read_unlock();
	return ret;

	/* restart loop, release and re-take the read lock to be kind to GP */
restart:
	rcu_read_unlock();
	goto retry;
}
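
/*
 * Add usage sketch (hypothetical caller): ht_add() publishes the new node at
 * the bucket head with a cmpxchg and retries internally on contention, so the
 * only error the caller needs to handle is -EEXIST for a duplicate key.
 */
#if 0
static int example_add(struct rcu_ht *ht, unsigned long key, void *data)
{
	int ret;

	ret = ht_add(ht, (void *)key, data);
	if (ret == -EEXIST)
		fprintf(stderr, "key %lu already present\n", key);
	return ret;
}
#endif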

/*
 * Restart until we successfully remove the entry, or until no matching entry
 * is left (in which case (void *)(unsigned long)-ENOENT is returned).
 * Deal with concurrent stealers by doing an extra verification pass to check
 * that no element in the list is still pointing to the element stolen.
 * This could happen if two concurrent steals of consecutive objects are
 * executed. A pointer to an object being stolen could be saved by the
 * concurrent stealer for the previous object.
 * Also, given that in this precise scenario another stealer can also want to
 * delete the doubly-referenced object, use a "stolen" flag to let only one
 * stealer delete the object.
 */
void *ht_steal(struct rcu_ht *ht, void *key)
{
	struct rcu_ht_node **prev, *node, *del_node = NULL;
	struct rcu_table *t;
	unsigned long hash;
	void *data;
	int ret;

retry:
	rcu_read_lock();

	if (unlikely(LOAD_SHARED(ht->resize_ongoing))) {
		rcu_read_unlock();
		/*
		 * Wait for resize to complete before continuing.
		 */
		ret = pthread_mutex_lock(&ht->resize_mutex);
		assert(!ret);
		ret = pthread_mutex_unlock(&ht->resize_mutex);
		assert(!ret);
		goto retry;
	}

	t = rcu_dereference(ht->t);
	/* no read barrier needed, because no concurrency with resize */
	hash = ht->hash_fct(key, ht->keylen, ht->hashseed) % t->size;

	prev = &t->tbl[hash];
	node = rcu_dereference(*prev);
	for (;;) {
		if (likely(!node)) {
			if (del_node) {
				goto end;
			} else {
				goto error;
			}
		}
		if (node->key == key) {
			break;
		}
		prev = &node->next;
		node = rcu_dereference(*prev);
	}

	if (!del_node) {
		/*
		 * Another concurrent thread stole it? If so, let it deal with
		 * this. Assume NODE_STOLEN is the only flag. If this changes,
		 * read flags before cmpxchg.
		 */
		if (cmpxchg(&node->flags, 0, NODE_STOLEN) != 0)
			goto error;
	}

	/* Found it! The pointer to the object is in "prev". */
	if (rcu_cmpxchg_pointer(prev, node, node->next) == node)
		del_node = node;
	/* Always do an extra pass to make sure nothing still points to node. */
	goto restart;

end:
	/*
	 * From this point on, we own del_node. Note that there can still be
	 * concurrent RCU readers using it. We can free it outside of the read
	 * lock after a grace period.
	 */
	rcu_read_unlock();

	data = del_node->data;
	call_rcu(free, del_node);
	return data;

error:
	data = (void *)(unsigned long)-ENOENT;
	rcu_read_unlock();
	return data;

	/* restart loop, release and re-take the read lock to be kind to GP */
restart:
	rcu_read_unlock();
	goto retry;
}
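
/*
 * Steal usage sketch (hypothetical caller): ht_steal() returns the data
 * pointer on success and hands its ownership to the caller; a miss is
 * reported as the encoded sentinel (void *)(unsigned long)-ENOENT, not NULL,
 * so the return value must be compared against that sentinel.
 */
#if 0
static void *example_steal(struct rcu_ht *ht, unsigned long key)
{
	void *data;

	data = ht_steal(ht, (void *)key);
	if (data == (void *)(unsigned long)-ENOENT)
		return NULL;	/* not found */
	return data;		/* caller now owns the data */
}
#endif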

int ht_delete(struct rcu_ht *ht, void *key)
{
	void *data;

	data = ht_steal(ht, key);
	if (data && data != (void *)(unsigned long)-ENOENT) {
		if (ht->free_fct)
			call_rcu(ht->free_fct, data);
		return 0;
	} else {
		return -ENOENT;
	}
}

/* Delete all old elements. Allow concurrent writer accesses. */
int ht_delete_all(struct rcu_ht *ht)
{
	unsigned long i;
	struct rcu_ht_node **prev, *node, *inext;
	struct rcu_table *t;
	int cnt = 0;
	int ret;

	/*
	 * Mutual exclusion with resize operations, but let add/steal execute
	 * concurrently. This is OK because we operate only on the heads.
	 */
	ret = pthread_mutex_lock(&ht->resize_mutex);
	assert(!ret);

	t = rcu_dereference(ht->t);
	/* no read barrier needed, because no concurrency with resize */
	for (i = 0; i < t->size; i++) {
		rcu_read_lock();
		prev = &t->tbl[i];
		/*
		 * Cut the head. After that, we own the first element.
		 */
		node = rcu_xchg_pointer(prev, NULL);
		if (!node) {
			rcu_read_unlock();
			continue;
		}
		/*
		 * We manage a list shared with concurrent writers and readers.
		 * Note that a concurrent add may or may not be deleted by us,
		 * depending on whether it arrives before or after the head is
		 * cut. "node" points to our first node. Remove first elements
		 * iteratively.
		 */
		for (;;) {
			inext = NULL;
			prev = &node->next;
			if (prev)
				inext = rcu_xchg_pointer(prev, NULL);
			/*
			 * "node" is the first element of the list we have cut.
			 * We therefore own it, no concurrent writer may delete
			 * it. There can only be concurrent lookups. Concurrent
			 * add can only be done on a bucket head, but we've cut
			 * it already. inext is also owned by us, because we
			 * have exchanged it for "NULL". It will therefore be
			 * safe to use it after a G.P.
			 */
			rcu_read_unlock();
			if (node->data && ht->free_fct)	/* free_fct may be NULL */
				call_rcu(ht->free_fct, node->data);
			call_rcu(free, node);
			cnt++;
			if (likely(!inext))
				break;
			rcu_read_lock();
			node = inext;
		}
	}

	ret = pthread_mutex_unlock(&ht->resize_mutex);
	assert(!ret);
	return cnt;
}

/*
 * Should only be called when no more concurrent readers nor writers can
 * possibly access the table.
 */
int ht_destroy(struct rcu_ht *ht)
{
	int ret;

	ret = ht_delete_all(ht);
	free(ht->t);
	free(ht);
	return ret;
}

static void ht_resize_grow(struct rcu_ht *ht)
{
	unsigned long i, new_size, old_size;
	struct rcu_table *new_t, *old_t;
	struct rcu_ht_node *node, *new_node, *tmp;
	unsigned long hash;

	old_t = ht->t;
	old_size = old_t->size;

	if (old_size == MAX_HT_BUCKETS)
		return;

	new_size = old_size << 1;
	new_t = calloc(1, sizeof(struct rcu_table)
		       + (new_size * sizeof(struct rcu_ht_node *)));
	new_t->size = new_size;

	for (i = 0; i < old_size; i++) {
		/*
		 * Re-hash each entry, insert in new table.
		 * It's important that a reader looking for a key _will_ find
		 * it if it's in the table.
		 * Copy each node. (just the node, not ->data)
		 */
		node = old_t->tbl[i];
		while (node) {
			hash = ht->hash_fct(node->key, ht->keylen,
					    ht->hashseed) % new_size;
			new_node = malloc(sizeof(struct rcu_ht_node));
			new_node->key = node->key;
			new_node->data = node->data;
			new_node->flags = node->flags;
			new_node->next = new_t->tbl[hash];	/* link to first */
			new_t->tbl[hash] = new_node;		/* add to head */
			node = node->next;
		}
	}

	/* Changing table and size atomically wrt lookups */
	rcu_assign_pointer(ht->t, new_t);

	/* Ensure all concurrent lookups use new size and table */
	synchronize_rcu();

	for (i = 0; i < old_size; i++) {
		node = old_t->tbl[i];
		while (node) {
			tmp = node->next;
			free(node);
			node = tmp;
		}
	}
	free(old_t);
}

static void ht_resize_shrink(struct rcu_ht *ht)
{
	unsigned long i, new_size;
	struct rcu_table *new_t, *old_t;
	struct rcu_ht_node **prev, *node;

	old_t = ht->t;
	if (old_t->size == 1)
		return;

	new_size = old_t->size >> 1;

	for (i = 0; i < new_size; i++) {
		/* Link end with first entry of i + new_size */
		prev = &old_t->tbl[i];
		node = *prev;
		while (node) {
			prev = &node->next;
			node = *prev;
		}
		*prev = old_t->tbl[i + new_size];
	}
	smp_wmb();	/* write links before changing size */
	STORE_SHARED(old_t->size, new_size);

	/* Ensure all concurrent lookups use new size */
	synchronize_rcu();

	new_t = realloc(old_t, sizeof(struct rcu_table)
			+ (new_size * sizeof(struct rcu_ht_node *)));
	/* shrinking, pointers should not move */
	assert(new_t == old_t);
}

/*
 * growth > 0: double the number of buckets; growth < 0: halve it; 0: no-op.
 */
void ht_resize(struct rcu_ht *ht, int growth)
{
	int ret;

	ret = pthread_mutex_lock(&ht->resize_mutex);
	assert(!ret);
	STORE_SHARED(ht->resize_ongoing, 1);
	synchronize_rcu();
	/* All add/remove are waiting on the mutex. */
	if (growth > 0)
		ht_resize_grow(ht);
	else if (growth < 0)
		ht_resize_shrink(ht);
	smp_mb();
	STORE_SHARED(ht->resize_ongoing, 0);
	ret = pthread_mutex_unlock(&ht->resize_mutex);
	assert(!ret);
}
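
/*
 * Resize usage sketch (hypothetical caller): the resize direction is given by
 * the sign of "growth"; each call doubles or halves the bucket count. Must not
 * be called from within a read-side critical section, since it takes
 * resize_mutex and calls synchronize_rcu().
 */
#if 0
static void example_resize(struct rcu_ht *ht)
{
	ht_resize(ht, 1);	/* double the number of buckets */
	ht_resize(ht, -1);	/* halve it again */
}
#endif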

/*
 * Expects keys smaller than or equal to the pointer size to be encoded in the
 * pointer itself.
 */
uint32_t ht_jhash(void *key, uint32_t length, uint32_t initval)
{
	uint32_t ret;
	void *vkey;

	if (length <= sizeof(void *))
		vkey = &key;
	else
		vkey = key;
	ret = jhash(vkey, length, initval);
	return ret;
}
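
/*
 * Key-encoding sketch (hypothetical): for keys that fit in a pointer, the key
 * value itself is passed as the "key" argument and hashed by value (ht_jhash
 * hashes &key in that case); larger keys are passed by reference and must
 * stay valid for the lifetime of the entry (see the TODO at the top of this
 * file: key targets are never freed).
 */
#if 0
static void example_key_encoding(struct rcu_ht *ht)
{
	unsigned long k = 42;

	/* Pointer-sized key encoded directly in the "key" pointer. */
	(void) ht_add(ht, (void *)k, malloc(16));	/* heap data: freeable by free_fct */
	(void) ht_lookup(ht, (void *)k);
}
#endif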