#ifndef _LTTNG_UST_RCULFHASH_H
#define _LTTNG_UST_RCULFHASH_H

/*
 * urcu/rculfhash.h
 *
 * Userspace RCU library - Lock-Free RCU Hash Table
 *
 * Copyright 2011 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright 2011 - Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>
#include <pthread.h>
#include <urcu/compiler.h>

#ifdef __cplusplus
extern "C" {
#endif

struct lttng_ust_lfht;

/*
 * lttng_ust_lfht_node: Contains the next pointers and reverse-hash
 * value required for lookup and traversal of the hash table.
 *
 * struct lttng_ust_lfht_node should be aligned on 8-byte boundaries because
 * the three lower bits are used as flags. It is worth noting that the
 * information contained within these three bits could be represented on
 * two bits by re-using the same bit for REMOVAL_OWNER_FLAG and
 * BUCKET_FLAG. This can be done if we ensure that no iterator nor
 * updater checks the BUCKET_FLAG after it detects that the REMOVED_FLAG
 * is set. Given that the minimum size of struct lttng_ust_lfht_node is 8 bytes
 * on 32-bit architectures, we choose to go for simplicity and reserve
 * three bits.
 *
 * struct lttng_ust_lfht_node can be embedded into a structure (as a field).
 * caa_container_of() can be used to get the structure from the struct
 * lttng_ust_lfht_node after a lookup.
 *
 * The structure which embeds it typically holds the key (or key-value
 * pair) of the object. The caller code is responsible for calculation
 * of the hash value for lttng_ust_lfht APIs.
 */
struct lttng_ust_lfht_node {
	struct lttng_ust_lfht_node *next;	/* ptr | REMOVAL_OWNER_FLAG | BUCKET_FLAG | REMOVED_FLAG */
	unsigned long reverse_hash;
} __attribute__((aligned(8)));

/* lttng_ust_lfht_iter: Used to track state while traversing a hash chain. */
struct lttng_ust_lfht_iter {
	struct lttng_ust_lfht_node *node, *next;
};

static inline
struct lttng_ust_lfht_node *lttng_ust_lfht_iter_get_node(struct lttng_ust_lfht_iter *iter)
{
	return iter->node;
}
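
/*
 * Example (illustrative sketch, not part of this API): embedding
 * struct lttng_ust_lfht_node in a caller-defined entry and recovering the
 * entry with caa_container_of() after a lookup. The "struct my_entry" type
 * and the "iter" variable below are hypothetical.
 *
 *	struct my_entry {
 *		unsigned long key;
 *		unsigned long value;
 *		struct lttng_ust_lfht_node node;	// embedded hash table node
 *	};
 *
 *	struct lttng_ust_lfht_node *ht_node;
 *	struct my_entry *entry = NULL;
 *
 *	ht_node = lttng_ust_lfht_iter_get_node(&iter);
 *	if (ht_node)
 *		entry = caa_container_of(ht_node, struct my_entry, node);
 */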

struct rcu_flavor_struct;

/*
 * Caution!
 * Ensure reader and writer threads are registered as urcu readers.
 */

typedef int (*lttng_ust_lfht_match_fct)(struct lttng_ust_lfht_node *node, const void *key);

/*
 * lttng_ust_lfht_node_init - initialize a hash table node
 * @node: the node to initialize.
 *
 * This function is kept so it can eventually be used for debugging
 * purposes (detection of memory corruption).
 */
static inline
void lttng_ust_lfht_node_init(struct lttng_ust_lfht_node *node)
{
}

/*
 * Hash table creation flags.
 */
enum {
	LTTNG_UST_LFHT_AUTO_RESIZE = (1U << 0),
	LTTNG_UST_LFHT_ACCOUNTING = (1U << 1),
};

struct lttng_ust_lfht_mm_type {
	struct lttng_ust_lfht *(*alloc_lttng_ust_lfht)(unsigned long min_nr_alloc_buckets,
			unsigned long max_nr_buckets);
	void (*alloc_bucket_table)(struct lttng_ust_lfht *ht, unsigned long order);
	void (*free_bucket_table)(struct lttng_ust_lfht *ht, unsigned long order);
	struct lttng_ust_lfht_node *(*bucket_at)(struct lttng_ust_lfht *ht,
			unsigned long index);
};

extern const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_order;
extern const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_chunk;
extern const struct lttng_ust_lfht_mm_type lttng_ust_lfht_mm_mmap;

/*
 * lttng_ust_lfht_new - allocate a hash table.
 * @init_size: number of buckets to allocate initially. Must be power of two.
 * @min_nr_alloc_buckets: the minimum number of allocated buckets.
 *	(must be power of two)
 * @max_nr_buckets: the maximum number of hash table buckets allowed.
 *	(must be power of two, 0 is accepted, means
 *	"infinite")
 * @flags: hash table creation flags (can be combined with bitwise or: '|').
 *	0: no flags.
 *	LTTNG_UST_LFHT_AUTO_RESIZE: automatically resize hash table.
 *	LTTNG_UST_LFHT_ACCOUNTING: count the number of node additions
 *		and removals in the table.
 * @mm: the memory management type to use for the bucket tables
 *	(one of the lttng_ust_lfht_mm_* types declared above).
 *
 * Return NULL on error.
 * Note: the RCU flavor must be already included before the hash table header.
 */
extern
struct lttng_ust_lfht *lttng_ust_lfht_new(unsigned long init_size,
		unsigned long min_nr_alloc_buckets,
		unsigned long max_nr_buckets,
		int flags,
		const struct lttng_ust_lfht_mm_type *mm);
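
/*
 * Example (illustrative sketch): creating an auto-resizing, accounting hash
 * table with the order-based memory management type declared above, then
 * destroying it. The initial and minimum sizes are arbitrary powers of two,
 * and the error handling shown is only a suggestion.
 *
 *	struct lttng_ust_lfht *ht;
 *
 *	ht = lttng_ust_lfht_new(16, 16, 0,
 *		LTTNG_UST_LFHT_AUTO_RESIZE | LTTNG_UST_LFHT_ACCOUNTING,
 *		&lttng_ust_lfht_mm_order);
 *	if (!ht)
 *		abort();	// allocation failed
 *	// ... use the table ...
 *	if (lttng_ust_lfht_destroy(ht))
 *		abort();	// destroy failed
 */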

/*
 * lttng_ust_lfht_destroy - destroy a hash table.
 * @ht: the hash table to destroy.
 *
 * Return 0 on success, negative error value on error.
 *
 * Prior to liburcu 0.10:
 * - Threads calling this API need to be registered RCU read-side
 *   threads.
 * - lttng_ust_lfht_destroy should *not* be called from a RCU read-side
 *   critical section. It should *not* be called from a call_rcu thread
 *   context either.
 *
 * Starting from liburcu 0.10, rculfhash implements its own worker
 * thread to handle resize operations, which removes RCU requirements on
 * lttng_ust_lfht_destroy.
 */
extern
int lttng_ust_lfht_destroy(struct lttng_ust_lfht *ht);

/*
 * lttng_ust_lfht_count_nodes - count the number of nodes in the hash table.
 * @ht: the hash table.
 * @split_count_before: sample the node count split-counter before traversal.
 * @count: traverse the hash table, count the number of nodes observed.
 * @split_count_after: sample the node count split-counter after traversal.
 *
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 */
extern
void lttng_ust_lfht_count_nodes(struct lttng_ust_lfht *ht,
		long *split_count_before,
		unsigned long *count,
		long *split_count_after);

/*
 * lttng_ust_lfht_lookup - lookup a node by key.
 * @ht: the hash table.
 * @hash: the key hash.
 * @match: the key match function.
 * @key: the key to look up.
 * @iter: node, if found (output). *iter->node set to NULL if not found.
 *
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 * This function acts as a rcu_dereference() to read the node pointer.
 */
extern
void lttng_ust_lfht_lookup(struct lttng_ust_lfht *ht, unsigned long hash,
		lttng_ust_lfht_match_fct match, const void *key,
		struct lttng_ust_lfht_iter *iter);
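
/*
 * Example (illustrative sketch): a key match callback and a lookup. The
 * "struct my_entry" type, "hash_key()" helper and surrounding variables are
 * hypothetical; the caller supplies its own hash function and key type. The
 * match callback is expected to return non-zero on a match. The RCU
 * read-side lock must be held around the lookup and any use of the resulting
 * iterator, as described above.
 *
 *	static int match_my_entry(struct lttng_ust_lfht_node *node, const void *key)
 *	{
 *		struct my_entry *entry =
 *			caa_container_of(node, struct my_entry, node);
 *
 *		return entry->key == *(const unsigned long *) key;
 *	}
 *
 *	unsigned long key = 42;
 *	struct lttng_ust_lfht_iter iter;
 *	struct my_entry *entry = NULL;
 *
 *	lttng_ust_lfht_lookup(ht, hash_key(key), match_my_entry, &key, &iter);
 *	if (lttng_ust_lfht_iter_get_node(&iter))
 *		entry = caa_container_of(lttng_ust_lfht_iter_get_node(&iter),
 *			struct my_entry, node);
 */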

/*
 * lttng_ust_lfht_next_duplicate - get the next item with same key, after iterator.
 * @ht: the hash table.
 * @match: the key match function.
 * @key: the current node key.
 * @iter: input: current iterator.
 *	output: node, if found. *iter->node set to NULL if not found.
 *
 * Uses an iterator initialized by a lookup or traversal. Important: the
 * iterator _needs_ to be initialized before calling
 * lttng_ust_lfht_next_duplicate.
 * Sets *iter->node to the following node with same key.
 * Sets *iter->node to NULL if no following node exists with same key.
 * RCU read-side lock must be held across lttng_ust_lfht_lookup and
 * lttng_ust_lfht_next calls, and also between lttng_ust_lfht_next calls using the
 * node returned by a previous lttng_ust_lfht_next.
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 * This function acts as a rcu_dereference() to read the node pointer.
 */
extern
void lttng_ust_lfht_next_duplicate(struct lttng_ust_lfht *ht,
		lttng_ust_lfht_match_fct match, const void *key,
		struct lttng_ust_lfht_iter *iter);
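
/*
 * Example (illustrative sketch): walking all nodes that share a key by
 * pairing lttng_ust_lfht_lookup with lttng_ust_lfht_next_duplicate. The
 * "hash_key()" helper, "match_my_entry()" callback and "key" variable are
 * hypothetical, and the RCU read-side lock is assumed to be held around the
 * whole loop.
 *
 *	struct lttng_ust_lfht_iter iter;
 *	struct lttng_ust_lfht_node *node;
 *
 *	lttng_ust_lfht_lookup(ht, hash_key(key), match_my_entry, &key, &iter);
 *	for (node = lttng_ust_lfht_iter_get_node(&iter);
 *			node != NULL;
 *			lttng_ust_lfht_next_duplicate(ht, match_my_entry, &key, &iter),
 *			node = lttng_ust_lfht_iter_get_node(&iter)) {
 *		// use caa_container_of(node, ...) here
 *	}
 *
 * The lttng_ust_lfht_for_each_duplicate() helper below expands to an
 * equivalent loop.
 */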

/*
 * lttng_ust_lfht_first - get the first node in the table.
 * @ht: the hash table.
 * @iter: First node, if exists (output). *iter->node set to NULL if not found.
 *
 * Output in "*iter". *iter->node set to NULL if table is empty.
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 * This function acts as a rcu_dereference() to read the node pointer.
 */
extern
void lttng_ust_lfht_first(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter);

/*
 * lttng_ust_lfht_next - get the next node in the table.
 * @ht: the hash table.
 * @iter: input: current iterator.
 *	output: next node, if exists. *iter->node set to NULL if not found.
 *
 * Input/Output in "*iter". *iter->node set to NULL if *iter was
 * pointing to the last table node.
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 * This function acts as a rcu_dereference() to read the node pointer.
 */
extern
void lttng_ust_lfht_next(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_iter *iter);

/*
 * lttng_ust_lfht_add - add a node to the hash table.
 * @ht: the hash table.
 * @hash: the key hash.
 * @node: the node to add.
 *
 * This function supports adding redundant keys into the table.
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 * This function issues a full memory barrier before and after its
 * atomic commit.
 */
extern
void lttng_ust_lfht_add(struct lttng_ust_lfht *ht, unsigned long hash,
		struct lttng_ust_lfht_node *node);
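
/*
 * Example (illustrative sketch): allocating a caller-defined entry and adding
 * it under its key hash. The "struct my_entry" type and "hash_key()" helper
 * are hypothetical; any hash function chosen by the caller works, as long as
 * lookups use the same one. The RCU read-side lock is assumed to be held
 * around the add.
 *
 *	struct my_entry *entry;
 *
 *	entry = malloc(sizeof(*entry));
 *	if (!entry)
 *		abort();
 *	entry->key = 42;
 *	entry->value = 1;
 *	lttng_ust_lfht_node_init(&entry->node);
 *	lttng_ust_lfht_add(ht, hash_key(entry->key), &entry->node);
 */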

/*
 * lttng_ust_lfht_add_unique - add a node to hash table, if key is not present.
 * @ht: the hash table.
 * @hash: the node's hash.
 * @match: the key match function.
 * @key: the node's key.
 * @node: the node to try adding.
 *
 * Return the node added upon success.
 * Return the unique node already present upon failure. If
 * lttng_ust_lfht_add_unique fails, the node passed as parameter should be
 * freed by the caller. In this case, the caller does NOT need to wait
 * for a grace period before freeing or re-using the node.
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 *
 * The semantics of this function are that if only this function is used
 * to add keys into the table, no duplicated keys should ever be
 * observable in the table. The same guarantee applies for combinations
 * of add_unique and add_replace (see below).
 *
 * Upon success, this function issues a full memory barrier before and
 * after its atomic commit. Upon failure, this function acts like a
 * simple lookup operation: it acts as a rcu_dereference() to read the
 * node pointer. The failure case does not guarantee any other memory
 * barrier.
 */
extern
struct lttng_ust_lfht_node *lttng_ust_lfht_add_unique(struct lttng_ust_lfht *ht,
		unsigned long hash,
		lttng_ust_lfht_match_fct match,
		const void *key,
		struct lttng_ust_lfht_node *node);
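
/*
 * Example (illustrative sketch): attempting a unique insertion and detecting
 * that the key was already present by comparing the returned node with the
 * node passed in. The "struct my_entry" type, "hash_key()" helper and
 * "match_my_entry()" callback are hypothetical. The RCU read-side lock is
 * assumed to be held around the call.
 *
 *	struct lttng_ust_lfht_node *ret_node;
 *
 *	lttng_ust_lfht_node_init(&entry->node);
 *	ret_node = lttng_ust_lfht_add_unique(ht, hash_key(entry->key),
 *		match_my_entry, &entry->key, &entry->node);
 *	if (ret_node != &entry->node) {
 *		// Key already present: free our unused entry immediately;
 *		// no grace period is needed (see above).
 *		free(entry);
 *	}
 */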

/*
 * lttng_ust_lfht_add_replace - replace or add a node within hash table.
 * @ht: the hash table.
 * @hash: the node's hash.
 * @match: the key match function.
 * @key: the node's key.
 * @node: the node to add.
 *
 * Return the node replaced upon success. If no node matching the key
 * was present, return NULL, which also means the operation succeeded.
 * This replacement operation should never fail.
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 * After successful replacement, a grace period must be waited for before
 * freeing or re-using the memory reserved for the returned node.
 *
 * The semantics of replacement vs lookups and traversals are the
 * following: if lookups and traversals are performed between a key
 * unique insertion and its removal, we guarantee that the lookups and
 * traversals will always find exactly one instance of the key if it is
 * replaced concurrently with the lookups.
 *
 * Providing this semantic allows us to ensure that replacement-only
 * schemes will never generate duplicated keys. It also allows us to
 * guarantee that a combination of add_replace and add_unique updates
 * will never generate duplicated keys.
 *
 * This function issues a full memory barrier before and after its
 * atomic commit.
 */
extern
struct lttng_ust_lfht_node *lttng_ust_lfht_add_replace(struct lttng_ust_lfht *ht,
		unsigned long hash,
		lttng_ust_lfht_match_fct match,
		const void *key,
		struct lttng_ust_lfht_node *node);
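
/*
 * Example (illustrative sketch): upserting an entry and handling the node it
 * displaces. The "struct my_entry" type, "hash_key()" helper and
 * "match_my_entry()" callback are hypothetical, and the grace-period wait is
 * left as a placeholder because the synchronization mechanism depends on how
 * this table is integrated. The RCU read-side lock is assumed to be held
 * around the call.
 *
 *	struct lttng_ust_lfht_node *old_node;
 *
 *	lttng_ust_lfht_node_init(&entry->node);
 *	old_node = lttng_ust_lfht_add_replace(ht, hash_key(entry->key),
 *		match_my_entry, &entry->key, &entry->node);
 *	if (old_node) {
 *		struct my_entry *old = caa_container_of(old_node,
 *			struct my_entry, node);
 *
 *		// wait for an RCU grace period before freeing "old"
 *		free(old);
 *	}
 */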

/*
 * lttng_ust_lfht_replace - replace a node pointed to by iter within hash table.
 * @ht: the hash table.
 * @old_iter: the iterator position of the node to replace.
 * @hash: the node's hash.
 * @match: the key match function.
 * @key: the node's key.
 * @new_node: the new node to use as replacement.
 *
 * Return 0 if replacement is successful, negative value otherwise.
 * Replacing a NULL old node or an already removed node will fail with
 * -ENOENT.
 * If the hash or value of the node to replace and the new node differ,
 * this function returns -EINVAL without proceeding to the replacement.
 * Old node can be looked up with lttng_ust_lfht_lookup and lttng_ust_lfht_next.
 * RCU read-side lock must be held between lookup and replacement.
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 * After successful replacement, a grace period must be waited for before
 * freeing or re-using the memory reserved for the old node (which can
 * be accessed with lttng_ust_lfht_iter_get_node).
 *
 * The semantics of replacement vs lookups are the same as
 * lttng_ust_lfht_add_replace().
 *
 * Upon success, this function issues a full memory barrier before and
 * after its atomic commit. Upon failure, this function does not issue
 * any memory barrier.
 */
extern
int lttng_ust_lfht_replace(struct lttng_ust_lfht *ht,
		struct lttng_ust_lfht_iter *old_iter,
		unsigned long hash,
		lttng_ust_lfht_match_fct match,
		const void *key,
		struct lttng_ust_lfht_node *new_node);
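
/*
 * Example (illustrative sketch): replacing a specific node found by lookup
 * with a new node carrying the same key. The "hash_key()" helper,
 * "match_my_entry()" callback and entry variables are hypothetical. The RCU
 * read-side lock is assumed to be held from the lookup through the
 * replacement.
 *
 *	struct lttng_ust_lfht_iter iter;
 *	int ret;
 *
 *	lttng_ust_lfht_lookup(ht, hash_key(key), match_my_entry, &key, &iter);
 *	lttng_ust_lfht_node_init(&new_entry->node);
 *	ret = lttng_ust_lfht_replace(ht, &iter, hash_key(key), match_my_entry,
 *		&key, &new_entry->node);
 *	if (!ret) {
 *		// Wait for an RCU grace period before freeing the old node,
 *		// still reachable via lttng_ust_lfht_iter_get_node(&iter).
 *	}
 */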

/*
 * lttng_ust_lfht_del - remove node from hash table.
 * @ht: the hash table.
 * @node: the node to delete.
 *
 * Return 0 if the node is successfully removed, negative value
 * otherwise.
 * Deleting a NULL node or an already removed node will fail with a
 * negative value.
 * Node can be looked up with lttng_ust_lfht_lookup and lttng_ust_lfht_next,
 * followed by use of lttng_ust_lfht_iter_get_node.
 * RCU read-side lock must be held between lookup and removal.
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 * After successful removal, a grace period must be waited for before
 * freeing or re-using the memory reserved for old node (which can be
 * accessed with lttng_ust_lfht_iter_get_node).
 * Upon success, this function issues a full memory barrier before and
 * after its atomic commit. Upon failure, this function does not issue
 * any memory barrier.
 */
extern
int lttng_ust_lfht_del(struct lttng_ust_lfht *ht, struct lttng_ust_lfht_node *node);
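
/*
 * Example (illustrative sketch): looking up an entry and removing it. The
 * "hash_key()" helper, "match_my_entry()" callback and "key" variable are
 * hypothetical, and the grace-period wait before freeing is left as a
 * placeholder. The RCU read-side lock is assumed to be held from the lookup
 * through the removal.
 *
 *	struct lttng_ust_lfht_iter iter;
 *	struct lttng_ust_lfht_node *node;
 *
 *	lttng_ust_lfht_lookup(ht, hash_key(key), match_my_entry, &key, &iter);
 *	node = lttng_ust_lfht_iter_get_node(&iter);
 *	if (node && !lttng_ust_lfht_del(ht, node)) {
 *		// wait for an RCU grace period, then free the entry:
 *		// free(caa_container_of(node, struct my_entry, node));
 *	}
 */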

/*
 * lttng_ust_lfht_is_node_deleted - query whether a node is removed from hash table.
 *
 * Return non-zero if the node is deleted from the hash table, 0
 * otherwise.
 * Node can be looked up with lttng_ust_lfht_lookup and lttng_ust_lfht_next,
 * followed by use of lttng_ust_lfht_iter_get_node.
 * RCU read-side lock must be held between lookup and call to this
 * function.
 * Call with rcu_read_lock held.
 * Threads calling this API need to be registered RCU read-side threads.
 * This function does not issue any memory barrier.
 */
extern
int lttng_ust_lfht_is_node_deleted(const struct lttng_ust_lfht_node *node);

/*
 * lttng_ust_lfht_resize - Force a hash table resize
 * @ht: the hash table.
 * @new_size: update to this hash table size.
 *
 * Threads calling this API need to be registered RCU read-side threads.
 * This function does not (necessarily) issue memory barriers.
 * lttng_ust_lfht_resize should *not* be called from a RCU read-side critical
 * section.
 */
extern
void lttng_ust_lfht_resize(struct lttng_ust_lfht *ht, unsigned long new_size);

/*
 * Note: it is safe to perform element removal (del), replacement, or
 * any hash table update operation during any of the following hash
 * table traversals.
 * These functions act as rcu_dereference() to read the node pointers.
 */
#define lttng_ust_lfht_for_each(ht, iter, node)				\
	for (lttng_ust_lfht_first(ht, iter),				\
			node = lttng_ust_lfht_iter_get_node(iter);	\
		node != NULL;						\
		lttng_ust_lfht_next(ht, iter),				\
			node = lttng_ust_lfht_iter_get_node(iter))

#define lttng_ust_lfht_for_each_duplicate(ht, hash, match, key, iter, node) \
	for (lttng_ust_lfht_lookup(ht, hash, match, key, iter),		\
			node = lttng_ust_lfht_iter_get_node(iter);	\
		node != NULL;						\
		lttng_ust_lfht_next_duplicate(ht, match, key, iter),	\
			node = lttng_ust_lfht_iter_get_node(iter))

#define lttng_ust_lfht_for_each_entry(ht, iter, pos, member)		\
	for (lttng_ust_lfht_first(ht, iter),				\
			pos = caa_container_of(lttng_ust_lfht_iter_get_node(iter), \
				__typeof__(*(pos)), member);		\
		lttng_ust_lfht_iter_get_node(iter) != NULL;		\
		lttng_ust_lfht_next(ht, iter),				\
			pos = caa_container_of(lttng_ust_lfht_iter_get_node(iter), \
				__typeof__(*(pos)), member))

#define lttng_ust_lfht_for_each_entry_duplicate(ht, hash, match, key,	\
		iter, pos, member)					\
	for (lttng_ust_lfht_lookup(ht, hash, match, key, iter),		\
			pos = caa_container_of(lttng_ust_lfht_iter_get_node(iter), \
				__typeof__(*(pos)), member);		\
		lttng_ust_lfht_iter_get_node(iter) != NULL;		\
		lttng_ust_lfht_next_duplicate(ht, match, key, iter),	\
			pos = caa_container_of(lttng_ust_lfht_iter_get_node(iter), \
				__typeof__(*(pos)), member))
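
/*
 * Example (illustrative sketch): iterating over every entry in the table with
 * the entry-based helper above. The "struct my_entry" type and its "node"
 * member are hypothetical. The RCU read-side lock is assumed to be held for
 * the whole traversal, and deleting or replacing entries while iterating is
 * allowed, as noted above.
 *
 *	struct lttng_ust_lfht_iter iter;
 *	struct my_entry *entry;
 *
 *	lttng_ust_lfht_for_each_entry(ht, &iter, entry, node) {
 *		// entry->key and entry->value are safe to read here
 *	}
 */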

#ifdef __cplusplus
}
#endif

#endif /* _LTTNG_UST_RCULFHASH_H */