X-Git-Url: https://git.lttng.org/?p=urcu.git;a=blobdiff_plain;f=rculfhash.c;h=b06b703e699a3f57efc64bb1e5be1829dc2be849;hp=958982bf40a420f4c8d15103591d3e8ce8c79524;hb=18ca7a5b5573e511a66f118ec863df82a6d247e4;hpb=709bacf9834b5007ec04aad9195ec060b9e27b67

diff --git a/rculfhash.c b/rculfhash.c
index 958982b..b06b703 100644
--- a/rculfhash.c
+++ b/rculfhash.c
@@ -71,18 +71,18 @@
  * (not visible to lookups anymore) before the RCU read-side critical
  * section held across removal ends. Furthermore, this ensures that
  * the node with "removed" flag set is removed from the linked-list
- * before its memory is reclaimed. Only the thread which removal
- * successfully set the "removed" flag (with a cmpxchg) into a node's
- * next pointer is considered to have succeeded its removal (and thus
- * owns the node to reclaim). Because we garbage-collect starting from
- * an invariant node (the start-of-bucket bucket node) up to the
- * "removed" node (or find a reverse-hash that is higher), we are sure
- * that a successful traversal of the chain leads to a chain that is
- * present in the linked-list (the start node is never removed) and
- * that is does not contain the "removed" node anymore, even if
- * concurrent delete/add operations are changing the structure of the
- * list concurrently.
- * - The add operation performs gargage collection of buckets if it
+ * before its memory is reclaimed. After setting the "removed" flag,
+ * only the thread whose removal operation is the first to set the
+ * "removal owner" flag (with an xchg) into a node's next pointer is
+ * considered to have succeeded its removal (and thus owns the node to
+ * reclaim). Because we garbage-collect starting from an invariant
+ * node (the start-of-bucket bucket node) up to the "removed" node (or
+ * find a reverse-hash that is higher), we are sure that a successful
+ * traversal of the chain leads to a chain that is present in the
+ * linked-list (the start node is never removed) and that it does not
+ * contain the "removed" node anymore, even if concurrent delete/add
+ * operations are changing the structure of the list concurrently.
+ * - The add operation performs garbage collection of buckets if it
  * encounters nodes with removed flag set in the bucket where it wants
  * to add its new node. This ensures lock-freedom of add operation by
  * helping the remover unlink nodes from the list rather than to wait
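The two-flag removal protocol described in the hunk above is easier to
see in miniature. The sketch below is illustrative only, not this
file's code: it uses C11 atomics in place of urcu's uatomic_*
primitives, hypothetical names, and an atomic fetch-or to claim the
owner bit, which decides ownership the same way as the xchg the
comment describes (exactly one thread observes the flag clear).

#include <stdatomic.h>
#include <stdint.h>

#define REMOVED_FLAG		(1UL << 0)	/* node is logically deleted */
#define REMOVAL_OWNER_FLAG	(1UL << 1)	/* reclamation is claimed */

/*
 * Any number of deleters may set REMOVED_FLAG, but exactly one of
 * them finds REMOVAL_OWNER_FLAG clear when claiming it; that thread
 * alone "owns" the node and may hand it to memory reclamation.
 * Returns nonzero if the caller became the removal owner.
 */
static int claim_removal(_Atomic uintptr_t *next)
{
	uintptr_t old;

	atomic_fetch_or(next, REMOVED_FLAG);	/* logical removal */
	/* ... bucket garbage collection unlinks the node here ... */
	old = atomic_fetch_or(next, REMOVAL_OWNER_FLAG);
	return !(old & REMOVAL_OWNER_FLAG);	/* first claimant wins */
}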
@@ -98,6 +98,98 @@
  * hash table nodes. These tables are invariant after they are
  * populated into the hash table.
  *
+ * Ordering Guarantees:
+ *
+ * To discuss these guarantees, we first define a "read" operation as
+ * any of the basic cds_lfht_lookup, cds_lfht_next_duplicate,
+ * cds_lfht_first and cds_lfht_next operations, as well as
+ * cds_lfht_add_unique (failure).
+ *
+ * We define a "read traversal" operation as any of the following
+ * groups of operations:
+ * - cds_lfht_lookup followed by iteration with cds_lfht_next_duplicate
+ *   (and/or cds_lfht_next, although less common).
+ * - cds_lfht_add_unique (failure) followed by iteration with
+ *   cds_lfht_next_duplicate (and/or cds_lfht_next, although less
+ *   common).
+ * - cds_lfht_first followed by iteration with cds_lfht_next (and/or
+ *   cds_lfht_next_duplicate, although less common).
+ *
+ * We define "write" operations as any of cds_lfht_add,
+ * cds_lfht_add_unique (success), cds_lfht_add_replace, cds_lfht_del.
+ *
+ * When cds_lfht_add_unique succeeds (returns the node passed as
+ * parameter), it acts as a "write" operation. When cds_lfht_add_unique
+ * fails (returns a node different from the one passed as parameter), it
+ * acts as a "read" operation. A cds_lfht_add_unique failure is a
+ * cds_lfht_lookup "read" operation; therefore, any ordering guarantee
+ * referring to "lookup" applies to both a plain lookup and a
+ * cds_lfht_add_unique (failure).
+ *
+ * We define "prior" and "later" nodes as nodes observable by reads and
+ * read traversals respectively before and after a write or sequence of
+ * write operations.
+ *
+ * Hash-table operations are often cascaded, for example, the pointer
+ * returned by a cds_lfht_lookup() might be passed to a cds_lfht_next(),
+ * whose return value might in turn be passed to another hash-table
+ * operation. This entire cascaded series of operations must be enclosed
+ * by a pair of matching rcu_read_lock() and rcu_read_unlock()
+ * operations.
+ *
+ * The following ordering guarantees are offered by this hash table:
+ *
+ * A.1) "read" after "write": if there is ordering between a write and a
+ *      later read, then the read is guaranteed to see the write or some
+ *      later write.
+ * A.2) "read traversal" after "write": given that there is dependency
+ *      ordering between reads in a "read traversal", if there is
+ *      ordering between a write and the first read of the traversal,
+ *      then the "read traversal" is guaranteed to see the write or
+ *      some later write.
+ * B.1) "write" after "read": if there is ordering between a read and a
+ *      later write, then the read will never see the write.
+ * B.2) "write" after "read traversal": given that there is dependency
+ *      ordering between reads in a "read traversal", if there is
+ *      ordering between the last read of the traversal and a later
+ *      write, then the "read traversal" will never see the write.
+ * C)   "write" while "read traversal": if a write occurs during a "read
+ *      traversal", the traversal may, or may not, see the write.
+ * D.1) "write" after "write": if there is ordering between a write and
+ *      a later write, then the later write is guaranteed to see the
+ *      effects of the first write.
+ * D.2) Concurrent "write" pairs: the system will assign an arbitrary
+ *      order to any pair of concurrent conflicting writes.
+ *      Non-conflicting writes (for example, to different keys) are
+ *      unordered.
+ * E)   If a grace period separates a "del" or "replace" operation
+ *      and a subsequent operation, then that subsequent operation is
+ *      guaranteed not to see the removed item.
+ * F)   Uniqueness guarantee: given a hash table that does not contain
+ *      duplicate items for a given key, there will only be one item in
+ *      the hash table after an arbitrary sequence of add_unique and/or
+ *      add_replace operations. Note, however, that a pair of
+ *      concurrent read operations might well access two different items
+ *      with that key.
+ * G.1) If a pair of lookups for a given key are ordered (e.g. by a
+ *      memory barrier), then the second lookup will return the same
+ *      node as the previous lookup, or some later node.
+ * G.2) A "read traversal" that starts after the end of a prior "read
+ *      traversal" (ordered by memory barriers) is guaranteed to see the
+ *      same nodes as the previous traversal, or some later nodes.
+ * G.3) Concurrent "read" pairs: concurrent reads are unordered. For
+ *      example, if a pair of reads to the same key run concurrently
+ *      with an insertion of that same key, the reads remain unordered
+ *      regardless of their return values. In other words, you cannot
+ *      rely on the values returned by the reads to deduce ordering.
+ *
+ * Progress guarantees:
+ *
+ * * Reads are wait-free. These operations always move forward in the
+ *   hash table linked list, and this list has no loop.
+ * * Writes are lock-free. Any retry loop performed by a write operation
+ *   is triggered by progress made within another update operation.
+ *
  * Bucket node tables:
  *
  * hash table    hash table    the last    all bucket node tables
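The "read traversal" and rcu_read_lock() enclosure rules above look
like this in practice. This is a caller-side sketch using the public
API; `visit' is a hypothetical per-node callback, and `ht', `match',
`key' and `hash' are assumed to be set up by the caller.

#include <urcu.h>
#include <urcu/rculfhash.h>

/*
 * One "read traversal": a lookup cascaded into next-duplicate
 * iteration, all enclosed in a single RCU read-side critical section.
 */
static void traverse_duplicates(struct cds_lfht *ht,
		cds_lfht_match_fct match, const void *key,
		unsigned long hash,
		void (*visit)(struct cds_lfht_node *node))
{
	struct cds_lfht_iter iter;
	struct cds_lfht_node *node;

	rcu_read_lock();	/* covers the whole cascaded series */
	cds_lfht_lookup(ht, hash, match, key, &iter);
	while ((node = cds_lfht_iter_get_node(&iter)) != NULL) {
		visit(node);	/* guarantee C: a concurrent write may
				   or may not be seen by this traversal */
		cds_lfht_next_duplicate(ht, match, key, &iter);
	}
	rcu_read_unlock();
}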
@@ -150,12 +242,14 @@
  */
 
 #define _LGPL_SOURCE
+#define _GNU_SOURCE
 #include <stdlib.h>
 #include <errno.h>
 #include <assert.h>
 #include <stdio.h>
 #include <stdint.h>
 #include <string.h>
+#include <sched.h>
 
 #include "config.h"
 #include <urcu.h>
@@ -820,6 +914,7 @@ int _cds_lfht_replace(struct cds_lfht *ht, unsigned long size,
  */
 static
 void _cds_lfht_add(struct cds_lfht *ht,
+		unsigned long hash,
 		cds_lfht_match_fct match,
 		const void *key,
 		unsigned long size,
@@ -833,7 +928,7 @@ void _cds_lfht_add(struct cds_lfht *ht,
 	assert(!is_bucket(node));
 	assert(!is_removed(node));
-	bucket = lookup_bucket(ht, size, bit_reverse_ulong(node->reverse_hash));
+	bucket = lookup_bucket(ht, size, hash);
 	for (;;) {
 		uint32_t chain_len = 0;
@@ -1067,7 +1162,7 @@ void init_table_populate_partition(struct cds_lfht *ht, unsigned long i,
 		dbg_printf("init populate: order %lu index %lu hash %lu\n",
 			   i, j, j);
 		new_node->reverse_hash = bit_reverse_ulong(j);
-		_cds_lfht_add(ht, NULL, NULL, size, new_node, NULL, 1);
+		_cds_lfht_add(ht, j, NULL, NULL, size, new_node, NULL, 1);
 	}
 	ht->flavor->read_unlock();
 }
@@ -1473,7 +1568,7 @@ void cds_lfht_add(struct cds_lfht *ht, unsigned long hash,
 	node->reverse_hash = bit_reverse_ulong(hash);
 	size = rcu_dereference(ht->size);
-	_cds_lfht_add(ht, NULL, NULL, size, node, NULL, 0);
+	_cds_lfht_add(ht, hash, NULL, NULL, size, node, NULL, 0);
 	ht_count_add(ht, size, hash);
 }
@@ -1488,7 +1583,7 @@ struct cds_lfht_node *cds_lfht_add_unique(struct cds_lfht *ht,
 	node->reverse_hash = bit_reverse_ulong(hash);
 	size = rcu_dereference(ht->size);
-	_cds_lfht_add(ht, match, key, size, node, &iter, 0);
+	_cds_lfht_add(ht, hash, match, key, size, node, &iter, 0);
 	if (iter.node == node)
 		ht_count_add(ht, size, hash);
 	return iter.node;
 }
@@ -1506,7 +1601,7 @@ struct cds_lfht_node *cds_lfht_add_replace(struct cds_lfht *ht,
 	node->reverse_hash = bit_reverse_ulong(hash);
 	size = rcu_dereference(ht->size);
 	for (;;) {
-		_cds_lfht_add(ht, match, key, size, node, &iter, 0);
+		_cds_lfht_add(ht, hash, match, key, size, node, &iter, 0);
 		if (iter.node == node) {
 			ht_count_add(ht, size, hash);
 			return NULL;
 		}
@@ -1552,6 +1647,11 @@ int cds_lfht_del(struct cds_lfht *ht, struct cds_lfht_node *node)
 	return ret;
 }
 
+int cds_lfht_is_node_deleted(struct cds_lfht_node *node)
+{
+	return is_removed(rcu_dereference(node->next));
+}
+
 static
 int cds_lfht_delete_bucket(struct cds_lfht *ht)
 {
@@ -1774,6 +1874,11 @@ void __cds_lfht_resize_lazy_launch(struct cds_lfht *ht)
 		return;
 	}
 	work = malloc(sizeof(*work));
+	if (work == NULL) {
+		dbg_printf("error allocating resize work, bailing out\n");
+		uatomic_dec(&ht->in_progress_resize);
+		return;
+	}
 	work->ht = ht;
 	ht->flavor->update_call_rcu(&work->head, do_resize_cb);
 	CMM_STORE_SHARED(ht->resize_initiated, 1);
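Finally, guarantee E and the removal-ownership rule combine at call
sites as sketched below: only the caller whose cds_lfht_del() returns
0 owns the node, and freeing must be deferred until a grace period
has elapsed. `struct mynode' and `free_node_rcu' are hypothetical
caller-side names, and the default urcu flavor's call_rcu() is
assumed (a table created with another flavor would use that flavor's
call_rcu() instead).

#include <stdlib.h>
#include <urcu.h>
#include <urcu/rculfhash.h>

struct mynode {
	struct cds_lfht_node ht_node;	/* chains into the hash table */
	struct rcu_head rcu_head;	/* for call_rcu() deferral */
};

static void free_node_rcu(struct rcu_head *head)
{
	free(caa_container_of(head, struct mynode, rcu_head));
}

static void delete_node(struct cds_lfht *ht, struct mynode *n)
{
	int ret;

	rcu_read_lock();
	ret = cds_lfht_del(ht, &n->ht_node);	/* needs read lock held */
	rcu_read_unlock();
	if (!ret)	/* 0: this caller owns the removal */
		call_rcu(&n->rcu_head, free_node_rcu);
}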