- struct rcu_ht_node *node, *old_head, *new_head;
- struct rcu_table *t;
- unsigned long hash;
- int ret = 0;
-
- new_head = calloc(1, sizeof(struct rcu_ht_node));
- new_head->key = key;
- new_head->data = data;
- new_head->flags = 0;
- /* here comes the fun and tricky part.
- * Add at the beginning with a cmpxchg.
- * Hold a read lock between the moment the first element is read
- * and the nodes traversal (to find duplicates). This ensures
- * the head pointer has not been reclaimed when cmpxchg is done.
- * Always adding at the head ensures that we would have to
- * re-try if a new item has been added concurrently. So we ensure that
- * we never add duplicates. */
-retry:
- rcu_read_lock();
-
- if (unlikely(LOAD_SHARED(ht->resize_ongoing))) {
- rcu_read_unlock();
- /*
- * Wait for resize to complete before continuing.
- */
- ret = pthread_mutex_lock(&ht->resize_mutex);
- assert(!ret);
- ret = pthread_mutex_unlock(&ht->resize_mutex);
- assert(!ret);
- goto retry;
+#define LT(n) n, n, n, n, n, n, n, n, n, n, n, n, n, n, n, n
+ -1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
+ LT(4), LT(5), LT(5), LT(6), LT(6), LT(6), LT(6),
+ LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7), LT(7)
+};
+
+/*
+ * Return floor(log2(v)) using the 256-entry LogTable256 lookup table
+ * (Sean Anderson's "Bit Twiddling Hacks" method): locate the highest
+ * non-zero byte of v, then add that byte's bit offset (24/16/8/0) to
+ * the table value for the byte.
+ * NOTE(review): for v == 0 this returns LogTable256[0] == -1 converted
+ * to uint32_t (0xffffffff) — callers are presumably expected to pass
+ * v > 0; confirm at call sites.
+ */
+uint32_t log2_u32(uint32_t v)
+{
+	uint32_t t, tt;
+
+	if ((tt = (v >> 16)))
+		return (t = (tt >> 8))
+			? 24 + LogTable256[t]
+			: 16 + LogTable256[tt];
+	else
+		return (t = (v >> 8))
+			? 8 + LogTable256[t]
+			: LogTable256[v];
+}
+
+/*
+ * Forward declaration: the resize itself is performed elsewhere
+ * ("lazy" — presumably deferred; definition is outside this hunk).
+ */
+static
+void ht_resize_lazy(struct rcu_ht *ht, struct rcu_table *t, int growth);
+
+/*
+ * Called once per node traversed while walking a bucket chain.
+ * If the chain length reaches BUCKET_SIZE_RESIZE_THRESHOLD, request a
+ * lazy resize of table @t, with a growth order of log2(chain_len).
+ */
+static
+void check_resize(struct rcu_ht *ht, struct rcu_table *t,
+		uint32_t chain_len)
+{
+	if (chain_len >= BUCKET_SIZE_RESIZE_THRESHOLD)
+		ht_resize_lazy(ht, t, log2_u32(chain_len));
+}
+
+/*
+ * Bit 0 of a node's "next" pointer is used as a logical-removal flag
+ * (assumes nodes are at least 2-byte aligned so bit 0 is free —
+ * TODO confirm allocation alignment).
+ * clear_flag() strips that bit to recover the real pointer.
+ */
+static
+struct rcu_ht_node *clear_flag(struct rcu_ht_node *node)
+{
+	return (struct rcu_ht_node *) (((unsigned long) node) & ~0x1);
+}
+
+/* Test the low-order "removed" flag bit of a (possibly flagged) pointer. */
+static
+int is_removed(struct rcu_ht_node *node)
+{
+	return ((unsigned long) node) & 0x1;
+}
+
+/* Return @node with the low-order "removed" flag bit set. */
+static
+struct rcu_ht_node *flag_removed(struct rcu_ht_node *node)
+{
+	return (struct rcu_ht_node *) (((unsigned long) node) | 0x1);
+}
+
+/*
+ * Atomically raise *ptr to at least @v, i.e. *ptr = max(*ptr, v)
+ * implemented as a cmpxchg retry loop.
+ * Returns the resulting value: the pre-existing value when it was
+ * already >= v, otherwise v.  On cmpxchg failure the observed value
+ * (old1) is re-checked against v before retrying, so a concurrent
+ * larger update terminates the loop.
+ */
+static
+unsigned long _uatomic_max(unsigned long *ptr, unsigned long v)
+{
+	unsigned long old1, old2;
+
+	old1 = uatomic_read(ptr);
+	do {
+		old2 = old1;
+		if (old2 >= v)
+			return old2;
+	} while ((old1 = uatomic_cmpxchg(ptr, old2, v)) != old2);
+	return v;
+}
+
+static
+void _ht_add(struct rcu_ht *ht, struct rcu_table *t, struct rcu_ht_node *node)
+{
+ struct rcu_ht_node *iter_prev = NULL, *iter = NULL;
+
+ if (!t->size)
+ return;
+ for (;;) {
+ uint32_t chain_len = 0;
+
+ iter_prev = rcu_dereference(t->tbl[node->hash & (t->size - 1)]);
+ assert(iter_prev);
+ assert(iter_prev->reverse_hash <= node->reverse_hash);
+ for (;;) {
+ iter = clear_flag(rcu_dereference(iter_prev->next));
+ if (unlikely(!iter))
+ break;
+ if (iter->reverse_hash < node->reverse_hash)
+ break;
+ iter_prev = iter;
+ check_resize(ht, t, ++chain_len);
+ }
+ /* add in iter_prev->next */
+ if (is_removed(iter))
+ continue;
+ assert(node != iter);
+ node->next = iter;
+ assert(iter_prev != node);
+ if (uatomic_cmpxchg(&iter_prev->next, iter, node) != iter)
+ continue;
+ break;