/*
 * rcuja/rcuja.c
 *
 * Userspace RCU library - RCU Judy Array
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdint.h>
#include <errno.h>
#include <limits.h>
#include <string.h>
#include <stdlib.h>	/* posix_memalign(), free() */
#include <urcu/rcuja.h>
#include <urcu/compiler.h>
#include <urcu/arch.h>
#include <assert.h>
#include <urcu-pointer.h>
#include <urcu/uatomic.h>
#include <stdint.h>

#include "rcuja-internal.h"

#ifndef abs
#define abs_int(a)	((int) (a) > 0 ? (int) (a) : -((int) (a)))
#endif

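/*
 * Note: abs_int() takes its argument cast to int, so a difference
 * computed in unsigned arithmetic (as done by the bit-distribution
 * code below) is reinterpreted as signed before the absolute value is
 * taken.
 */
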
enum cds_ja_type_class {
	RCU_JA_LINEAR = 0,	/* Type A */
			/* 32-bit: 1 to 25 children, 8 to 128 bytes */
			/* 64-bit: 1 to 28 children, 16 to 256 bytes */
	RCU_JA_POOL = 1,	/* Type B */
			/* 32-bit: 26 to 100 children, 256 to 512 bytes */
			/* 64-bit: 29 to 112 children, 512 to 1024 bytes */
	RCU_JA_PIGEON = 2,	/* Type C */
			/* 32-bit: 101 to 256 children, 1024 bytes */
			/* 64-bit: 113 to 256 children, 2048 bytes */
	/* Leaf nodes are implicit from their height in the tree */
	RCU_JA_NR_TYPES,

	RCU_JA_NULL,	/* not an encoded type, but keeps code regular */
};

struct cds_ja_type {
	enum cds_ja_type_class type_class;
	uint16_t min_child;		/* minimum number of children: 1 to 256 */
	uint16_t max_child;		/* maximum number of children: 1 to 256 */
	uint16_t max_linear_child;	/* per-pool max nr. children: 1 to 256 */
	uint16_t order;			/* node size is (1 << order), in bytes */
	uint16_t nr_pool_order;		/* number of pools */
	uint16_t pool_size_order;	/* pool size */
};

/*
 * Iteration on the array to find the right node size for the number of
 * children stops when it reaches .max_child == 256 (this is the largest
 * possible node size, which contains 256 children).
 * The min_child overlaps with the previous max_child to provide a
 * hysteresis loop for reallocation under patterns of cyclic add/removal
 * within the same node.
 * The node index within the following arrays is represented on 3
 * bits. It identifies the node type, min/max number of children, and
 * the size order.
 * The max_child values for the RCU_JA_POOL below result from
 * statistical approximation: over a million populations, the max_child
 * covers between 97% and 99% of the populations generated. Therefore, a
 * fallback should exist to cover the rare extreme population unbalance
 * cases, but it will not have a major impact on speed nor space
 * consumption, since those are rare cases.
 */

#if (CAA_BITS_PER_LONG < 64)
/* 32-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 6,
	ja_type_3_max_child = 12,
	ja_type_4_max_child = 25,
	ja_type_5_max_child = 48,
	ja_type_6_max_child = 92,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 0,	/* NULL */
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 6,
	ja_type_3_max_linear_child = 12,
	ja_type_4_max_linear_child = 25,
	ja_type_5_max_linear_child = 24,
	ja_type_6_max_linear_child = 23,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },

	/* Pools may fill sooner than max_child */
	/* This pool is hardcoded at index 5. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
	/* This pool is hardcoded at index 6. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },

	/*
	 * Upon node removal below min_child, if child pool is filled
	 * beyond capacity, we roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 83, .max_child = ja_type_7_max_child, .order = 10, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#else /* !(CAA_BITS_PER_LONG < 64) */
/* 64-bit pointers */
enum {
	ja_type_0_max_child = 1,
	ja_type_1_max_child = 3,
	ja_type_2_max_child = 7,
	ja_type_3_max_child = 14,
	ja_type_4_max_child = 28,
	ja_type_5_max_child = 54,
	ja_type_6_max_child = 104,
	ja_type_7_max_child = 256,
	ja_type_8_max_child = 256,
};

enum {
	ja_type_0_max_linear_child = 1,
	ja_type_1_max_linear_child = 3,
	ja_type_2_max_linear_child = 7,
	ja_type_3_max_linear_child = 14,
	ja_type_4_max_linear_child = 28,
	ja_type_5_max_linear_child = 27,
	ja_type_6_max_linear_child = 26,
};

enum {
	ja_type_5_nr_pool_order = 1,
	ja_type_6_nr_pool_order = 2,
};

const struct cds_ja_type ja_types[] = {
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
	{ .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },

	/* Pools may fill sooner than max_child. */
	/* This pool is hardcoded at index 5. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
	/* This pool is hardcoded at index 6. See ja_node_ptr(). */
	{ .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },

	/*
	 * Upon node removal below min_child, if child pool is filled
	 * beyond capacity, we roll back to pigeon.
	 */
	{ .type_class = RCU_JA_PIGEON, .min_child = 95, .max_child = ja_type_7_max_child, .order = 11, },

	{ .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
};
#endif /* !(BITS_PER_LONG < 64) */

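/*
 * Reading the 64-bit table above: for example, a node holding 30
 * children is stored as type index 5 (RCU_JA_POOL, min_child 22,
 * max_child 54), i.e. a 1 << 9 = 512-byte node split into two
 * 256-byte linear pools.
 */
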
static inline __attribute__((unused))
void static_array_size_check(void)
{
	CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
}

/*
 * The cds_ja_node contains the compressed node data needed for
 * read-side. For linear and pool node configurations, it starts with a
 * byte counting the number of children in the node. Then, the
 * node-specific data is placed.
 * The node mutex, if any is needed, protecting concurrent updates of
 * each node is placed in a separate hash table indexed by node address.
 * For the pigeon configuration, the number of children is also kept in
 * a separate hash table, indexed by node address, because it is only
 * required for updates.
 */

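/*
 * In-memory layout of a linear node (and of each linear entry of a
 * pool node): one nr_child byte, the child_value byte array, then the
 * child_ptr array placed at the next pointer-aligned address (see
 * align_ptr_size()).
 */
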
#define DECLARE_LINEAR_NODE(index)							\
	struct {									\
		uint8_t nr_child;							\
		uint8_t child_value[ja_type_## index ##_max_linear_child];		\
		struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
	}

#define DECLARE_POOL_NODE(index)							\
	struct {									\
		struct {								\
			uint8_t nr_child;						\
			uint8_t child_value[ja_type_## index ##_max_linear_child];	\
			struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
		} linear[1U << ja_type_## index ##_nr_pool_order];			\
	}

struct cds_ja_inode {
	union {
		/* Linear configuration */
		DECLARE_LINEAR_NODE(0) conf_0;
		DECLARE_LINEAR_NODE(1) conf_1;
		DECLARE_LINEAR_NODE(2) conf_2;
		DECLARE_LINEAR_NODE(3) conf_3;
		DECLARE_LINEAR_NODE(4) conf_4;

		/* Pool configuration */
		DECLARE_POOL_NODE(5) conf_5;
		DECLARE_POOL_NODE(6) conf_6;

		/* Pigeon configuration */
		struct {
			struct cds_ja_inode_flag *child[ja_type_7_max_child];
		} conf_7;
		/* data aliasing nodes for computed accesses */
		uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
	} u;
};

enum ja_recompact {
	JA_RECOMPACT_ADD_SAME,
	JA_RECOMPACT_ADD_NEXT,
	JA_RECOMPACT_DEL,
};

enum ja_lookup_inequality {
	JA_LOOKUP_BE,
	JA_LOOKUP_AE,
};

enum ja_direction {
	JA_LEFT,
	JA_RIGHT,
	JA_LEFTMOST,
	JA_RIGHTMOST,
};

static
struct cds_ja_inode *_ja_node_mask_ptr(struct cds_ja_inode_flag *node)
{
	return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
}

unsigned long ja_node_type(struct cds_ja_inode_flag *node)
{
	unsigned long type;

	if (_ja_node_mask_ptr(node) == NULL) {
		return NODE_INDEX_NULL;
	}
	type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
	assert(type < (1UL << JA_TYPE_BITS));
	return type;
}

static
struct cds_ja_inode *alloc_cds_ja_node(struct cds_ja *ja,
		const struct cds_ja_type *ja_type)
{
	size_t len = 1U << ja_type->order;
	void *p;
	int ret;

	ret = posix_memalign(&p, len, len);
	if (ret || !p) {
		return NULL;
	}
	memset(p, 0, len);
	uatomic_inc(&ja->nr_nodes_allocated);
	return p;
}

void free_cds_ja_node(struct cds_ja *ja, struct cds_ja_inode *node)
{
	/* Account for the free before releasing the node memory. */
	if (node)
		uatomic_inc(&ja->nr_nodes_freed);
	free(node);
}

#define __JA_ALIGN_MASK(v, mask)	(((v) + (mask)) & ~(mask))
#define JA_ALIGN(v, align)		__JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
#define __JA_FLOOR_MASK(v, mask)	((v) & ~(mask))
#define JA_FLOOR(v, align)		__JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)

static
uint8_t *align_ptr_size(uint8_t *ptr)
{
	return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
}

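/*
 * Example: with 8-byte pointers, JA_ALIGN(13, 8) == 16 and
 * JA_FLOOR(13, 8) == 8. align_ptr_size() locates the pointer array
 * that follows the byte-sized value array in linear nodes.
 */
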
static
uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
		struct cds_ja_inode *node)
{
	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	return rcu_dereference(node->u.data[0]);
}

/*
 * The order in which values and pointers are set does not matter: if
 * a value is missing, we return NULL. If a value is there, but its
 * associated pointer is still NULL, we return NULL too.
 */
static
struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	uint8_t nr_child;
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;
	struct cds_ja_inode_flag *ptr;
	unsigned int i;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child = ja_linear_node_get_nr_child(type, node);
	cmm_smp_rmb();	/* read nr_child before values and pointers */
	assert(nr_child <= type->max_linear_child);
	assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

	values = &node->u.data[1];
	for (i = 0; i < nr_child; i++) {
		if (CMM_LOAD_SHARED(values[i]) == n)
			break;
	}
	if (i >= nr_child) {
		if (caa_unlikely(node_flag_ptr))
			*node_flag_ptr = NULL;
		return NULL;
	}
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	ptr = rcu_dereference(pointers[i]);
	if (caa_unlikely(node_flag_ptr))
		*node_flag_ptr = &pointers[i];
	return ptr;
}

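/*
 * Memory-ordering note: the cmm_smp_rmb() in ja_linear_node_get_nth()
 * pairs with the cmm_smp_wmb() in ja_linear_node_set_nth(), which
 * publishes the new value and pointer before incrementing nr_child.
 */
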
static
struct cds_ja_inode_flag *ja_linear_node_get_direction(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		int n, uint8_t *result_key,
		enum ja_direction dir)
{
	uint8_t nr_child;
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;
	struct cds_ja_inode_flag *ptr;
	unsigned int i;
	int match_idx = -1, match_v;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	assert(dir == JA_LEFT || dir == JA_RIGHT);

	if (dir == JA_LEFT) {
		match_v = -1;
	} else {
		match_v = JA_ENTRY_PER_NODE;
	}

	nr_child = ja_linear_node_get_nr_child(type, node);
	cmm_smp_rmb();	/* read nr_child before values and pointers */
	assert(nr_child <= type->max_linear_child);
	assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);

	values = &node->u.data[1];
	for (i = 0; i < nr_child; i++) {
		unsigned int v;

		v = CMM_LOAD_SHARED(values[i]);
		if (dir == JA_LEFT) {
			if ((int) v < n && (int) v > match_v) {
				match_v = v;
				match_idx = i;
			}
		} else {
			if ((int) v > n && (int) v < match_v) {
				match_v = v;
				match_idx = i;
			}
		}
	}

	if (match_idx < 0) {
		return NULL;
	}
	assert(match_v >= 0 && match_v < JA_ENTRY_PER_NODE);

	*result_key = (uint8_t) match_v;
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	ptr = rcu_dereference(pointers[match_idx]);
	return ptr;
}

static
void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i,
		uint8_t *v,
		struct cds_ja_inode_flag **iter)
{
	uint8_t *values;
	struct cds_ja_inode_flag **pointers;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
	assert(i < ja_linear_node_get_nr_child(type, node));

	values = &node->u.data[1];
	*v = values[i];
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	*iter = pointers[i];
}

static
struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}
	return ja_linear_node_get_nth(type, linear, node_flag_ptr, n);
}

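/*
 * Pool selection example for nr_pool_order == 2: with bit selectors 5
 * and 1 (illustrative values) and n = 0x23, index[0] = ((0x23 >> 5) & 1)
 * << 1 = 2 and index[1] = (0x23 >> 1) & 1 = 1, so rindex = 3 selects
 * the fourth linear pool within the node.
 */
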
static
struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i)
{
	assert(type->type_class == RCU_JA_POOL);
	return (struct cds_ja_inode *)
		&node->u.data[(unsigned int) i << type->pool_size_order];
}

static
struct cds_ja_inode_flag *ja_pool_node_get_direction(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		int n, uint8_t *result_key,
		enum ja_direction dir)
{
	unsigned int pool_nr;
	int match_v;
	struct cds_ja_inode_flag *match_node_flag = NULL;

	assert(type->type_class == RCU_JA_POOL);
	assert(dir == JA_LEFT || dir == JA_RIGHT);

	if (dir == JA_LEFT) {
		match_v = -1;
	} else {
		match_v = JA_ENTRY_PER_NODE;
	}

	for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
		struct cds_ja_inode *pool =
			ja_pool_node_get_ith_pool(type,
				node, pool_nr);
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, pool);
		unsigned int j;

		for (j = 0; j < nr_child; j++) {
			struct cds_ja_inode_flag *iter;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, pool,
					j, &v, &iter);
			if (!iter)
				continue;
			if (dir == JA_LEFT) {
				if ((int) v < n && (int) v > match_v) {
					match_v = v;
					match_node_flag = iter;
				}
			} else {
				if ((int) v > n && (int) v < match_v) {
					match_v = v;
					match_node_flag = iter;
				}
			}
		}
	}
	if (match_node_flag)
		*result_key = (uint8_t) match_v;
	return match_node_flag;
}

static
struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode_flag **child_node_flag_ptr;
	struct cds_ja_inode_flag *child_node_flag;

	assert(type->type_class == RCU_JA_PIGEON);
	child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
	child_node_flag = rcu_dereference(*child_node_flag_ptr);
	dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
		child_node_flag_ptr);
	if (caa_unlikely(node_flag_ptr))
		*node_flag_ptr = child_node_flag_ptr;
	return child_node_flag;
}

static
struct cds_ja_inode_flag *ja_pigeon_node_get_direction(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		int n, uint8_t *result_key,
		enum ja_direction dir)
{
	struct cds_ja_inode_flag **child_node_flag_ptr;
	struct cds_ja_inode_flag *child_node_flag;
	int i;

	assert(type->type_class == RCU_JA_PIGEON);
	assert(dir == JA_LEFT || dir == JA_RIGHT);

	if (dir == JA_LEFT) {
		/* n - 1 is first value left of n */
		for (i = n - 1; i >= 0; i--) {
			child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[i];
			child_node_flag = rcu_dereference(*child_node_flag_ptr);
			if (child_node_flag) {
				dbg_printf("ja_pigeon_node_get_left child_node_flag %p\n",
					child_node_flag);
				*result_key = i;
				return child_node_flag;
			}
		}
	} else {
		/* n + 1 is first value right of n */
		for (i = n + 1; i < JA_ENTRY_PER_NODE; i++) {
			child_node_flag_ptr = &((struct cds_ja_inode_flag **) node->u.data)[i];
			child_node_flag = rcu_dereference(*child_node_flag_ptr);
			if (child_node_flag) {
				dbg_printf("ja_pigeon_node_get_right child_node_flag %p\n",
					child_node_flag);
				*result_key = i;
				return child_node_flag;
			}
		}
	}
	return NULL;
}

static
struct cds_ja_inode_flag *ja_pigeon_node_get_ith_pos(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		uint8_t i)
{
	return ja_pigeon_node_get_nth(type, node, NULL, i);
}

/*
 * ja_node_get_nth: get nth item from a node.
 * node_flag is already rcu_dereference'd.
 */
static
struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag ***node_flag_ptr,
		uint8_t n)
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_get_nth(type, node,
				node_flag_ptr, n);
	case RCU_JA_POOL:
		return ja_pool_node_get_nth(type, node, node_flag,
				node_flag_ptr, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_get_nth(type, node,
				node_flag_ptr, n);
	default:
		assert(0);
		return (void *) -1UL;
	}
}

static
struct cds_ja_inode_flag *ja_node_get_direction(struct cds_ja_inode_flag *node_flag,
		int n, uint8_t *result_key,
		enum ja_direction dir)
{
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_get_direction(type, node, n, result_key, dir);
	case RCU_JA_POOL:
		return ja_pool_node_get_direction(type, node, n, result_key, dir);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_get_direction(type, node, n, result_key, dir);
	default:
		assert(0);
		return (void *) -1UL;
	}
}

static
struct cds_ja_inode_flag *ja_node_get_leftright(struct cds_ja_inode_flag *node_flag,
		unsigned int n, uint8_t *result_key,
		enum ja_direction dir)
{
	return ja_node_get_direction(node_flag, n, result_key, dir);
}

static
struct cds_ja_inode_flag *ja_node_get_minmax(struct cds_ja_inode_flag *node_flag,
		uint8_t *result_key,
		enum ja_direction dir)
{
	switch (dir) {
	case JA_LEFTMOST:
		return ja_node_get_direction(node_flag,
				-1, result_key, JA_RIGHT);
	case JA_RIGHTMOST:
		return ja_node_get_direction(node_flag,
				JA_ENTRY_PER_NODE, result_key, JA_LEFT);
	default:
		assert(0);
	}
}

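/*
 * Leftmost/rightmost lookups reuse the directional search: searching
 * right of the sentinel key -1 returns the smallest populated slot,
 * and searching left of JA_ENTRY_PER_NODE (256) returns the largest.
 */
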
static
int ja_linear_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	uint8_t nr_child;
	uint8_t *values, *nr_child_ptr;
	struct cds_ja_inode_flag **pointers;
	unsigned int i, unused = 0;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child_ptr = &node->u.data[0];
	dbg_printf("linear set nth: n %u, nr_child_ptr %p\n",
		(unsigned int) n, nr_child_ptr);
	nr_child = *nr_child_ptr;
	assert(nr_child <= type->max_linear_child);

	values = &node->u.data[1];
	pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
	/* Check if node value is already populated */
	for (i = 0; i < nr_child; i++) {
		if (values[i] == n) {
			if (pointers[i])
				return -EEXIST;
			else
				break;
		} else {
			if (!pointers[i])
				unused++;
		}
	}
	if (i == nr_child && nr_child >= type->max_linear_child) {
		if (unused)
			return -ERANGE;	/* recompact node */
		else
			return -ENOSPC;	/* No space left in this node type */
	}

	assert(pointers[i] == NULL);
	rcu_assign_pointer(pointers[i], child_node_flag);
	/* If we expanded the nr_child, increment it */
	if (i == nr_child) {
		CMM_STORE_SHARED(values[nr_child], n);
		/* write pointer and value before nr_child */
		cmm_smp_wmb();
		CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
	}
	shadow_node->nr_child++;
	dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
		(unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
		(unsigned int) shadow_node->nr_child,
		node, shadow_node);

	return 0;
}

static
int ja_pool_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & 0x1;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}

	return ja_linear_node_set_nth(type, linear, shadow_node,
			n, child_node_flag);
}

static
int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	struct cds_ja_inode_flag **ptr;

	assert(type->type_class == RCU_JA_PIGEON);
	ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
	if (*ptr)
		return -EEXIST;
	rcu_assign_pointer(*ptr, child_node_flag);
	shadow_node->nr_child++;
	return 0;
}

/*
 * _ja_node_set_nth: set nth item within a node. Return an error
 * (negative error value) if it is already there.
 */
static
int _ja_node_set_nth(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		uint8_t n,
		struct cds_ja_inode_flag *child_node_flag)
{
	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_POOL:
		return ja_pool_node_set_nth(type, node, node_flag, shadow_node, n,
				child_node_flag);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_set_nth(type, node, shadow_node, n,
				child_node_flag);
	case RCU_JA_NULL:
		return -ENOSPC;
	default:
		assert(0);
		return -EINVAL;
	}

	return 0;
}

static
int ja_linear_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr)
{
	uint8_t nr_child;
	uint8_t *nr_child_ptr;

	assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);

	nr_child_ptr = &node->u.data[0];
	nr_child = *nr_child_ptr;
	assert(nr_child <= type->max_linear_child);

	if (type->type_class == RCU_JA_LINEAR) {
		assert(!shadow_node->fallback_removal_count);
		if (shadow_node->nr_child <= type->min_child) {
			/* We need to try recompacting the node */
			return -EFBIG;
		}
	}
	dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
	assert(*node_flag_ptr != NULL);
	rcu_assign_pointer(*node_flag_ptr, NULL);
	/*
	 * Value and nr_child are never changed (would cause ABA issue).
	 * Instead, we leave the pointer to NULL and recompact the node
	 * once in a while. It is allowed to set a NULL pointer to a new
	 * value without recompaction though.
	 * Only update the shadow node accounting.
	 */
	shadow_node->nr_child--;
	dbg_printf("linear clear ptr: %u child, shadow: %u child, for node %p shadow %p\n",
		(unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
		(unsigned int) shadow_node->nr_child,
		node, shadow_node);
	return 0;
}

static
int ja_pool_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr,
		uint8_t n)
{
	struct cds_ja_inode *linear;

	assert(type->type_class == RCU_JA_POOL);

	if (shadow_node->fallback_removal_count) {
		shadow_node->fallback_removal_count--;
	} else {
		/* We should try recompacting the node */
		if (shadow_node->nr_child <= type->min_child)
			return -EFBIG;
	}

	switch (type->nr_pool_order) {
	case 1:
	{
		unsigned long bitsel, index;

		bitsel = ja_node_pool_1d_bitsel(node_flag);
		assert(bitsel < CHAR_BIT);
		index = ((unsigned long) n >> bitsel) & type->nr_pool_order;
		linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
		break;
	}
	case 2:
	{
		unsigned long bitsel[2], index[2], rindex;

		ja_node_pool_2d_bitsel(node_flag, bitsel);
		assert(bitsel[0] < CHAR_BIT);
		assert(bitsel[1] < CHAR_BIT);
		index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
		index[0] <<= 1;
		index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
		rindex = index[0] | index[1];
		linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
		break;
	}
	default:
		linear = NULL;
		assert(0);
	}

	return ja_linear_node_clear_ptr(type, linear, shadow_node, node_flag_ptr);
}

static
int ja_pigeon_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr)
{
	assert(type->type_class == RCU_JA_PIGEON);

	if (shadow_node->fallback_removal_count) {
		shadow_node->fallback_removal_count--;
	} else {
		/* We should try recompacting the node */
		if (shadow_node->nr_child <= type->min_child)
			return -EFBIG;
	}
	dbg_printf("ja_pigeon_node_clear_ptr: clearing ptr: %p\n", *node_flag_ptr);
	rcu_assign_pointer(*node_flag_ptr, NULL);
	shadow_node->nr_child--;
	return 0;
}

/*
 * _ja_node_clear_ptr: clear ptr item within a node. Return an error
 * (negative error value) if it is not found (-ENOENT).
 */
static
int _ja_node_clear_ptr(const struct cds_ja_type *type,
		struct cds_ja_inode *node,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag **node_flag_ptr,
		uint8_t n)
{
	switch (type->type_class) {
	case RCU_JA_LINEAR:
		return ja_linear_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
	case RCU_JA_POOL:
		return ja_pool_node_clear_ptr(type, node, node_flag, shadow_node, node_flag_ptr, n);
	case RCU_JA_PIGEON:
		return ja_pigeon_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
	case RCU_JA_NULL:
		return -ENOENT;
	default:
		assert(0);
		return -EINVAL;
	}

	return 0;
}

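/*
 * Error codes returned by the node-level set/clear helpers above:
 * -EEXIST means the key is already populated, -ENOSPC means the node
 * type has no room left and must grow, -ERANGE means the node should
 * be recompacted at the same size, -EFBIG means the node dropped to
 * min_child or below and should shrink, and -ENOENT means the node is
 * NULL. The recompaction callers below map these to JA_RECOMPACT_*
 * modes.
 */
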
/*
 * Calculate bit distribution. Returns the bit (0 to 7) that splits the
 * distribution into two sub-distributions containing as close to the
 * same number of elements as possible.
 */
1002static
1003unsigned int ja_node_sum_distribution_1d(enum ja_recompact mode,
1004 struct cds_ja *ja,
1005 unsigned int type_index,
1006 const struct cds_ja_type *type,
1007 struct cds_ja_inode *node,
1008 struct cds_ja_shadow_node *shadow_node,
1009 uint8_t n,
1010 struct cds_ja_inode_flag *child_node_flag,
1011 struct cds_ja_inode_flag **nullify_node_flag_ptr)
1012{
1013 uint8_t nr_one[JA_BITS_PER_BYTE];
1014 unsigned int bitsel = 0, bit_i, overall_best_distance = UINT_MAX;
1015 unsigned int distrib_nr_child = 0;
1016
1017 memset(nr_one, 0, sizeof(nr_one));
1018
1019 switch (type->type_class) {
1020 case RCU_JA_LINEAR:
1021 {
1022 uint8_t nr_child =
1023 ja_linear_node_get_nr_child(type, node);
1024 unsigned int i;
1025
1026 for (i = 0; i < nr_child; i++) {
1027 struct cds_ja_inode_flag *iter;
b1a90ce3
MD
1028 uint8_t v;
1029
1030 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
1031 if (!iter)
1032 continue;
1033 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1034 continue;
f5531dd9
MD
1035 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1036 if (v & (1U << bit_i))
1037 nr_one[bit_i]++;
b1a90ce3
MD
1038 }
1039 distrib_nr_child++;
1040 }
1041 break;
1042 }
1043 case RCU_JA_POOL:
1044 {
1045 unsigned int pool_nr;
1046
1047 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
1048 struct cds_ja_inode *pool =
1049 ja_pool_node_get_ith_pool(type,
1050 node, pool_nr);
1051 uint8_t nr_child =
1052 ja_linear_node_get_nr_child(type, pool);
1053 unsigned int j;
1054
1055 for (j = 0; j < nr_child; j++) {
1056 struct cds_ja_inode_flag *iter;
b1a90ce3
MD
1057 uint8_t v;
1058
1059 ja_linear_node_get_ith_pos(type, pool,
1060 j, &v, &iter);
1061 if (!iter)
1062 continue;
1063 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1064 continue;
f5531dd9
MD
1065 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1066 if (v & (1U << bit_i))
1067 nr_one[bit_i]++;
b1a90ce3
MD
1068 }
1069 distrib_nr_child++;
1070 }
1071 }
1072 break;
1073 }
1074 case RCU_JA_PIGEON:
1075 {
b1a90ce3
MD
1076 unsigned int i;
1077
1078 assert(mode == JA_RECOMPACT_DEL);
48cbe001 1079 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
b1a90ce3 1080 struct cds_ja_inode_flag *iter;
b1a90ce3
MD
1081
1082 iter = ja_pigeon_node_get_ith_pos(type, node, i);
1083 if (!iter)
1084 continue;
1085 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1086 continue;
f5531dd9
MD
1087 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1088 if (i & (1U << bit_i))
1089 nr_one[bit_i]++;
b1a90ce3
MD
1090 }
1091 distrib_nr_child++;
1092 }
1093 break;
1094 }
1095 case RCU_JA_NULL:
19ddcd04 1096 assert(mode == JA_RECOMPACT_ADD_NEXT);
b1a90ce3
MD
1097 break;
1098 default:
1099 assert(0);
1100 break;
1101 }
1102
19ddcd04 1103 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
f5531dd9
MD
1104 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1105 if (n & (1U << bit_i))
1106 nr_one[bit_i]++;
b1a90ce3
MD
1107 }
1108 distrib_nr_child++;
1109 }
1110
1111 /*
1112 * The best bit selector is that for which the number of ones is
1113 * closest to half of the number of children in the
f5531dd9
MD
1114 * distribution. We calculate the distance using the double of
1115 * the sub-distribution sizes to eliminate truncation error.
b1a90ce3
MD
1116 */
1117 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1118 unsigned int distance_to_best;
1119
1b34283b 1120 distance_to_best = abs_int(((unsigned int) nr_one[bit_i] << 1U) - distrib_nr_child);
b1a90ce3
MD
1121 if (distance_to_best < overall_best_distance) {
1122 overall_best_distance = distance_to_best;
1123 bitsel = bit_i;
1124 }
1125 }
1126 dbg_printf("1 dimension pool bit selection: (%u)\n", bitsel);
1127 return bitsel;
1128}
1129
/*
 * Calculate bit distribution in two dimensions. Returns the two bits
 * (each 0 to 7) that split the distribution into four sub-distributions
 * containing as close to the same number of elements as possible.
 */
1135static
1136void ja_node_sum_distribution_2d(enum ja_recompact mode,
1137 struct cds_ja *ja,
1138 unsigned int type_index,
1139 const struct cds_ja_type *type,
1140 struct cds_ja_inode *node,
1141 struct cds_ja_shadow_node *shadow_node,
1142 uint8_t n,
1143 struct cds_ja_inode_flag *child_node_flag,
1144 struct cds_ja_inode_flag **nullify_node_flag_ptr,
1145 unsigned int *_bitsel)
1146{
1147 uint8_t nr_2d_11[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
1148 nr_2d_10[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
1149 nr_2d_01[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
1150 nr_2d_00[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE];
1151 unsigned int bitsel[2] = { 0, 1 };
4a073c53
MD
1152 unsigned int bit_i, bit_j;
1153 int overall_best_distance = INT_MAX;
19ddcd04
MD
1154 unsigned int distrib_nr_child = 0;
1155
1156 memset(nr_2d_11, 0, sizeof(nr_2d_11));
1157 memset(nr_2d_10, 0, sizeof(nr_2d_10));
4a073c53
MD
1158 memset(nr_2d_01, 0, sizeof(nr_2d_01));
1159 memset(nr_2d_00, 0, sizeof(nr_2d_00));
19ddcd04
MD
1160
1161 switch (type->type_class) {
1162 case RCU_JA_LINEAR:
1163 {
1164 uint8_t nr_child =
1165 ja_linear_node_get_nr_child(type, node);
1166 unsigned int i;
1167
1168 for (i = 0; i < nr_child; i++) {
1169 struct cds_ja_inode_flag *iter;
1170 uint8_t v;
1171
1172 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
1173 if (!iter)
1174 continue;
1175 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1176 continue;
1177 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1178 for (bit_j = 0; bit_j < bit_i; bit_j++) {
7f14b43a
MD
1179 if (v & (1U << bit_i)) {
1180 if (v & (1U << bit_j)) {
1181 nr_2d_11[bit_i][bit_j]++;
1182 } else {
1183 nr_2d_10[bit_i][bit_j]++;
1184 }
1185 } else {
1186 if (v & (1U << bit_j)) {
1187 nr_2d_01[bit_i][bit_j]++;
1188 } else {
1189 nr_2d_00[bit_i][bit_j]++;
1190 }
19ddcd04
MD
1191 }
1192 }
1193 }
1194 distrib_nr_child++;
1195 }
1196 break;
1197 }
1198 case RCU_JA_POOL:
1199 {
1200 unsigned int pool_nr;
1201
1202 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
1203 struct cds_ja_inode *pool =
1204 ja_pool_node_get_ith_pool(type,
1205 node, pool_nr);
1206 uint8_t nr_child =
1207 ja_linear_node_get_nr_child(type, pool);
1208 unsigned int j;
1209
1210 for (j = 0; j < nr_child; j++) {
1211 struct cds_ja_inode_flag *iter;
1212 uint8_t v;
1213
1214 ja_linear_node_get_ith_pos(type, pool,
1215 j, &v, &iter);
1216 if (!iter)
1217 continue;
1218 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1219 continue;
1220 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1221 for (bit_j = 0; bit_j < bit_i; bit_j++) {
7f14b43a
MD
1222 if (v & (1U << bit_i)) {
1223 if (v & (1U << bit_j)) {
1224 nr_2d_11[bit_i][bit_j]++;
1225 } else {
1226 nr_2d_10[bit_i][bit_j]++;
1227 }
1228 } else {
1229 if (v & (1U << bit_j)) {
1230 nr_2d_01[bit_i][bit_j]++;
1231 } else {
1232 nr_2d_00[bit_i][bit_j]++;
1233 }
19ddcd04
MD
1234 }
1235 }
1236 }
1237 distrib_nr_child++;
1238 }
1239 }
1240 break;
1241 }
1242 case RCU_JA_PIGEON:
1243 {
19ddcd04
MD
1244 unsigned int i;
1245
1246 assert(mode == JA_RECOMPACT_DEL);
48cbe001 1247 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
19ddcd04
MD
1248 struct cds_ja_inode_flag *iter;
1249
1250 iter = ja_pigeon_node_get_ith_pos(type, node, i);
1251 if (!iter)
1252 continue;
1253 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1254 continue;
1255 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1256 for (bit_j = 0; bit_j < bit_i; bit_j++) {
7f14b43a
MD
1257 if (i & (1U << bit_i)) {
1258 if (i & (1U << bit_j)) {
1259 nr_2d_11[bit_i][bit_j]++;
1260 } else {
1261 nr_2d_10[bit_i][bit_j]++;
1262 }
1263 } else {
1264 if (i & (1U << bit_j)) {
1265 nr_2d_01[bit_i][bit_j]++;
1266 } else {
1267 nr_2d_00[bit_i][bit_j]++;
1268 }
19ddcd04
MD
1269 }
1270 }
1271 }
1272 distrib_nr_child++;
1273 }
1274 break;
1275 }
1276 case RCU_JA_NULL:
1277 assert(mode == JA_RECOMPACT_ADD_NEXT);
1278 break;
1279 default:
1280 assert(0);
1281 break;
1282 }
1283
1284 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
1285 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1286 for (bit_j = 0; bit_j < bit_i; bit_j++) {
7f14b43a
MD
1287 if (n & (1U << bit_i)) {
1288 if (n & (1U << bit_j)) {
1289 nr_2d_11[bit_i][bit_j]++;
1290 } else {
1291 nr_2d_10[bit_i][bit_j]++;
1292 }
1293 } else {
1294 if (n & (1U << bit_j)) {
1295 nr_2d_01[bit_i][bit_j]++;
1296 } else {
1297 nr_2d_00[bit_i][bit_j]++;
1298 }
19ddcd04
MD
1299 }
1300 }
1301 }
1302 distrib_nr_child++;
1303 }
1304
1305 /*
1306 * The best bit selector is that for which the number of nodes
1307 * in each sub-class is closest to one-fourth of the number of
1308 * children in the distribution. We calculate the distance using
1309 * 4 times the size of the sub-distribution to eliminate
1310 * truncation error.
1311 */
1312 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1313 for (bit_j = 0; bit_j < bit_i; bit_j++) {
4a073c53 1314 int distance_to_best[4];
19ddcd04 1315
1b34283b
MD
1316 distance_to_best[0] = ((unsigned int) nr_2d_11[bit_i][bit_j] << 2U) - distrib_nr_child;
1317 distance_to_best[1] = ((unsigned int) nr_2d_10[bit_i][bit_j] << 2U) - distrib_nr_child;
1318 distance_to_best[2] = ((unsigned int) nr_2d_01[bit_i][bit_j] << 2U) - distrib_nr_child;
1319 distance_to_best[3] = ((unsigned int) nr_2d_00[bit_i][bit_j] << 2U) - distrib_nr_child;
19ddcd04 1320
4a073c53
MD
1321 /* Consider worse distance above best */
1322 if (distance_to_best[1] > 0 && distance_to_best[1] > distance_to_best[0])
19ddcd04 1323 distance_to_best[0] = distance_to_best[1];
4a073c53 1324 if (distance_to_best[2] > 0 && distance_to_best[2] > distance_to_best[0])
19ddcd04 1325 distance_to_best[0] = distance_to_best[2];
4a073c53 1326 if (distance_to_best[3] > 0 && distance_to_best[3] > distance_to_best[0])
19ddcd04 1327 distance_to_best[0] = distance_to_best[3];
4a073c53 1328
19ddcd04
MD
1329 /*
1330 * If our worse distance is better than overall,
1331 * we become new best candidate.
1332 */
1333 if (distance_to_best[0] < overall_best_distance) {
1334 overall_best_distance = distance_to_best[0];
1335 bitsel[0] = bit_i;
1336 bitsel[1] = bit_j;
1337 }
1338 }
1339 }
1340
1341 dbg_printf("2 dimensions pool bit selection: (%u,%u)\n", bitsel[0], bitsel[1]);
1342
1343 /* Return our bit selection */
1344 _bitsel[0] = bitsel[0];
1345 _bitsel[1] = bitsel[1];
1346}
1347
48cbe001
MD
1348static
1349unsigned int find_nearest_type_index(unsigned int type_index,
1350 unsigned int nr_nodes)
1351{
1352 const struct cds_ja_type *type;
1353
1354 assert(type_index != NODE_INDEX_NULL);
1355 if (nr_nodes == 0)
1356 return NODE_INDEX_NULL;
1357 for (;;) {
1358 type = &ja_types[type_index];
1359 if (nr_nodes < type->min_child)
1360 type_index--;
1361 else if (nr_nodes > type->max_child)
1362 type_index++;
1363 else
1364 break;
1365 }
1366 return type_index;
1367}
1368
/*
 * ja_node_recompact: recompact a node, adding or removing a child.
 * Return 0 on success, -EAGAIN if need to retry, or other negative
 * error value otherwise.
 */
1374static
2e313670
MD
1375int ja_node_recompact(enum ja_recompact mode,
1376 struct cds_ja *ja,
e1db2db5 1377 unsigned int old_type_index,
d96bfb0d 1378 const struct cds_ja_type *old_type,
b4540e8a 1379 struct cds_ja_inode *old_node,
5a9a87dd 1380 struct cds_ja_shadow_node *shadow_node,
3d8fe307 1381 struct cds_ja_inode_flag **old_node_flag_ptr, uint8_t n,
af3cbd45 1382 struct cds_ja_inode_flag *child_node_flag,
48cbe001
MD
1383 struct cds_ja_inode_flag **nullify_node_flag_ptr,
1384 int level)
7a0b2331 1385{
e1db2db5 1386 unsigned int new_type_index;
b4540e8a 1387 struct cds_ja_inode *new_node;
af3cbd45 1388 struct cds_ja_shadow_node *new_shadow_node = NULL;
d96bfb0d 1389 const struct cds_ja_type *new_type;
3d8fe307 1390 struct cds_ja_inode_flag *new_node_flag, *old_node_flag;
7a0b2331 1391 int ret;
f07b240f 1392 int fallback = 0;
7a0b2331 1393
3d8fe307
MD
1394 old_node_flag = *old_node_flag_ptr;
1395
48cbe001
MD
1396 /*
1397 * Need to find nearest type index even for ADD_SAME, because
1398 * this recompaction, when applied to linear nodes, will garbage
1399 * collect dummy (NULL) entries, and can therefore cause a few
1400 * linear representations to be skipped.
1401 */
2e313670 1402 switch (mode) {
19ddcd04 1403 case JA_RECOMPACT_ADD_SAME:
48cbe001
MD
1404 new_type_index = find_nearest_type_index(old_type_index,
1405 shadow_node->nr_child + 1);
1406 dbg_printf("Recompact for node with %u children\n",
1407 shadow_node->nr_child + 1);
2e313670 1408 break;
19ddcd04 1409 case JA_RECOMPACT_ADD_NEXT:
2e313670
MD
1410 if (!shadow_node || old_type_index == NODE_INDEX_NULL) {
1411 new_type_index = 0;
48cbe001 1412 dbg_printf("Recompact for NULL\n");
2e313670 1413 } else {
48cbe001
MD
1414 new_type_index = find_nearest_type_index(old_type_index,
1415 shadow_node->nr_child + 1);
1416 dbg_printf("Recompact for node with %u children\n",
1417 shadow_node->nr_child + 1);
2e313670
MD
1418 }
1419 break;
1420 case JA_RECOMPACT_DEL:
48cbe001
MD
1421 new_type_index = find_nearest_type_index(old_type_index,
1422 shadow_node->nr_child - 1);
1423 dbg_printf("Recompact for node with %u children\n",
1424 shadow_node->nr_child - 1);
2e313670
MD
1425 break;
1426 default:
1427 assert(0);
7a0b2331 1428 }
a2a7ff59 1429
f07b240f 1430retry: /* for fallback */
582a6ade
MD
1431 dbg_printf("Recompact from type %d to type %d\n",
1432 old_type_index, new_type_index);
7a0b2331 1433 new_type = &ja_types[new_type_index];
2e313670 1434 if (new_type_index != NODE_INDEX_NULL) {
354981c2 1435 new_node = alloc_cds_ja_node(ja, new_type);
2e313670
MD
1436 if (!new_node)
1437 return -ENOMEM;
b1a90ce3
MD
1438
1439 if (new_type->type_class == RCU_JA_POOL) {
1440 switch (new_type->nr_pool_order) {
1441 case 1:
1442 {
19ddcd04
MD
1443 unsigned int node_distrib_bitsel;
1444
b1a90ce3
MD
1445 node_distrib_bitsel =
1446 ja_node_sum_distribution_1d(mode, ja,
1447 old_type_index, old_type,
1448 old_node, shadow_node,
1449 n, child_node_flag,
1450 nullify_node_flag_ptr);
1451 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1452 new_node_flag = ja_node_flag_pool_1d(new_node,
1453 new_type_index, node_distrib_bitsel);
1454 break;
1455 }
1456 case 2:
1457 {
19ddcd04
MD
1458 unsigned int node_distrib_bitsel[2];
1459
1460 ja_node_sum_distribution_2d(mode, ja,
1461 old_type_index, old_type,
1462 old_node, shadow_node,
1463 n, child_node_flag,
1464 nullify_node_flag_ptr,
1465 node_distrib_bitsel);
b1a90ce3
MD
1466 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1467 assert(!((unsigned long) new_node & JA_POOL_2D_MASK));
19ddcd04
MD
1468 new_node_flag = ja_node_flag_pool_2d(new_node,
1469 new_type_index, node_distrib_bitsel);
b1a90ce3
MD
1470 break;
1471 }
1472 default:
1473 assert(0);
1474 }
1475 } else {
1476 new_node_flag = ja_node_flag(new_node, new_type_index);
1477 }
1478
2e313670 1479 dbg_printf("Recompact inherit lock from %p\n", shadow_node);
48cbe001 1480 new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja, level);
2e313670 1481 if (!new_shadow_node) {
354981c2 1482 free_cds_ja_node(ja, new_node);
2e313670
MD
1483 return -ENOMEM;
1484 }
1485 if (fallback)
1486 new_shadow_node->fallback_removal_count =
1487 JA_FALLBACK_REMOVAL_COUNT;
1488 } else {
1489 new_node = NULL;
1490 new_node_flag = NULL;
e1db2db5 1491 }
11c5e016 1492
19ddcd04 1493 assert(mode != JA_RECOMPACT_ADD_NEXT || old_type->type_class != RCU_JA_PIGEON);
2e313670
MD
1494
1495 if (new_type_index == NODE_INDEX_NULL)
1496 goto skip_copy;
1497
11c5e016
MD
1498 switch (old_type->type_class) {
1499 case RCU_JA_LINEAR:
1500 {
1501 uint8_t nr_child =
1502 ja_linear_node_get_nr_child(old_type, old_node);
1503 unsigned int i;
1504
1505 for (i = 0; i < nr_child; i++) {
b4540e8a 1506 struct cds_ja_inode_flag *iter;
11c5e016
MD
1507 uint8_t v;
1508
1509 ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
1510 if (!iter)
1511 continue;
af3cbd45 1512 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1513 continue;
b1a90ce3 1514 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
f07b240f 1515 new_shadow_node,
11c5e016 1516 v, iter);
f07b240f
MD
1517 if (new_type->type_class == RCU_JA_POOL && ret) {
1518 goto fallback_toosmall;
1519 }
11c5e016
MD
1520 assert(!ret);
1521 }
1522 break;
1523 }
1524 case RCU_JA_POOL:
1525 {
1526 unsigned int pool_nr;
1527
1528 for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
b4540e8a 1529 struct cds_ja_inode *pool =
11c5e016
MD
1530 ja_pool_node_get_ith_pool(old_type,
1531 old_node, pool_nr);
1532 uint8_t nr_child =
1533 ja_linear_node_get_nr_child(old_type, pool);
1534 unsigned int j;
1535
1536 for (j = 0; j < nr_child; j++) {
b4540e8a 1537 struct cds_ja_inode_flag *iter;
11c5e016
MD
1538 uint8_t v;
1539
1540 ja_linear_node_get_ith_pos(old_type, pool,
1541 j, &v, &iter);
1542 if (!iter)
1543 continue;
af3cbd45 1544 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1545 continue;
b1a90ce3 1546 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
f07b240f 1547 new_shadow_node,
11c5e016 1548 v, iter);
f07b240f
MD
1549 if (new_type->type_class == RCU_JA_POOL
1550 && ret) {
1551 goto fallback_toosmall;
1552 }
11c5e016
MD
1553 assert(!ret);
1554 }
1555 }
1556 break;
7a0b2331 1557 }
a2a7ff59 1558 case RCU_JA_NULL:
19ddcd04 1559 assert(mode == JA_RECOMPACT_ADD_NEXT);
a2a7ff59 1560 break;
11c5e016 1561 case RCU_JA_PIGEON:
2e313670 1562 {
2e313670
MD
1563 unsigned int i;
1564
1565 assert(mode == JA_RECOMPACT_DEL);
48cbe001 1566 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
2e313670
MD
1567 struct cds_ja_inode_flag *iter;
1568
1569 iter = ja_pigeon_node_get_ith_pos(old_type, old_node, i);
1570 if (!iter)
1571 continue;
af3cbd45 1572 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1573 continue;
b1a90ce3 1574 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
2e313670
MD
1575 new_shadow_node,
1576 i, iter);
1577 if (new_type->type_class == RCU_JA_POOL && ret) {
1578 goto fallback_toosmall;
1579 }
1580 assert(!ret);
1581 }
1582 break;
1583 }
11c5e016
MD
1584 default:
1585 assert(0);
5a9a87dd 1586 ret = -EINVAL;
f07b240f 1587 goto end;
11c5e016 1588 }
2e313670 1589skip_copy:
11c5e016 1590
19ddcd04 1591 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
2e313670 1592 /* add node */
b1a90ce3 1593 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
2e313670
MD
1594 new_shadow_node,
1595 n, child_node_flag);
7b413155
MD
1596 if (new_type->type_class == RCU_JA_POOL && ret) {
1597 goto fallback_toosmall;
1598 }
2e313670
MD
1599 assert(!ret);
1600 }
19ddcd04
MD
1601
1602 if (fallback) {
1603 dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n",
1604 new_shadow_node->nr_child, old_type_index, mode == JA_RECOMPACT_ADD_NEXT ? "add_next" :
1605 (mode == JA_RECOMPACT_DEL ? "del" : "add_same"));
354981c2 1606 uatomic_inc(&ja->node_fallback_count_distribution[new_shadow_node->nr_child]);
19ddcd04
MD
1607 }
1608
3d8fe307
MD
1609 /* Return pointer to new recompacted node through old_node_flag_ptr */
1610 *old_node_flag_ptr = new_node_flag;
a2a7ff59 1611 if (old_node) {
2e313670
MD
1612 int flags;
1613
1614 flags = RCUJA_SHADOW_CLEAR_FREE_NODE;
1615 /*
1616 * It is OK to free the lock associated with a node
1617 * going to NULL, since we are holding the parent lock.
1618 * This synchronizes removal with re-add of that node.
1619 */
1620 if (new_type_index == NODE_INDEX_NULL)
48cbe001 1621 flags |= RCUJA_SHADOW_CLEAR_FREE_LOCK;
3d8fe307 1622 ret = rcuja_shadow_clear(ja->ht, old_node_flag, shadow_node,
2e313670 1623 flags);
a2a7ff59
MD
1624 assert(!ret);
1625 }
5a9a87dd
MD
1626
1627 ret = 0;
f07b240f 1628end:
5a9a87dd 1629 return ret;
f07b240f
MD
1630
1631fallback_toosmall:
1632 /* fallback if next pool is too small */
af3cbd45 1633 assert(new_shadow_node);
3d8fe307 1634 ret = rcuja_shadow_clear(ja->ht, new_node_flag, new_shadow_node,
f07b240f
MD
1635 RCUJA_SHADOW_CLEAR_FREE_NODE);
1636 assert(!ret);
1637
19ddcd04
MD
1638 switch (mode) {
1639 case JA_RECOMPACT_ADD_SAME:
1640 /*
1641 * JA_RECOMPACT_ADD_SAME is only triggered if a linear
1642 * node within a pool has unused entries. It should
1643 * therefore _never_ be too small.
1644 */
4a073c53 1645 assert(0);
4cde8267
MD
1646
1647 /* Fall-through */
19ddcd04
MD
1648 case JA_RECOMPACT_ADD_NEXT:
1649 {
1650 const struct cds_ja_type *next_type;
1651
1652 /*
1653 * Recompaction attempt on add failed. Should only
1654 * happen if target node type is pool. Caused by
1655 * hard-to-split distribution. Recompact using the next
1656 * distribution size.
1657 */
1658 assert(new_type->type_class == RCU_JA_POOL);
1659 next_type = &ja_types[new_type_index + 1];
1660 /*
1661 * Try going to the next pool size if our population
1662 * fits within its range. This is not flagged as a
1663 * fallback.
1664 */
1665 if (shadow_node->nr_child + 1 >= next_type->min_child
1666 && shadow_node->nr_child + 1 <= next_type->max_child) {
1667 new_type_index++;
1668 goto retry;
1669 } else {
1670 new_type_index++;
1671 dbg_printf("Add fallback to type %d\n", new_type_index);
1672 uatomic_inc(&ja->nr_fallback);
1673 fallback = 1;
1674 goto retry;
1675 }
1676 break;
1677 }
1678 case JA_RECOMPACT_DEL:
1679 /*
1680 * Recompaction attempt on delete failed. Should only
1681 * happen if target node type is pool. This is caused by
1682 * a hard-to-split distribution. Recompact on same node
1683 * size, but flag current node as "fallback" to ensure
1684 * we don't attempt recompaction before some activity
1685 * has reshuffled our node.
1686 */
1687 assert(new_type->type_class == RCU_JA_POOL);
1688 new_type_index = old_type_index;
1689 dbg_printf("Delete fallback keeping type %d\n", new_type_index);
1690 uatomic_inc(&ja->nr_fallback);
1691 fallback = 1;
1692 goto retry;
1693 default:
1694 assert(0);
1695 return -EINVAL;
1696 }
1697
1698 /*
1699 * Last resort fallback: pigeon.
1700 */
f07b240f
MD
1701 new_type_index = (1UL << JA_TYPE_BITS) - 1;
1702 dbg_printf("Fallback to type %d\n", new_type_index);
1703 uatomic_inc(&ja->nr_fallback);
1704 fallback = 1;
1705 goto retry;
7a0b2331
MD
1706}
1707
5a9a87dd 1708/*
2e313670 1709 * Return 0 on success, -EAGAIN if need to retry, or other negative
5a9a87dd
MD
1710 * error value otherwise.
1711 */
7a0b2331 1712static
d96bfb0d 1713int ja_node_set_nth(struct cds_ja *ja,
b4540e8a 1714 struct cds_ja_inode_flag **node_flag, uint8_t n,
5a9a87dd 1715 struct cds_ja_inode_flag *child_node_flag,
48cbe001
MD
1716 struct cds_ja_shadow_node *shadow_node,
1717 int level)
7a0b2331
MD
1718{
1719 int ret;
e1db2db5 1720 unsigned int type_index;
d96bfb0d 1721 const struct cds_ja_type *type;
b4540e8a 1722 struct cds_ja_inode *node;
7a0b2331 1723
a2a7ff59
MD
1724 dbg_printf("ja_node_set_nth for n=%u, node %p, shadow %p\n",
1725 (unsigned int) n, ja_node_ptr(*node_flag), shadow_node);
1726
e1db2db5
MD
1727 node = ja_node_ptr(*node_flag);
1728 type_index = ja_node_type(*node_flag);
1729 type = &ja_types[type_index];
b1a90ce3 1730 ret = _ja_node_set_nth(type, node, *node_flag, shadow_node,
e1db2db5 1731 n, child_node_flag);
2e313670
MD
1732 switch (ret) {
1733 case -ENOSPC:
19ddcd04
MD
1734 /* Not enough space in node, need to recompact to next type. */
1735 ret = ja_node_recompact(JA_RECOMPACT_ADD_NEXT, ja, type_index, type, node,
48cbe001 1736 shadow_node, node_flag, n, child_node_flag, NULL, level);
2e313670
MD
1737 break;
1738 case -ERANGE:
1739 /* Node needs to be recompacted. */
19ddcd04 1740 ret = ja_node_recompact(JA_RECOMPACT_ADD_SAME, ja, type_index, type, node,
48cbe001 1741 shadow_node, node_flag, n, child_node_flag, NULL, level);
2e313670
MD
1742 break;
1743 }
1744 return ret;
1745}
1746
1747/*
1748 * Return 0 on success, -EAGAIN if need to retry, or other negative
1749 * error value otherwise.
1750 */
1751static
af3cbd45
MD
1752int ja_node_clear_ptr(struct cds_ja *ja,
1753 struct cds_ja_inode_flag **node_flag_ptr, /* Pointer to location to nullify */
1754 struct cds_ja_inode_flag **parent_node_flag_ptr, /* Address of parent ptr in its parent */
1755 struct cds_ja_shadow_node *shadow_node, /* of parent */
48cbe001 1756 uint8_t n, int level)
2e313670
MD
1757{
1758 int ret;
1759 unsigned int type_index;
1760 const struct cds_ja_type *type;
1761 struct cds_ja_inode *node;
1762
af3cbd45
MD
1763 dbg_printf("ja_node_clear_ptr for node %p, shadow %p, target ptr %p\n",
1764 ja_node_ptr(*parent_node_flag_ptr), shadow_node, node_flag_ptr);
2e313670 1765
af3cbd45
MD
1766 node = ja_node_ptr(*parent_node_flag_ptr);
1767 type_index = ja_node_type(*parent_node_flag_ptr);
2e313670 1768 type = &ja_types[type_index];
19ddcd04 1769 ret = _ja_node_clear_ptr(type, node, *parent_node_flag_ptr, shadow_node, node_flag_ptr, n);
2e313670 1770 if (ret == -EFBIG) {
19ddcd04 1771 /* Should try recompaction. */
2e313670 1772 ret = ja_node_recompact(JA_RECOMPACT_DEL, ja, type_index, type, node,
af3cbd45 1773 shadow_node, parent_node_flag_ptr, n, NULL,
48cbe001 1774 node_flag_ptr, level);
7a0b2331
MD
1775 }
1776 return ret;
1777}
be9a7474 1778
03ec1aeb 1779struct cds_ja_node *cds_ja_lookup(struct cds_ja *ja, uint64_t key)
b4540e8a 1780{
41975c12
MD
1781 unsigned int tree_depth, i;
1782 struct cds_ja_inode_flag *node_flag;
1783
1784 if (caa_unlikely(key > ja->key_max))
03ec1aeb 1785 return NULL;
41975c12 1786 tree_depth = ja->tree_depth;
5a9a87dd 1787 node_flag = rcu_dereference(ja->root);
41975c12 1788
5a9a87dd
MD
1789 /* level 0: root node */
1790 if (!ja_node_ptr(node_flag))
03ec1aeb 1791 return NULL;
5a9a87dd
MD
1792
1793 for (i = 1; i < tree_depth; i++) {
79b41067
MD
1794 uint8_t iter_key;
1795
1796 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
48cbe001 1797 node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
582a6ade
MD
1798 dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
1799 (unsigned int) iter_key, node_flag);
41975c12 1800 if (!ja_node_ptr(node_flag))
03ec1aeb 1801 return NULL;
41975c12
MD
1802 }
1803
5a9a87dd 1804 /* Last level lookup succeeded. We got an actual match. */
03ec1aeb 1805 return (struct cds_ja_node *) node_flag;
5a9a87dd
MD
1806}
1807
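/*
 * Illustrative usage sketch (not part of the library): callers embed
 * struct cds_ja_node in their own structure and hold the RCU read-side
 * lock across the lookup and while dereferencing the result. The
 * struct my_entry type and my_entry_lookup() helper below are
 * hypothetical names used only for this example.
 *
 *	struct my_entry {
 *		struct cds_ja_node ja_node;
 *		int value;
 *	};
 *
 *	static struct my_entry *my_entry_lookup(struct cds_ja *ja, uint64_t key)
 *	{
 *		struct cds_ja_node *node;
 *
 *		node = cds_ja_lookup(ja, key);
 *		if (!node)
 *			return NULL;
 *		return caa_container_of(node, struct my_entry, ja_node);
 *	}
 */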
b023ba9f
MD
1808static
1809struct cds_ja_node *cds_ja_lookup_inequality(struct cds_ja *ja, uint64_t key,
36305a3d 1810 uint64_t *result_key, enum ja_lookup_inequality mode)
291b2543
MD
1811{
1812 int tree_depth, level;
1813 struct cds_ja_inode_flag *node_flag, *cur_node_depth[JA_MAX_DEPTH];
36305a3d
MD
1814 uint8_t cur_key[JA_MAX_DEPTH];
1815 uint64_t _result_key = 0;
b023ba9f 1816 enum ja_direction dir;
291b2543 1817
b023ba9f
MD
1818 switch (mode) {
1819 case JA_LOOKUP_BE:
1820 if (caa_unlikely(key > ja->key_max || key == 0))
1821 return NULL;
1822 break;
1823 case JA_LOOKUP_AE:
1824 if (caa_unlikely(key >= ja->key_max))
1825 return NULL;
1826 break;
1827 default:
03ec1aeb 1828 return NULL;
b023ba9f 1829 }
291b2543
MD
1830
1831 memset(cur_node_depth, 0, sizeof(cur_node_depth));
36305a3d 1832 memset(cur_key, 0, sizeof(cur_key));
291b2543
MD
1833 tree_depth = ja->tree_depth;
1834 node_flag = rcu_dereference(ja->root);
1835 cur_node_depth[0] = node_flag;
1836
1837 /* level 0: root node */
1838 if (!ja_node_ptr(node_flag))
03ec1aeb 1839 return NULL;
291b2543
MD
1840
1841 for (level = 1; level < tree_depth; level++) {
1842 uint8_t iter_key;
1843
1844 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
1845 node_flag = ja_node_get_nth(node_flag, NULL, iter_key);
1846 if (!ja_node_ptr(node_flag))
1847 break;
36305a3d 1848 cur_key[level - 1] = iter_key;
291b2543 1849 cur_node_depth[level] = node_flag;
b023ba9f 1850 dbg_printf("cds_ja_lookup_inequality iter key lookup %u finds node_flag %p\n",
291b2543
MD
1851 (unsigned int) iter_key, node_flag);
1852 }
1853
1854 if (level == tree_depth) {
 1855 /* Last level lookup succeeded. We got an equal match. */
36305a3d
MD
1856 if (result_key)
1857 *result_key = key;
03ec1aeb 1858 return (struct cds_ja_node *) node_flag;
291b2543
MD
1859 }
1860
1861 /*
b023ba9f 1862 * Find highest value left/right of current node.
291b2543 1863 * Current node is cur_node_depth[level].
b023ba9f
MD
1864 * Start at current level. If we cannot find any key left/right
1865 * of ours, go one level up, seek highest value left/right of
1866 * current (recursively), and when we find one, get the
1867 * rightmost/leftmost child of its rightmost/leftmost child
1868 * (recursively).
291b2543 1869 */
b023ba9f
MD
1870 switch (mode) {
1871 case JA_LOOKUP_BE:
1872 dir = JA_LEFT;
1873 break;
1874 case JA_LOOKUP_AE:
1875 dir = JA_RIGHT;
1876 break;
1877 default:
1878 assert(0);
1879 }
291b2543
MD
1880 for (; level > 0; level--) {
1881 uint8_t iter_key;
1882
1883 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - level - 1)));
b023ba9f 1884 node_flag = ja_node_get_leftright(cur_node_depth[level - 1],
36305a3d
MD
1885 iter_key, &cur_key[level - 1], dir);
1886 /* If found left/right sibling, find rightmost/leftmost child. */
291b2543
MD
1887 if (ja_node_ptr(node_flag))
1888 break;
1889 }
1890
1891 if (!level) {
b023ba9f 1892 /* Reached the root and could not find a left/right sibling. */
03ec1aeb 1893 return NULL;
291b2543
MD
1894 }
1895
1896 level++;
3c52f0f9
MD
1897
1898 /*
4cef6f97 1899 * From this point, we are guaranteed to be able to find a
b023ba9f
MD
1900 * "below than"/"above than" match. ja_attach_node() and
1901 * ja_detach_node() both guarantee that it is not possible for a
1902 * lookup to reach a dead-end.
3c52f0f9
MD
1903 */
1904
b023ba9f
MD
1905 /*
1906 * Find rightmost/leftmost child of rightmost/leftmost child
1907 * (recursively).
1908 */
1909 switch (mode) {
1910 case JA_LOOKUP_BE:
1911 dir = JA_RIGHTMOST;
1912 break;
1913 case JA_LOOKUP_AE:
1914 dir = JA_LEFTMOST;
1915 break;
1916 default:
1917 assert(0);
1918 }
291b2543 1919 for (; level < tree_depth; level++) {
36305a3d 1920 node_flag = ja_node_get_minmax(node_flag, &cur_key[level - 1], dir);
291b2543
MD
1921 if (!ja_node_ptr(node_flag))
1922 break;
1923 }
1924
4cef6f97 1925 assert(level == tree_depth);
291b2543 1926
36305a3d
MD
1927 if (result_key) {
1928 for (level = 1; level < tree_depth; level++) {
1929 _result_key |= ((uint64_t) cur_key[level - 1])
1930 << (JA_BITS_PER_BYTE * (tree_depth - level - 1));
1931 }
1932 *result_key = _result_key;
1933 }
03ec1aeb 1934 return (struct cds_ja_node *) node_flag;
291b2543
MD
1935}
1936
36305a3d
MD
1937struct cds_ja_node *cds_ja_lookup_below_equal(struct cds_ja *ja,
1938 uint64_t key, uint64_t *result_key)
b023ba9f 1939{
36305a3d 1940 return cds_ja_lookup_inequality(ja, key, result_key, JA_LOOKUP_BE);
b023ba9f
MD
1941}
1942
36305a3d
MD
1943struct cds_ja_node *cds_ja_lookup_above_equal(struct cds_ja *ja,
1944 uint64_t key, uint64_t *result_key)
b023ba9f 1945{
36305a3d 1946 return cds_ja_lookup_inequality(ja, key, result_key, JA_LOOKUP_AE);
b023ba9f
MD
1947}
1948
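/*
 * Illustrative usage sketch for the inequality lookups (not part of the
 * library): they return the node whose key is the closest match at or
 * below (below_equal) or at or above (above_equal) the requested key,
 * and report that key through *result_key. For instance, with only
 * keys 10 and 20 populated:
 *
 *	uint64_t result_key;
 *	struct cds_ja_node *node;
 *
 *	node = cds_ja_lookup_below_equal(ja, 15, &result_key);
 *	// returns the node stored at key 10, result_key == 10
 *	node = cds_ja_lookup_above_equal(ja, 15, &result_key);
 *	// returns the node stored at key 20, result_key == 20
 *
 * As with cds_ja_lookup(), the RCU read-side lock must be held across
 * the call and while the returned node is used.
 */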
5a9a87dd
MD
1949/*
1950 * We reached an unpopulated node. Create it and the children we need,
1951 * and then attach the entire branch to the current node. This may
1952 * trigger recompaction of the current node. Locks needed: node lock
1953 * (for add), and, possibly, parent node lock (to update pointer due to
1954 * node recompaction).
1955 *
1956 * First take node lock, check if recompaction is needed, then take
1957 * parent lock (if needed). Then we can proceed to create the new
1958 * branch. Publish the new branch, and release locks.
1959 * TODO: we currently always take the parent lock even when not needed.
47d2eab3
MD
1960 *
1961 * ja_attach_node() ensures that a lookup will _never_ see a branch that
 1962 * leads to a dead-end: the entire content of the new branch is
 1963 * populated first, thus creating a cluster; only then is the cluster
 1964 * attached to the rest of the tree, making it visible
 1965 * to lookups.
5a9a87dd
MD
1966 */
1967static
1968int ja_attach_node(struct cds_ja *ja,
b0ca2d21 1969 struct cds_ja_inode_flag **attach_node_flag_ptr,
b62a8d0c 1970 struct cds_ja_inode_flag *attach_node_flag,
48cbe001
MD
1971 struct cds_ja_inode_flag *parent_attach_node_flag,
1972 struct cds_ja_inode_flag **old_node_flag_ptr,
1973 struct cds_ja_inode_flag *old_node_flag,
5a9a87dd 1974 uint64_t key,
79b41067 1975 unsigned int level,
5a9a87dd
MD
1976 struct cds_ja_node *child_node)
1977{
1978 struct cds_ja_shadow_node *shadow_node = NULL,
af3cbd45 1979 *parent_shadow_node = NULL;
5a9a87dd
MD
1980 struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
1981 int ret, i;
a2a7ff59 1982 struct cds_ja_inode_flag *created_nodes[JA_MAX_DEPTH];
5a9a87dd
MD
1983 int nr_created_nodes = 0;
1984
48cbe001
MD
1985 dbg_printf("Attach node at level %u (old_node_flag %p, attach_node_flag_ptr %p attach_node_flag %p, parent_attach_node_flag %p)\n",
1986 level, old_node_flag, attach_node_flag_ptr, attach_node_flag, parent_attach_node_flag);
a2a7ff59 1987
48cbe001
MD
1988 assert(!old_node_flag);
1989 if (attach_node_flag) {
1990 shadow_node = rcuja_shadow_lookup_lock(ja->ht, attach_node_flag);
1991 if (!shadow_node) {
1992 ret = -EAGAIN;
1993 goto end;
1994 }
5a9a87dd 1995 }
48cbe001 1996 if (parent_attach_node_flag) {
5a9a87dd 1997 parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
48cbe001 1998 parent_attach_node_flag);
5a9a87dd 1999 if (!parent_shadow_node) {
2e313670 2000 ret = -EAGAIN;
5a9a87dd
MD
2001 goto unlock_shadow;
2002 }
2003 }
2004
48cbe001 2005 if (old_node_flag_ptr && ja_node_ptr(*old_node_flag_ptr)) {
b306a0fe 2006 /*
c112acaa
MD
2007 * Target node has been updated between RCU lookup and
2008 * lock acquisition. We need to re-try lookup and
2009 * attach.
2010 */
2011 ret = -EAGAIN;
2012 goto unlock_parent;
2013 }
2014
9be99d4a
MD
2015 /*
2016 * Perform a lookup query to handle the case where
2017 * old_node_flag_ptr is NULL. We cannot use it to check if the
2018 * node has been populated between RCU lookup and mutex
2019 * acquisition.
2020 */
2021 if (!old_node_flag_ptr) {
2022 uint8_t iter_key;
2023 struct cds_ja_inode_flag *lookup_node_flag;
2024 struct cds_ja_inode_flag **lookup_node_flag_ptr;
2025
2026 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
2027 lookup_node_flag = ja_node_get_nth(attach_node_flag,
2028 &lookup_node_flag_ptr,
2029 iter_key);
2030 if (lookup_node_flag) {
2031 ret = -EEXIST;
2032 goto unlock_parent;
2033 }
2034 }
2035
c112acaa 2036 if (attach_node_flag_ptr && ja_node_ptr(*attach_node_flag_ptr) !=
b62a8d0c 2037 ja_node_ptr(attach_node_flag)) {
c112acaa
MD
2038 /*
2039 * Target node has been updated between RCU lookup and
2040 * lock acquisition. We need to re-try lookup and
2041 * attach.
b306a0fe
MD
2042 */
2043 ret = -EAGAIN;
2044 goto unlock_parent;
2045 }
2046
a2a7ff59 2047 /* Create new branch, starting from bottom */
03ec1aeb 2048 iter_node_flag = (struct cds_ja_inode_flag *) child_node;
5a9a87dd 2049
48cbe001 2050 for (i = ja->tree_depth - 1; i >= (int) level; i--) {
79b41067
MD
2051 uint8_t iter_key;
2052
48cbe001 2053 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i - 1)));
79b41067 2054 dbg_printf("branch creation level %d, key %u\n",
48cbe001 2055 i, (unsigned int) iter_key);
5a9a87dd
MD
2056 iter_dest_node_flag = NULL;
2057 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
79b41067 2058 iter_key,
5a9a87dd 2059 iter_node_flag,
48cbe001 2060 NULL, i);
9be99d4a
MD
2061 if (ret) {
2062 dbg_printf("branch creation error %d\n", ret);
5a9a87dd 2063 goto check_error;
9be99d4a 2064 }
5a9a87dd
MD
2065 created_nodes[nr_created_nodes++] = iter_dest_node_flag;
2066 iter_node_flag = iter_dest_node_flag;
2067 }
48cbe001 2068 assert(level > 0);
5a9a87dd 2069
48cbe001
MD
2070 /* Publish branch */
2071 if (level == 1) {
2072 /*
2073 * Attaching to root node.
2074 */
2075 rcu_assign_pointer(ja->root, iter_node_flag);
2076 } else {
79b41067
MD
2077 uint8_t iter_key;
2078
2079 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
48cbe001
MD
2080 dbg_printf("publish branch at level %d, key %u\n",
2081 level - 1, (unsigned int) iter_key);
a2a7ff59 2082 /* We need to use set_nth on the previous level. */
48cbe001 2083 iter_dest_node_flag = attach_node_flag;
a2a7ff59 2084 ret = ja_node_set_nth(ja, &iter_dest_node_flag,
79b41067 2085 iter_key,
a2a7ff59 2086 iter_node_flag,
48cbe001 2087 shadow_node, level - 1);
9be99d4a
MD
2088 if (ret) {
2089 dbg_printf("branch publish error %d\n", ret);
a2a7ff59 2090 goto check_error;
9be99d4a 2091 }
48cbe001
MD
2092 /*
2093 * Attach branch
2094 */
2095 rcu_assign_pointer(*attach_node_flag_ptr, iter_dest_node_flag);
a2a7ff59
MD
2096 }
2097
5a9a87dd
MD
2098 /* Success */
2099 ret = 0;
2100
2101check_error:
2102 if (ret) {
2103 for (i = 0; i < nr_created_nodes; i++) {
2104 int tmpret;
a2a7ff59
MD
2105 int flags;
2106
2107 flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
2108 if (i)
2109 flags |= RCUJA_SHADOW_CLEAR_FREE_NODE;
5a9a87dd 2110 tmpret = rcuja_shadow_clear(ja->ht,
3d8fe307 2111 created_nodes[i],
a2a7ff59
MD
2112 NULL,
2113 flags);
5a9a87dd
MD
2114 assert(!tmpret);
2115 }
2116 }
b306a0fe 2117unlock_parent:
5a9a87dd
MD
2118 if (parent_shadow_node)
2119 rcuja_shadow_unlock(parent_shadow_node);
2120unlock_shadow:
2121 if (shadow_node)
2122 rcuja_shadow_unlock(shadow_node);
2123end:
2124 return ret;
2125}
2126
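/*
 * Worked example of the per-level key split used above (illustrative):
 * with 64-bit keys, ja->tree_depth is 9 (index 0 is the root pointer,
 * followed by 8 byte-indexed levels). For key 0x0102030405060708,
 * level i uses
 *	iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)))
 * which yields 0x01 at level 1, 0x02 at level 2, ..., 0x08 at level 8.
 * ja_attach_node() builds one node per missing level indexed by these
 * bytes, bottom-up, and only publishes the completed branch at the end.
 */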
2127/*
03ec1aeb
MD
 2128 * Lock the parent containing the pointer to the list of duplicates, and add
 2129 * the node to this list. Failure can happen if a concurrent update changes
2130 * the parent before we get the lock. We return -EAGAIN in that case.
5a9a87dd
MD
2131 * Return 0 on success, negative error value on failure.
2132 */
2133static
2134int ja_chain_node(struct cds_ja *ja,
af3cbd45 2135 struct cds_ja_inode_flag *parent_node_flag,
fa112799 2136 struct cds_ja_inode_flag **node_flag_ptr,
c112acaa 2137 struct cds_ja_inode_flag *node_flag,
5a9a87dd
MD
2138 struct cds_ja_node *node)
2139{
2140 struct cds_ja_shadow_node *shadow_node;
fa112799 2141 int ret = 0;
5a9a87dd 2142
3d8fe307 2143 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
b306a0fe 2144 if (!shadow_node) {
2e313670 2145 return -EAGAIN;
b306a0fe 2146 }
c112acaa 2147 if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
fa112799
MD
2148 ret = -EAGAIN;
2149 goto end;
2150 }
03ec1aeb
MD
2151 /*
2152 * Add node to head of list. Safe against concurrent RCU read
2153 * traversals.
2154 */
2155 node->next = (struct cds_ja_node *) node_flag;
2156 rcu_assign_pointer(*node_flag_ptr, (struct cds_ja_inode_flag *) node);
fa112799 2157end:
5a9a87dd 2158 rcuja_shadow_unlock(shadow_node);
fa112799 2159 return ret;
5a9a87dd
MD
2160}
2161
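/*
 * Duplicate handling sketch (illustrative): all nodes sharing a key are
 * chained through their next pointer, and the head of the chain is
 * stored directly in the parent slot. A reader can traverse the
 * duplicates under the RCU read-side lock:
 *
 *	struct cds_ja_node *iter;
 *
 *	iter = cds_ja_lookup(ja, key);
 *	cds_ja_for_each_duplicate_rcu(iter) {
 *		// visit each node stored under this key
 *	}
 *
 * ja_chain_node() above can publish the new head with a single
 * rcu_assign_pointer() because the new node's next pointer is set
 * before publication, so readers never observe a partially linked list.
 */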
75d573aa
MD
2162static
2163int _cds_ja_add(struct cds_ja *ja, uint64_t key,
6475613c 2164 struct cds_ja_node *node,
75d573aa 2165 struct cds_ja_node **unique_node_ret)
5a9a87dd
MD
2166{
2167 unsigned int tree_depth, i;
48cbe001 2168 struct cds_ja_inode_flag *attach_node_flag,
5a9a87dd 2169 *parent_node_flag,
b62a8d0c 2170 *parent2_node_flag,
48cbe001
MD
2171 *node_flag,
2172 *parent_attach_node_flag;
2173 struct cds_ja_inode_flag **attach_node_flag_ptr,
2174 **parent_node_flag_ptr,
2175 **node_flag_ptr;
5a9a87dd
MD
2176 int ret;
2177
b306a0fe 2178 if (caa_unlikely(key > ja->key_max)) {
5a9a87dd 2179 return -EINVAL;
b306a0fe 2180 }
5a9a87dd
MD
2181 tree_depth = ja->tree_depth;
2182
2183retry:
a2a7ff59 2184 dbg_printf("cds_ja_add attempt: key %" PRIu64 ", node %p\n",
6475613c 2185 key, node);
5a9a87dd 2186 parent2_node_flag = NULL;
b0f74e47
MD
2187 parent_node_flag =
2188 (struct cds_ja_inode_flag *) &ja->root; /* Use root ptr address as key for mutex */
48cbe001 2189 parent_node_flag_ptr = NULL;
35170a44 2190 node_flag = rcu_dereference(ja->root);
48cbe001 2191 node_flag_ptr = &ja->root;
5a9a87dd
MD
2192
2193 /* Iterate on all internal levels */
a2a7ff59 2194 for (i = 1; i < tree_depth; i++) {
79b41067
MD
2195 uint8_t iter_key;
2196
48cbe001
MD
2197 if (!ja_node_ptr(node_flag))
2198 break;
2199 dbg_printf("cds_ja_add iter parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
2200 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
79b41067 2201 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
5a9a87dd
MD
2202 parent2_node_flag = parent_node_flag;
2203 parent_node_flag = node_flag;
48cbe001 2204 parent_node_flag_ptr = node_flag_ptr;
5a9a87dd
MD
2205 node_flag = ja_node_get_nth(node_flag,
2206 &node_flag_ptr,
79b41067 2207 iter_key);
5a9a87dd
MD
2208 }
2209
2210 /*
48cbe001
MD
 2211 * We reached either the bottom of the tree or an internal NULL node:
 2212 * simply add the node to the last internal level, or chain it if the key is
2213 * already present.
5a9a87dd
MD
2214 */
2215 if (!ja_node_ptr(node_flag)) {
48cbe001
MD
2216 dbg_printf("cds_ja_add NULL parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
2217 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
75d573aa 2218
48cbe001
MD
2219 attach_node_flag = parent_node_flag;
2220 attach_node_flag_ptr = parent_node_flag_ptr;
2221 parent_attach_node_flag = parent2_node_flag;
2222
b0ca2d21 2223 ret = ja_attach_node(ja, attach_node_flag_ptr,
b62a8d0c 2224 attach_node_flag,
48cbe001
MD
2225 parent_attach_node_flag,
2226 node_flag_ptr,
2227 node_flag,
6475613c 2228 key, i, node);
5a9a87dd 2229 } else {
75d573aa
MD
2230 if (unique_node_ret) {
2231 *unique_node_ret = (struct cds_ja_node *) ja_node_ptr(node_flag);
2232 return -EEXIST;
2233 }
2234
48cbe001
MD
2235 dbg_printf("cds_ja_add duplicate parent2_node_flag %p parent_node_flag %p node_flag_ptr %p node_flag %p\n",
2236 parent2_node_flag, parent_node_flag, node_flag_ptr, node_flag);
75d573aa 2237
48cbe001
MD
2238 attach_node_flag = node_flag;
2239 attach_node_flag_ptr = node_flag_ptr;
2240 parent_attach_node_flag = parent_node_flag;
2241
5a9a87dd 2242 ret = ja_chain_node(ja,
48cbe001
MD
2243 parent_attach_node_flag,
2244 attach_node_flag_ptr,
2245 attach_node_flag,
6475613c 2246 node);
5a9a87dd 2247 }
b306a0fe 2248 if (ret == -EAGAIN || ret == -EEXIST)
5a9a87dd 2249 goto retry;
48cbe001 2250
5a9a87dd 2251 return ret;
b4540e8a
MD
2252}
2253
75d573aa 2254int cds_ja_add(struct cds_ja *ja, uint64_t key,
6475613c 2255 struct cds_ja_node *node)
75d573aa 2256{
6475613c 2257 return _cds_ja_add(ja, key, node, NULL);
75d573aa
MD
2258}
2259
2260struct cds_ja_node *cds_ja_add_unique(struct cds_ja *ja, uint64_t key,
6475613c 2261 struct cds_ja_node *node)
75d573aa
MD
2262{
2263 int ret;
2264 struct cds_ja_node *ret_node;
2265
6475613c 2266 ret = _cds_ja_add(ja, key, node, &ret_node);
75d573aa
MD
2267 if (ret == -EEXIST)
2268 return ret_node;
2269 else
6475613c 2270 return node;
75d573aa
MD
2271}
2272
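/*
 * Insertion sketch (illustrative, reusing the hypothetical struct
 * my_entry from the lookup example above): cds_ja_add() always inserts
 * and chains duplicates, whereas cds_ja_add_unique() either inserts the
 * node or returns the node already present for that key. Both are
 * typically called from within an RCU read-side critical section, like
 * the other update operations on the array.
 *
 *	struct my_entry *entry = calloc(1, sizeof(*entry));
 *	struct cds_ja_node *ret_node;
 *
 *	rcu_read_lock();
 *	ret_node = cds_ja_add_unique(ja, key, &entry->ja_node);
 *	rcu_read_unlock();
 *	if (ret_node != &entry->ja_node) {
 *		// key already present: entry was not inserted
 *		free(entry);
 *	}
 */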
af3cbd45
MD
2273/*
2274 * Note: there is no need to lookup the pointer address associated with
2275 * each node's nth item after taking the lock: it's already been done by
2276 * cds_ja_del while holding the rcu read-side lock, and our node rules
2277 * ensure that when a match value -> pointer is found in a node, it is
2278 * _NEVER_ changed for that node without recompaction, and recompaction
2279 * reallocates the node.
b306a0fe
MD
2280 * However, when a child is removed from "linear" nodes, its pointer
2281 * is set to NULL. We therefore check, while holding the locks, if this
2282 * pointer is NULL, and return -ENOENT to the caller if it is the case.
4cef6f97
MD
2283 *
2284 * ja_detach_node() ensures that a lookup will _never_ see a branch that
 2285 * leads to a dead-end: when removing a branch, it makes sure to perform
2286 * the "cut" at the highest node that has only one child, effectively
2287 * replacing it with a NULL pointer.
af3cbd45 2288 */
35170a44
MD
2289static
2290int ja_detach_node(struct cds_ja *ja,
2291 struct cds_ja_inode_flag **snapshot,
af3cbd45
MD
2292 struct cds_ja_inode_flag ***snapshot_ptr,
2293 uint8_t *snapshot_n,
35170a44
MD
2294 int nr_snapshot,
2295 uint64_t key,
2296 struct cds_ja_node *node)
2297{
af3cbd45
MD
2298 struct cds_ja_shadow_node *shadow_nodes[JA_MAX_DEPTH];
2299 struct cds_ja_inode_flag **node_flag_ptr = NULL,
2300 *parent_node_flag = NULL,
2301 **parent_node_flag_ptr = NULL;
b62a8d0c 2302 struct cds_ja_inode_flag *iter_node_flag;
4d6ef45e
MD
2303 int ret, i, nr_shadow = 0, nr_clear = 0, nr_branch = 0;
2304 uint8_t n = 0;
35170a44 2305
4d6ef45e 2306 assert(nr_snapshot == ja->tree_depth + 1);
35170a44 2307
af3cbd45
MD
2308 /*
2309 * From the last internal level node going up, get the node
2310 * lock, check if the node has only one child left. If it is the
2311 * case, we continue iterating upward. When we reach a node
 2312 * which has more than one child left, we lock the parent, and
2313 * proceed to the node deletion (removing its children too).
2314 */
4d6ef45e 2315 for (i = nr_snapshot - 2; i >= 1; i--) {
af3cbd45
MD
2316 struct cds_ja_shadow_node *shadow_node;
2317
2318 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 2319 snapshot[i]);
af3cbd45
MD
2320 if (!shadow_node) {
2321 ret = -EAGAIN;
2322 goto end;
2323 }
af3cbd45 2324 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c
MD
2325
2326 /*
2327 * Check if node has been removed between RCU
2328 * lookup and lock acquisition.
2329 */
2330 assert(snapshot_ptr[i + 1]);
2331 if (ja_node_ptr(*snapshot_ptr[i + 1])
2332 != ja_node_ptr(snapshot[i + 1])) {
2333 ret = -ENOENT;
2334 goto end;
2335 }
2336
2337 assert(shadow_node->nr_child > 0);
d810c97f 2338 if (shadow_node->nr_child == 1 && i > 1)
4d6ef45e
MD
2339 nr_clear++;
2340 nr_branch++;
af3cbd45
MD
2341 if (shadow_node->nr_child > 1 || i == 1) {
2342 /* Lock parent and break */
2343 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 2344 snapshot[i - 1]);
af3cbd45
MD
2345 if (!shadow_node) {
2346 ret = -EAGAIN;
2347 goto end;
2348 }
2349 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c 2350
c112acaa
MD
2351 /*
2352 * Check if node has been removed between RCU
2353 * lookup and lock acquisition.
2354 */
b62a8d0c
MD
2355 assert(snapshot_ptr[i]);
2356 if (ja_node_ptr(*snapshot_ptr[i])
2357 != ja_node_ptr(snapshot[i])) {
c112acaa
MD
2358 ret = -ENOENT;
2359 goto end;
2360 }
2361
b62a8d0c 2362 node_flag_ptr = snapshot_ptr[i + 1];
4d6ef45e
MD
2363 n = snapshot_n[i + 1];
2364 parent_node_flag_ptr = snapshot_ptr[i];
2365 parent_node_flag = snapshot[i];
c112acaa 2366
af3cbd45
MD
2367 if (i > 1) {
2368 /*
2369 * Lock parent's parent, in case we need
2370 * to recompact parent.
2371 */
2372 shadow_node = rcuja_shadow_lookup_lock(ja->ht,
3d8fe307 2373 snapshot[i - 2]);
af3cbd45
MD
2374 if (!shadow_node) {
2375 ret = -EAGAIN;
2376 goto end;
2377 }
2378 shadow_nodes[nr_shadow++] = shadow_node;
b62a8d0c
MD
2379
2380 /*
2381 * Check if node has been removed between RCU
2382 * lookup and lock acquisition.
2383 */
2384 assert(snapshot_ptr[i - 1]);
2385 if (ja_node_ptr(*snapshot_ptr[i - 1])
2386 != ja_node_ptr(snapshot[i - 1])) {
2387 ret = -ENOENT;
2388 goto end;
2389 }
af3cbd45 2390 }
b62a8d0c 2391
af3cbd45
MD
2392 break;
2393 }
2394 }
2395
2396 /*
4d6ef45e
MD
2397 * At this point, we want to delete all nodes that are about to
2398 * be removed from shadow_nodes (except the last one, which is
 2399 * either the root or the parent of the uppermost node with 1
b62a8d0c
MD
 2400 * child). It is OK to free the lock here, because the RCU read lock is held,
 2401 * and the free is only performed from call_rcu.
af3cbd45
MD
2402 */
2403
2404 for (i = 0; i < nr_clear; i++) {
2405 ret = rcuja_shadow_clear(ja->ht,
3d8fe307 2406 shadow_nodes[i]->node_flag,
af3cbd45
MD
2407 shadow_nodes[i],
2408 RCUJA_SHADOW_CLEAR_FREE_NODE
2409 | RCUJA_SHADOW_CLEAR_FREE_LOCK);
2410 assert(!ret);
2411 }
2412
2413 iter_node_flag = parent_node_flag;
2414 /* Remove from parent */
2415 ret = ja_node_clear_ptr(ja,
2416 node_flag_ptr, /* Pointer to location to nullify */
2417 &iter_node_flag, /* Old new parent ptr in its parent */
4d6ef45e 2418 shadow_nodes[nr_branch - 1], /* of parent */
48cbe001 2419 n, nr_branch - 1);
b306a0fe
MD
2420 if (ret)
2421 goto end;
af3cbd45 2422
4d6ef45e
MD
2423 dbg_printf("ja_detach_node: publish %p instead of %p\n",
2424 iter_node_flag, *parent_node_flag_ptr);
af3cbd45
MD
2425 /* Update address of parent ptr in its parent */
2426 rcu_assign_pointer(*parent_node_flag_ptr, iter_node_flag);
2427
2428end:
2429 for (i = 0; i < nr_shadow; i++)
2430 rcuja_shadow_unlock(shadow_nodes[i]);
35170a44
MD
2431 return ret;
2432}
2433
af3cbd45
MD
2434static
2435int ja_unchain_node(struct cds_ja *ja,
2436 struct cds_ja_inode_flag *parent_node_flag,
fa112799 2437 struct cds_ja_inode_flag **node_flag_ptr,
013a6083 2438 struct cds_ja_inode_flag *node_flag,
af3cbd45
MD
2439 struct cds_ja_node *node)
2440{
2441 struct cds_ja_shadow_node *shadow_node;
03ec1aeb 2442 struct cds_ja_node *iter_node, **iter_node_ptr, **prev_node_ptr = NULL;
013a6083 2443 int ret = 0, count = 0, found = 0;
af3cbd45 2444
3d8fe307 2445 shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
af3cbd45
MD
2446 if (!shadow_node)
2447 return -EAGAIN;
013a6083 2448 if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
fa112799
MD
2449 ret = -EAGAIN;
2450 goto end;
2451 }
af3cbd45 2452 /*
03ec1aeb
MD
2453 * Find the previous node's next pointer pointing to our node,
2454 * so we can update it. Retry if another thread removed all but
 2455 * one of the duplicates since our check (that check was performed
 2456 * without the lock). Ensure that the node we are about to remove is
2457 * still in the list (while holding lock). No need for RCU
2458 * traversal here since we hold the lock on the parent.
af3cbd45 2459 */
03ec1aeb
MD
2460 iter_node_ptr = (struct cds_ja_node **) node_flag_ptr;
2461 iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
2462 cds_ja_for_each_duplicate(iter_node) {
f2758d14 2463 count++;
03ec1aeb
MD
2464 if (iter_node == node) {
2465 prev_node_ptr = iter_node_ptr;
013a6083 2466 found++;
03ec1aeb
MD
2467 }
2468 iter_node_ptr = &iter_node->next;
f2758d14 2469 }
013a6083
MD
2470 assert(found <= 1);
2471 if (!found || count == 1) {
af3cbd45
MD
2472 ret = -EAGAIN;
2473 goto end;
2474 }
03ec1aeb 2475 CMM_STORE_SHARED(*prev_node_ptr, node->next);
ade342cb
MD
2476 /*
2477 * Validate that we indeed removed the node from linked list.
2478 */
2479 assert(ja_node_ptr(*node_flag_ptr) != (struct cds_ja_inode *) node);
af3cbd45
MD
2480end:
2481 rcuja_shadow_unlock(shadow_node);
2482 return ret;
2483}
2484
2485/*
2486 * Called with RCU read lock held.
2487 */
35170a44
MD
2488int cds_ja_del(struct cds_ja *ja, uint64_t key,
2489 struct cds_ja_node *node)
2490{
2491 unsigned int tree_depth, i;
2492 struct cds_ja_inode_flag *snapshot[JA_MAX_DEPTH];
af3cbd45
MD
2493 struct cds_ja_inode_flag **snapshot_ptr[JA_MAX_DEPTH];
2494 uint8_t snapshot_n[JA_MAX_DEPTH];
35170a44 2495 struct cds_ja_inode_flag *node_flag;
fa112799
MD
2496 struct cds_ja_inode_flag **prev_node_flag_ptr,
2497 **node_flag_ptr;
4d6ef45e 2498 int nr_snapshot;
35170a44
MD
2499 int ret;
2500
2501 if (caa_unlikely(key > ja->key_max))
2502 return -EINVAL;
2503 tree_depth = ja->tree_depth;
2504
2505retry:
4d6ef45e 2506 nr_snapshot = 0;
35170a44
MD
2507 dbg_printf("cds_ja_del attempt: key %" PRIu64 ", node %p\n",
2508 key, node);
2509
2510 /* snapshot for level 0 is only for shadow node lookup */
4d6ef45e
MD
2511 snapshot_n[0] = 0;
2512 snapshot_n[1] = 0;
af3cbd45 2513 snapshot_ptr[nr_snapshot] = NULL;
35170a44
MD
2514 snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root;
2515 node_flag = rcu_dereference(ja->root);
af3cbd45 2516 prev_node_flag_ptr = &ja->root;
fa112799 2517 node_flag_ptr = &ja->root;
35170a44
MD
2518
2519 /* Iterate on all internal levels */
2520 for (i = 1; i < tree_depth; i++) {
2521 uint8_t iter_key;
2522
2523 dbg_printf("cds_ja_del iter node_flag %p\n",
2524 node_flag);
2525 if (!ja_node_ptr(node_flag)) {
2526 return -ENOENT;
2527 }
35170a44 2528 iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
4d6ef45e 2529 snapshot_n[nr_snapshot + 1] = iter_key;
af3cbd45
MD
2530 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
2531 snapshot[nr_snapshot++] = node_flag;
35170a44 2532 node_flag = ja_node_get_nth(node_flag,
fa112799 2533 &node_flag_ptr,
35170a44 2534 iter_key);
48cbe001
MD
2535 if (node_flag)
2536 prev_node_flag_ptr = node_flag_ptr;
af3cbd45
MD
2537 dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p, prev_node_flag_ptr %p\n",
2538 (unsigned int) iter_key, node_flag,
2539 prev_node_flag_ptr);
35170a44 2540 }
35170a44
MD
2541 /*
 2542 * We reached the bottom of the tree; try to find the node we are trying
2543 * to remove. Fail if we cannot find it.
2544 */
2545 if (!ja_node_ptr(node_flag)) {
4d6ef45e
MD
2546 dbg_printf("cds_ja_del: no node found for key %" PRIu64 "\n",
2547 key);
35170a44
MD
2548 return -ENOENT;
2549 } else {
03ec1aeb 2550 struct cds_ja_node *iter_node, *match = NULL;
af3cbd45 2551 int count = 0;
35170a44 2552
03ec1aeb
MD
2553 iter_node = (struct cds_ja_node *) ja_node_ptr(node_flag);
2554 cds_ja_for_each_duplicate_rcu(iter_node) {
2555 dbg_printf("cds_ja_del: compare %p with iter_node %p\n", node, iter_node);
2556 if (iter_node == node)
2557 match = iter_node;
af3cbd45 2558 count++;
35170a44 2559 }
03ec1aeb 2560
4d6ef45e
MD
2561 if (!match) {
2562 dbg_printf("cds_ja_del: no node match for node %p key %" PRIu64 "\n", node, key);
35170a44 2563 return -ENOENT;
4d6ef45e 2564 }
af3cbd45
MD
2565 assert(count > 0);
2566 if (count == 1) {
2567 /*
4d6ef45e
MD
2568 * Removing last of duplicates. Last snapshot
 2569 * does not have a shadow node (external leaves).
af3cbd45
MD
2570 */
2571 snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
2572 snapshot[nr_snapshot++] = node_flag;
2573 ret = ja_detach_node(ja, snapshot, snapshot_ptr,
2574 snapshot_n, nr_snapshot, key, node);
2575 } else {
f2758d14 2576 ret = ja_unchain_node(ja, snapshot[nr_snapshot - 1],
013a6083 2577 node_flag_ptr, node_flag, match);
af3cbd45 2578 }
35170a44 2579 }
b306a0fe
MD
2580 /*
2581 * Explanation of -ENOENT handling: caused by concurrent delete
2582 * between RCU lookup and actual removal. Need to re-do the
2583 * lookup and removal attempt.
2584 */
2585 if (ret == -EAGAIN || ret == -ENOENT)
35170a44
MD
2586 goto retry;
2587 return ret;
2588}
2589
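/*
 * Removal sketch (illustrative, reusing the hypothetical struct
 * my_entry): cds_ja_del() only unlinks the node; freeing the enclosing
 * object must be deferred until a grace period has elapsed, for example
 * through call_rcu() of the flavor in use. The rcu_head field and
 * free_my_entry_rcu() callback are assumptions made for this example.
 *
 *	rcu_read_lock();
 *	ret = cds_ja_del(ja, key, &entry->ja_node);
 *	rcu_read_unlock();
 *	if (!ret)
 *		call_rcu(&entry->rcu_head, free_my_entry_rcu);
 *
 * Freeing immediately after cds_ja_del() could let a concurrent reader
 * still traversing the duplicate list access freed memory.
 */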
b4540e8a
MD
2590struct cds_ja *_cds_ja_new(unsigned int key_bits,
2591 const struct rcu_flavor_struct *flavor)
be9a7474
MD
2592{
2593 struct cds_ja *ja;
b0f74e47 2594 int ret;
f07b240f 2595 struct cds_ja_shadow_node *root_shadow_node;
be9a7474
MD
2596
2597 ja = calloc(sizeof(*ja), 1);
2598 if (!ja)
2599 goto ja_error;
b4540e8a
MD
2600
2601 switch (key_bits) {
2602 case 8:
b4540e8a 2603 case 16:
1216b3d2 2604 case 24:
b4540e8a 2605 case 32:
1216b3d2
MD
2606 case 40:
2607 case 48:
2608 case 56:
2609 ja->key_max = (1ULL << key_bits) - 1;
b4540e8a
MD
2610 break;
2611 case 64:
2612 ja->key_max = UINT64_MAX;
2613 break;
2614 default:
2615 goto check_error;
2616 }
2617
be9a7474 2618 /* ja->root is NULL */
5a9a87dd 2619 /* tree_depth 0 is for pointer to root node */
582a6ade 2620 ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
a2a7ff59 2621 assert(ja->tree_depth <= JA_MAX_DEPTH);
be9a7474
MD
2622 ja->ht = rcuja_create_ht(flavor);
2623 if (!ja->ht)
2624 goto ht_error;
b0f74e47
MD
2625
2626 /*
 2627 * Note: we should not free this node until the judy array is destroyed.
2628 */
f07b240f 2629 root_shadow_node = rcuja_shadow_set(ja->ht,
3d8fe307 2630 (struct cds_ja_inode_flag *) &ja->root,
48cbe001 2631 NULL, ja, 0);
f07b240f
MD
2632 if (!root_shadow_node) {
2633 ret = -ENOMEM;
b0f74e47 2634 goto ht_node_error;
f07b240f 2635 }
b0f74e47 2636
be9a7474
MD
2637 return ja;
2638
b0f74e47
MD
2639ht_node_error:
2640 ret = rcuja_delete_ht(ja->ht);
2641 assert(!ret);
be9a7474 2642ht_error:
b4540e8a 2643check_error:
be9a7474
MD
2644 free(ja);
2645ja_error:
2646 return NULL;
2647}
2648
3d8fe307
MD
2649/*
2650 * Called from RCU read-side CS.
2651 */
2652__attribute__((visibility("protected")))
2653void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node,
2654 struct cds_ja_inode_flag *node_flag,
21ac4c56 2655 void (*rcu_free_node)(struct cds_ja_node *node))
3d8fe307 2656{
3d8fe307
MD
2657 unsigned int type_index;
2658 struct cds_ja_inode *node;
2659 const struct cds_ja_type *type;
2660
3d8fe307
MD
2661 node = ja_node_ptr(node_flag);
2662 assert(node != NULL);
2663 type_index = ja_node_type(node_flag);
2664 type = &ja_types[type_index];
2665
2666 switch (type->type_class) {
2667 case RCU_JA_LINEAR:
2668 {
2669 uint8_t nr_child =
2670 ja_linear_node_get_nr_child(type, node);
2671 unsigned int i;
2672
2673 for (i = 0; i < nr_child; i++) {
2674 struct cds_ja_inode_flag *iter;
03ec1aeb 2675 struct cds_ja_node *node_iter, *n;
3d8fe307
MD
2676 uint8_t v;
2677
2678 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
03ec1aeb
MD
2679 node_iter = (struct cds_ja_node *) iter;
2680 cds_ja_for_each_duplicate_safe(node_iter, n) {
2681 rcu_free_node(node_iter);
3d8fe307
MD
2682 }
2683 }
2684 break;
2685 }
2686 case RCU_JA_POOL:
2687 {
2688 unsigned int pool_nr;
2689
2690 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
2691 struct cds_ja_inode *pool =
2692 ja_pool_node_get_ith_pool(type, node, pool_nr);
2693 uint8_t nr_child =
2694 ja_linear_node_get_nr_child(type, pool);
2695 unsigned int j;
2696
2697 for (j = 0; j < nr_child; j++) {
2698 struct cds_ja_inode_flag *iter;
03ec1aeb 2699 struct cds_ja_node *node_iter, *n;
3d8fe307
MD
2700 uint8_t v;
2701
75d573aa 2702 ja_linear_node_get_ith_pos(type, pool, j, &v, &iter);
03ec1aeb
MD
2703 node_iter = (struct cds_ja_node *) iter;
2704 cds_ja_for_each_duplicate_safe(node_iter, n) {
2705 rcu_free_node(node_iter);
3d8fe307
MD
2706 }
2707 }
2708 }
2709 break;
2710 }
2711 case RCU_JA_NULL:
2712 break;
2713 case RCU_JA_PIGEON:
2714 {
3d8fe307
MD
2715 unsigned int i;
2716
48cbe001 2717 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
3d8fe307 2718 struct cds_ja_inode_flag *iter;
03ec1aeb 2719 struct cds_ja_node *node_iter, *n;
3d8fe307
MD
2720
2721 iter = ja_pigeon_node_get_ith_pos(type, node, i);
03ec1aeb
MD
2722 node_iter = (struct cds_ja_node *) iter;
2723 cds_ja_for_each_duplicate_safe(node_iter, n) {
2724 rcu_free_node(node_iter);
3d8fe307
MD
2725 }
2726 }
2727 break;
2728 }
2729 default:
2730 assert(0);
2731 }
2732}
2733
19ddcd04 2734static
354981c2 2735void print_debug_fallback_distribution(struct cds_ja *ja)
19ddcd04
MD
2736{
2737 int i;
2738
2739 fprintf(stderr, "Fallback node distribution:\n");
2740 for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
354981c2 2741 if (!ja->node_fallback_count_distribution[i])
19ddcd04
MD
2742 continue;
2743 fprintf(stderr, " %3u: %4lu\n",
354981c2 2744 i, ja->node_fallback_count_distribution[i]);
19ddcd04
MD
2745 }
2746}
2747
021c72c0 2748static
19a748d9 2749int ja_final_checks(struct cds_ja *ja)
021c72c0
MD
2750{
2751 double fallback_ratio;
2752 unsigned long na, nf, nr_fallback;
19a748d9 2753 int ret = 0;
021c72c0
MD
2754
2755 fallback_ratio = (double) uatomic_read(&ja->nr_fallback);
2756 fallback_ratio /= (double) uatomic_read(&ja->nr_nodes_allocated);
2757 nr_fallback = uatomic_read(&ja->nr_fallback);
2758 if (nr_fallback)
2759 fprintf(stderr,
2760 "[warning] RCU Judy Array used %lu fallback node(s) (ratio: %g)\n",
2761 uatomic_read(&ja->nr_fallback),
2762 fallback_ratio);
2763
2764 na = uatomic_read(&ja->nr_nodes_allocated);
2765 nf = uatomic_read(&ja->nr_nodes_freed);
19a748d9
MD
2766 dbg_printf("Nodes allocated: %lu, Nodes freed: %lu.\n", na, nf);
2767 if (nr_fallback)
2768 print_debug_fallback_distribution(ja);
2769
021c72c0
MD
2770 if (na != nf) {
2771 fprintf(stderr, "[error] Judy array leaked %ld nodes. Allocated: %lu, freed: %lu.\n",
2772 (long) na - nf, na, nf);
19a748d9 2773 ret = -1;
021c72c0 2774 }
19a748d9 2775 return ret;
021c72c0
MD
2776}
2777
be9a7474 2778/*
dc0e9798
MD
2779 * There should be no more concurrent add, delete, nor look-up performed
2780 * on the Judy array while it is being destroyed (ensured by the
2781 * caller).
be9a7474 2782 */
3d8fe307 2783int cds_ja_destroy(struct cds_ja *ja,
dc0e9798 2784 void (*free_node_cb)(struct cds_ja_node *node))
be9a7474 2785{
48cbe001 2786 const struct rcu_flavor_struct *flavor;
b4540e8a
MD
2787 int ret;
2788
48cbe001 2789 flavor = cds_lfht_rcu_flavor(ja->ht);
be9a7474 2790 rcuja_shadow_prune(ja->ht,
3d8fe307 2791 RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK,
dc0e9798 2792 free_node_cb);
48cbe001 2793 flavor->thread_offline();
b4540e8a
MD
2794 ret = rcuja_delete_ht(ja->ht);
2795 if (ret)
2796 return ret;
f2ae7af7
MD
2797
2798 /* Wait for in-flight call_rcu free to complete. */
2799 flavor->barrier();
2800
48cbe001 2801 flavor->thread_online();
19a748d9 2802 ret = ja_final_checks(ja);
b4540e8a 2803 free(ja);
19a748d9 2804 return ret;
be9a7474 2805}
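/*
 * Destruction sketch (illustrative): free_node_cb receives every node
 * still present in the array. A typical callback converts the embedded
 * cds_ja_node back to the enclosing object (struct my_entry is the
 * hypothetical type used in the examples above) and frees it; freeing
 * directly is assumed to be safe here only because the caller
 * guarantees that no concurrent access to the array remains.
 *
 *	static void free_node_cb(struct cds_ja_node *node)
 *	{
 *		struct my_entry *entry =
 *			caa_container_of(node, struct my_entry, ja_node);
 *
 *		free(entry);
 *	}
 *
 *	...
 *	ret = cds_ja_destroy(ja, free_node_cb);
 */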