rcuja: print info about allocated/freed/fallback nodes
[userspace-rcu.git] / rcuja / rcuja.c
61009379
MD
1/*
2 * rcuja/rcuja.c
3 *
4 * Userspace RCU library - RCU Judy Array
5 *
6 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
195e72d3 23#define _LGPL_SOURCE
e5227865 24#include <stdint.h>
8e519e3c 25#include <errno.h>
d68c6810 26#include <limits.h>
b1a90ce3 27#include <string.h>
61009379 28#include <urcu/rcuja.h>
d68c6810
MD
29#include <urcu/compiler.h>
30#include <urcu/arch.h>
31#include <assert.h>
8e519e3c 32#include <urcu-pointer.h>
f07b240f 33#include <urcu/uatomic.h>
b4540e8a 34#include <stdint.h>
8e519e3c 35
61009379 36#include "rcuja-internal.h"
d68c6810 37#include "bitfield.h"
61009379 38
b1a90ce3
MD
39#ifndef abs_int
40#define abs_int(a) ((int) (a) > 0 ? (int) (a) : -((int) (a)))
41#endif
42
d96bfb0d 43enum cds_ja_type_class {
e5227865 44 RCU_JA_LINEAR = 0, /* Type A */
fd800776
MD
45 /* 32-bit: 1 to 25 children, 8 to 128 bytes */
46 /* 64-bit: 1 to 28 children, 16 to 256 bytes */
47 RCU_JA_POOL = 1, /* Type B */
48 /* 32-bit: 26 to 100 children, 256 to 512 bytes */
49 /* 64-bit: 29 to 112 children, 512 to 1024 bytes */
e5227865 50 RCU_JA_PIGEON = 2, /* Type C */
fd800776
MD
51 /* 32-bit: 101 to 256 children, 1024 bytes */
52 /* 64-bit: 113 to 256 children, 2048 bytes */
e5227865 53 /* Leaf nodes are implicit from their height in the tree */
1db4943c 54 RCU_JA_NR_TYPES,
e1db2db5
MD
55
56 RCU_JA_NULL, /* not an encoded type, but keeps code regular */
e5227865
MD
57};
58
d96bfb0d
MD
59struct cds_ja_type {
60 enum cds_ja_type_class type_class;
8e519e3c
MD
61 uint16_t min_child; /* minimum number of children: 1 to 256 */
62 uint16_t max_child; /* maximum number of children: 1 to 256 */
63 uint16_t max_linear_child; /* per-pool max nr. children: 1 to 256 */
64 uint16_t order; /* node size is (1 << order), in bytes */
fd800776
MD
65 uint16_t nr_pool_order; /* number of pools */
66 uint16_t pool_size_order; /* pool size */
e5227865
MD
67};
68
69/*
70 * Iteration on the array to find the right node size for the number of
d68c6810 71 * children stops when it reaches .max_child == 256 (this is the largest
e5227865 72 * possible node size, which contains 256 children).
d68c6810
MD
73 * The min_child overlaps with the previous max_child to provide
74 * hysteresis for reallocation under cyclic add/removal patterns
75 * within the same node.
76 * The index within the following arrays is represented on 3
77 * bits. It identifies the node type, min/max number of children, and
78 * the size order.
3d45251f
MD
79 * The max_child values for the RCU_JA_POOL below result from
80 * statistical approximation: over a million populations, the max_child
81 * covers between 97% and 99% of the populations generated. Therefore, a
82 * fallback should exist to cover the rare cases of extreme population
83 * imbalance, but it will not have a major impact on speed or space
84 * consumption, since those cases are rare.
e5227865 85 */
e5227865 86
d68c6810
MD
87#if (CAA_BITS_PER_LONG < 64)
88/* 32-bit pointers */
1db4943c
MD
89enum {
90 ja_type_0_max_child = 1,
91 ja_type_1_max_child = 3,
92 ja_type_2_max_child = 6,
93 ja_type_3_max_child = 12,
94 ja_type_4_max_child = 25,
95 ja_type_5_max_child = 48,
96 ja_type_6_max_child = 92,
97 ja_type_7_max_child = 256,
e1db2db5 98 ja_type_8_max_child = 0, /* NULL */
1db4943c
MD
99};
100
8e519e3c
MD
101enum {
102 ja_type_0_max_linear_child = 1,
103 ja_type_1_max_linear_child = 3,
104 ja_type_2_max_linear_child = 6,
105 ja_type_3_max_linear_child = 12,
106 ja_type_4_max_linear_child = 25,
107 ja_type_5_max_linear_child = 24,
108 ja_type_6_max_linear_child = 23,
109};
110
1db4943c
MD
111enum {
112 ja_type_5_nr_pool_order = 1,
113 ja_type_6_nr_pool_order = 2,
114};
115
d96bfb0d 116const struct cds_ja_type ja_types[] = {
8e519e3c
MD
117 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 3, },
118 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 4, },
119 { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 5, },
120 { .type_class = RCU_JA_LINEAR, .min_child = 4, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 6, },
121 { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 7, },
e5227865 122
fd800776 123 /* Pools may fill sooner than max_child */
8e519e3c
MD
124 { .type_class = RCU_JA_POOL, .min_child = 20, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 8, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 7, },
125 { .type_class = RCU_JA_POOL, .min_child = 45, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 9, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 7, },
3d45251f
MD
126
127 /*
b1a90ce3
MD
128 * Upon node removal below min_child, if child pool is filled
129 * beyond capacity, we roll back to pigeon.
3d45251f 130 */
1db4943c 131 { .type_class = RCU_JA_PIGEON, .min_child = 89, .max_child = ja_type_7_max_child, .order = 10, },
e1db2db5
MD
132
133 { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
d68c6810 134};
d68c6810
MD
135#else /* !(CAA_BITS_PER_LONG < 64) */
136/* 64-bit pointers */
1db4943c
MD
137enum {
138 ja_type_0_max_child = 1,
139 ja_type_1_max_child = 3,
140 ja_type_2_max_child = 7,
141 ja_type_3_max_child = 14,
142 ja_type_4_max_child = 28,
143 ja_type_5_max_child = 54,
144 ja_type_6_max_child = 104,
145 ja_type_7_max_child = 256,
e1db2db5 146 ja_type_8_max_child = 256,
1db4943c
MD
147};
148
8e519e3c
MD
149enum {
150 ja_type_0_max_linear_child = 1,
151 ja_type_1_max_linear_child = 3,
152 ja_type_2_max_linear_child = 7,
153 ja_type_3_max_linear_child = 14,
154 ja_type_4_max_linear_child = 28,
155 ja_type_5_max_linear_child = 27,
156 ja_type_6_max_linear_child = 26,
157};
158
1db4943c
MD
159enum {
160 ja_type_5_nr_pool_order = 1,
161 ja_type_6_nr_pool_order = 2,
162};
163
d96bfb0d 164const struct cds_ja_type ja_types[] = {
8e519e3c
MD
165 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_0_max_child, .max_linear_child = ja_type_0_max_linear_child, .order = 4, },
166 { .type_class = RCU_JA_LINEAR, .min_child = 1, .max_child = ja_type_1_max_child, .max_linear_child = ja_type_1_max_linear_child, .order = 5, },
167 { .type_class = RCU_JA_LINEAR, .min_child = 3, .max_child = ja_type_2_max_child, .max_linear_child = ja_type_2_max_linear_child, .order = 6, },
168 { .type_class = RCU_JA_LINEAR, .min_child = 5, .max_child = ja_type_3_max_child, .max_linear_child = ja_type_3_max_linear_child, .order = 7, },
169 { .type_class = RCU_JA_LINEAR, .min_child = 10, .max_child = ja_type_4_max_child, .max_linear_child = ja_type_4_max_linear_child, .order = 8, },
e5227865 170
3d45251f 171 /* Pools may fill sooner than max_child. */
8e519e3c
MD
172 { .type_class = RCU_JA_POOL, .min_child = 22, .max_child = ja_type_5_max_child, .max_linear_child = ja_type_5_max_linear_child, .order = 9, .nr_pool_order = ja_type_5_nr_pool_order, .pool_size_order = 8, },
173 { .type_class = RCU_JA_POOL, .min_child = 51, .max_child = ja_type_6_max_child, .max_linear_child = ja_type_6_max_linear_child, .order = 10, .nr_pool_order = ja_type_6_nr_pool_order, .pool_size_order = 8, },
e5227865 174
3d45251f 175 /*
b1a90ce3
MD
176 * Upon node removal below min_child, if child pool is filled
177 * beyond capacity, we roll back to pigeon.
3d45251f 178 */
1db4943c 179 { .type_class = RCU_JA_PIGEON, .min_child = 101, .max_child = ja_type_7_max_child, .order = 11, },
e1db2db5
MD
180
181 { .type_class = RCU_JA_NULL, .min_child = 0, .max_child = ja_type_8_max_child, },
e5227865 182};
d68c6810 183#endif /* !(CAA_BITS_PER_LONG < 64) */
e5227865 184
1db4943c
MD
185static inline __attribute__((unused))
186void static_array_size_check(void)
187{
e1db2db5 188 CAA_BUILD_BUG_ON(CAA_ARRAY_SIZE(ja_types) < JA_TYPE_MAX_NR);
1db4943c
MD
189}
190
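/*
 * Illustrative sketch (added for clarity; hypothetical helper, not used
 * by the update paths, which derive the target type index incrementally
 * during recompaction): pick the smallest node type able to hold
 * nr_child children by scanning ja_types until max_child fits.
 */
static inline __attribute__((unused))
unsigned int ja_example_type_index_for_count(uint16_t nr_child)
{
	unsigned int i;

	for (i = 0; i < CAA_ARRAY_SIZE(ja_types); i++) {
		if (ja_types[i].type_class == RCU_JA_NULL)
			break;
		if (nr_child <= ja_types[i].max_child)
			return i;	/* first (smallest) type that fits */
	}
	return i - 1;	/* largest non-NULL type: the pigeon node */
}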
e5227865 191/*
d96bfb0d 192 * The cds_ja_node contains the compressed node data needed for
1db4943c
MD
193 * read-side. For linear and pool node configurations, it starts with a
194 * byte counting the number of children in the node. Then, the
195 * node-specific data is placed.
196 * The node mutex, if any is needed to protect concurrent updates of
197 * each node, is placed in a separate hash table indexed by node address.
198 * For the pigeon configuration, the number of children is also kept in
199 * a separate hash table, indexed by node address, because it is only
200 * required for updates.
e5227865 201 */
1db4943c 202
ff38c745
MD
203#define DECLARE_LINEAR_NODE(index) \
204 struct { \
205 uint8_t nr_child; \
206 uint8_t child_value[ja_type_## index ##_max_linear_child]; \
b4540e8a 207 struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
ff38c745
MD
208 }
209
210#define DECLARE_POOL_NODE(index) \
211 struct { \
212 struct { \
213 uint8_t nr_child; \
214 uint8_t child_value[ja_type_## index ##_max_linear_child]; \
b4540e8a 215 struct cds_ja_inode_flag *child_ptr[ja_type_## index ##_max_linear_child]; \
ff38c745
MD
216 } linear[1U << ja_type_## index ##_nr_pool_order]; \
217 }
1db4943c 218
b4540e8a 219struct cds_ja_inode {
1db4943c
MD
220 union {
221 /* Linear configuration */
222 DECLARE_LINEAR_NODE(0) conf_0;
223 DECLARE_LINEAR_NODE(1) conf_1;
224 DECLARE_LINEAR_NODE(2) conf_2;
225 DECLARE_LINEAR_NODE(3) conf_3;
226 DECLARE_LINEAR_NODE(4) conf_4;
227
228 /* Pool configuration */
229 DECLARE_POOL_NODE(5) conf_5;
230 DECLARE_POOL_NODE(6) conf_6;
231
232 /* Pigeon configuration */
233 struct {
b4540e8a 234 struct cds_ja_inode_flag *child[ja_type_7_max_child];
1db4943c
MD
235 } conf_7;
236 /* data aliasing nodes for computed accesses */
b4540e8a 237 uint8_t data[sizeof(struct cds_ja_inode_flag *) * ja_type_7_max_child];
1db4943c 238 } u;
e5227865
MD
239};
240
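/*
 * Descriptive note (added for clarity): recompaction modes used below.
 * - JA_RECOMPACT_ADD_SAME: re-pack into a node of the same size class,
 *   used when a linear area has no free slot left but is fragmented by
 *   pointers that were cleared on removal (-ERANGE from set_nth).
 * - JA_RECOMPACT_ADD_NEXT: grow into the next (larger) node type
 *   (-ENOSPC from set_nth).
 * - JA_RECOMPACT_DEL: shrink towards a smaller node type after a
 *   removal drops the population below min_child (-EFBIG from
 *   clear_ptr).
 */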
2e313670 241enum ja_recompact {
19ddcd04
MD
242 JA_RECOMPACT_ADD_SAME,
243 JA_RECOMPACT_ADD_NEXT,
2e313670
MD
244 JA_RECOMPACT_DEL,
245};
246
19ddcd04
MD
247static
248unsigned long node_fallback_count_distribution[JA_ENTRY_PER_NODE];
efbd222a
MD
249static
250unsigned long nr_nodes_allocated, nr_nodes_freed;
19ddcd04 251
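/*
 * Note (added for clarity): these counters back the allocation/free and
 * fallback statistics this commit reports ("print info about
 * allocated/freed/fallback nodes"); the reporting code itself is outside
 * this excerpt. They are updated with uatomic operations in
 * alloc_cds_ja_node(), free_cds_ja_node() and ja_node_recompact().
 */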
b1a90ce3
MD
252static
253struct cds_ja_inode *_ja_node_mask_ptr(struct cds_ja_inode_flag *node)
254{
255 return (struct cds_ja_inode *) (((unsigned long) node) & JA_PTR_MASK);
256}
257
258unsigned long ja_node_type(struct cds_ja_inode_flag *node)
259{
260 unsigned long type;
261
262 if (_ja_node_mask_ptr(node) == NULL) {
263 return NODE_INDEX_NULL;
264 }
265 type = (unsigned int) ((unsigned long) node & JA_TYPE_MASK);
266 assert(type < (1UL << JA_TYPE_BITS));
267 return type;
268}
269
270struct cds_ja_inode *ja_node_ptr(struct cds_ja_inode_flag *node)
271{
272 unsigned long type_index = ja_node_type(node);
273 const struct cds_ja_type *type;
274
275 type = &ja_types[type_index];
276 switch (type->type_class) {
277 case RCU_JA_LINEAR:
278 case RCU_JA_PIGEON: /* fall-through */
279 case RCU_JA_NULL: /* fall-through */
280 default: /* fall-through */
281 return _ja_node_mask_ptr(node);
282 case RCU_JA_POOL:
283 switch (type->nr_pool_order) {
284 case 1:
285 return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_1D_MASK | JA_TYPE_MASK));
286 case 2:
287 return (struct cds_ja_inode *) (((unsigned long) node) & ~(JA_POOL_2D_MASK | JA_POOL_1D_MASK | JA_TYPE_MASK));
288 default:
289 assert(0);
290 }
291 }
292}
293
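/*
 * Note (added for clarity): the type index is encoded in the low
 * JA_TYPE_BITS of the node pointer, which is possible because nodes are
 * allocated aligned on their own size (see posix_memalign() in
 * alloc_cds_ja_node() below). Pool nodes additionally encode their bit
 * selector(s) in the JA_POOL_1D_MASK/JA_POOL_2D_MASK bits, so
 * ja_node_ptr() clears those as well. Assuming the encoding helpers from
 * rcuja-internal.h, the round-trip below is expected to hold:
 *
 *	assert(ja_node_ptr(ja_node_flag(node, type_index)) == node);
 */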
b4540e8a 294struct cds_ja_inode *alloc_cds_ja_node(const struct cds_ja_type *ja_type)
e5227865 295{
b1a90ce3
MD
296 size_t len = 1U << ja_type->order;
297 void *p;
298 int ret;
299
300 ret = posix_memalign(&p, len, len);
301 if (ret || !p) {
302 return NULL;
303 }
304 memset(p, 0, len);
efbd222a 305 uatomic_inc(&nr_nodes_allocated);
b1a90ce3 306 return p;
e5227865
MD
307}
308
b4540e8a 309void free_cds_ja_node(struct cds_ja_inode *node)
e5227865
MD
310{
311 free(node);
efbd222a 312 uatomic_inc(&nr_nodes_freed);
e5227865
MD
313}
314
d68c6810
MD
315#define __JA_ALIGN_MASK(v, mask) (((v) + (mask)) & ~(mask))
316#define JA_ALIGN(v, align) __JA_ALIGN_MASK(v, (typeof(v)) (align) - 1)
317#define __JA_FLOOR_MASK(v, mask) ((v) & ~(mask))
318#define JA_FLOOR(v, align) __JA_FLOOR_MASK(v, (typeof(v)) (align) - 1)
319
320static
1db4943c 321uint8_t *align_ptr_size(uint8_t *ptr)
d68c6810 322{
1db4943c 323 return (uint8_t *) JA_ALIGN((unsigned long) ptr, sizeof(void *));
d68c6810
MD
324}
325
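/*
 * Illustrative sketch (not part of the original code): a linear node, or
 * each linear area of a pool node, stores its child count at u.data[0],
 * the child key values starting at u.data[1], and the child pointers
 * right after the value array, aligned on pointer size. This
 * hypothetical helper only demonstrates that layout; the accessors below
 * compute the same offsets.
 */
static inline __attribute__((unused))
struct cds_ja_inode_flag **ja_example_linear_pointers(const struct cds_ja_type *type,
		struct cds_ja_inode *node)
{
	uint8_t *values = &node->u.data[1];

	/* Pointer array starts after the max_linear_child value bytes. */
	return (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
}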
11c5e016 326static
d96bfb0d 327uint8_t ja_linear_node_get_nr_child(const struct cds_ja_type *type,
b4540e8a 328 struct cds_ja_inode *node)
11c5e016
MD
329{
330 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
2e313670 331 return rcu_dereference(node->u.data[0]);
11c5e016
MD
332}
333
13a7f5a6
MD
334/*
335 * The order in which values and pointers are set does not matter: if
336 * a value is missing, we return NULL. If a value is there, but its
337 * associated pointer is still NULL, we return NULL too.
338 */
d68c6810 339static
b4540e8a
MD
340struct cds_ja_inode_flag *ja_linear_node_get_nth(const struct cds_ja_type *type,
341 struct cds_ja_inode *node,
5a9a87dd 342 struct cds_ja_inode_flag ***child_node_flag_ptr,
b62a8d0c 343 struct cds_ja_inode_flag **child_node_flag_v,
b0ca2d21 344 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 345 uint8_t n)
d68c6810
MD
346{
347 uint8_t nr_child;
348 uint8_t *values;
b4540e8a
MD
349 struct cds_ja_inode_flag **pointers;
350 struct cds_ja_inode_flag *ptr;
d68c6810
MD
351 unsigned int i;
352
8e519e3c 353 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
d68c6810 354
11c5e016 355 nr_child = ja_linear_node_get_nr_child(type, node);
13a7f5a6 356 cmm_smp_rmb(); /* read nr_child before values and pointers */
8e519e3c
MD
357 assert(nr_child <= type->max_linear_child);
358 assert(type->type_class != RCU_JA_LINEAR || nr_child >= type->min_child);
d68c6810 359
1db4943c 360 values = &node->u.data[1];
d68c6810 361 for (i = 0; i < nr_child; i++) {
13a7f5a6 362 if (CMM_LOAD_SHARED(values[i]) == n)
d68c6810
MD
363 break;
364 }
b0ca2d21
MD
365 if (i >= nr_child) {
366 if (caa_unlikely(node_flag_ptr))
367 *node_flag_ptr = NULL;
d68c6810 368 return NULL;
b0ca2d21 369 }
b4540e8a 370 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
13a7f5a6 371 ptr = rcu_dereference(pointers[i]);
2e313670
MD
372 if (caa_unlikely(child_node_flag_ptr) && ptr)
373 *child_node_flag_ptr = &pointers[i];
b62a8d0c
MD
374 if (caa_unlikely(child_node_flag_v) && ptr)
375 *child_node_flag_v = ptr;
b0ca2d21
MD
376 if (caa_unlikely(node_flag_ptr))
377 *node_flag_ptr = &pointers[i];
d68c6810
MD
378 return ptr;
379}
380
11c5e016 381static
5a9a87dd 382void ja_linear_node_get_ith_pos(const struct cds_ja_type *type,
b4540e8a 383 struct cds_ja_inode *node,
11c5e016
MD
384 uint8_t i,
385 uint8_t *v,
b4540e8a 386 struct cds_ja_inode_flag **iter)
11c5e016
MD
387{
388 uint8_t *values;
b4540e8a 389 struct cds_ja_inode_flag **pointers;
11c5e016
MD
390
391 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
392 assert(i < ja_linear_node_get_nr_child(type, node));
393
394 values = &node->u.data[1];
395 *v = values[i];
b4540e8a 396 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
11c5e016
MD
397 *iter = pointers[i];
398}
399
d68c6810 400static
b4540e8a
MD
401struct cds_ja_inode_flag *ja_pool_node_get_nth(const struct cds_ja_type *type,
402 struct cds_ja_inode *node,
b1a90ce3 403 struct cds_ja_inode_flag *node_flag,
5a9a87dd 404 struct cds_ja_inode_flag ***child_node_flag_ptr,
b62a8d0c 405 struct cds_ja_inode_flag **child_node_flag_v,
b0ca2d21 406 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 407 uint8_t n)
d68c6810 408{
b4540e8a 409 struct cds_ja_inode *linear;
d68c6810 410
fd800776 411 assert(type->type_class == RCU_JA_POOL);
b1a90ce3
MD
412
413 switch (type->nr_pool_order) {
414 case 1:
415 {
416 unsigned long bitsel, index;
417
418 bitsel = ja_node_pool_1d_bitsel(node_flag);
419 assert(bitsel < CHAR_BIT);
19ddcd04 420 index = ((unsigned long) n >> bitsel) & 0x1;
b1a90ce3
MD
421 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
422 break;
423 }
424 case 2:
425 {
19ddcd04
MD
426 unsigned long bitsel[2], index[2], rindex;
427
428 ja_node_pool_2d_bitsel(node_flag, bitsel);
429 assert(bitsel[0] < CHAR_BIT);
430 assert(bitsel[1] < CHAR_BIT);
431 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
432 index[0] <<= 1;
433 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
434 rindex = index[0] | index[1];
435 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
b1a90ce3
MD
436 break;
437 }
438 default:
439 linear = NULL;
440 assert(0);
441 }
b0ca2d21 442 return ja_linear_node_get_nth(type, linear, child_node_flag_ptr,
b62a8d0c 443 child_node_flag_v, node_flag_ptr, n);
d68c6810
MD
444}
445
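/*
 * Worked example (added for illustration): for a two-pool node
 * (nr_pool_order == 1) whose 1D bit selector is 3, the key byte
 * n = 0x2a (binary 101010) has bit 3 set, so the child is looked up in
 * linear area 1, at offset 1 << pool_size_order within u.data. With
 * nr_pool_order == 2 and selectors (5, 3), the same byte maps to
 * rindex = (1 << 1) | 1 = 3, i.e. the fourth linear area.
 */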
11c5e016 446static
b4540e8a
MD
447struct cds_ja_inode *ja_pool_node_get_ith_pool(const struct cds_ja_type *type,
448 struct cds_ja_inode *node,
11c5e016
MD
449 uint8_t i)
450{
451 assert(type->type_class == RCU_JA_POOL);
b4540e8a 452 return (struct cds_ja_inode *)
11c5e016
MD
453 &node->u.data[(unsigned int) i << type->pool_size_order];
454}
455
d68c6810 456static
b4540e8a
MD
457struct cds_ja_inode_flag *ja_pigeon_node_get_nth(const struct cds_ja_type *type,
458 struct cds_ja_inode *node,
5a9a87dd 459 struct cds_ja_inode_flag ***child_node_flag_ptr,
b62a8d0c 460 struct cds_ja_inode_flag **child_node_flag_v,
b0ca2d21 461 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 462 uint8_t n)
d68c6810 463{
5a9a87dd 464 struct cds_ja_inode_flag **child_node_flag;
b62a8d0c 465 struct cds_ja_inode_flag *child_node_flag_read;
5a9a87dd 466
d68c6810 467 assert(type->type_class == RCU_JA_PIGEON);
5a9a87dd 468 child_node_flag = &((struct cds_ja_inode_flag **) node->u.data)[n];
b62a8d0c 469 child_node_flag_read = rcu_dereference(*child_node_flag);
582a6ade
MD
470 dbg_printf("ja_pigeon_node_get_nth child_node_flag_ptr %p\n",
471 child_node_flag);
b62a8d0c 472 if (caa_unlikely(child_node_flag_ptr) && child_node_flag_read)
5a9a87dd 473 *child_node_flag_ptr = child_node_flag;
b62a8d0c
MD
474 if (caa_unlikely(child_node_flag_v) && child_node_flag_read)
475 *child_node_flag_v = child_node_flag_read;
b0ca2d21
MD
476 if (caa_unlikely(node_flag_ptr))
477 *node_flag_ptr = child_node_flag;
b62a8d0c 478 return child_node_flag_read;
d68c6810
MD
479}
480
2e313670
MD
481static
482struct cds_ja_inode_flag *ja_pigeon_node_get_ith_pos(const struct cds_ja_type *type,
483 struct cds_ja_inode *node,
484 uint8_t i)
485{
b62a8d0c 486 return ja_pigeon_node_get_nth(type, node, NULL, NULL, NULL, i);
2e313670
MD
487}
488
13a7f5a6
MD
489/*
490 * ja_node_get_nth: get nth item from a node.
491 * node_flag is already rcu_dereference'd.
492 */
d68c6810 493static
b62a8d0c 494struct cds_ja_inode_flag *ja_node_get_nth(struct cds_ja_inode_flag *node_flag,
5a9a87dd 495 struct cds_ja_inode_flag ***child_node_flag_ptr,
b62a8d0c 496 struct cds_ja_inode_flag **child_node_flag,
b0ca2d21 497 struct cds_ja_inode_flag ***node_flag_ptr,
8e519e3c 498 uint8_t n)
d68c6810
MD
499{
500 unsigned int type_index;
b4540e8a 501 struct cds_ja_inode *node;
d96bfb0d 502 const struct cds_ja_type *type;
d68c6810 503
d68c6810 504 node = ja_node_ptr(node_flag);
5a9a87dd 505 assert(node != NULL);
d68c6810
MD
506 type_index = ja_node_type(node_flag);
507 type = &ja_types[type_index];
508
509 switch (type->type_class) {
510 case RCU_JA_LINEAR:
5a9a87dd 511 return ja_linear_node_get_nth(type, node,
b62a8d0c
MD
512 child_node_flag_ptr, child_node_flag,
513 node_flag_ptr, n);
fd800776 514 case RCU_JA_POOL:
b1a90ce3 515 return ja_pool_node_get_nth(type, node, node_flag,
b62a8d0c
MD
516 child_node_flag_ptr, child_node_flag,
517 node_flag_ptr, n);
d68c6810 518 case RCU_JA_PIGEON:
5a9a87dd 519 return ja_pigeon_node_get_nth(type, node,
b62a8d0c
MD
520 child_node_flag_ptr, child_node_flag,
521 node_flag_ptr, n);
d68c6810
MD
522 default:
523 assert(0);
524 return (void *) -1UL;
525 }
526}
527
8e519e3c 528static
d96bfb0d 529int ja_linear_node_set_nth(const struct cds_ja_type *type,
b4540e8a 530 struct cds_ja_inode *node,
d96bfb0d 531 struct cds_ja_shadow_node *shadow_node,
8e519e3c 532 uint8_t n,
b4540e8a 533 struct cds_ja_inode_flag *child_node_flag)
8e519e3c
MD
534{
535 uint8_t nr_child;
536 uint8_t *values, *nr_child_ptr;
b4540e8a 537 struct cds_ja_inode_flag **pointers;
2e313670 538 unsigned int i, unused = 0;
8e519e3c
MD
539
540 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
541
542 nr_child_ptr = &node->u.data[0];
a2a7ff59 543 dbg_printf("linear set nth: nr_child_ptr %p\n", nr_child_ptr);
8e519e3c
MD
544 nr_child = *nr_child_ptr;
545 assert(nr_child <= type->max_linear_child);
8e519e3c
MD
546
547 values = &node->u.data[1];
2e313670
MD
548 pointers = (struct cds_ja_inode_flag **) align_ptr_size(&values[type->max_linear_child]);
549 /* Check if node value is already populated */
8e519e3c 550 for (i = 0; i < nr_child; i++) {
2e313670
MD
551 if (values[i] == n) {
552 if (pointers[i])
553 return -EEXIST;
554 else
555 break;
556 } else {
557 if (!pointers[i])
558 unused++;
559 }
8e519e3c 560 }
2e313670
MD
561 if (i == nr_child && nr_child >= type->max_linear_child) {
562 if (unused)
563 return -ERANGE; /* recompact node */
564 else
565 return -ENOSPC; /* No space left in this node type */
566 }
567
568 assert(pointers[i] == NULL);
569 rcu_assign_pointer(pointers[i], child_node_flag);
570 /* If we expanded the nr_child, increment it */
571 if (i == nr_child) {
572 CMM_STORE_SHARED(values[nr_child], n);
573 /* write pointer and value before nr_child */
574 cmm_smp_wmb();
575 CMM_STORE_SHARED(*nr_child_ptr, nr_child + 1);
8e519e3c 576 }
e1db2db5 577 shadow_node->nr_child++;
a2a7ff59
MD
578 dbg_printf("linear set nth: %u child, shadow: %u child, for node %p shadow %p\n",
579 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
580 (unsigned int) shadow_node->nr_child,
581 node, shadow_node);
582
8e519e3c
MD
583 return 0;
584}
585
586static
d96bfb0d 587int ja_pool_node_set_nth(const struct cds_ja_type *type,
b4540e8a 588 struct cds_ja_inode *node,
b1a90ce3 589 struct cds_ja_inode_flag *node_flag,
d96bfb0d 590 struct cds_ja_shadow_node *shadow_node,
8e519e3c 591 uint8_t n,
b4540e8a 592 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 593{
b4540e8a 594 struct cds_ja_inode *linear;
8e519e3c
MD
595
596 assert(type->type_class == RCU_JA_POOL);
b1a90ce3
MD
597
598 switch (type->nr_pool_order) {
599 case 1:
600 {
601 unsigned long bitsel, index;
602
603 bitsel = ja_node_pool_1d_bitsel(node_flag);
604 assert(bitsel < CHAR_BIT);
19ddcd04 605 index = ((unsigned long) n >> bitsel) & 0x1;
b1a90ce3
MD
606 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
607 break;
608 }
609 case 2:
610 {
19ddcd04
MD
611 unsigned long bitsel[2], index[2], rindex;
612
613 ja_node_pool_2d_bitsel(node_flag, bitsel);
614 assert(bitsel[0] < CHAR_BIT);
615 assert(bitsel[1] < CHAR_BIT);
616 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
617 index[0] <<= 1;
618 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
619 rindex = index[0] | index[1];
620 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
b1a90ce3
MD
621 break;
622 }
623 default:
624 linear = NULL;
625 assert(0);
626 }
627
e1db2db5
MD
628 return ja_linear_node_set_nth(type, linear, shadow_node,
629 n, child_node_flag);
8e519e3c
MD
630}
631
632static
d96bfb0d 633int ja_pigeon_node_set_nth(const struct cds_ja_type *type,
b4540e8a 634 struct cds_ja_inode *node,
d96bfb0d 635 struct cds_ja_shadow_node *shadow_node,
8e519e3c 636 uint8_t n,
b4540e8a 637 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 638{
b4540e8a 639 struct cds_ja_inode_flag **ptr;
8e519e3c
MD
640
641 assert(type->type_class == RCU_JA_PIGEON);
b4540e8a 642 ptr = &((struct cds_ja_inode_flag **) node->u.data)[n];
5a9a87dd 643 if (*ptr)
8e519e3c
MD
644 return -EEXIST;
645 rcu_assign_pointer(*ptr, child_node_flag);
e1db2db5 646 shadow_node->nr_child++;
8e519e3c
MD
647 return 0;
648}
649
d68c6810 650/*
7a0b2331 651 * _ja_node_set_nth: set nth item within a node. Return -EEXIST if it is
8e519e3c 652 * already there, or -ENOSPC/-ERANGE if the node needs recompaction.
d68c6810 653 */
8e519e3c 654static
d96bfb0d 655int _ja_node_set_nth(const struct cds_ja_type *type,
b4540e8a 656 struct cds_ja_inode *node,
b1a90ce3 657 struct cds_ja_inode_flag *node_flag,
d96bfb0d 658 struct cds_ja_shadow_node *shadow_node,
e1db2db5 659 uint8_t n,
b4540e8a 660 struct cds_ja_inode_flag *child_node_flag)
8e519e3c 661{
8e519e3c
MD
662 switch (type->type_class) {
663 case RCU_JA_LINEAR:
e1db2db5 664 return ja_linear_node_set_nth(type, node, shadow_node, n,
8e519e3c
MD
665 child_node_flag);
666 case RCU_JA_POOL:
b1a90ce3 667 return ja_pool_node_set_nth(type, node, node_flag, shadow_node, n,
8e519e3c
MD
668 child_node_flag);
669 case RCU_JA_PIGEON:
e1db2db5 670 return ja_pigeon_node_set_nth(type, node, shadow_node, n,
8e519e3c 671 child_node_flag);
e1db2db5
MD
672 case RCU_JA_NULL:
673 return -ENOSPC;
8e519e3c
MD
674 default:
675 assert(0);
676 return -EINVAL;
677 }
678
679 return 0;
680}
7a0b2331 681
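/*
 * Summary note (added for clarity, inferred from the call sites): the
 * set/clear helpers use the following return convention:
 *   0        success
 *   -EEXIST  the slot for value n is already populated
 *   -ENOSPC  no room left in this node type; recompact to the next type
 *   -ERANGE  slots exhausted but some pointers were cleared; recompact
 *            into a node of the same size (JA_RECOMPACT_ADD_SAME)
 *   -EFBIG   (clear path) population at or below min_child; try
 *            recompacting to a smaller type
 *   -ENOENT  (clear path) nothing to clear (NULL node type)
 */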
2e313670 682static
af3cbd45 683int ja_linear_node_clear_ptr(const struct cds_ja_type *type,
2e313670
MD
684 struct cds_ja_inode *node,
685 struct cds_ja_shadow_node *shadow_node,
af3cbd45 686 struct cds_ja_inode_flag **node_flag_ptr)
2e313670
MD
687{
688 uint8_t nr_child;
af3cbd45 689 uint8_t *nr_child_ptr;
2e313670
MD
690
691 assert(type->type_class == RCU_JA_LINEAR || type->type_class == RCU_JA_POOL);
692
693 nr_child_ptr = &node->u.data[0];
2e313670
MD
694 nr_child = *nr_child_ptr;
695 assert(nr_child <= type->max_linear_child);
696
2e313670
MD
697 if (shadow_node->fallback_removal_count) {
698 shadow_node->fallback_removal_count--;
699 } else {
19ddcd04
MD
700 if (type->type_class == RCU_JA_LINEAR
701 && shadow_node->nr_child <= type->min_child) {
2e313670
MD
702 /* We need to try recompacting the node */
703 return -EFBIG;
704 }
705 }
19ddcd04 706 dbg_printf("linear clear ptr: nr_child_ptr %p\n", nr_child_ptr);
af3cbd45
MD
707 assert(*node_flag_ptr != NULL);
708 rcu_assign_pointer(*node_flag_ptr, NULL);
2e313670
MD
709 /*
710 * Value and nr_child are never changed (would cause ABA issue).
711 * Instead, we leave the pointer set to NULL and recompact the node
712 * once in a while. Setting a NULL pointer back to a new value is
713 * allowed without recompaction, though.
714 * Only update the shadow node accounting.
715 */
716 shadow_node->nr_child--;
af3cbd45 717 dbg_printf("linear clear ptr: %u child, shadow: %u child, for node %p shadow %p\n",
2e313670
MD
718 (unsigned int) CMM_LOAD_SHARED(*nr_child_ptr),
719 (unsigned int) shadow_node->nr_child,
720 node, shadow_node);
2e313670
MD
721 return 0;
722}
723
724static
af3cbd45 725int ja_pool_node_clear_ptr(const struct cds_ja_type *type,
2e313670 726 struct cds_ja_inode *node,
19ddcd04 727 struct cds_ja_inode_flag *node_flag,
2e313670 728 struct cds_ja_shadow_node *shadow_node,
af3cbd45 729 struct cds_ja_inode_flag **node_flag_ptr,
2e313670
MD
730 uint8_t n)
731{
732 struct cds_ja_inode *linear;
733
734 assert(type->type_class == RCU_JA_POOL);
19ddcd04
MD
735
736 if (shadow_node->fallback_removal_count) {
737 shadow_node->fallback_removal_count--;
738 } else {
739 /* We should try recompacting the node */
740 if (shadow_node->nr_child <= type->min_child)
741 return -EFBIG;
742 }
743
744 switch (type->nr_pool_order) {
745 case 1:
746 {
747 unsigned long bitsel, index;
748
749 bitsel = ja_node_pool_1d_bitsel(node_flag);
750 assert(bitsel < CHAR_BIT);
751 index = ((unsigned long) n >> bitsel) & 0x1;
752 linear = (struct cds_ja_inode *) &node->u.data[index << type->pool_size_order];
753 break;
754 }
755 case 2:
756 {
757 unsigned long bitsel[2], index[2], rindex;
758
759 ja_node_pool_2d_bitsel(node_flag, bitsel);
760 assert(bitsel[0] < CHAR_BIT);
761 assert(bitsel[1] < CHAR_BIT);
762 index[0] = ((unsigned long) n >> bitsel[0]) & 0x1;
763 index[0] <<= 1;
764 index[1] = ((unsigned long) n >> bitsel[1]) & 0x1;
765 rindex = index[0] | index[1];
766 linear = (struct cds_ja_inode *) &node->u.data[rindex << type->pool_size_order];
767 break;
768 }
769 default:
770 linear = NULL;
771 assert(0);
772 }
773
af3cbd45 774 return ja_linear_node_clear_ptr(type, linear, shadow_node, node_flag_ptr);
2e313670
MD
775}
776
777static
af3cbd45 778int ja_pigeon_node_clear_ptr(const struct cds_ja_type *type,
2e313670
MD
779 struct cds_ja_inode *node,
780 struct cds_ja_shadow_node *shadow_node,
af3cbd45 781 struct cds_ja_inode_flag **node_flag_ptr)
2e313670 782{
2e313670 783 assert(type->type_class == RCU_JA_PIGEON);
19ddcd04
MD
784
785 if (shadow_node->fallback_removal_count) {
786 shadow_node->fallback_removal_count--;
787 } else {
788 /* We should try recompacting the node */
789 if (shadow_node->nr_child <= type->min_child)
790 return -EFBIG;
791 }
4d6ef45e 792 dbg_printf("ja_pigeon_node_clear_ptr: clearing ptr: %p\n", *node_flag_ptr);
af3cbd45 793 rcu_assign_pointer(*node_flag_ptr, NULL);
2e313670
MD
794 shadow_node->nr_child--;
795 return 0;
796}
797
798/*
af3cbd45 799 * _ja_node_clear_ptr: clear a child pointer within a node. Return -ENOENT
2e313670
MD
800 * if there is nothing to clear, or -EFBIG if the node should be recompacted.
801 */
802static
af3cbd45 803int _ja_node_clear_ptr(const struct cds_ja_type *type,
2e313670 804 struct cds_ja_inode *node,
19ddcd04 805 struct cds_ja_inode_flag *node_flag,
2e313670 806 struct cds_ja_shadow_node *shadow_node,
af3cbd45 807 struct cds_ja_inode_flag **node_flag_ptr,
2e313670
MD
808 uint8_t n)
809{
810 switch (type->type_class) {
811 case RCU_JA_LINEAR:
af3cbd45 812 return ja_linear_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
2e313670 813 case RCU_JA_POOL:
19ddcd04 814 return ja_pool_node_clear_ptr(type, node, node_flag, shadow_node, node_flag_ptr, n);
2e313670 815 case RCU_JA_PIGEON:
af3cbd45 816 return ja_pigeon_node_clear_ptr(type, node, shadow_node, node_flag_ptr);
2e313670
MD
817 case RCU_JA_NULL:
818 return -ENOENT;
819 default:
820 assert(0);
821 return -EINVAL;
822 }
823
824 return 0;
825}
826
b1a90ce3
MD
827/*
828 * Calculate bit distribution. Returns the bit (0 to 7) that splits the
829 * distribution into two sub-distributions containing numbers of elements
830 * as close to each other as possible.
831 */
832static
833unsigned int ja_node_sum_distribution_1d(enum ja_recompact mode,
834 struct cds_ja *ja,
835 unsigned int type_index,
836 const struct cds_ja_type *type,
837 struct cds_ja_inode *node,
838 struct cds_ja_shadow_node *shadow_node,
839 uint8_t n,
840 struct cds_ja_inode_flag *child_node_flag,
841 struct cds_ja_inode_flag **nullify_node_flag_ptr)
842{
843 uint8_t nr_one[JA_BITS_PER_BYTE];
844 unsigned int bitsel = 0, bit_i, overall_best_distance = UINT_MAX;
845 unsigned int distrib_nr_child = 0;
846
847 memset(nr_one, 0, sizeof(nr_one));
848
849 switch (type->type_class) {
850 case RCU_JA_LINEAR:
851 {
852 uint8_t nr_child =
853 ja_linear_node_get_nr_child(type, node);
854 unsigned int i;
855
856 for (i = 0; i < nr_child; i++) {
857 struct cds_ja_inode_flag *iter;
b1a90ce3
MD
858 uint8_t v;
859
860 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
861 if (!iter)
862 continue;
863 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
864 continue;
f5531dd9
MD
865 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
866 if (v & (1U << bit_i))
867 nr_one[bit_i]++;
b1a90ce3
MD
868 }
869 distrib_nr_child++;
870 }
871 break;
872 }
873 case RCU_JA_POOL:
874 {
875 unsigned int pool_nr;
876
877 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
878 struct cds_ja_inode *pool =
879 ja_pool_node_get_ith_pool(type,
880 node, pool_nr);
881 uint8_t nr_child =
882 ja_linear_node_get_nr_child(type, pool);
883 unsigned int j;
884
885 for (j = 0; j < nr_child; j++) {
886 struct cds_ja_inode_flag *iter;
b1a90ce3
MD
887 uint8_t v;
888
889 ja_linear_node_get_ith_pos(type, pool,
890 j, &v, &iter);
891 if (!iter)
892 continue;
893 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
894 continue;
f5531dd9
MD
895 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
896 if (v & (1U << bit_i))
897 nr_one[bit_i]++;
b1a90ce3
MD
898 }
899 distrib_nr_child++;
900 }
901 }
902 break;
903 }
904 case RCU_JA_PIGEON:
905 {
906 uint8_t nr_child;
907 unsigned int i;
908
909 assert(mode == JA_RECOMPACT_DEL);
910 nr_child = shadow_node->nr_child;
911 for (i = 0; i < nr_child; i++) {
912 struct cds_ja_inode_flag *iter;
b1a90ce3
MD
913
914 iter = ja_pigeon_node_get_ith_pos(type, node, i);
915 if (!iter)
916 continue;
917 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
918 continue;
f5531dd9
MD
919 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
920 if (i & (1U << bit_i))
921 nr_one[bit_i]++;
b1a90ce3
MD
922 }
923 distrib_nr_child++;
924 }
925 break;
926 }
927 case RCU_JA_NULL:
19ddcd04 928 assert(mode == JA_RECOMPACT_ADD_NEXT);
b1a90ce3
MD
929 break;
930 default:
931 assert(0);
932 break;
933 }
934
19ddcd04 935 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
f5531dd9
MD
936 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
937 if (n & (1U << bit_i))
938 nr_one[bit_i]++;
b1a90ce3
MD
939 }
940 distrib_nr_child++;
941 }
942
943 /*
944 * The best bit selector is that for which the number of ones is
945 * closest to half of the number of children in the
f5531dd9
MD
946 * distribution. We calculate the distance using the double of
947 * the sub-distribution sizes to eliminate truncation error.
b1a90ce3
MD
948 */
949 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
950 unsigned int distance_to_best;
951
f5531dd9 952 distance_to_best = abs_int((nr_one[bit_i] << 1U) - distrib_nr_child);
b1a90ce3
MD
953 if (distance_to_best < overall_best_distance) {
954 overall_best_distance = distance_to_best;
955 bitsel = bit_i;
956 }
957 }
958 dbg_printf("1 dimension pool bit selection: (%u)\n", bitsel);
959 return bitsel;
960}
961
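/*
 * Minimal sketch (added for illustration; hypothetical helper, not used
 * by the tree code): compute a 1D bit selector for a plain array of key
 * bytes, using the same doubled-distance criterion as
 * ja_node_sum_distribution_1d() above. For example, the values
 * { 0x01, 0x03, 0x05, 0x80 } give nr_one = 3 for bit 0 and 1 for bits 1,
 * 2 and 7; each of those bits has distance |2 * nr_one - 4| = 2, and the
 * scan keeps the first of them (bit 0).
 */
static inline __attribute__((unused))
unsigned int ja_example_bitsel_1d(const uint8_t *values, unsigned int nr)
{
	unsigned int nr_one[JA_BITS_PER_BYTE] = { 0 };
	unsigned int i, bit_i, bitsel = 0, best_distance = UINT_MAX;

	for (i = 0; i < nr; i++)
		for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++)
			if (values[i] & (1U << bit_i))
				nr_one[bit_i]++;
	for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
		unsigned int distance = abs_int((nr_one[bit_i] << 1U) - nr);

		if (distance < best_distance) {
			best_distance = distance;
			bitsel = bit_i;
		}
	}
	return bitsel;
}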
19ddcd04
MD
962/*
963 * Calculate bit distribution in two dimensions. Returns the two bits
964 * (each 0 to 7) that split the distribution into four sub-distributions
965 * containing numbers of elements as close to each other as possible.
966 */
967static
968void ja_node_sum_distribution_2d(enum ja_recompact mode,
969 struct cds_ja *ja,
970 unsigned int type_index,
971 const struct cds_ja_type *type,
972 struct cds_ja_inode *node,
973 struct cds_ja_shadow_node *shadow_node,
974 uint8_t n,
975 struct cds_ja_inode_flag *child_node_flag,
976 struct cds_ja_inode_flag **nullify_node_flag_ptr,
977 unsigned int *_bitsel)
978{
979 uint8_t nr_2d_11[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
980 nr_2d_10[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
981 nr_2d_01[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE],
982 nr_2d_00[JA_BITS_PER_BYTE][JA_BITS_PER_BYTE];
983 unsigned int bitsel[2] = { 0, 1 };
4a073c53
MD
984 unsigned int bit_i, bit_j;
985 int overall_best_distance = INT_MAX;
19ddcd04
MD
986 unsigned int distrib_nr_child = 0;
987
988 memset(nr_2d_11, 0, sizeof(nr_2d_11));
989 memset(nr_2d_10, 0, sizeof(nr_2d_10));
4a073c53
MD
990 memset(nr_2d_01, 0, sizeof(nr_2d_01));
991 memset(nr_2d_00, 0, sizeof(nr_2d_00));
19ddcd04
MD
992
993 switch (type->type_class) {
994 case RCU_JA_LINEAR:
995 {
996 uint8_t nr_child =
997 ja_linear_node_get_nr_child(type, node);
998 unsigned int i;
999
1000 for (i = 0; i < nr_child; i++) {
1001 struct cds_ja_inode_flag *iter;
1002 uint8_t v;
1003
1004 ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
1005 if (!iter)
1006 continue;
1007 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1008 continue;
1009 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1010 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1011 if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
1012 nr_2d_11[bit_i][bit_j]++;
1013 }
1014 if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1015 nr_2d_10[bit_i][bit_j]++;
1016 }
1017 if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
1018 nr_2d_01[bit_i][bit_j]++;
1019 }
1020 if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1021 nr_2d_00[bit_i][bit_j]++;
1022 }
1023 }
1024 }
1025 distrib_nr_child++;
1026 }
1027 break;
1028 }
1029 case RCU_JA_POOL:
1030 {
1031 unsigned int pool_nr;
1032
1033 for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
1034 struct cds_ja_inode *pool =
1035 ja_pool_node_get_ith_pool(type,
1036 node, pool_nr);
1037 uint8_t nr_child =
1038 ja_linear_node_get_nr_child(type, pool);
1039 unsigned int j;
1040
1041 for (j = 0; j < nr_child; j++) {
1042 struct cds_ja_inode_flag *iter;
1043 uint8_t v;
1044
1045 ja_linear_node_get_ith_pos(type, pool,
1046 j, &v, &iter);
1047 if (!iter)
1048 continue;
1049 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1050 continue;
1051 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1052 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1053 if ((v & (1U << bit_i)) && (v & (1U << bit_j))) {
1054 nr_2d_11[bit_i][bit_j]++;
1055 }
1056 if ((v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1057 nr_2d_10[bit_i][bit_j]++;
1058 }
1059 if (!(v & (1U << bit_i)) && (v & (1U << bit_j))) {
1060 nr_2d_01[bit_i][bit_j]++;
1061 }
1062 if (!(v & (1U << bit_i)) && !(v & (1U << bit_j))) {
1063 nr_2d_00[bit_i][bit_j]++;
1064 }
1065 }
1066 }
1067 distrib_nr_child++;
1068 }
1069 }
1070 break;
1071 }
1072 case RCU_JA_PIGEON:
1073 {
1074 uint8_t nr_child;
1075 unsigned int i;
1076
1077 assert(mode == JA_RECOMPACT_DEL);
1078 nr_child = shadow_node->nr_child;
1079 for (i = 0; i < nr_child; i++) {
1080 struct cds_ja_inode_flag *iter;
1081
1082 iter = ja_pigeon_node_get_ith_pos(type, node, i);
1083 if (!iter)
1084 continue;
1085 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
1086 continue;
1087 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1088 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1089 if ((i & (1U << bit_i)) && (i & (1U << bit_j))) {
1090 nr_2d_11[bit_i][bit_j]++;
1091 }
1092 if ((i & (1U << bit_i)) && !(i & (1U << bit_j))) {
1093 nr_2d_10[bit_i][bit_j]++;
1094 }
1095 if (!(i & (1U << bit_i)) && (i & (1U << bit_j))) {
1096 nr_2d_01[bit_i][bit_j]++;
1097 }
1098 if (!(i & (1U << bit_i)) && !(i & (1U << bit_j))) {
1099 nr_2d_00[bit_i][bit_j]++;
1100 }
1101 }
1102 }
1103 distrib_nr_child++;
1104 }
1105 break;
1106 }
1107 case RCU_JA_NULL:
1108 assert(mode == JA_RECOMPACT_ADD_NEXT);
1109 break;
1110 default:
1111 assert(0);
1112 break;
1113 }
1114
1115 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
1116 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1117 for (bit_j = 0; bit_j < bit_i; bit_j++) {
1118 if ((n & (1U << bit_i)) && (n & (1U << bit_j))) {
1119 nr_2d_11[bit_i][bit_j]++;
1120 }
1121 if ((n & (1U << bit_i)) && !(n & (1U << bit_j))) {
1122 nr_2d_10[bit_i][bit_j]++;
1123 }
1124 if (!(n & (1U << bit_i)) && (n & (1U << bit_j))) {
1125 nr_2d_01[bit_i][bit_j]++;
1126 }
1127 if (!(n & (1U << bit_i)) && !(n & (1U << bit_j))) {
1128 nr_2d_00[bit_i][bit_j]++;
1129 }
1130 }
1131 }
1132 distrib_nr_child++;
1133 }
1134
1135 /*
1136 * The best bit selector is that for which the number of nodes
1137 * in each sub-class is closest to one-fourth of the number of
1138 * children in the distribution. We calculate the distance using
1139 * 4 times the size of the sub-distribution to eliminate
1140 * truncation error.
1141 */
1142 for (bit_i = 0; bit_i < JA_BITS_PER_BYTE; bit_i++) {
1143 for (bit_j = 0; bit_j < bit_i; bit_j++) {
4a073c53 1144 int distance_to_best[4];
19ddcd04 1145
4a073c53
MD
1146 distance_to_best[0] = (nr_2d_11[bit_i][bit_j] << 2U) - distrib_nr_child;
1147 distance_to_best[1] = (nr_2d_10[bit_i][bit_j] << 2U) - distrib_nr_child;
1148 distance_to_best[2] = (nr_2d_01[bit_i][bit_j] << 2U) - distrib_nr_child;
1149 distance_to_best[3] = (nr_2d_00[bit_i][bit_j] << 2U) - distrib_nr_child;
19ddcd04 1150
4a073c53
MD
1151			/* Keep the worst (largest positive) deviation among the quadrants */
1152 if (distance_to_best[1] > 0 && distance_to_best[1] > distance_to_best[0])
19ddcd04 1153 distance_to_best[0] = distance_to_best[1];
4a073c53 1154 if (distance_to_best[2] > 0 && distance_to_best[2] > distance_to_best[0])
19ddcd04 1155 distance_to_best[0] = distance_to_best[2];
4a073c53 1156 if (distance_to_best[3] > 0 && distance_to_best[3] > distance_to_best[0])
19ddcd04 1157 distance_to_best[0] = distance_to_best[3];
4a073c53 1158
19ddcd04
MD
1159 /*
1160 * If our worst deviation is better than the overall best,
1161 * this bit pair becomes the new best candidate.
1162 */
1163 if (distance_to_best[0] < overall_best_distance) {
1164 overall_best_distance = distance_to_best[0];
1165 bitsel[0] = bit_i;
1166 bitsel[1] = bit_j;
1167 }
1168 }
1169 }
1170
1171 dbg_printf("2 dimensions pool bit selection: (%u,%u)\n", bitsel[0], bitsel[1]);
1172
1173 /* Return our bit selection */
1174 _bitsel[0] = bitsel[0];
1175 _bitsel[1] = bitsel[1];
1176}
1177
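/*
 * Worked example (added for illustration): with 8 children, a bit pair
 * that splits them into quadrants of sizes 2/2/2/2 has deviations
 * 4*2 - 8 = 0 everywhere, hence a worst-case deviation of 0 and wins;
 * a pair splitting them 4/2/1/1 has deviations 8/0/-4/-4, hence a
 * worst-case deviation of 8.
 */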
7a0b2331
MD
1178/*
1179 * ja_node_recompact: recompact a node, adding, re-packing or removing a child depending on the mode.
2e313670 1180 * Return 0 on success, -EAGAIN if need to retry, or other negative
5a9a87dd 1181 * error value otherwise.
7a0b2331
MD
1182 */
1183static
2e313670
MD
1184int ja_node_recompact(enum ja_recompact mode,
1185 struct cds_ja *ja,
e1db2db5 1186 unsigned int old_type_index,
d96bfb0d 1187 const struct cds_ja_type *old_type,
b4540e8a 1188 struct cds_ja_inode *old_node,
5a9a87dd 1189 struct cds_ja_shadow_node *shadow_node,
3d8fe307 1190 struct cds_ja_inode_flag **old_node_flag_ptr, uint8_t n,
af3cbd45
MD
1191 struct cds_ja_inode_flag *child_node_flag,
1192 struct cds_ja_inode_flag **nullify_node_flag_ptr)
7a0b2331 1193{
e1db2db5 1194 unsigned int new_type_index;
b4540e8a 1195 struct cds_ja_inode *new_node;
af3cbd45 1196 struct cds_ja_shadow_node *new_shadow_node = NULL;
d96bfb0d 1197 const struct cds_ja_type *new_type;
3d8fe307 1198 struct cds_ja_inode_flag *new_node_flag, *old_node_flag;
7a0b2331 1199 int ret;
f07b240f 1200 int fallback = 0;
7a0b2331 1201
3d8fe307
MD
1202 old_node_flag = *old_node_flag_ptr;
1203
2e313670 1204 switch (mode) {
19ddcd04
MD
1205 case JA_RECOMPACT_ADD_SAME:
1206 if (old_type->type_class == RCU_JA_POOL) {
1207 /*
1208 * For pool type, try redistributing
1209 * into a different distribution of same
1210 * size if we have not reached limits.
1211 */
1212 if (shadow_node->nr_child + 1 > old_type->max_child) {
1213 new_type_index = old_type_index + 1;
1214 } else if (shadow_node->nr_child + 1 < old_type->min_child) {
1215 new_type_index = old_type_index - 1;
1216 } else {
1217 new_type_index = old_type_index;
1218 }
1219 } else {
1220 new_type_index = old_type_index;
1221 }
2e313670 1222 break;
19ddcd04 1223 case JA_RECOMPACT_ADD_NEXT:
2e313670
MD
1224 if (!shadow_node || old_type_index == NODE_INDEX_NULL) {
1225 new_type_index = 0;
1226 } else {
19ddcd04
MD
1227 if (old_type->type_class == RCU_JA_POOL) {
1228 /*
1229 * For pool type, try redistributing
1230 * into a different distribution of same
1231 * size if we have not reached limits.
1232 */
1233 if (shadow_node->nr_child + 1 > old_type->max_child) {
1234 new_type_index = old_type_index + 1;
1235 } else {
1236 new_type_index = old_type_index;
1237 }
1238 } else {
1239 new_type_index = old_type_index + 1;
1240 }
2e313670
MD
1241 }
1242 break;
1243 case JA_RECOMPACT_DEL:
1244 if (old_type_index == 0) {
1245 new_type_index = NODE_INDEX_NULL;
1246 } else {
19ddcd04
MD
1247 if (old_type->type_class == RCU_JA_POOL) {
1248 /*
1249 * For pool type, try redistributing
1250 * into a different distribution of same
1251 * size if we have not reached limits.
1252 */
1253 if (shadow_node->nr_child - 1 < old_type->min_child) {
1254 new_type_index = old_type_index - 1;
1255 } else {
1256 new_type_index = old_type_index;
1257 }
1258 } else {
1259 new_type_index = old_type_index - 1;
1260 }
2e313670
MD
1261 }
1262 break;
1263 default:
1264 assert(0);
7a0b2331 1265 }
a2a7ff59 1266
f07b240f 1267retry: /* for fallback */
582a6ade
MD
1268 dbg_printf("Recompact from type %d to type %d\n",
1269 old_type_index, new_type_index);
7a0b2331 1270 new_type = &ja_types[new_type_index];
2e313670
MD
1271 if (new_type_index != NODE_INDEX_NULL) {
1272 new_node = alloc_cds_ja_node(new_type);
1273 if (!new_node)
1274 return -ENOMEM;
b1a90ce3
MD
1275
1276 if (new_type->type_class == RCU_JA_POOL) {
1277 switch (new_type->nr_pool_order) {
1278 case 1:
1279 {
19ddcd04
MD
1280 unsigned int node_distrib_bitsel;
1281
b1a90ce3
MD
1282 node_distrib_bitsel =
1283 ja_node_sum_distribution_1d(mode, ja,
1284 old_type_index, old_type,
1285 old_node, shadow_node,
1286 n, child_node_flag,
1287 nullify_node_flag_ptr);
1288 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1289 new_node_flag = ja_node_flag_pool_1d(new_node,
1290 new_type_index, node_distrib_bitsel);
1291 break;
1292 }
1293 case 2:
1294 {
19ddcd04
MD
1295 unsigned int node_distrib_bitsel[2];
1296
1297 ja_node_sum_distribution_2d(mode, ja,
1298 old_type_index, old_type,
1299 old_node, shadow_node,
1300 n, child_node_flag,
1301 nullify_node_flag_ptr,
1302 node_distrib_bitsel);
b1a90ce3
MD
1303 assert(!((unsigned long) new_node & JA_POOL_1D_MASK));
1304 assert(!((unsigned long) new_node & JA_POOL_2D_MASK));
19ddcd04
MD
1305 new_node_flag = ja_node_flag_pool_2d(new_node,
1306 new_type_index, node_distrib_bitsel);
b1a90ce3
MD
1307 break;
1308 }
1309 default:
1310 assert(0);
1311 }
1312 } else {
1313 new_node_flag = ja_node_flag(new_node, new_type_index);
1314 }
1315
2e313670 1316 dbg_printf("Recompact inherit lock from %p\n", shadow_node);
3d8fe307 1317 new_shadow_node = rcuja_shadow_set(ja->ht, new_node_flag, shadow_node, ja);
2e313670
MD
1318 if (!new_shadow_node) {
1319 free(new_node);
1320 return -ENOMEM;
1321 }
1322 if (fallback)
1323 new_shadow_node->fallback_removal_count =
1324 JA_FALLBACK_REMOVAL_COUNT;
1325 } else {
1326 new_node = NULL;
1327 new_node_flag = NULL;
e1db2db5 1328 }
11c5e016 1329
19ddcd04 1330 assert(mode != JA_RECOMPACT_ADD_NEXT || old_type->type_class != RCU_JA_PIGEON);
2e313670
MD
1331
1332 if (new_type_index == NODE_INDEX_NULL)
1333 goto skip_copy;
1334
11c5e016
MD
1335 switch (old_type->type_class) {
1336 case RCU_JA_LINEAR:
1337 {
1338 uint8_t nr_child =
1339 ja_linear_node_get_nr_child(old_type, old_node);
1340 unsigned int i;
1341
1342 for (i = 0; i < nr_child; i++) {
b4540e8a 1343 struct cds_ja_inode_flag *iter;
11c5e016
MD
1344 uint8_t v;
1345
1346 ja_linear_node_get_ith_pos(old_type, old_node, i, &v, &iter);
1347 if (!iter)
1348 continue;
af3cbd45 1349 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1350 continue;
b1a90ce3 1351 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
f07b240f 1352 new_shadow_node,
11c5e016 1353 v, iter);
f07b240f
MD
1354 if (new_type->type_class == RCU_JA_POOL && ret) {
1355 goto fallback_toosmall;
1356 }
11c5e016
MD
1357 assert(!ret);
1358 }
1359 break;
1360 }
1361 case RCU_JA_POOL:
1362 {
1363 unsigned int pool_nr;
1364
1365 for (pool_nr = 0; pool_nr < (1U << old_type->nr_pool_order); pool_nr++) {
b4540e8a 1366 struct cds_ja_inode *pool =
11c5e016
MD
1367 ja_pool_node_get_ith_pool(old_type,
1368 old_node, pool_nr);
1369 uint8_t nr_child =
1370 ja_linear_node_get_nr_child(old_type, pool);
1371 unsigned int j;
1372
1373 for (j = 0; j < nr_child; j++) {
b4540e8a 1374 struct cds_ja_inode_flag *iter;
11c5e016
MD
1375 uint8_t v;
1376
1377 ja_linear_node_get_ith_pos(old_type, pool,
1378 j, &v, &iter);
1379 if (!iter)
1380 continue;
af3cbd45 1381 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1382 continue;
b1a90ce3 1383 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
f07b240f 1384 new_shadow_node,
11c5e016 1385 v, iter);
f07b240f
MD
1386 if (new_type->type_class == RCU_JA_POOL
1387 && ret) {
1388 goto fallback_toosmall;
1389 }
11c5e016
MD
1390 assert(!ret);
1391 }
1392 }
1393 break;
7a0b2331 1394 }
a2a7ff59 1395 case RCU_JA_NULL:
19ddcd04 1396 assert(mode == JA_RECOMPACT_ADD_NEXT);
a2a7ff59 1397 break;
11c5e016 1398 case RCU_JA_PIGEON:
2e313670
MD
1399 {
1400 uint8_t nr_child;
1401 unsigned int i;
1402
1403 assert(mode == JA_RECOMPACT_DEL);
1404 nr_child = shadow_node->nr_child;
1405 for (i = 0; i < nr_child; i++) {
1406 struct cds_ja_inode_flag *iter;
1407
1408 iter = ja_pigeon_node_get_ith_pos(old_type, old_node, i);
1409 if (!iter)
1410 continue;
af3cbd45 1411 if (mode == JA_RECOMPACT_DEL && *nullify_node_flag_ptr == iter)
2e313670 1412 continue;
b1a90ce3 1413 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
2e313670
MD
1414 new_shadow_node,
1415 i, iter);
1416 if (new_type->type_class == RCU_JA_POOL && ret) {
1417 goto fallback_toosmall;
1418 }
1419 assert(!ret);
1420 }
1421 break;
1422 }
11c5e016
MD
1423 default:
1424 assert(0);
5a9a87dd 1425 ret = -EINVAL;
f07b240f 1426 goto end;
11c5e016 1427 }
2e313670 1428skip_copy:
11c5e016 1429
19ddcd04 1430 if (mode == JA_RECOMPACT_ADD_NEXT || mode == JA_RECOMPACT_ADD_SAME) {
2e313670 1431 /* add node */
b1a90ce3 1432 ret = _ja_node_set_nth(new_type, new_node, new_node_flag,
2e313670
MD
1433 new_shadow_node,
1434 n, child_node_flag);
7b413155
MD
1435 if (new_type->type_class == RCU_JA_POOL && ret) {
1436 goto fallback_toosmall;
1437 }
2e313670
MD
1438 assert(!ret);
1439 }
19ddcd04
MD
1440
1441 if (fallback) {
1442 dbg_printf("Using fallback for %u children, node type index: %u, mode %s\n",
1443 new_shadow_node->nr_child, old_type_index, mode == JA_RECOMPACT_ADD_NEXT ? "add_next" :
1444 (mode == JA_RECOMPACT_DEL ? "del" : "add_same"));
1445 uatomic_inc(&node_fallback_count_distribution[new_shadow_node->nr_child]);
1446 }
1447
3d8fe307
MD
1448 /* Return pointer to new recompacted node through old_node_flag_ptr */
1449 *old_node_flag_ptr = new_node_flag;
a2a7ff59 1450 if (old_node) {
2e313670
MD
1451 int flags;
1452
1453 flags = RCUJA_SHADOW_CLEAR_FREE_NODE;
1454 /*
1455 * It is OK to free the lock associated with a node
1456 * going to NULL, since we are holding the parent lock.
1457 * This synchronizes removal with re-add of that node.
1458 */
1459 if (new_type_index == NODE_INDEX_NULL)
1460 flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
3d8fe307 1461 ret = rcuja_shadow_clear(ja->ht, old_node_flag, shadow_node,
2e313670 1462 flags);
a2a7ff59
MD
1463 assert(!ret);
1464 }
5a9a87dd
MD
1465
1466 ret = 0;
f07b240f 1467end:
5a9a87dd 1468 return ret;
f07b240f
MD
1469
1470fallback_toosmall:
1471 /* fallback if next pool is too small */
af3cbd45 1472 assert(new_shadow_node);
3d8fe307 1473 ret = rcuja_shadow_clear(ja->ht, new_node_flag, new_shadow_node,
f07b240f
MD
1474 RCUJA_SHADOW_CLEAR_FREE_NODE);
1475 assert(!ret);
1476
19ddcd04
MD
1477 switch (mode) {
1478 case JA_RECOMPACT_ADD_SAME:
1479 /*
1480 * JA_RECOMPACT_ADD_SAME is only triggered if a linear
1481 * node within a pool has unused entries. It should
1482 * therefore _never_ be too small.
1483 */
4a073c53 1484 assert(0);
4cde8267
MD
1485
1486 /* Fall-through */
19ddcd04
MD
1487 case JA_RECOMPACT_ADD_NEXT:
1488 {
1489 const struct cds_ja_type *next_type;
1490
1491 /*
1492 * Recompaction attempt on add failed. Should only
1493 * happen if target node type is pool. Caused by
1494 * hard-to-split distribution. Recompact using the next
1495 * distribution size.
1496 */
1497 assert(new_type->type_class == RCU_JA_POOL);
1498 next_type = &ja_types[new_type_index + 1];
1499 /*
1500 * Try going to the next pool size if our population
1501 * fits within its range. This is not flagged as a
1502 * fallback.
1503 */
1504 if (shadow_node->nr_child + 1 >= next_type->min_child
1505 && shadow_node->nr_child + 1 <= next_type->max_child) {
1506 new_type_index++;
1507 goto retry;
1508 } else {
1509 new_type_index++;
1510 dbg_printf("Add fallback to type %d\n", new_type_index);
1511 uatomic_inc(&ja->nr_fallback);
1512 fallback = 1;
1513 goto retry;
1514 }
1515 break;
1516 }
1517 case JA_RECOMPACT_DEL:
1518 /*
1519 * Recompaction attempt on delete failed. Should only
1520 * happen if target node type is pool. This is caused by
1521 * a hard-to-split distribution. Recompact on same node
1522 * size, but flag current node as "fallback" to ensure
1523 * we don't attempt recompaction before some activity
1524 * has reshuffled our node.
1525 */
1526 assert(new_type->type_class == RCU_JA_POOL);
1527 new_type_index = old_type_index;
1528 dbg_printf("Delete fallback keeping type %d\n", new_type_index);
1529 uatomic_inc(&ja->nr_fallback);
1530 fallback = 1;
1531 goto retry;
1532 default:
1533 assert(0);
1534 return -EINVAL;
1535 }
1536
1537 /*
1538 * Last resort fallback: pigeon.
1539 */
f07b240f
MD
1540 new_type_index = (1UL << JA_TYPE_BITS) - 1;
1541 dbg_printf("Fallback to type %d\n", new_type_index);
1542 uatomic_inc(&ja->nr_fallback);
1543 fallback = 1;
1544 goto retry;
7a0b2331
MD
1545}
1546
5a9a87dd 1547/*
2e313670 1548 * Return 0 on success, -EAGAIN if need to retry, or other negative
5a9a87dd
MD
1549 * error value otherwise.
1550 */
7a0b2331 1551static
d96bfb0d 1552int ja_node_set_nth(struct cds_ja *ja,
b4540e8a 1553 struct cds_ja_inode_flag **node_flag, uint8_t n,
5a9a87dd
MD
1554 struct cds_ja_inode_flag *child_node_flag,
1555 struct cds_ja_shadow_node *shadow_node)
7a0b2331
MD
1556{
1557 int ret;
e1db2db5 1558 unsigned int type_index;
d96bfb0d 1559 const struct cds_ja_type *type;
b4540e8a 1560 struct cds_ja_inode *node;
7a0b2331 1561
a2a7ff59
MD
1562 dbg_printf("ja_node_set_nth for n=%u, node %p, shadow %p\n",
1563 (unsigned int) n, ja_node_ptr(*node_flag), shadow_node);
1564
e1db2db5
MD
1565 node = ja_node_ptr(*node_flag);
1566 type_index = ja_node_type(*node_flag);
1567 type = &ja_types[type_index];
b1a90ce3 1568 ret = _ja_node_set_nth(type, node, *node_flag, shadow_node,
e1db2db5 1569 n, child_node_flag);
2e313670
MD
1570 switch (ret) {
1571 case -ENOSPC:
19ddcd04
MD
1572 /* Not enough space in node, need to recompact to next type. */
1573 ret = ja_node_recompact(JA_RECOMPACT_ADD_NEXT, ja, type_index, type, node,
af3cbd45 1574 shadow_node, node_flag, n, child_node_flag, NULL);
2e313670
MD
1575 break;
1576 case -ERANGE:
1577 /* Node needs to be recompacted. */
19ddcd04 1578 ret = ja_node_recompact(JA_RECOMPACT_ADD_SAME, ja, type_index, type, node,
af3cbd45 1579 shadow_node, node_flag, n, child_node_flag, NULL);
2e313670
MD
1580 break;
1581 }
1582 return ret;
1583}
1584
1585/*
1586 * Return 0 on success, -EAGAIN if need to retry, or other negative
1587 * error value otherwise.
1588 */
1589static
af3cbd45
MD
1590int ja_node_clear_ptr(struct cds_ja *ja,
1591 struct cds_ja_inode_flag **node_flag_ptr, /* Pointer to location to nullify */
1592 struct cds_ja_inode_flag **parent_node_flag_ptr, /* Address of parent ptr in its parent */
1593 struct cds_ja_shadow_node *shadow_node, /* of parent */
1594 uint8_t n)
2e313670
MD
1595{
1596 int ret;
1597 unsigned int type_index;
1598 const struct cds_ja_type *type;
1599 struct cds_ja_inode *node;
1600
af3cbd45
MD
1601 dbg_printf("ja_node_clear_ptr for node %p, shadow %p, target ptr %p\n",
1602 ja_node_ptr(*parent_node_flag_ptr), shadow_node, node_flag_ptr);
2e313670 1603
af3cbd45
MD
1604 node = ja_node_ptr(*parent_node_flag_ptr);
1605 type_index = ja_node_type(*parent_node_flag_ptr);
2e313670 1606 type = &ja_types[type_index];
19ddcd04 1607 ret = _ja_node_clear_ptr(type, node, *parent_node_flag_ptr, shadow_node, node_flag_ptr, n);
2e313670 1608 if (ret == -EFBIG) {
19ddcd04 1609 /* Should try recompaction. */
2e313670 1610 ret = ja_node_recompact(JA_RECOMPACT_DEL, ja, type_index, type, node,
af3cbd45
MD
1611 shadow_node, parent_node_flag_ptr, n, NULL,
1612 node_flag_ptr);
7a0b2331
MD
1613 }
1614 return ret;
1615}

struct cds_hlist_head cds_ja_lookup(struct cds_ja *ja, uint64_t key)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *node_flag;
	struct cds_hlist_head head = { NULL };

	if (caa_unlikely(key > ja->key_max))
		return head;
	tree_depth = ja->tree_depth;
	node_flag = rcu_dereference(ja->root);

	/* level 0: root node */
	if (!ja_node_ptr(node_flag))
		return head;

	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		node_flag = ja_node_get_nth(node_flag, NULL, NULL, NULL,
			iter_key);
		dbg_printf("cds_ja_lookup iter key lookup %u finds node_flag %p\n",
			(unsigned int) iter_key, node_flag);
		if (!ja_node_ptr(node_flag))
			return head;
	}

	/* Last level lookup succeeded. We got an actual match. */
	head.next = (struct cds_hlist_node *) node_flag;
	return head;
}
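
A caller-side sketch of the lookup path, assuming a hypothetical struct my_entry that embeds struct cds_ja_node; the rcu_read_lock()/rcu_read_unlock() pairing and the iteration over the duplicate list mirror the conventions already used in this file.

/*
 * Example (not part of rcuja): count all entries stored under "key".
 * struct my_entry and my_entry_count are hypothetical caller code.
 */
struct my_entry {
	struct cds_ja_node node;	/* must be embedded in user data */
	int value;
};

static int my_entry_count(struct cds_ja *ja, uint64_t key)
{
	struct cds_hlist_head head;
	struct cds_hlist_node *pos;
	struct cds_ja_node *ja_node;
	int count = 0;

	rcu_read_lock();
	head = cds_ja_lookup(ja, key);
	cds_hlist_for_each_entry_rcu(ja_node, pos, &head, list) {
		struct my_entry *entry =
			caa_container_of(ja_node, struct my_entry, node);

		(void) entry;	/* inspect entry->value here */
		count++;
	}
	rcu_read_unlock();
	return count;
}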

/*
 * We reached an unpopulated node. Create it and the children we need,
 * and then attach the entire branch to the current node. This may
 * trigger recompaction of the current node. Locks needed: node lock
 * (for add), and, possibly, parent node lock (to update pointer due to
 * node recompaction).
 *
 * First take node lock, check if recompaction is needed, then take
 * parent lock (if needed). Then we can proceed to create the new
 * branch. Publish the new branch, and release locks.
 * TODO: we currently always take the parent lock even when not needed.
 */
static
int ja_attach_node(struct cds_ja *ja,
		struct cds_ja_inode_flag **attach_node_flag_ptr,
		struct cds_ja_inode_flag *attach_node_flag,
		struct cds_ja_inode_flag **node_flag_ptr,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_inode_flag *parent_node_flag,
		uint64_t key,
		unsigned int level,
		struct cds_ja_node *child_node)
{
	struct cds_ja_shadow_node *shadow_node = NULL,
			*parent_shadow_node = NULL;
	struct cds_ja_inode *node = ja_node_ptr(node_flag);
	struct cds_ja_inode *parent_node = ja_node_ptr(parent_node_flag);
	struct cds_hlist_head head;
	struct cds_ja_inode_flag *iter_node_flag, *iter_dest_node_flag;
	int ret, i;
	struct cds_ja_inode_flag *created_nodes[JA_MAX_DEPTH];
	int nr_created_nodes = 0;

	dbg_printf("Attach node at level %u (node %p, node_flag %p)\n",
		level, node, node_flag);

	assert(node);
	shadow_node = rcuja_shadow_lookup_lock(ja->ht, node_flag);
	if (!shadow_node) {
		ret = -EAGAIN;
		goto end;
	}
	if (parent_node) {
		parent_shadow_node = rcuja_shadow_lookup_lock(ja->ht,
				parent_node_flag);
		if (!parent_shadow_node) {
			ret = -EAGAIN;
			goto unlock_shadow;
		}
	}

	if (node_flag_ptr && ja_node_ptr(*node_flag_ptr)) {
		/*
		 * Target node has been updated between RCU lookup and
		 * lock acquisition. We need to re-try lookup and
		 * attach.
		 */
		ret = -EAGAIN;
		goto unlock_parent;
	}

	if (attach_node_flag_ptr && ja_node_ptr(*attach_node_flag_ptr) !=
			ja_node_ptr(attach_node_flag)) {
		/*
		 * Target node has been updated between RCU lookup and
		 * lock acquisition. We need to re-try lookup and
		 * attach.
		 */
		ret = -EAGAIN;
		goto unlock_parent;
	}

	/* Create new branch, starting from bottom */
	CDS_INIT_HLIST_HEAD(&head);
	cds_hlist_add_head_rcu(&child_node->list, &head);
	iter_node_flag = (struct cds_ja_inode_flag *) head.next;

	for (i = ja->tree_depth; i > (int) level; i--) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - i)));
		dbg_printf("branch creation level %d, key %u\n",
				i - 1, (unsigned int) iter_key);
		iter_dest_node_flag = NULL;
		ret = ja_node_set_nth(ja, &iter_dest_node_flag,
			iter_key,
			iter_node_flag,
			NULL);
		if (ret)
			goto check_error;
		created_nodes[nr_created_nodes++] = iter_dest_node_flag;
		iter_node_flag = iter_dest_node_flag;
	}

	if (level > 1) {
		uint8_t iter_key;

		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (ja->tree_depth - level)));
		/* We need to use set_nth on the previous level. */
		iter_dest_node_flag = node_flag;
		ret = ja_node_set_nth(ja, &iter_dest_node_flag,
			iter_key,
			iter_node_flag,
			shadow_node);
		if (ret)
			goto check_error;
		created_nodes[nr_created_nodes++] = iter_dest_node_flag;
		iter_node_flag = iter_dest_node_flag;
	}

	/* Publish new branch */
	dbg_printf("Publish branch %p, replacing %p\n",
		iter_node_flag, *attach_node_flag_ptr);
	rcu_assign_pointer(*attach_node_flag_ptr, iter_node_flag);

	/* Success */
	ret = 0;

check_error:
	if (ret) {
		for (i = 0; i < nr_created_nodes; i++) {
			int tmpret;
			int flags;

			flags = RCUJA_SHADOW_CLEAR_FREE_LOCK;
			if (i)
				flags |= RCUJA_SHADOW_CLEAR_FREE_NODE;
			tmpret = rcuja_shadow_clear(ja->ht,
					created_nodes[i],
					NULL,
					flags);
			assert(!tmpret);
		}
	}
unlock_parent:
	if (parent_shadow_node)
		rcuja_shadow_unlock(parent_shadow_node);
unlock_shadow:
	if (shadow_node)
		rcuja_shadow_unlock(shadow_node);
end:
	return ret;
}
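
The iter_key computations in ja_attach_node(), cds_ja_lookup() and cds_ja_add() all slice the key into one byte per tree level, most-significant byte first. A self-contained sketch of that decomposition (not rcuja code, and assuming 8-bit digits as JA_BITS_PER_BYTE implies):

/*
 * Stand-alone sketch: print the per-level byte indices that the
 * traversal loops above derive from a key, for a hypothetical
 * tree_depth of 9 (64-bit keys: 1 root level + 8 byte levels).
 */
#include <stdio.h>
#include <stdint.h>

static void example_print_key_slices(uint64_t key, unsigned int tree_depth)
{
	unsigned int i;

	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key = (uint8_t) (key >> (8 * (tree_depth - i - 1)));

		printf("level %u uses index %u\n", i, (unsigned int) iter_key);
	}
}
/* example_print_key_slices(0x0102030405060708ULL, 9) prints 1, 2, ..., 8. */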

/*
 * Lock the parent containing the hlist head pointer, and add node to list of
 * duplicates. Failure can happen if concurrent update changes the
 * parent before we get the lock. We return -EAGAIN in that case.
 * Return 0 on success, negative error value on failure.
 */
static
int ja_chain_node(struct cds_ja *ja,
		struct cds_ja_inode_flag *parent_node_flag,
		struct cds_ja_inode_flag **node_flag_ptr,
		struct cds_ja_inode_flag *node_flag,
		struct cds_hlist_head *head,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_node;
	int ret = 0;

	shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
	if (!shadow_node) {
		return -EAGAIN;
	}
	if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
		ret = -EAGAIN;
		goto end;
	}
	cds_hlist_add_head_rcu(&node->list, head);
end:
	rcuja_shadow_unlock(shadow_node);
	return ret;
}
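
Because ja_chain_node() links duplicates onto the per-key hlist, adding two distinct nodes under the same key is legal and both stay reachable from the list head cds_ja_lookup() returns for that key. A hedged caller-side sketch, reusing the hypothetical struct my_entry from the lookup example and assuming updates run inside an RCU read-side critical section:

/*
 * Example (not part of rcuja): distinct nodes added under the same key
 * are chained as duplicates rather than rejected.
 */
static int example_add_duplicate_pair(struct cds_ja *ja, uint64_t key,
		struct my_entry *a, struct my_entry *b)
{
	int ret;

	rcu_read_lock();
	ret = cds_ja_add(ja, key, &a->node);
	if (!ret)
		ret = cds_ja_add(ja, key, &b->node);
	rcu_read_unlock();
	return ret;
}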

int cds_ja_add(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *new_node)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag **attach_node_flag_ptr,
			**node_flag_ptr;
	struct cds_ja_inode_flag *node_flag,
			*parent_node_flag,
			*parent2_node_flag,
			*attach_node_flag;
	int ret;

	if (caa_unlikely(key > ja->key_max)) {
		return -EINVAL;
	}
	tree_depth = ja->tree_depth;

retry:
	dbg_printf("cds_ja_add attempt: key %" PRIu64 ", node %p\n",
		key, new_node);
	parent2_node_flag = NULL;
	parent_node_flag =
		(struct cds_ja_inode_flag *) &ja->root;	/* Use root ptr address as key for mutex */
	attach_node_flag_ptr = &ja->root;
	attach_node_flag = rcu_dereference(ja->root);
	node_flag_ptr = &ja->root;
	node_flag = rcu_dereference(ja->root);

	/* Iterate on all internal levels */
	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		dbg_printf("cds_ja_add iter attach_node_flag_ptr %p node_flag_ptr %p node_flag %p\n",
			attach_node_flag_ptr, node_flag_ptr, node_flag);
		if (!ja_node_ptr(node_flag)) {
			ret = ja_attach_node(ja, attach_node_flag_ptr,
					attach_node_flag,
					node_flag_ptr,
					parent_node_flag,
					parent2_node_flag,
					key, i, new_node);
			if (ret == -EAGAIN || ret == -EEXIST)
				goto retry;
			else
				goto end;
		}
		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		parent2_node_flag = parent_node_flag;
		parent_node_flag = node_flag;
		node_flag = ja_node_get_nth(node_flag,
				&attach_node_flag_ptr,
				&attach_node_flag,
				&node_flag_ptr,
				iter_key);
		dbg_printf("cds_ja_add iter key lookup %u finds node_flag %p attach_node_flag_ptr %p node_flag_ptr %p\n",
			(unsigned int) iter_key, node_flag,
			attach_node_flag_ptr,
			node_flag_ptr);
	}

	/*
	 * We reached bottom of tree, simply add node to last internal
	 * level, or chain it if key is already present.
	 */
	if (!ja_node_ptr(node_flag)) {
		dbg_printf("cds_ja_add attach_node_flag_ptr %p node_flag_ptr %p node_flag %p\n",
			attach_node_flag_ptr, node_flag_ptr, node_flag);
		ret = ja_attach_node(ja, attach_node_flag_ptr,
				attach_node_flag,
				node_flag_ptr, parent_node_flag,
				parent2_node_flag, key, i, new_node);
	} else {
		ret = ja_chain_node(ja,
			parent_node_flag,
			node_flag_ptr,
			node_flag,
			(struct cds_hlist_head *) attach_node_flag_ptr,
			new_node);
	}
	if (ret == -EAGAIN || ret == -EEXIST)
		goto retry;
end:
	return ret;
}
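
A caller-side insertion sketch, again reusing the hypothetical struct my_entry and assuming the call is made from within an RCU read-side critical section; cds_ja_add() absorbs -EAGAIN/-EEXIST retries internally, so the caller only sees the final status.

/*
 * Example (not part of rcuja): allocate a user entry and insert it
 * under "key". Relies on <stdlib.h> for calloc()/free().
 */
static int example_insert(struct cds_ja *ja, uint64_t key, int value)
{
	struct my_entry *entry;
	int ret;

	entry = calloc(1, sizeof(*entry));
	if (!entry)
		return -ENOMEM;
	entry->value = value;
	rcu_read_lock();
	ret = cds_ja_add(ja, key, &entry->node);
	rcu_read_unlock();
	if (ret)
		free(entry);	/* never published, safe to free immediately */
	return ret;
}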

/*
 * Note: there is no need to lookup the pointer address associated with
 * each node's nth item after taking the lock: it's already been done by
 * cds_ja_del while holding the rcu read-side lock, and our node rules
 * ensure that when a match value -> pointer is found in a node, it is
 * _NEVER_ changed for that node without recompaction, and recompaction
 * reallocates the node.
 * However, when a child is removed from "linear" nodes, its pointer
 * is set to NULL. We therefore check, while holding the locks, if this
 * pointer is NULL, and return -ENOENT to the caller if it is the case.
 */
static
int ja_detach_node(struct cds_ja *ja,
		struct cds_ja_inode_flag **snapshot,
		struct cds_ja_inode_flag ***snapshot_ptr,
		uint8_t *snapshot_n,
		int nr_snapshot,
		uint64_t key,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_nodes[JA_MAX_DEPTH];
	struct cds_ja_inode_flag **node_flag_ptr = NULL,
			*parent_node_flag = NULL,
			**parent_node_flag_ptr = NULL;
	struct cds_ja_inode_flag *iter_node_flag;
	int ret, i, nr_shadow = 0, nr_clear = 0, nr_branch = 0;
	uint8_t n = 0;

	assert(nr_snapshot == ja->tree_depth + 1);

	/*
	 * From the last internal level node going up, get the node
	 * lock, check if the node has only one child left. If it is the
	 * case, we continue iterating upward. When we reach a node
	 * which has more than one child left, we lock the parent, and
	 * proceed to the node deletion (removing its children too).
	 */
	for (i = nr_snapshot - 2; i >= 1; i--) {
		struct cds_ja_shadow_node *shadow_node;

		shadow_node = rcuja_shadow_lookup_lock(ja->ht,
				snapshot[i]);
		if (!shadow_node) {
			ret = -EAGAIN;
			goto end;
		}
		shadow_nodes[nr_shadow++] = shadow_node;

		/*
		 * Check if node has been removed between RCU
		 * lookup and lock acquisition.
		 */
		assert(snapshot_ptr[i + 1]);
		if (ja_node_ptr(*snapshot_ptr[i + 1])
				!= ja_node_ptr(snapshot[i + 1])) {
			ret = -ENOENT;
			goto end;
		}

		assert(shadow_node->nr_child > 0);
		if (shadow_node->nr_child == 1 && i > 1)
			nr_clear++;
		nr_branch++;
		if (shadow_node->nr_child > 1 || i == 1) {
			/* Lock parent and break */
			shadow_node = rcuja_shadow_lookup_lock(ja->ht,
					snapshot[i - 1]);
			if (!shadow_node) {
				ret = -EAGAIN;
				goto end;
			}
			shadow_nodes[nr_shadow++] = shadow_node;

			/*
			 * Check if node has been removed between RCU
			 * lookup and lock acquisition.
			 */
			assert(snapshot_ptr[i]);
			if (ja_node_ptr(*snapshot_ptr[i])
					!= ja_node_ptr(snapshot[i])) {
				ret = -ENOENT;
				goto end;
			}

			node_flag_ptr = snapshot_ptr[i + 1];
			n = snapshot_n[i + 1];
			parent_node_flag_ptr = snapshot_ptr[i];
			parent_node_flag = snapshot[i];

			if (i > 1) {
				/*
				 * Lock parent's parent, in case we need
				 * to recompact parent.
				 */
				shadow_node = rcuja_shadow_lookup_lock(ja->ht,
						snapshot[i - 2]);
				if (!shadow_node) {
					ret = -EAGAIN;
					goto end;
				}
				shadow_nodes[nr_shadow++] = shadow_node;

				/*
				 * Check if node has been removed between RCU
				 * lookup and lock acquisition.
				 */
				assert(snapshot_ptr[i - 1]);
				if (ja_node_ptr(*snapshot_ptr[i - 1])
						!= ja_node_ptr(snapshot[i - 1])) {
					ret = -ENOENT;
					goto end;
				}
			}

			break;
		}
	}

	/*
	 * At this point, we want to delete all nodes that are about to
	 * be removed from shadow_nodes (except the last one, which is
	 * either the root or the parent of the upmost node with 1
	 * child). OK to free lock here, because RCU read lock is held,
	 * and free only performed in call_rcu.
	 */

	for (i = 0; i < nr_clear; i++) {
		ret = rcuja_shadow_clear(ja->ht,
				shadow_nodes[i]->node_flag,
				shadow_nodes[i],
				RCUJA_SHADOW_CLEAR_FREE_NODE
				| RCUJA_SHADOW_CLEAR_FREE_LOCK);
		assert(!ret);
	}

	iter_node_flag = parent_node_flag;
	/* Remove from parent */
	ret = ja_node_clear_ptr(ja,
		node_flag_ptr,		/* Pointer to location to nullify */
		&iter_node_flag,	/* Old and new parent ptr in its parent */
		shadow_nodes[nr_branch - 1],	/* of parent */
		n);
	if (ret)
		goto end;

	dbg_printf("ja_detach_node: publish %p instead of %p\n",
		iter_node_flag, *parent_node_flag_ptr);
	/* Update address of parent ptr in its parent */
	rcu_assign_pointer(*parent_node_flag_ptr, iter_node_flag);

end:
	for (i = 0; i < nr_shadow; i++)
		rcuja_shadow_unlock(shadow_nodes[i]);
	return ret;
}

static
int ja_unchain_node(struct cds_ja *ja,
		struct cds_ja_inode_flag *parent_node_flag,
		struct cds_ja_inode_flag **node_flag_ptr,
		struct cds_ja_inode_flag *node_flag,
		struct cds_ja_node *node)
{
	struct cds_ja_shadow_node *shadow_node;
	struct cds_hlist_node *hlist_node;
	struct cds_hlist_head hlist_head;
	int ret = 0, count = 0, found = 0;

	shadow_node = rcuja_shadow_lookup_lock(ja->ht, parent_node_flag);
	if (!shadow_node)
		return -EAGAIN;
	if (ja_node_ptr(*node_flag_ptr) != ja_node_ptr(node_flag)) {
		ret = -EAGAIN;
		goto end;
	}
	hlist_head.next = (struct cds_hlist_node *) ja_node_ptr(node_flag);
	/*
	 * Retry if another thread removed all but one of the duplicates
	 * since the earlier check (that check was performed without the
	 * lock). Also ensure that the node we are about to remove is
	 * still in the list (while holding the lock).
	 */
	cds_hlist_for_each_rcu(hlist_node, &hlist_head) {
		if (count == 0) {
			/* FIXME: currently a work-around */
			hlist_node->prev = (struct cds_hlist_node *) node_flag_ptr;
		}
		count++;
		if (hlist_node == &node->list)
			found++;
	}
	assert(found <= 1);
	if (!found || count == 1) {
		ret = -EAGAIN;
		goto end;
	}
	cds_hlist_del_rcu(&node->list);
	/*
	 * Validate that we indeed removed the node from the linked list.
	 */
	assert(ja_node_ptr(*node_flag_ptr) != (struct cds_ja_inode *) node);
end:
	rcuja_shadow_unlock(shadow_node);
	return ret;
}

/*
 * Called with RCU read lock held.
 */
int cds_ja_del(struct cds_ja *ja, uint64_t key,
		struct cds_ja_node *node)
{
	unsigned int tree_depth, i;
	struct cds_ja_inode_flag *snapshot[JA_MAX_DEPTH];
	struct cds_ja_inode_flag **snapshot_ptr[JA_MAX_DEPTH];
	uint8_t snapshot_n[JA_MAX_DEPTH];
	struct cds_ja_inode_flag *node_flag;
	struct cds_ja_inode_flag **prev_node_flag_ptr,
			**node_flag_ptr;
	int nr_snapshot;
	int ret;

	if (caa_unlikely(key > ja->key_max))
		return -EINVAL;
	tree_depth = ja->tree_depth;

retry:
	nr_snapshot = 0;
	dbg_printf("cds_ja_del attempt: key %" PRIu64 ", node %p\n",
		key, node);

	/* snapshot for level 0 is only for shadow node lookup */
	snapshot_n[0] = 0;
	snapshot_n[1] = 0;
	snapshot_ptr[nr_snapshot] = NULL;
	snapshot[nr_snapshot++] = (struct cds_ja_inode_flag *) &ja->root;
	node_flag = rcu_dereference(ja->root);
	prev_node_flag_ptr = &ja->root;
	node_flag_ptr = &ja->root;

	/* Iterate on all internal levels */
	for (i = 1; i < tree_depth; i++) {
		uint8_t iter_key;

		dbg_printf("cds_ja_del iter node_flag %p\n",
			node_flag);
		if (!ja_node_ptr(node_flag)) {
			return -ENOENT;
		}
		iter_key = (uint8_t) (key >> (JA_BITS_PER_BYTE * (tree_depth - i - 1)));
		snapshot_n[nr_snapshot + 1] = iter_key;
		snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
		snapshot[nr_snapshot++] = node_flag;
		node_flag = ja_node_get_nth(node_flag,
				&prev_node_flag_ptr,
				NULL,
				&node_flag_ptr,
				iter_key);
		dbg_printf("cds_ja_del iter key lookup %u finds node_flag %p, prev_node_flag_ptr %p\n",
			(unsigned int) iter_key, node_flag,
			prev_node_flag_ptr);
	}
	/*
	 * We reached bottom of tree, try to find the node we are trying
	 * to remove. Fail if we cannot find it.
	 */
	if (!ja_node_ptr(node_flag)) {
		dbg_printf("cds_ja_del: no node found for key %" PRIu64 "\n",
			key);
		return -ENOENT;
	} else {
		struct cds_hlist_head hlist_head;
		struct cds_hlist_node *hlist_node;
		struct cds_ja_node *entry, *match = NULL;
		int count = 0;

		hlist_head.next =
			(struct cds_hlist_node *) ja_node_ptr(node_flag);
		cds_hlist_for_each_entry_rcu(entry,
				hlist_node,
				&hlist_head,
				list) {
			dbg_printf("cds_ja_del: compare %p with entry %p\n", node, entry);
			if (entry == node)
				match = entry;
			count++;
		}
		if (!match) {
			dbg_printf("cds_ja_del: no node match for node %p key %" PRIu64 "\n", node, key);
			return -ENOENT;
		}
		assert(count > 0);
		if (count == 1) {
			/*
			 * Removing last of duplicates. Last snapshot
			 * does not have a shadow node (external leaves).
			 */
			snapshot_ptr[nr_snapshot] = prev_node_flag_ptr;
			snapshot[nr_snapshot++] = node_flag;
			ret = ja_detach_node(ja, snapshot, snapshot_ptr,
					snapshot_n, nr_snapshot, key, node);
		} else {
			ret = ja_unchain_node(ja, snapshot[nr_snapshot - 1],
					node_flag_ptr, node_flag, match);
		}
	}
	/*
	 * Explanation of -ENOENT handling: caused by concurrent delete
	 * between RCU lookup and actual removal. Need to re-do the
	 * lookup and removal attempt.
	 */
	if (ret == -EAGAIN || ret == -ENOENT)
		goto retry;
	return ret;
}
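
A matching deletion sketch, reusing the hypothetical struct my_entry; since concurrent readers may still reference the entry, its memory is reclaimed through call_rcu() on the rcu_head embedded in struct cds_ja_node, the same field rcuja_free_all_children() uses below.

/*
 * Example (not part of rcuja): remove an entry and defer its free to a
 * grace period.
 */
static void example_free_entry_cb(struct rcu_head *head)
{
	struct cds_ja_node *ja_node =
		caa_container_of(head, struct cds_ja_node, head);
	struct my_entry *entry =
		caa_container_of(ja_node, struct my_entry, node);

	free(entry);
}

static int example_remove(struct cds_ja *ja, uint64_t key,
		struct my_entry *entry)
{
	int ret;

	rcu_read_lock();
	ret = cds_ja_del(ja, key, &entry->node);
	rcu_read_unlock();
	if (!ret)
		call_rcu(&entry->node.head, example_free_entry_cb);
	return ret;
}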

struct cds_ja *_cds_ja_new(unsigned int key_bits,
		const struct rcu_flavor_struct *flavor)
{
	struct cds_ja *ja;
	int ret;
	struct cds_ja_shadow_node *root_shadow_node;

	ja = calloc(sizeof(*ja), 1);
	if (!ja)
		goto ja_error;

	switch (key_bits) {
	case 8:
	case 16:
	case 24:
	case 32:
	case 40:
	case 48:
	case 56:
		ja->key_max = (1ULL << key_bits) - 1;
		break;
	case 64:
		ja->key_max = UINT64_MAX;
		break;
	default:
		goto check_error;
	}

	/* ja->root is NULL */
	/* tree_depth 0 is for pointer to root node */
	ja->tree_depth = (key_bits >> JA_LOG2_BITS_PER_BYTE) + 1;
	assert(ja->tree_depth <= JA_MAX_DEPTH);
	ja->ht = rcuja_create_ht(flavor);
	if (!ja->ht)
		goto ht_error;

	/*
	 * Note: we should not free this node until judy array destroy.
	 */
	root_shadow_node = rcuja_shadow_set(ja->ht,
			(struct cds_ja_inode_flag *) &ja->root,
			NULL, ja);
	if (!root_shadow_node) {
		ret = -ENOMEM;
		goto ht_node_error;
	}
	root_shadow_node->level = 0;

	return ja;

ht_node_error:
	ret = rcuja_delete_ht(ja->ht);
	assert(!ret);
ht_error:
check_error:
	free(ja);
ja_error:
	return NULL;
}
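
A creation sketch for _cds_ja_new(); the flavor pointer is left as a parameter because which flavor descriptor to pass depends on the urcu flavor the application links against (an assumption of the example, not something this file defines).

/*
 * Example (not part of rcuja): create a judy array indexed by 64-bit
 * keys. key_bits must be one of 8, 16, 24, 32, 40, 48, 56 or 64, as
 * the switch in _cds_ja_new() shows.
 */
static struct cds_ja *example_create(const struct rcu_flavor_struct *flavor)
{
	struct cds_ja *ja;

	ja = _cds_ja_new(64, flavor);
	if (!ja)
		return NULL;	/* allocation failure or invalid key_bits */
	return ja;
}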

/*
 * Called from RCU read-side CS.
 */
__attribute__((visibility("protected")))
void rcuja_free_all_children(struct cds_ja_shadow_node *shadow_node,
		struct cds_ja_inode_flag *node_flag,
		void (*free_node_cb)(struct rcu_head *head))
{
	const struct rcu_flavor_struct *flavor;
	unsigned int type_index;
	struct cds_ja_inode *node;
	const struct cds_ja_type *type;

	flavor = cds_lfht_rcu_flavor(shadow_node->ja->ht);
	node = ja_node_ptr(node_flag);
	assert(node != NULL);
	type_index = ja_node_type(node_flag);
	type = &ja_types[type_index];

	switch (type->type_class) {
	case RCU_JA_LINEAR:
	{
		uint8_t nr_child =
			ja_linear_node_get_nr_child(type, node);
		unsigned int i;

		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			struct cds_hlist_head head;
			struct cds_ja_node *entry;
			struct cds_hlist_node *pos;
			uint8_t v;

			ja_linear_node_get_ith_pos(type, node, i, &v, &iter);
			if (!iter)
				continue;
			head.next = (struct cds_hlist_node *) iter;
			cds_hlist_for_each_entry_rcu(entry, pos, &head, list) {
				flavor->update_call_rcu(&entry->head, free_node_cb);
			}
		}
		break;
	}
	case RCU_JA_POOL:
	{
		unsigned int pool_nr;

		for (pool_nr = 0; pool_nr < (1U << type->nr_pool_order); pool_nr++) {
			struct cds_ja_inode *pool =
				ja_pool_node_get_ith_pool(type, node, pool_nr);
			uint8_t nr_child =
				ja_linear_node_get_nr_child(type, pool);
			unsigned int j;

			for (j = 0; j < nr_child; j++) {
				struct cds_ja_inode_flag *iter;
				struct cds_hlist_head head;
				struct cds_ja_node *entry;
				struct cds_hlist_node *pos;
				uint8_t v;

				/* Iterate on this pool's linear sub-node, not the whole node. */
				ja_linear_node_get_ith_pos(type, pool, j, &v, &iter);
				if (!iter)
					continue;
				head.next = (struct cds_hlist_node *) iter;
				cds_hlist_for_each_entry_rcu(entry, pos, &head, list) {
					flavor->update_call_rcu(&entry->head, free_node_cb);
				}
			}
		}
		break;
	}
	case RCU_JA_NULL:
		break;
	case RCU_JA_PIGEON:
	{
		uint8_t nr_child;
		unsigned int i;

		nr_child = shadow_node->nr_child;
		for (i = 0; i < nr_child; i++) {
			struct cds_ja_inode_flag *iter;
			struct cds_hlist_head head;
			struct cds_ja_node *entry;
			struct cds_hlist_node *pos;

			iter = ja_pigeon_node_get_ith_pos(type, node, i);
			if (!iter)
				continue;
			head.next = (struct cds_hlist_node *) iter;
			cds_hlist_for_each_entry_rcu(entry, pos, &head, list) {
				flavor->update_call_rcu(&entry->head, free_node_cb);
			}
		}
		break;
	}
	default:
		assert(0);
	}
}

static
void print_debug_fallback_distribution(void)
{
	int i;

	fprintf(stderr, "Fallback node distribution:\n");
	for (i = 0; i < JA_ENTRY_PER_NODE; i++) {
		if (!node_fallback_count_distribution[i])
			continue;
		fprintf(stderr, "	%3u: %4lu\n",
			i, node_fallback_count_distribution[i]);
	}
}

/*
 * There should be no more concurrent add to the judy array while it is
 * being destroyed (ensured by the caller).
 */
int cds_ja_destroy(struct cds_ja *ja,
		void (*free_node_cb)(struct rcu_head *head))
{
	int ret;

	rcuja_shadow_prune(ja->ht,
		RCUJA_SHADOW_CLEAR_FREE_NODE | RCUJA_SHADOW_CLEAR_FREE_LOCK,
		free_node_cb);
	ret = rcuja_delete_ht(ja->ht);
	if (ret)
		return ret;
	if (uatomic_read(&ja->nr_fallback))
		fprintf(stderr,
			"[warning] RCU Judy Array used %lu fallback node(s)\n",
			uatomic_read(&ja->nr_fallback));
	fprintf(stderr, "Nodes allocated: %lu, Nodes freed: %lu. Fallback ratio: %g\n",
		uatomic_read(&nr_nodes_allocated),
		uatomic_read(&nr_nodes_freed),
		(double) uatomic_read(&ja->nr_fallback) / (double) uatomic_read(&nr_nodes_allocated));
	print_debug_fallback_distribution();
	free(ja);
	return 0;
}
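
Finally, a teardown sketch: free_node_cb receives the rcu_head embedded in each remaining cds_ja_node, so a callback shaped like the example_free_entry_cb shown after cds_ja_del() works here too. This assumes the caller has stopped all concurrent adds, as the comment above requires.

/*
 * Example (not part of rcuja): destroy the array, freeing any entries
 * still stored in it through the caller-provided callback.
 */
static int example_teardown(struct cds_ja *ja)
{
	return cds_ja_destroy(ja, example_free_entry_cb);
}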