// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
// SPDX-FileCopyrightText: 2009 Paul E. McKenney, IBM Corporation.
//
// SPDX-License-Identifier: LGPL-2.1-or-later

/*
 * Userspace RCU library, "bulletproof" version.
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define URCU_NO_COMPAT_IDENTIFIERS
#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/mman.h>

#include <urcu/assert.h>
#include <urcu/config.h>
#include <urcu/arch.h>
#include <urcu/wfcqueue.h>
#include <urcu/map/urcu-bp.h>
#include <urcu/static/urcu-bp.h>
#include <urcu/pointer.h>
#include <urcu/tls-compat.h>

#include "urcu-die.h"
#include "urcu-utils.h"

#define URCU_API_MAP
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include <urcu/urcu-bp.h>
#define _LGPL_SOURCE

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#ifdef __linux__
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	return mremap(old_address, old_size, new_size, flags);
}
#else

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2

/*
 * mremap wrapper for non-Linux systems not allowing MAYMOVE.
 * This is not generic.
 */
static
void *mremap_wrapper(void *old_address __attribute__((unused)),
		size_t old_size __attribute__((unused)),
		size_t new_size __attribute__((unused)),
		int flags)
{
	urcu_posix_assert(!(flags & MREMAP_MAYMOVE));

	return MAP_FAILED;
}
#endif

/* Sleep delay in ms */
#define RCU_SLEEP_DELAY_MS	10
#define INIT_NR_THREADS		8
#define ARENA_INIT_ALLOC		\
	sizeof(struct registry_chunk)	\
	+ INIT_NR_THREADS * sizeof(struct urcu_bp_reader)

/*
 * Active attempts to check for reader Q.S. before calling sleep().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

static
int urcu_bp_refcount;

/* If the headers do not support the membarrier system call, fall back to smp_mb. */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY				= 0,
	MEMBARRIER_CMD_SHARED				= (1 << 0),
	/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
	/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
	MEMBARRIER_CMD_PRIVATE_EXPEDITED		= (1 << 3),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED	= (1 << 4),
};

static
void __attribute__((constructor)) _urcu_bp_init(void);
static
void urcu_bp_exit(void);
static
void __attribute__((destructor)) urcu_bp_exit_destructor(void);
static void urcu_call_rcu_exit(void);

#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
int urcu_bp_has_sys_membarrier;
#endif

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held across the entire grace-period wait: it is
 * sporadically released between iterations on the registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized;

static pthread_key_t urcu_bp_key;

struct urcu_bp_gp urcu_bp_gp = { .ctr = URCU_BP_GP_COUNT };

/*
 * Pointer to registry elements. Written to only by each individual
 * reader. Read by both the reader and the writers.
 */
DEFINE_URCU_TLS(struct urcu_bp_reader *, urcu_bp_reader);

static CDS_LIST_HEAD(registry);

struct registry_chunk {
	size_t data_len;		/* data length */
	size_t used;			/* amount of data used */
	struct cds_list_head node;	/* chunk_list node */
	char data[];
};

struct registry_arena {
	struct cds_list_head chunk_list;
};

static struct registry_arena registry_arena = {
	.chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
};

/* Saved fork signal mask, protected by rcu_gp_lock */
static sigset_t saved_fork_signal_mask;

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}

static void smp_mb_master(void)
{
	if (caa_likely(urcu_bp_has_sys_membarrier)) {
		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
			urcu_die(errno);
	} else {
		cmm_smp_mb();
	}
}

/*
 * Always called with rcu_registry lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
		struct cds_list_head *cur_snap_readers,
		struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct urcu_bp_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(urcu_bp_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (urcu_bp_reader_state(&index->ctr)) {
			case URCU_BP_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case URCU_BP_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case URCU_BP_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				(void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}

void urcu_bp_synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	urcu_posix_assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	urcu_posix_assert(!ret);

	mutex_lock(&rcu_gp_lock);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/* All threads should read qparity before accessing the data
	 * structure pointed to by the new ptr. */
	/* Write new ptr before changing the qparity */
	smp_mb_master();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and re-acquire rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ URCU_BP_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for other parity
	 * quiescent state. Failure to do so could result in the writer waiting
	 * forever while new readers are always accessing data (no progress).
	 * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and re-acquire rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr
	 * be freed.
	 */
	smp_mb_master();
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	urcu_posix_assert(!ret);
}

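/*
 * Illustrative writer-side sketch (not part of this library; 'struct
 * mydata', 'init()' and 'global_ptr' are hypothetical). A writer
 * publishes a new version of a structure, waits for a grace period,
 * then reclaims the old version:
 *
 *	struct mydata *new = malloc(sizeof(*new));
 *	struct mydata *old;
 *
 *	init(new);				// initialize before publication
 *	old = rcu_xchg_pointer(&global_ptr, new);
 *	urcu_bp_synchronize_rcu();		// wait for pre-existing readers
 *	free(old);				// no reader can still see 'old'
 */
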
/*
 * Library wrappers to be used by non-LGPL compatible source code.
 */

void urcu_bp_read_lock(void)
{
	_urcu_bp_read_lock();
}

void urcu_bp_read_unlock(void)
{
	_urcu_bp_read_unlock();
}

int urcu_bp_read_ongoing(void)
{
	return _urcu_bp_read_ongoing();
}

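/*
 * Illustrative reader-side sketch (not part of this library;
 * 'global_ptr' and 'use()' are hypothetical). With this "bulletproof"
 * flavor, a thread is registered automatically on its first
 * rcu_read_lock(), so no explicit thread registration is required:
 *
 *	struct mydata *p;
 *
 *	urcu_bp_read_lock();
 *	p = rcu_dereference(global_ptr);	// safe within the read-side C.S.
 *	if (p)
 *		use(p);
 *	urcu_bp_read_unlock();
 */
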
/*
 * Only grow for now. If empty, allocate an ARENA_INIT_ALLOC-sized
 * chunk. Else, try expanding the last chunk. If this fails, allocate
 * a new chunk twice as big as the last chunk.
 * Memory used by chunks _never_ moves. A chunk could theoretically be
 * freed when all "used" slots are released, but we don't do it at this
 * point.
 */
static
void expand_arena(struct registry_arena *arena)
{
	struct registry_chunk *new_chunk, *last_chunk;
	size_t old_chunk_len, new_chunk_len;

	/* No chunk. */
	if (cds_list_empty(&arena->chunk_list)) {
		urcu_posix_assert(ARENA_INIT_ALLOC >=
			sizeof(struct registry_chunk)
			+ sizeof(struct rcu_reader));
		new_chunk_len = ARENA_INIT_ALLOC;
		new_chunk = (struct registry_chunk *) mmap(NULL,
			new_chunk_len,
			PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE,
			-1, 0);
		if (new_chunk == MAP_FAILED)
			abort();
		memset(new_chunk, 0, new_chunk_len);
		new_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
		return;		/* We're done. */
	}

	/* Try expanding last chunk. */
	last_chunk = cds_list_entry(arena->chunk_list.prev,
		struct registry_chunk, node);
	old_chunk_len =
		last_chunk->data_len + sizeof(struct registry_chunk);
	new_chunk_len = old_chunk_len << 1;

	/* Don't allow memory mapping to move, just expand. */
	new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
		new_chunk_len, 0);
	if (new_chunk != MAP_FAILED) {
		/* Should not have moved. */
		urcu_posix_assert(new_chunk == last_chunk);
		memset((char *) last_chunk + old_chunk_len, 0,
			new_chunk_len - old_chunk_len);
		last_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		return;		/* We're done. */
	}

	/* Remap did not succeed, we need to add a new chunk. */
	new_chunk = (struct registry_chunk *) mmap(NULL,
		new_chunk_len,
		PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE,
		-1, 0);
	if (new_chunk == MAP_FAILED)
		abort();
	memset(new_chunk, 0, new_chunk_len);
	new_chunk->data_len =
		new_chunk_len - sizeof(struct registry_chunk);
	cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
}

static
struct rcu_reader *arena_alloc(struct registry_arena *arena)
{
	struct registry_chunk *chunk;
	struct rcu_reader *rcu_reader_reg;
	int expand_done = 0;	/* Only allow to expand once per alloc */
	size_t len = sizeof(struct rcu_reader);

retry:
	cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
		if (chunk->data_len - chunk->used < len)
			continue;
		/* Find spot */
		for (rcu_reader_reg = (struct rcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct rcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc) {
				rcu_reader_reg->alloc = 1;
				chunk->used += len;
				return rcu_reader_reg;
			}
		}
	}

	if (!expand_done) {
		expand_arena(arena);
		expand_done = 1;
		goto retry;
	}

	return NULL;
}

/* Called with signals off and mutex locked */
static
void add_thread(void)
{
	struct rcu_reader *rcu_reader_reg;
	int ret;

	rcu_reader_reg = arena_alloc(&registry_arena);
	if (!rcu_reader_reg)
		abort();
	ret = pthread_setspecific(urcu_bp_key, rcu_reader_reg);
	if (ret)
		abort();

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	urcu_posix_assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	/*
	 * Reader threads are pointing to the reader registry. This is
	 * why its memory should never be relocated.
	 */
	URCU_TLS(urcu_bp_reader) = rcu_reader_reg;
}

/* Called with mutex locked */
static
void cleanup_thread(struct registry_chunk *chunk,
		struct rcu_reader *rcu_reader_reg)
{
	rcu_reader_reg->ctr = 0;
	cds_list_del(&rcu_reader_reg->node);
	rcu_reader_reg->tid = 0;
	rcu_reader_reg->alloc = 0;
	chunk->used -= sizeof(struct rcu_reader);
}

static
struct registry_chunk *find_chunk(struct rcu_reader *rcu_reader_reg)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		if (rcu_reader_reg < (struct rcu_reader *) &chunk->data[0])
			continue;
		if (rcu_reader_reg >= (struct rcu_reader *) &chunk->data[chunk->data_len])
			continue;
		return chunk;
	}
	return NULL;
}

/* Called with signals off and mutex locked */
static
void remove_thread(struct rcu_reader *rcu_reader_reg)
{
	cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
	URCU_TLS(urcu_bp_reader) = NULL;
}

/* Disable signals, take mutex, add to registry */
void urcu_bp_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	/*
	 * Check if a signal handler concurrently registered our thread
	 * since the check in rcu_read_lock().
	 */
	if (URCU_TLS(urcu_bp_reader))
		goto end;

	/*
	 * Take care of early registration before urcu_bp constructor.
	 */
	_urcu_bp_init();

	mutex_lock(&rcu_registry_lock);
	add_thread();
	mutex_unlock(&rcu_registry_lock);
end:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
}

void urcu_bp_register_thread(void)
{
	if (caa_unlikely(!URCU_TLS(urcu_bp_reader)))
		urcu_bp_register(); /* If not yet registered. */
}

/* Disable signals, take mutex, remove from registry */
static
void urcu_bp_unregister(struct rcu_reader *rcu_reader_reg)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	mutex_lock(&rcu_registry_lock);
	remove_thread(rcu_reader_reg);
	mutex_unlock(&rcu_registry_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	urcu_bp_exit();
}

/*
 * Remove thread from the registry when it exits, and flag it as
 * destroyed so garbage collection can take care of it.
 */
static
void urcu_bp_thread_exit_notifier(void *rcu_key)
{
	urcu_bp_unregister(rcu_key);
}

#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
static
void urcu_bp_sys_membarrier_status(bool available)
{
	if (!available)
		abort();
}
#else
static
void urcu_bp_sys_membarrier_status(bool available)
{
	if (!available)
		return;
	urcu_bp_has_sys_membarrier = 1;
}
#endif

static
void urcu_bp_sys_membarrier_init(void)
{
	bool available = false;
	int mask;

	mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
	if (mask >= 0) {
		if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
			if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
				urcu_die(errno);
			available = true;
		}
	}
	urcu_bp_sys_membarrier_status(available);
}

static
void _urcu_bp_init(void)
{
	mutex_lock(&init_lock);
	if (!urcu_bp_refcount++) {
		int ret;

		ret = pthread_key_create(&urcu_bp_key,
				urcu_bp_thread_exit_notifier);
		if (ret)
			abort();
		urcu_bp_sys_membarrier_init();
		initialized = 1;
	}
	mutex_unlock(&init_lock);
}

static
void urcu_bp_exit(void)
{
	mutex_lock(&init_lock);
	if (!--urcu_bp_refcount) {
		struct registry_chunk *chunk, *tmp;
		int ret;

		cds_list_for_each_entry_safe(chunk, tmp,
				&registry_arena.chunk_list, node) {
			munmap((void *) chunk, chunk->data_len
					+ sizeof(struct registry_chunk));
		}
		CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
		ret = pthread_key_delete(urcu_bp_key);
		if (ret)
			abort();
	}
	mutex_unlock(&init_lock);
}

static
void urcu_bp_exit_destructor(void)
{
	urcu_call_rcu_exit();
	urcu_bp_exit();
}

/*
 * Holding the rcu_gp_lock and rcu_registry_lock across fork will make
 * sure fork() does not race with a concurrent thread executing with
 * any of those locks held. This ensures that the registry and data
 * protected by rcu_gp_lock are in a coherent state in the child.
 */
void urcu_bp_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	urcu_posix_assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	urcu_posix_assert(!ret);
	mutex_lock(&rcu_gp_lock);
	mutex_lock(&rcu_registry_lock);
	saved_fork_signal_mask = oldmask;
}

void urcu_bp_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	urcu_posix_assert(!ret);
}

/*
 * Prune all entries from registry except our own thread. Matches the
 * Linux fork() behavior. Called with rcu_gp_lock and rcu_registry_lock
 * held.
 */
static
void urcu_bp_prune_registry(void)
{
	struct registry_chunk *chunk;
	struct urcu_bp_reader *rcu_reader_reg;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		for (rcu_reader_reg = (struct urcu_bp_reader *) &chunk->data[0];
				rcu_reader_reg < (struct urcu_bp_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc)
				continue;
			if (rcu_reader_reg->tid == pthread_self())
				continue;
			cleanup_thread(chunk, rcu_reader_reg);
		}
	}
}

void urcu_bp_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;

	urcu_bp_prune_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	urcu_posix_assert(!ret);
}

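/*
 * Illustrative sketch (not part of this file): a process that forks
 * while using this flavor would typically wire the three hooks above
 * through pthread_atfork() once at startup:
 *
 *	ret = pthread_atfork(urcu_bp_before_fork,
 *			urcu_bp_after_fork_parent,
 *			urcu_bp_after_fork_child);
 *	if (ret)
 *		abort();
 */
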
void *urcu_bp_dereference_sym(void *p)
{
	return _rcu_dereference(p);
}

void *urcu_bp_set_pointer_sym(void **p, void *v)
{
	cmm_wmb();
	uatomic_set(p, v);
	return v;
}

void *urcu_bp_xchg_pointer_sym(void **p, void *v)
{
	cmm_wmb();
	return uatomic_xchg(p, v);
}

void *urcu_bp_cmpxchg_pointer_sym(void **p, void *old, void *_new)
{
	cmm_wmb();
	return uatomic_cmpxchg(p, old, _new);
}

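/*
 * Illustrative publication sketch (not part of this file; 'global_ptr'
 * and 'new' are hypothetical). Each wrapper above issues cmm_wmb()
 * before the pointer update, so a structure initialized before
 * publication is visible to readers that dereference the new pointer:
 *
 *	new->field = 42;			// initialize payload first
 *	rcu_set_pointer(&global_ptr, new);	// write barrier + store
 */
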
DEFINE_RCU_FLAVOR(rcu_flavor);

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"
#include "urcu-poll-impl.h"