/*
 * urcu-bp.c
 *
 * Userspace RCU library, "bulletproof" version.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/mman.h>

#include "urcu/arch.h"
#include "urcu/wfcqueue.h"
#include "urcu/map/urcu-bp.h"
#include "urcu/static/urcu-bp.h"
#include "urcu-pointer.h"
#include "urcu/tls-compat.h"

#include "urcu-die.h"

/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include "urcu-bp.h"
#define _LGPL_SOURCE

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif

#ifdef __linux__
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	return mremap(old_address, old_size, new_size, flags);
}
#else

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2

/*
 * mremap wrapper for non-Linux systems not allowing MREMAP_MAYMOVE.
 * This is not generic.
 */
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	assert(!(flags & MREMAP_MAYMOVE));

	return MAP_FAILED;
}
#endif

/* Sleep delay in ms */
#define RCU_SLEEP_DELAY_MS	10
#define INIT_NR_THREADS		8
#define ARENA_INIT_ALLOC	\
	sizeof(struct registry_chunk) \
	+ INIT_NR_THREADS * sizeof(struct rcu_reader)

/*
 * Active attempts to check for reader Q.S. before calling sleep().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

static
int rcu_bp_refcount;

/* If the headers do not support the membarrier system call, fall back on smp_mb. */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY				= 0,
	MEMBARRIER_CMD_SHARED				= (1 << 0),
	/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
	/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
	MEMBARRIER_CMD_PRIVATE_EXPEDITED		= (1 << 3),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED	= (1 << 4),
};

static
void __attribute__((constructor)) rcu_bp_init(void);
static
void __attribute__((destructor)) rcu_bp_exit(void);

#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
int urcu_bp_has_sys_membarrier;
#endif

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held all the way through the completion of the
 * grace period wait. It is sporadically released between iterations
 * on the registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized;

static pthread_key_t urcu_bp_key;

struct rcu_gp rcu_gp = { .ctr = RCU_GP_COUNT };

/*
 * Pointer to registry elements. Written to only by each individual reader. Read
 * by both the reader and the writers.
 */
DEFINE_URCU_TLS(struct rcu_reader *, rcu_reader);

static CDS_LIST_HEAD(registry);

struct registry_chunk {
	size_t data_len;		/* data length */
	size_t used;			/* amount of data used */
	struct cds_list_head node;	/* chunk_list node */
	char data[];
};

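/*
 * Illustrative sketch, not part of the original source: the arena below
 * is a list of such chunks, and each chunk's flexible data[] area is
 * scanned by arena_alloc(), find_chunk() and urcu_bp_prune_registry()
 * as a dense array of struct rcu_reader slots. Assuming a hypothetical
 * sizeof(struct rcu_reader) of 64 bytes, the initial chunk would look
 * roughly like:
 *
 *	[ registry_chunk header | reader 0 | reader 1 | ... | reader 7 ]
 *	                          ^data[0]   ^data[64]        ^data[448]
 *
 * A slot is in use when its "alloc" field is set; "used" tracks the sum
 * of the allocated slot sizes, letting a full chunk be skipped without
 * scanning it.
 */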
struct registry_arena {
	struct cds_list_head chunk_list;
};

static struct registry_arena registry_arena = {
	.chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
};

/* Saved fork signal mask, protected by rcu_gp_lock */
static sigset_t saved_fork_signal_mask;

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}

static void smp_mb_master(void)
{
	if (caa_likely(urcu_bp_has_sys_membarrier)) {
		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
			urcu_die(errno);
	} else {
		cmm_smp_mb();
	}
}

/*
 * Always called with rcu_registry_lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct rcu_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(rcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (rcu_reader_state(&index->ctr)) {
			case RCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case RCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case RCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				(void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}

void synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);

	mutex_lock(&rcu_gp_lock);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/* All threads should read qparity before accessing the data
	 * structure pointed to by the new ptr. */
	/* Write new ptr before changing the qparity */
	smp_mb_master();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab rcu_registry_lock
	 * again internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ RCU_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for other parity
	 * quiescent state. Failure to do so could result in the writer waiting
	 * forever while new readers are always accessing data (no progress).
	 * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab rcu_registry_lock
	 * again internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr
	 * be freed.
	 */
	smp_mb_master();
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

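/*
 * Illustrative writer-side sketch, not part of the original source. It
 * assumes a hypothetical shared pointer "struct mydata *global_p" that
 * readers access under rcu_read_lock():
 *
 *	struct mydata *old, *new;
 *
 *	new = malloc(sizeof(*new));
 *	// ... initialize *new ...
 *	old = rcu_xchg_pointer(&global_p, new);	// publish the new version
 *	synchronize_rcu();	// wait for pre-existing readers to finish
 *	free(old);		// no reader can still reference the old version
 *
 * synchronize_rcu() returns only after every reader registered at the
 * time of the call has passed through a quiescent state, which is what
 * makes the final free() safe.
 */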
/*
 * Library wrappers to be used by non-LGPL-compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

int rcu_read_ongoing(void)
{
	return _rcu_read_ongoing();
}

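/*
 * Illustrative reader-side sketch, not part of the original source,
 * using the same hypothetical "struct mydata *global_p" as above. The
 * "bulletproof" flavor registers the calling thread transparently on
 * its first rcu_read_lock(), so no explicit thread registration call
 * is required:
 *
 *	struct mydata *p;
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(global_p);
 *	if (p)
 *		use_mydata(p);	// hypothetical helper
 *	rcu_read_unlock();
 *
 * The pointer obtained from rcu_dereference() must not be used after
 * rcu_read_unlock(), since the writer may free it after a grace period.
 */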
/*
 * Only grow for now. If empty, allocate an ARENA_INIT_ALLOC-sized chunk.
 * Else, try expanding the last chunk. If this fails, allocate a new
 * chunk twice as big as the last chunk.
 * Memory used by chunks _never_ moves. A chunk could theoretically be
 * freed when all "used" slots are released, but we don't do it at this
 * point.
 */
static
void expand_arena(struct registry_arena *arena)
{
	struct registry_chunk *new_chunk, *last_chunk;
	size_t old_chunk_len, new_chunk_len;

	/* No chunk. */
	if (cds_list_empty(&arena->chunk_list)) {
		assert(ARENA_INIT_ALLOC >=
			sizeof(struct registry_chunk)
			+ sizeof(struct rcu_reader));
		new_chunk_len = ARENA_INIT_ALLOC;
		new_chunk = (struct registry_chunk *) mmap(NULL,
			new_chunk_len,
			PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE,
			-1, 0);
		if (new_chunk == MAP_FAILED)
			abort();
		memset(new_chunk, 0, new_chunk_len);
		new_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
		return;		/* We're done. */
	}

	/* Try expanding last chunk. */
	last_chunk = cds_list_entry(arena->chunk_list.prev,
		struct registry_chunk, node);
	old_chunk_len =
		last_chunk->data_len + sizeof(struct registry_chunk);
	new_chunk_len = old_chunk_len << 1;

	/* Don't allow memory mapping to move, just expand. */
	new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
		new_chunk_len, 0);
	if (new_chunk != MAP_FAILED) {
		/* Should not have moved. */
		assert(new_chunk == last_chunk);
		memset((char *) last_chunk + old_chunk_len, 0,
			new_chunk_len - old_chunk_len);
		last_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		return;		/* We're done. */
	}

	/* Remap did not succeed, we need to add a new chunk. */
	new_chunk = (struct registry_chunk *) mmap(NULL,
		new_chunk_len,
		PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE,
		-1, 0);
	if (new_chunk == MAP_FAILED)
		abort();
	memset(new_chunk, 0, new_chunk_len);
	new_chunk->data_len =
		new_chunk_len - sizeof(struct registry_chunk);
	cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
}

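/*
 * Worked example, not part of the original source, with hypothetical
 * sizes sizeof(struct registry_chunk) == 64 and
 * sizeof(struct rcu_reader) == 64: ARENA_INIT_ALLOC is then
 * 64 + 8 * 64 = 576 bytes. Each time the arena runs out of free slots,
 * the last chunk's total size doubles (via mremap_wrapper() when the
 * mapping can grow in place, otherwise via a fresh mmap() of the
 * doubled size): 576 -> 1152 -> 2304 -> ..., so capacity grows
 * geometrically while already-allocated reader slots never move in
 * memory.
 */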
static
struct rcu_reader *arena_alloc(struct registry_arena *arena)
{
	struct registry_chunk *chunk;
	struct rcu_reader *rcu_reader_reg;
	int expand_done = 0;	/* Only allow expanding once per alloc */
	size_t len = sizeof(struct rcu_reader);

retry:
	cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
		if (chunk->data_len - chunk->used < len)
			continue;
		/* Find spot */
		for (rcu_reader_reg = (struct rcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct rcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc) {
				rcu_reader_reg->alloc = 1;
				chunk->used += len;
				return rcu_reader_reg;
			}
		}
	}

	if (!expand_done) {
		expand_arena(arena);
		expand_done = 1;
		goto retry;
	}

	return NULL;
}

/* Called with signals off and mutex locked */
static
void add_thread(void)
{
	struct rcu_reader *rcu_reader_reg;
	int ret;

	rcu_reader_reg = arena_alloc(&registry_arena);
	if (!rcu_reader_reg)
		abort();
	ret = pthread_setspecific(urcu_bp_key, rcu_reader_reg);
	if (ret)
		abort();

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	/*
	 * Reader threads are pointing to the reader registry. This is
	 * why its memory should never be relocated.
	 */
	URCU_TLS(rcu_reader) = rcu_reader_reg;
}

/* Called with mutex locked */
static
void cleanup_thread(struct registry_chunk *chunk,
		struct rcu_reader *rcu_reader_reg)
{
	rcu_reader_reg->ctr = 0;
	cds_list_del(&rcu_reader_reg->node);
	rcu_reader_reg->tid = 0;
	rcu_reader_reg->alloc = 0;
	chunk->used -= sizeof(struct rcu_reader);
}

static
struct registry_chunk *find_chunk(struct rcu_reader *rcu_reader_reg)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		if (rcu_reader_reg < (struct rcu_reader *) &chunk->data[0])
			continue;
		if (rcu_reader_reg >= (struct rcu_reader *) &chunk->data[chunk->data_len])
			continue;
		return chunk;
	}
	return NULL;
}

/* Called with signals off and mutex locked */
static
void remove_thread(struct rcu_reader *rcu_reader_reg)
{
	cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
	URCU_TLS(rcu_reader) = NULL;
}

/* Disable signals, take mutex, add to registry */
void rcu_bp_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	/*
	 * Check if a signal handler concurrently registered our thread
	 * since the check in rcu_read_lock().
	 */
	if (URCU_TLS(rcu_reader))
		goto end;

	/*
	 * Take care of early registration before urcu_bp constructor.
	 */
	rcu_bp_init();

	mutex_lock(&rcu_registry_lock);
	add_thread();
	mutex_unlock(&rcu_registry_lock);
end:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
}

/* Disable signals, take mutex, remove from registry */
static
void rcu_bp_unregister(struct rcu_reader *rcu_reader_reg)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	mutex_lock(&rcu_registry_lock);
	remove_thread(rcu_reader_reg);
	mutex_unlock(&rcu_registry_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	rcu_bp_exit();
}

/*
 * Remove thread from the registry when it exits, and flag it as
 * destroyed so garbage collection can take care of it.
 */
static
void urcu_bp_thread_exit_notifier(void *rcu_key)
{
	rcu_bp_unregister(rcu_key);
}

#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
static
void rcu_sys_membarrier_status(bool available)
{
	if (!available)
		abort();
}
#else
static
void rcu_sys_membarrier_status(bool available)
{
	if (!available)
		return;
	urcu_bp_has_sys_membarrier = 1;
}
#endif

static
void rcu_sys_membarrier_init(void)
{
	bool available = false;
	int mask;

	mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
	if (mask >= 0) {
		if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
			if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
				urcu_die(errno);
			available = true;
		}
	}
	rcu_sys_membarrier_status(available);
}

static
void rcu_bp_init(void)
{
	mutex_lock(&init_lock);
	if (!rcu_bp_refcount++) {
		int ret;

		ret = pthread_key_create(&urcu_bp_key,
				urcu_bp_thread_exit_notifier);
		if (ret)
			abort();
		rcu_sys_membarrier_init();
		initialized = 1;
	}
	mutex_unlock(&init_lock);
}

static
void rcu_bp_exit(void)
{
	mutex_lock(&init_lock);
	if (!--rcu_bp_refcount) {
		struct registry_chunk *chunk, *tmp;
		int ret;

		cds_list_for_each_entry_safe(chunk, tmp,
				&registry_arena.chunk_list, node) {
			munmap((void *) chunk, chunk->data_len
					+ sizeof(struct registry_chunk));
		}
		CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
		ret = pthread_key_delete(urcu_bp_key);
		if (ret)
			abort();
	}
	mutex_unlock(&init_lock);
}

/*
 * Holding the rcu_gp_lock and rcu_registry_lock across fork will make
 * sure fork() does not race with a concurrent thread executing with
 * any of those locks held. This ensures that the registry and data
 * protected by rcu_gp_lock are in a coherent state in the child.
 */
void rcu_bp_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);
	mutex_lock(&rcu_gp_lock);
	mutex_lock(&rcu_registry_lock);
	saved_fork_signal_mask = oldmask;
}

void rcu_bp_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

/*
 * Prune all entries from registry except our own thread. Fits the Linux
 * fork behavior. Called with rcu_gp_lock and rcu_registry_lock held.
 */
static
void urcu_bp_prune_registry(void)
{
	struct registry_chunk *chunk;
	struct rcu_reader *rcu_reader_reg;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		for (rcu_reader_reg = (struct rcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct rcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc)
				continue;
			if (rcu_reader_reg->tid == pthread_self())
				continue;
			cleanup_thread(chunk, rcu_reader_reg);
		}
	}
}

void rcu_bp_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;

	urcu_bp_prune_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

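/*
 * Illustrative sketch, not part of the original source: an application
 * (or wrapper library) mixing fork() with urcu-bp would typically wire
 * up the three hooks above once, for example from its own constructor:
 *
 *	ret = pthread_atfork(rcu_bp_before_fork,
 *			rcu_bp_after_fork_parent,
 *			rcu_bp_after_fork_child);
 *	if (ret)
 *		abort();
 *
 * This keeps rcu_gp_lock and rcu_registry_lock held across fork() and
 * prunes stale reader registrations in the child, as implemented above.
 */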
void *rcu_dereference_sym_bp(void *p)
{
	return _rcu_dereference(p);
}

void *rcu_set_pointer_sym_bp(void **p, void *v)
{
	cmm_wmb();
	uatomic_set(p, v);
	return v;
}

void *rcu_xchg_pointer_sym_bp(void **p, void *v)
{
	cmm_wmb();
	return uatomic_xchg(p, v);
}

void *rcu_cmpxchg_pointer_sym_bp(void **p, void *old, void *_new)
{
	cmm_wmb();
	return uatomic_cmpxchg(p, old, _new);
}

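/*
 * Illustrative sketch, not part of the original source: the cmpxchg
 * wrapper above supports conditional publication, e.g. installing a
 * hypothetical default object only if none has been published yet:
 *
 *	struct mydata *ret;
 *
 *	ret = rcu_cmpxchg_pointer(&global_p, NULL, new);
 *	if (ret != NULL)
 *		free(new);	// lost the race; keep the existing object
 *
 * As in the other wrappers, cmm_wmb() orders the initialization of the
 * pointed-to data before the pointer becomes visible to readers.
 */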
DEFINE_RCU_FLAVOR(rcu_flavor);

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"