/*
 * urcu-bp.c
 *
 * Userspace RCU library, "bulletproof" version.
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */
39 #include "urcu/wfcqueue.h"
40 #include "urcu/map/urcu-bp.h"
41 #include "urcu/static/urcu-bp.h"
42 #include "urcu-pointer.h"
43 #include "urcu/tls-compat.h"
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include "urcu-bp.h"
#define _LGPL_SOURCE

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif
#ifdef __linux__
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	return mremap(old_address, old_size, new_size, flags);
}
#else

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2

/*
 * mremap wrapper for non-Linux systems not allowing MAYMOVE.
 * This is not generic.
 */
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	assert(!(flags & MREMAP_MAYMOVE));

	return MAP_FAILED;
}
#endif
/* Sleep delay in us */
#define RCU_SLEEP_DELAY		1000
#define INIT_NR_THREADS		8
#define ARENA_INIT_ALLOC		\
	sizeof(struct registry_chunk)	\
	+ INIT_NR_THREADS * sizeof(struct rcu_reader)

/*
 * Active attempts to check for reader quiescent state (Q.S.) before
 * calling sleep().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100
void __attribute__((destructor)) rcu_bp_exit(void);

static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
#ifdef DEBUG_YIELD
unsigned int rcu_yield_active;
DEFINE_URCU_TLS(unsigned int, rcu_rand_yield);
#endif
struct rcu_gp rcu_gp = { .ctr = RCU_GP_COUNT };
/*
 * Pointer to registry elements. Written to only by each individual
 * reader. Read by both the reader and the writers.
 */
DEFINE_URCU_TLS(struct rcu_reader *, rcu_reader);

static CDS_LIST_HEAD(registry);
struct registry_chunk {
	size_t data_len;		/* data length */
	size_t used;			/* amount of data used */
	struct cds_list_head node;	/* chunk_list node */
	char data[];			/* start of rcu_reader slots */
};

struct registry_arena {
	struct cds_list_head chunk_list;
};
static struct registry_arena registry_arena = {
	.chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
};

/* Saved fork signal mask, protected by rcu_gp_lock */
static sigset_t saved_fork_signal_mask;

static void rcu_gc_registry(void);
static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		/* Busy or interrupted: retry after a short delay. */
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}
static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct rcu_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(rcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (rcu_reader_state(&index->ctr)) {
			case RCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case RCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case RCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			break;
		} else {
			if (wait_loops == RCU_QS_ACTIVE_ATTEMPTS)
				usleep(RCU_SLEEP_DELAY);
			else
				caa_cpu_relax();
		}
	}
}
void synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);

	mutex_lock(&rcu_gp_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * All threads should read qparity before accessing the data
	 * structure where the new ptr points to. Write new ptr before
	 * changing the qparity.
	 */
	cmm_smp_mb();

	/* Remove old registry elements */
	rcu_gc_registry();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ RCU_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for other parity
	 * quiescent state. Failure to do so could result in the writer waiting
	 * forever while new readers are always accessing data (no progress).
	 * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr
	 * be freed.
	 */
out:
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
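
/*
 * Illustrative sketch, not part of the library: the canonical
 * write-side pattern synchronize_rcu() supports. The "struct config"
 * type, the "current_config" pointer and update_config() are
 * hypothetical application-side names; rcu_xchg_pointer() is the
 * public wrapper implemented near the end of this file. Kept under
 * #if 0 so it is never compiled.
 */
#if 0
struct config {
	int value;
};

static struct config *current_config;

static void update_config(struct config *new_cfg)
{
	struct config *old_cfg;

	/* Publish the new version, grabbing the old one atomically. */
	old_cfg = rcu_xchg_pointer(&current_config, new_cfg);
	/* Wait for all pre-existing read-side critical sections. */
	synchronize_rcu();
	/* No reader can still hold a reference to old_cfg. */
	free(old_cfg);
}
#endif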
/*
 * Library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

int rcu_read_ongoing(void)
{
	return _rcu_read_ongoing();
}
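
/*
 * Illustrative sketch, not part of the library: a reader thread using
 * the wrappers above. "struct config", "current_config" and
 * read_config_value() are the same hypothetical application-side names
 * as in the write-side sketch earlier. Kept under #if 0 so it is never
 * compiled.
 */
#if 0
static int read_config_value(void)
{
	struct config *cfg;
	int v;

	/* The first rcu_read_lock() on a thread registers it transparently. */
	rcu_read_lock();
	cfg = rcu_dereference(current_config);
	v = cfg ? cfg->value : -1;
	rcu_read_unlock();
	/* cfg must not be dereferenced past this point. */
	return v;
}
#endif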
/*
 * Only grow for now. If the arena is empty, allocate an
 * ARENA_INIT_ALLOC-sized chunk. Otherwise, try expanding the last
 * chunk in place; if this fails, allocate a new chunk twice as big as
 * the last chunk.
 * Memory used by chunks _never_ moves. A chunk could theoretically be
 * freed when all its "used" slots are released, but we don't do it at
 * this point. (An illustrative sketch of the resulting size
 * progression follows the function below.)
 */
static
void expand_arena(struct registry_arena *arena)
{
	struct registry_chunk *new_chunk, *last_chunk;
	size_t old_chunk_len, new_chunk_len;

	/* No chunk. */
	if (cds_list_empty(&arena->chunk_list)) {
		assert(ARENA_INIT_ALLOC >=
			sizeof(struct registry_chunk)
			+ sizeof(struct rcu_reader));
		new_chunk_len = ARENA_INIT_ALLOC;
		new_chunk = mmap(NULL, new_chunk_len,
			PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE,
			-1, 0);
		if (new_chunk == MAP_FAILED)
			abort();
		bzero(new_chunk, new_chunk_len);
		new_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
		return;		/* We're done. */
	}

	/* Try expanding last chunk. */
	last_chunk = cds_list_entry(arena->chunk_list.prev,
		struct registry_chunk, node);
	old_chunk_len =
		last_chunk->data_len + sizeof(struct registry_chunk);
	new_chunk_len = old_chunk_len << 1;

	/* Don't allow memory mapping to move, just expand. */
	new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
		new_chunk_len, 0);
	if (new_chunk != MAP_FAILED) {
		/* Should not have moved. */
		assert(new_chunk == last_chunk);
		bzero((char *) last_chunk + old_chunk_len,
			new_chunk_len - old_chunk_len);
		last_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		return;		/* We're done. */
	}

	/* Remap did not succeed, we need to add a new chunk. */
	new_chunk = mmap(NULL, new_chunk_len,
		PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE,
		-1, 0);
	if (new_chunk == MAP_FAILED)
		abort();
	bzero(new_chunk, new_chunk_len);
	new_chunk->data_len =
		new_chunk_len - sizeof(struct registry_chunk);
	cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
}
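
/*
 * Illustrative sketch, not part of the library: the chunk-size
 * progression produced by the growth policy above. Whether the
 * in-place mremap succeeds or a fresh chunk is mapped, the last
 * chunk's total length doubles at each expansion. The helper name
 * nth_chunk_len() is hypothetical. Kept under #if 0 so it is never
 * compiled.
 */
#if 0
static size_t nth_chunk_len(unsigned int n)
{
	size_t len = ARENA_INIT_ALLOC;

	/* Each expansion doubles the previous total chunk length. */
	while (n--)
		len <<= 1;
	return len;
}
#endif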
static
struct rcu_reader *arena_alloc(struct registry_arena *arena)
{
	struct registry_chunk *chunk;
	struct rcu_reader *rcu_reader_reg;
	int expand_done = 0;	/* Only allow to expand once per alloc */
	size_t len = sizeof(struct rcu_reader);

retry:
	cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
		if (chunk->data_len - chunk->used < len)
			continue;
		/* Find spot */
		for (rcu_reader_reg = (struct rcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct rcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc) {
				rcu_reader_reg->alloc = 1;
				chunk->used += len;
				return rcu_reader_reg;
			}
		}
	}

	if (!expand_done) {
		expand_arena(arena);
		expand_done = 1;
		goto retry;
	}

	return NULL;
}
/* Called with signals off and mutex locked */
static
void add_thread(void)
{
	struct rcu_reader *rcu_reader_reg;

	rcu_reader_reg = arena_alloc(&registry_arena);
	if (!rcu_reader_reg)
		abort();

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	/*
	 * Reader threads are pointing to the reader registry. This is
	 * why its memory should never be relocated.
	 */
	URCU_TLS(rcu_reader) = rcu_reader_reg;
}
/* Called with signals off and mutex locked */
static void rcu_gc_registry(void)
{
	struct registry_chunk *chunk;
	struct rcu_reader *rcu_reader_reg;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		for (rcu_reader_reg = (struct rcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct rcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			pthread_t tid;
			int ret;

			if (!rcu_reader_reg->alloc)
				continue;
			tid = rcu_reader_reg->tid;
			ret = pthread_kill(tid, 0);
			assert(ret != EINVAL);
			if (ret == ESRCH) {
				cds_list_del(&rcu_reader_reg->node);
				rcu_reader_reg->ctr = 0;
				rcu_reader_reg->alloc = 0;
				chunk->used -= sizeof(struct rcu_reader);
			}
		}
	}
}
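
/*
 * Illustrative sketch, not part of the library: pthread_kill() with
 * signal 0 delivers nothing and only reports whether the target thread
 * still exists, which is how rcu_gc_registry() above detects readers
 * that exited without unregistering. The helper name thread_is_alive()
 * is hypothetical. Kept under #if 0 so it is never compiled.
 */
#if 0
static int thread_is_alive(pthread_t tid)
{
	/* ESRCH means the thread is gone; its slot can be reclaimed. */
	return pthread_kill(tid, 0) != ESRCH;
}
#endif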
/* Disable signals, take mutex, add to registry */
void rcu_bp_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);

	/*
	 * Check if a signal handler concurrently registered our thread
	 * since the check in rcu_read_lock().
	 */
	if (URCU_TLS(rcu_reader))
		goto end;

	mutex_lock(&rcu_gp_lock);
	add_thread();
	mutex_unlock(&rcu_gp_lock);
end:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
void rcu_bp_exit(void)
{
	struct registry_chunk *chunk, *tmp;

	cds_list_for_each_entry_safe(chunk, tmp,
			&registry_arena.chunk_list, node) {
		munmap(chunk, chunk->data_len + sizeof(struct registry_chunk));
	}
}
/*
 * Holding the rcu_gp_lock across fork ensures that fork() does not
 * race with a concurrent thread executing with this same lock held.
 * This ensures that the registry is in a coherent state in the child.
 */
void rcu_bp_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);
	mutex_lock(&rcu_gp_lock);
	saved_fork_signal_mask = oldmask;
}
void rcu_bp_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
void rcu_bp_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;

	/* Other threads do not exist in the child: reclaim their slots. */
	rcu_gc_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
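
/*
 * Illustrative sketch, not part of the library: an application that
 * forks while other threads may be inside read-side critical sections
 * can wire the three handlers above through pthread_atfork(). The
 * helper name setup_fork_handlers() is hypothetical. Kept under #if 0
 * so it is never compiled.
 */
#if 0
static void setup_fork_handlers(void)
{
	int ret;

	ret = pthread_atfork(rcu_bp_before_fork,
			rcu_bp_after_fork_parent,
			rcu_bp_after_fork_child);
	if (ret)
		abort();
}
#endif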
void *rcu_dereference_sym_bp(void *p)
{
	return _rcu_dereference(p);
}
void *rcu_set_pointer_sym_bp(void **p, void *v)
{
	cmm_wmb();
	uatomic_set(p, v);
	return v;
}
void *rcu_xchg_pointer_sym_bp(void **p, void *v)
{
	cmm_wmb();
	return uatomic_xchg(p, v);
}
void *rcu_cmpxchg_pointer_sym_bp(void **p, void *old, void *_new)
{
	cmm_wmb();
	return uatomic_cmpxchg(p, old, _new);
}
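
/*
 * Illustrative sketch, not part of the library: using the cmpxchg
 * wrapper through the public rcu_cmpxchg_pointer() API to publish a
 * node only if the slot is still empty. "head" and publish_if_empty()
 * are hypothetical application-side names. Kept under #if 0 so it is
 * never compiled.
 */
#if 0
static struct config *head;

static int publish_if_empty(struct config *node)
{
	/* cmpxchg returns the prior value; NULL means node was published. */
	return rcu_cmpxchg_pointer(&head, NULL, node) == NULL;
}
#endif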
DEFINE_RCU_FLAVOR(rcu_flavor);

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"