/*
 * urcu.c
 *
 * Userspace RCU library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define _BSD_SOURCE
#define _LGPL_SOURCE
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include "urcu/arch.h"
#include "urcu/wfcqueue.h"
#include "urcu/map/urcu.h"
#include "urcu/static/urcu.h"
#include "urcu-pointer.h"
#include "urcu/tls-compat.h"

#include "urcu-die.h"
#include "urcu-wait.h"

/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include "urcu.h"
#define _LGPL_SOURCE
/*
 * If a reader is really non-cooperative and refuses to commit its
 * rcu_active_readers count to memory (there is no barrier in the reader
 * per se), kick it after 10 loops waiting for it.
 */
#define KICK_READER_LOOPS 10

/*
 * Active attempts to check for reader quiescent states before calling futex().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

/* If the headers do not support the membarrier system call, fall back on RCU_MB */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

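/*
 * Commands of the membarrier system call, as defined by the kernel
 * UAPI. MEMBARRIER_CMD_QUERY returns the mask of supported commands;
 * MEMBARRIER_CMD_SHARED issues a memory barrier on all running threads.
 */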
enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY = 0,
	MEMBARRIER_CMD_SHARED = (1 << 0),
};

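/*
 * This file is compiled under exactly one of RCU_MEMBARRIER, RCU_MB or
 * RCU_SIGNAL, which selects how writer-side memory barriers are
 * propagated to reader threads (see smp_mb_master() below).
 */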
#ifdef RCU_MEMBARRIER
static int init_done;
#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
int rcu_has_sys_membarrier_memb;
#endif

void __attribute__((constructor)) rcu_init(void);
#endif

#ifdef RCU_MB
void rcu_init(void)
{
}
#endif

#ifdef RCU_SIGNAL
static int init_done;

void __attribute__((constructor)) rcu_init(void);
void __attribute__((destructor)) rcu_exit(void);
#endif

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held all the way through the completion of the
 * grace-period wait: it is sporadically released between iterations
 * on the registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
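/*
 * Global grace-period state: the counter snapshotted by readers, and
 * the futex writers block on while waiting for readers to complete.
 */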
struct rcu_gp rcu_gp = { .ctr = RCU_GP_COUNT };

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
DEFINE_URCU_TLS(struct rcu_reader, rcu_reader);

static CDS_LIST_HEAD(registry);

/*
 * Queue of threads awaiting a grace period. Contains
 * struct gp_waiters_thread objects.
 */
static DEFINE_URCU_WAIT_QUEUE(gp_waiters);

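/*
 * Lock the mutex, dying on error. With DISTRUST_SIGNALS_EXTREME, spin
 * on trylock instead, so a pending need_mb request from
 * force_mb_all_readers() can still be serviced while waiting for the
 * mutex, avoiding a deadlock between the writer and signaled readers.
 */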
static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		if (CMM_LOAD_SHARED(URCU_TLS(rcu_reader).need_mb)) {
			cmm_smp_mb();
			_CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
			cmm_smp_mb();
		}
		(void) poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}

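/*
 * smp_mb_master() issues a memory barrier on behalf of all registered
 * reader threads. Read-side critical sections use only compiler
 * barriers in the RCU_MEMBARRIER and RCU_SIGNAL flavors; this is what
 * promotes them to full memory barriers when the writer needs them.
 */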
#ifdef RCU_MEMBARRIER
static void smp_mb_master(void)
{
	if (caa_likely(rcu_has_sys_membarrier_memb))
		(void) membarrier(MEMBARRIER_CMD_SHARED, 0);
	else
		cmm_smp_mb();
}
#endif

#ifdef RCU_MB
static void smp_mb_master(void)
{
	cmm_smp_mb();
}
#endif

#ifdef RCU_SIGNAL
static void force_mb_all_readers(void)
{
	struct rcu_reader *index;

	/*
	 * Ask each thread to execute a cmm_smp_mb() so we can consider the
	 * compiler barriers around rcu read lock as real memory barriers.
	 */
	if (cds_list_empty(&registry))
		return;
	/*
	 * pthread_kill has a cmm_smp_mb(). But beware, we assume it performs
	 * a cache flush on architectures with non-coherent cache. Let's play
	 * it safe and not assume anything: we use cmm_smp_mc() to make sure the
	 * cache flush is enforced.
	 */
	cds_list_for_each_entry(index, &registry, node) {
		CMM_STORE_SHARED(index->need_mb, 1);
		pthread_kill(index->tid, SIGRCU);
	}
	/*
	 * Wait for sighandler (and thus mb()) to execute on every thread.
	 *
	 * Note that the pthread_kill() will never be executed on systems
	 * that correctly deliver signals in a timely manner. However, it
	 * is not uncommon for kernels to have bugs that can result in
	 * lost or unduly delayed signals.
	 *
	 * If you are seeing the below pthread_kill() executing much at
	 * all, we suggest testing the underlying kernel and filing the
	 * relevant bug report. For Linux kernels, we recommend getting
	 * the Linux Test Project (LTP).
	 */
	cds_list_for_each_entry(index, &registry, node) {
		while (CMM_LOAD_SHARED(index->need_mb)) {
			pthread_kill(index->tid, SIGRCU);
			(void) poll(NULL, 0, 1);
		}
	}
	cmm_smp_mb();	/* read ->need_mb before ending the barrier */
}

static void smp_mb_master(void)
{
	force_mb_all_readers();
}
#endif /* #ifdef RCU_SIGNAL */

/*
 * synchronize_rcu() waiting. Single thread.
 * Always called with rcu_registry lock held. Releases this lock and
 * grabs it again. Holds the lock when it returns.
 */
static void wait_gp(void)
{
	/*
	 * Read reader_gp before read futex. smp_mb_master() needs to
	 * be called with the rcu registry lock held in RCU_SIGNAL
	 * flavor.
	 */
	smp_mb_master();
	/* Temporarily unlock the registry lock. */
	mutex_unlock(&rcu_registry_lock);
	if (uatomic_read(&rcu_gp.futex) != -1)
		goto end;
	while (futex_async(&rcu_gp.futex, FUTEX_WAIT, -1,
			NULL, NULL, 0)) {
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			goto end;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
end:
	/*
	 * Re-lock the registry lock before the next loop.
	 */
	mutex_lock(&rcu_registry_lock);
}

/*
 * Always called with rcu_registry lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
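/*
 * Move each reader whose quiescent state (or current grace-period
 * counter) has been observed from input_readers to qsreaders. When
 * cur_snap_readers is non-NULL, readers seen holding the current
 * rcu_gp.ctr snapshot are parked there so a second pass can confirm
 * them after the parity flip.
 */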
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct rcu_reader *index, *tmp;
#ifdef HAS_INCOHERENT_CACHES
	unsigned int wait_gp_loops = 0;
#endif /* HAS_INCOHERENT_CACHES */

	/*
	 * Wait for each thread URCU_TLS(rcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;
		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
			uatomic_dec(&rcu_gp.futex);
			/* Write futex before read reader_gp */
			smp_mb_master();
		}

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (rcu_reader_state(&index->ctr)) {
			case RCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case RCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case RCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

#ifndef HAS_INCOHERENT_CACHES
		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master();
				uatomic_set(&rcu_gp.futex, 0);
			}
			break;
		} else {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* wait_gp unlocks/locks registry lock. */
				wait_gp();
			} else {
				/* Temporarily unlock the registry lock. */
				mutex_unlock(&rcu_registry_lock);
				caa_cpu_relax();
				/*
				 * Re-lock the registry lock before the
				 * next loop.
				 */
				mutex_lock(&rcu_registry_lock);
			}
		}
#else /* #ifndef HAS_INCOHERENT_CACHES */
		/*
		 * BUSY-LOOP. Force the reader thread to commit its
		 * URCU_TLS(rcu_reader).ctr update to memory if we wait
		 * for too long.
		 */
		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				smp_mb_master();
				uatomic_set(&rcu_gp.futex, 0);
			}
			break;
		} else {
			if (wait_gp_loops == KICK_READER_LOOPS) {
				smp_mb_master();
				wait_gp_loops = 0;
			}
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* wait_gp unlocks/locks registry lock. */
				wait_gp();
				wait_gp_loops++;
			} else {
				/* Temporarily unlock the registry lock. */
				mutex_unlock(&rcu_registry_lock);
				caa_cpu_relax();
				/*
				 * Re-lock the registry lock before the
				 * next loop.
				 */
				mutex_lock(&rcu_registry_lock);
			}
		}
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
	}
}

void synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
	struct urcu_waiters waiters;

	/*
	 * Add ourselves to the gp_waiters queue of threads awaiting a
	 * grace period. Proceed to perform the grace period only if we
	 * are the first thread added into the queue.
	 * The implicit memory barrier before urcu_wait_add()
	 * orders prior memory accesses of threads put into the wait
	 * queue before their insertion into the wait queue.
	 */
	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
		/* Not first in queue: will be awakened by another thread. */
		urcu_adaptative_busy_wait(&wait);
		/* Order following memory accesses after grace period. */
		cmm_smp_mb();
		return;
	}
	/* We won't need to wake ourselves up */
	urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);

	mutex_lock(&rcu_gp_lock);

	/*
	 * Move all waiters into our local queue.
	 */
	urcu_move_waiters(&waiters, &gp_waiters);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * All threads should read qparity before accessing the data
	 * structure pointed to by the new ptr. Must be done within
	 * rcu_registry_lock because it iterates on reader threads.
	 */
	/* Write new ptr before changing the qparity */
	smp_mb_master();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Must finish waiting for quiescent state for original parity before
	 * committing next rcu_gp.ctr update to memory. Failure to do so could
	 * result in the writer waiting forever while new readers are always
	 * accessing data (no progress). Enforce compiler-order of load
	 * URCU_TLS(rcu_reader).ctr before store to rcu_gp.ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(rcu_gp.ctr, rcu_gp.ctr ^ RCU_GP_CTR_PHASE);

	/*
	 * Must commit rcu_gp.ctr update to memory before waiting for quiescent
	 * state. Failure to do so could result in the writer waiting forever
	 * while new readers are always accessing data (no progress). Enforce
	 * compiler-order of store to rcu_gp.ctr before load rcu_reader ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr
	 * be freed. Must be done within rcu_registry_lock because it
	 * iterates on reader threads.
	 */
	smp_mb_master();
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);

	/*
	 * Wakeup waiters only after we have completed the grace period
	 * and have ensured the memory barriers at the end of the grace
	 * period have been issued.
	 */
	urcu_wake_all_waiters(&waiters);
}

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void rcu_read_lock(void)
{
	_rcu_read_lock();
}

void rcu_read_unlock(void)
{
	_rcu_read_unlock();
}

int rcu_read_ongoing(void)
{
	return _rcu_read_ongoing();
}
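
/*
 * Minimal usage sketch (illustrative only, not part of the library):
 * a reader dereferences a shared pointer within a read-side critical
 * section, while an updater publishes a new version and waits for a
 * grace period before freeing the old one. "foo", "foo_ptr" and
 * "use()" are hypothetical.
 *
 *	struct foo *f, *old_f, *new_f;
 *
 *	rcu_read_lock();
 *	f = rcu_dereference(foo_ptr);
 *	if (f)
 *		use(f);
 *	rcu_read_unlock();
 *
 *	new_f = malloc(sizeof(*new_f));
 *	old_f = rcu_xchg_pointer(&foo_ptr, new_f);
 *	synchronize_rcu();
 *	free(old_f);
 */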
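/*
 * Register the calling thread as an RCU reader. Must be invoked before
 * the thread's first use of rcu_read_lock(); pairs with
 * rcu_unregister_thread() below.
 */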
void rcu_register_thread(void)
{
	URCU_TLS(rcu_reader).tid = pthread_self();
	assert(URCU_TLS(rcu_reader).need_mb == 0);
	assert(!(URCU_TLS(rcu_reader).ctr & RCU_GP_CTR_NEST_MASK));

	mutex_lock(&rcu_registry_lock);
	assert(!URCU_TLS(rcu_reader).registered);
	URCU_TLS(rcu_reader).registered = 1;
	rcu_init();	/* In case gcc does not support constructor attribute */
	cds_list_add(&URCU_TLS(rcu_reader).node, &registry);
	mutex_unlock(&rcu_registry_lock);
}

void rcu_unregister_thread(void)
{
	mutex_lock(&rcu_registry_lock);
	assert(URCU_TLS(rcu_reader).registered);
	URCU_TLS(rcu_reader).registered = 0;
	cds_list_del(&URCU_TLS(rcu_reader).node);
	mutex_unlock(&rcu_registry_lock);
}

#ifdef RCU_MEMBARRIER

#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
static
void rcu_sys_membarrier_status(int available)
{
	if (!available)
		abort();
}
#else
static
void rcu_sys_membarrier_status(int available)
{
	if (available)
		rcu_has_sys_membarrier_memb = 1;
}
#endif

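/*
 * Query the kernel once for membarrier support and record the result.
 * With CONFIG_RCU_FORCE_SYS_MEMBARRIER, a kernel lacking the shared
 * membarrier command aborts the process instead of falling back.
 */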
void rcu_init(void)
{
	int ret;

	if (init_done)
		return;
	init_done = 1;
	ret = membarrier(MEMBARRIER_CMD_QUERY, 0);
	rcu_sys_membarrier_status(ret >= 0 && (ret & MEMBARRIER_CMD_SHARED));
}
#endif

#ifdef RCU_SIGNAL
static void sigrcu_handler(int signo, siginfo_t *siginfo, void *context)
{
	/*
	 * Executing this cmm_smp_mb() is the only purpose of this signal handler.
	 * It punctually promotes cmm_barrier() into cmm_smp_mb() on every thread it is
	 * executed on.
	 */
	cmm_smp_mb();
	_CMM_STORE_SHARED(URCU_TLS(rcu_reader).need_mb, 0);
	cmm_smp_mb();
}

/*
 * rcu_init constructor. Called when the library is linked, but also when
 * reader threads are calling rcu_register_thread().
 * Should only be called by a single thread at a given time. This is ensured by
 * holding the rcu_registry_lock from rcu_register_thread() or by running
 * at library load time, which should not be executed by multiple
 * threads nor concurrently with rcu_register_thread() anyway.
 */
void rcu_init(void)
{
	struct sigaction act;
	int ret;

	if (init_done)
		return;
	init_done = 1;

	act.sa_sigaction = sigrcu_handler;
	act.sa_flags = SA_SIGINFO | SA_RESTART;
	sigemptyset(&act.sa_mask);
	ret = sigaction(SIGRCU, &act, NULL);
	if (ret)
		urcu_die(errno);
}

void rcu_exit(void)
{
	/*
	 * Don't unregister the SIGRCU signal handler anymore, because
	 * call_rcu threads could still be using it shortly before the
	 * application exits.
	 * Assertion disabled because call_rcu threads are now rcu
	 * readers, and left running at exit.
	 * assert(cds_list_empty(&registry));
	 */
}

#endif /* #ifdef RCU_SIGNAL */

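/*
 * Instantiate the rcu_flavor function table (see urcu/flavor.h), which
 * exposes this flavor's entry points to generic RCU users.
 */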
DEFINE_RCU_FLAVOR(rcu_flavor);

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"