/*
 * urcu-qsbr.c
 *
 * Userspace RCU QSBR library
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#define URCU_NO_COMPAT_IDENTIFIERS
#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include <urcu/assert.h>
#include <urcu/wfcqueue.h>
#include <urcu/map/urcu-qsbr.h>
#define BUILD_QSBR_LIB
#include <urcu/static/urcu-qsbr.h>
#include <urcu/pointer.h>
#include <urcu/tls-compat.h>

#include "urcu-die.h"
#include "urcu-wait.h"
#include "urcu-utils.h"

#define URCU_API_MAP
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include <urcu/urcu-qsbr.h>
#define _LGPL_SOURCE

void __attribute__((destructor)) urcu_qsbr_exit(void);

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held for the whole duration of the grace-period
 * wait: it is sporadically released between iterations on the
 * registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
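
/*
 * Illustrative nesting sketch (not a function of this file): when both
 * locks are needed, they are taken in the order used by
 * synchronize_rcu() below:
 *
 *	mutex_lock(&rcu_gp_lock);
 *	mutex_lock(&rcu_registry_lock);
 *	...	(iterate on registry, possibly dropping and re-taking
 *		 rcu_registry_lock between iterations)
 *	mutex_unlock(&rcu_registry_lock);
 *	mutex_unlock(&rcu_gp_lock);
 */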
struct urcu_gp urcu_qsbr_gp = { .ctr = URCU_QSBR_GP_ONLINE };

/*
 * Active attempts to check for reader Q.S. before calling futex().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
DEFINE_URCU_TLS(struct urcu_qsbr_reader, urcu_qsbr_reader);

static CDS_LIST_HEAD(registry);

/*
 * Queue keeping the threads awaiting a grace period. Contains
 * struct gp_waiters_thread objects.
 */
static DEFINE_URCU_WAIT_QUEUE(gp_waiters);

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}

/*
 * synchronize_rcu() waiting. Single thread.
 */
static void wait_gp(void)
{
	/* Read reader_gp before read futex */
	cmm_smp_rmb();
	while (uatomic_read(&urcu_qsbr_gp.futex) == -1) {
		if (!futex_noasync(&urcu_qsbr_gp.futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
			/*
			 * Prior wakeups queued by unrelated code using
			 * the same address can cause futex wait to
			 * return 0 even though the futex value is still
			 * -1 (spurious wakeups). Check the value again
			 * in user-space to validate whether it really
			 * differs from -1.
			 */
			continue;
		}
		switch (errno) {
		case EAGAIN:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. Check again. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}
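
/*
 * For reference, a rough sketch of the reader-side wake-up counterpart
 * (the actual implementation lives in urcu/static/urcu-qsbr.h; this
 * paraphrase is illustrative only): when a reader reports a quiescent
 * state and observes the futex set to -1 by wait_for_readers() below,
 * it does roughly:
 *
 *	if (uatomic_read(&urcu_qsbr_gp.futex) == -1) {
 *		uatomic_set(&urcu_qsbr_gp.futex, 0);
 *		futex_noasync(&urcu_qsbr_gp.futex, FUTEX_WAKE, 1,
 *				NULL, NULL, 0);
 *	}
 *
 * which releases the thread blocked in wait_gp().
 */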

/*
 * Always called with rcu_registry_lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct urcu_qsbr_reader *index, *tmp;

	/*
	 * Wait for each thread's URCU_TLS(urcu_qsbr_reader).ctr to either
	 * indicate quiescence (offline), or to observe the current
	 * urcu_qsbr_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;
		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
			uatomic_set(&urcu_qsbr_gp.futex, -1);
			/*
			 * Write futex before write waiting (the other side
			 * reads them in the opposite order).
			 */
			cmm_smp_wmb();
			cds_list_for_each_entry(index, input_readers, node) {
				_CMM_STORE_SHARED(index->waiting, 1);
			}
			/* Write futex before read reader_gp */
			cmm_smp_mb();
		}
		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (urcu_qsbr_reader_state(&index->ctr)) {
			case URCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case URCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case URCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				cmm_smp_mb();
				uatomic_set(&urcu_qsbr_gp.futex, 0);
			}
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				wait_gp();
			} else {
#ifndef HAS_INCOHERENT_CACHES
				caa_cpu_relax();
#else /* #ifndef HAS_INCOHERENT_CACHES */
				cmm_smp_mb();
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
			}
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}
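
/*
 * Calling-convention sketch (both synchronize_rcu() variants below
 * follow it):
 *
 *	mutex_lock(&rcu_registry_lock);
 *	wait_for_readers(&registry, ..., &qsreaders);
 *		(may drop and re-take rcu_registry_lock internally)
 *	cds_list_splice(&qsreaders, &registry);
 *	mutex_unlock(&rcu_registry_lock);
 */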

/*
 * Use a two-subphase algorithm for architectures with a long size
 * smaller than 64 bits, to ensure we do not encounter an overflow bug.
 */

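/*
 * Worked example of the hazard (a sketch, assuming URCU_QSBR_GP_CTR is
 * a single low-order counter bit): on 64-bit, the free-running
 * urcu_qsbr_gp.ctr only wraps after on the order of 2^63 grace
 * periods, which is unreachable in practice. With a 32-bit long, it
 * wraps after on the order of 2^31 grace periods, so a long-sleeping
 * reader's stale snapshot could compare equal to a wrapped-around
 * counter and falsely appear current. The two-subphase variant below
 * only flips one parity bit and waits out both parities, so it needs
 * no unbounded counter.
 */
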
#if (CAA_BITS_PER_LONG < 64)
void urcu_qsbr_synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	unsigned long was_online;
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
	struct urcu_waiters waiters;

	was_online = urcu_qsbr_read_ongoing();

	/* All threads should read qparity before accessing data structure
	 * where new ptr points to. In the "then" case, rcu_thread_offline
	 * includes a memory barrier.
	 *
	 * Mark the writer thread offline to make sure we don't wait for
	 * our own quiescent state. This allows using synchronize_rcu()
	 * in threads registered as readers.
	 */
	if (was_online)
		urcu_qsbr_thread_offline();
	else
		cmm_smp_mb();

	/*
	 * Add ourselves to the gp_waiters queue of threads awaiting a
	 * grace period. Proceed to perform the grace period only if we
	 * are the first thread added into the queue.
	 */
	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
		/* Not first in queue: will be awakened by another thread. */
		urcu_adaptative_busy_wait(&wait);
		goto gp_end;
	}
	/* We won't need to wake ourselves up */
	urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);

	mutex_lock(&rcu_gp_lock);

	/*
	 * Move all waiters into our local queue.
	 */
	urcu_move_waiters(&waiters, &gp_waiters);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and re-acquire rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Must finish waiting for quiescent state for original parity
	 * before committing next urcu_qsbr_gp.ctr update to memory. Failure
	 * to do so could result in the writer waiting forever while new
	 * readers are always accessing data (no progress). Enforce
	 * compiler-order of load URCU_TLS(urcu_qsbr_reader).ctr before store
	 * to urcu_qsbr_gp.ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes
	 * the model easier to understand. It does not have a big performance
	 * impact anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(urcu_qsbr_gp.ctr, urcu_qsbr_gp.ctr ^ URCU_QSBR_GP_CTR);

	/*
	 * Must commit urcu_qsbr_gp.ctr update to memory before waiting for
	 * quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data
	 * (no progress). Enforce compiler-order of store to urcu_qsbr_gp.ctr
	 * before load URCU_TLS(urcu_qsbr_reader).ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes
	 * the model easier to understand. It does not have a big performance
	 * impact anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and re-acquire rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	urcu_wake_all_waiters(&waiters);
gp_end:
	/*
	 * Finish waiting for reader threads before letting the old ptr
	 * be freed.
	 */
	if (was_online)
		urcu_qsbr_thread_online();
	else
		cmm_smp_mb();
}
#else /* !(CAA_BITS_PER_LONG < 64) */
void urcu_qsbr_synchronize_rcu(void)
{
	CDS_LIST_HEAD(qsreaders);
	unsigned long was_online;
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
	struct urcu_waiters waiters;

	was_online = urcu_qsbr_read_ongoing();

	/*
	 * Mark the writer thread offline to make sure we don't wait for
	 * our own quiescent state. This allows using synchronize_rcu()
	 * in threads registered as readers.
	 */
	if (was_online)
		urcu_qsbr_thread_offline();
	else
		cmm_smp_mb();

	/*
	 * Add ourselves to the gp_waiters queue of threads awaiting a
	 * grace period. Proceed to perform the grace period only if we
	 * are the first thread added into the queue.
	 */
	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
		/* Not first in queue: will be awakened by another thread. */
		urcu_adaptative_busy_wait(&wait);
		goto gp_end;
	}
	/* We won't need to wake ourselves up */
	urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);

	mutex_lock(&rcu_gp_lock);

	/*
	 * Move all waiters into our local queue.
	 */
	urcu_move_waiters(&waiters, &gp_waiters);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/* Increment current G.P. */
	CMM_STORE_SHARED(urcu_qsbr_gp.ctr, urcu_qsbr_gp.ctr + URCU_QSBR_GP_CTR);

	/*
	 * Must commit urcu_qsbr_gp.ctr update to memory before waiting for
	 * quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data
	 * (no progress). Enforce compiler-order of store to urcu_qsbr_gp.ctr
	 * before load URCU_TLS(urcu_qsbr_reader).ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes
	 * the model easier to understand. It does not have a big performance
	 * impact anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new count or be quiescent.
	 * wait_for_readers() can release and re-acquire rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	urcu_wake_all_waiters(&waiters);
gp_end:
	if (was_online)
		urcu_qsbr_thread_online();
	else
		cmm_smp_mb();
}
#endif /* !(CAA_BITS_PER_LONG < 64) */
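
/*
 * Illustrative update-side usage (a sketch; "shared_ptr", "new_node"
 * and "struct mynode" are hypothetical application names, not part of
 * this library):
 *
 *	struct mynode *old;
 *
 *	old = rcu_xchg_pointer(&shared_ptr, new_node);
 *	urcu_qsbr_synchronize_rcu();
 *	free(old);	(no reader can still hold a reference to old)
 */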

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

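/*
 * Sketch of the linkage model: LGPL-compatible code built with
 * _LGPL_SOURCE inlines the _urcu_qsbr_*() implementations from
 * urcu/static/urcu-qsbr.h, while other code calls the exported
 * wrappers below through the public header:
 *
 *	(no _LGPL_SOURCE defined)
 *	#include <urcu/urcu-qsbr.h>
 *	...
 *	urcu_qsbr_read_lock();	(resolves to the wrapper symbol below)
 */
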
void urcu_qsbr_read_lock(void)
{
	_urcu_qsbr_read_lock();
}

void urcu_qsbr_read_unlock(void)
{
	_urcu_qsbr_read_unlock();
}

int urcu_qsbr_read_ongoing(void)
{
	return _urcu_qsbr_read_ongoing();
}
void rcu_read_ongoing_qsbr();

void urcu_qsbr_quiescent_state(void)
{
	_urcu_qsbr_quiescent_state();
}
void rcu_quiescent_state_qsbr();

void urcu_qsbr_thread_offline(void)
{
	_urcu_qsbr_thread_offline();
}
void rcu_thread_offline_qsbr();

void urcu_qsbr_thread_online(void)
{
	_urcu_qsbr_thread_online();
}

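/*
 * Illustrative sketch: a registered reader thread should move offline
 * around long blocking calls so it does not stall writers waiting in
 * synchronize_rcu() ("fds" and "nfds" are hypothetical):
 *
 *	urcu_qsbr_thread_offline();
 *	poll(fds, nfds, -1);	(may block for a long time)
 *	urcu_qsbr_thread_online();
 */
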
void urcu_qsbr_register_thread(void)
{
	URCU_TLS(urcu_qsbr_reader).tid = pthread_self();
	urcu_posix_assert(URCU_TLS(urcu_qsbr_reader).ctr == 0);

	mutex_lock(&rcu_registry_lock);
	urcu_posix_assert(!URCU_TLS(urcu_qsbr_reader).registered);
	URCU_TLS(urcu_qsbr_reader).registered = 1;
	cds_list_add(&URCU_TLS(urcu_qsbr_reader).node, &registry);
	mutex_unlock(&rcu_registry_lock);
	_urcu_qsbr_thread_online();
}

void urcu_qsbr_unregister_thread(void)
{
	/*
	 * We have to make the thread offline, otherwise we end up
	 * deadlocking with a waiting writer.
	 */
	_urcu_qsbr_thread_offline();
	urcu_posix_assert(URCU_TLS(urcu_qsbr_reader).registered);
	URCU_TLS(urcu_qsbr_reader).registered = 0;
	mutex_lock(&rcu_registry_lock);
	cds_list_del(&URCU_TLS(urcu_qsbr_reader).node);
	mutex_unlock(&rcu_registry_lock);
}
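
/*
 * Illustrative sketch of a minimal QSBR reader thread lifecycle
 * ("done" and read_shared_data() are hypothetical application code):
 *
 *	urcu_qsbr_register_thread();
 *	while (!done) {
 *		urcu_qsbr_read_lock();
 *		read_shared_data();
 *		urcu_qsbr_read_unlock();
 *		urcu_qsbr_quiescent_state();
 *	}
 *	urcu_qsbr_unregister_thread();
 */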

void urcu_qsbr_exit(void)
{
	/*
	 * Assertion disabled because call_rcu threads are now rcu
	 * readers, and left running at exit.
	 * urcu_posix_assert(cds_list_empty(&registry));
	 */
}

DEFINE_RCU_FLAVOR(rcu_flavor);

#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"