src: use SPDX identifiers
src/urcu-qsbr.c
// SPDX-FileCopyrightText: 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
// SPDX-FileCopyrightText: 2009 Paul E. McKenney, IBM Corporation.
//
// SPDX-License-Identifier: LGPL-2.1-or-later

/*
 * Userspace RCU QSBR library
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */
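
/*
 * Overview: a minimal usage sketch of this flavor's public API as
 * implemented below (illustrative only; the loop body and the data
 * accesses are placeholders supplied by the application):
 *
 *	urcu_qsbr_register_thread();
 *	for (;;) {
 *		urcu_qsbr_read_lock();
 *		... access RCU-protected data via rcu_dereference() ...
 *		urcu_qsbr_read_unlock();
 *		urcu_qsbr_quiescent_state();
 *	}
 *	urcu_qsbr_unregister_thread();
 *
 * With QSBR, read_lock/read_unlock compile to (almost) nothing; the
 * reader's only obligation is to report quiescent states periodically,
 * or to go offline/online around long idle phases.
 */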

#define URCU_NO_COMPAT_IDENTIFIERS
#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>

#include <urcu/assert.h>
#include <urcu/wfcqueue.h>
#include <urcu/map/urcu-qsbr.h>
#define BUILD_QSBR_LIB
#include <urcu/static/urcu-qsbr.h>
#include <urcu/pointer.h>
#include <urcu/tls-compat.h>

#include "urcu-die.h"
#include "urcu-wait.h"
#include "urcu-utils.h"

#define URCU_API_MAP
/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include <urcu/urcu-qsbr.h>
#define _LGPL_SOURCE

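/* urcu_qsbr_exit() runs automatically at process teardown (destructor attribute). */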
void __attribute__((destructor)) urcu_qsbr_exit(void);
static void urcu_call_rcu_exit(void);

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held for the whole duration of the grace-period
 * wait: it is released and re-acquired between iterations over the
 * registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;
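/*
 * Global grace-period state for this flavor: readers snapshot
 * urcu_qsbr_gp.ctr, and urcu_qsbr_synchronize_rcu() flips or increments
 * it to start a new grace period.
 */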
struct urcu_gp urcu_qsbr_gp = { .ctr = URCU_QSBR_GP_ONLINE };

/*
 * Number of active attempts to check for reader quiescent states
 * before blocking in futex().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

/*
 * Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
DEFINE_URCU_TLS(struct urcu_qsbr_reader, urcu_qsbr_reader);

static CDS_LIST_HEAD(registry);

/*
 * Queue of threads waiting for a grace period to be run. Contains
 * struct gp_waiters_thread objects.
 */
static DEFINE_URCU_WAIT_QUEUE(gp_waiters);

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		urcu_die(ret);
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			urcu_die(ret);
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		urcu_die(ret);
}

/*
 * synchronize_rcu() waiting. Single thread.
 */
static void wait_gp(void)
{
	/* Read reader_gp before read futex */
	cmm_smp_rmb();
	while (uatomic_read(&urcu_qsbr_gp.futex) == -1) {
		if (!futex_noasync(&urcu_qsbr_gp.futex, FUTEX_WAIT, -1, NULL, NULL, 0)) {
			/*
			 * Prior wakeups queued by unrelated code
			 * using the same address can cause futex wait to
			 * return 0 even though the futex value is still
			 * -1 (spurious wakeups). Check the value again
			 * in user-space to validate whether it really
			 * differs from -1.
			 */
			continue;
		}
		switch (errno) {
		case EAGAIN:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. Check again. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}

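/*
 * Note: the wake-up side of this futex lives in the read-side fast path
 * (urcu/static/urcu-qsbr.h): a reader reporting a quiescent state while
 * urcu_qsbr_gp.futex holds -1 sets it back to 0 and issues a futex wake.
 */
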
/*
 * Always called with rcu_registry_lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
			struct cds_list_head *cur_snap_readers,
			struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct urcu_qsbr_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(urcu_qsbr_reader).ctr to either
	 * indicate quiescence (offline), or for them to observe the
	 * current urcu_qsbr_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;
		if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
			uatomic_set(&urcu_qsbr_gp.futex, -1);
			/*
			 * Write futex before write waiting (the other side
			 * reads them in the opposite order).
			 */
			cmm_smp_wmb();
			cds_list_for_each_entry(index, input_readers, node) {
				_CMM_STORE_SHARED(index->waiting, 1);
			}
			/* Write futex before read reader_gp */
			cmm_smp_mb();
		}
		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (urcu_qsbr_reader_state(&index->ctr)) {
			case URCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case URCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case URCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				/* Read reader_gp before write futex */
				cmm_smp_mb();
				uatomic_set(&urcu_qsbr_gp.futex, 0);
			}
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS) {
				wait_gp();
			} else {
#ifndef HAS_INCOHERENT_CACHES
				caa_cpu_relax();
#else /* #ifndef HAS_INCOHERENT_CACHES */
				cmm_smp_mb();
#endif /* #else #ifndef HAS_INCOHERENT_CACHES */
			}
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}

/*
 * Use a two-subphase algorithm on architectures where the long type is
 * smaller than 64 bits, to avoid overflowing the grace-period counter.
 */

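/*
 * In more detail: with a counter narrower than 64 bits, a free-running
 * grace-period counter could wrap while a reader still holds a stale
 * snapshot, making that old snapshot appear current again. The
 * two-subphase variant below therefore only toggles the
 * URCU_QSBR_GP_CTR parity bit and waits for readers twice per grace
 * period, while the 64-bit variant can simply increment the counter.
 */
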
#if (CAA_BITS_PER_LONG < 64)
void urcu_qsbr_synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	unsigned long was_online;
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
	struct urcu_waiters waiters;

	was_online = urcu_qsbr_read_ongoing();

	/*
	 * All threads should read qparity before accessing the data
	 * structure pointed to by the new pointer. In the was_online
	 * case, urcu_qsbr_thread_offline() includes that memory barrier.
	 *
	 * Mark the writer thread offline to make sure we don't wait for
	 * our own quiescent state. This allows using synchronize_rcu()
	 * in threads registered as readers.
	 */
	if (was_online)
		urcu_qsbr_thread_offline();
	else
		cmm_smp_mb();

	/*
	 * Add ourselves to the gp_waiters queue of threads waiting for a
	 * grace period to be run. Proceed to perform the grace period only
	 * if we are the first thread added into the queue.
	 */
	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
		/* Not first in queue: will be awakened by another thread. */
		urcu_adaptative_busy_wait(&wait);
		goto gp_end;
	}
	/* We won't need to wake ourselves up */
	urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);

	mutex_lock(&rcu_gp_lock);

	/*
	 * Move all waiters into our local queue.
	 */
	urcu_move_waiters(&waiters, &gp_waiters);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and re-acquire rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);


	/*
	 * Must finish waiting for quiescent state for original parity
	 * before committing next urcu_qsbr_gp.ctr update to memory. Failure
	 * to do so could result in the writer waiting forever while new
	 * readers are always accessing data (no progress). Enforce
	 * compiler-order of load URCU_TLS(urcu_qsbr_reader).ctr before store
	 * to urcu_qsbr_gp.ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(urcu_qsbr_gp.ctr, urcu_qsbr_gp.ctr ^ URCU_QSBR_GP_CTR);

	/*
	 * Must commit urcu_qsbr_gp.ctr update to memory before waiting for
	 * quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data
	 * (no progress). Enforce compiler-order of store to urcu_qsbr_gp.ctr
	 * before load URCU_TLS(urcu_qsbr_reader).ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and re-acquire rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	urcu_wake_all_waiters(&waiters);
gp_end:
	/*
	 * Finish waiting for reader threads before letting the old pointer
	 * be freed.
	 */
	if (was_online)
		urcu_qsbr_thread_online();
	else
		cmm_smp_mb();
}
#else /* !(CAA_BITS_PER_LONG < 64) */
void urcu_qsbr_synchronize_rcu(void)
{
	CDS_LIST_HEAD(qsreaders);
	unsigned long was_online;
	DEFINE_URCU_WAIT_NODE(wait, URCU_WAIT_WAITING);
	struct urcu_waiters waiters;

	was_online = urcu_qsbr_read_ongoing();

	/*
	 * Mark the writer thread offline to make sure we don't wait for
	 * our own quiescent state. This allows using synchronize_rcu()
	 * in threads registered as readers.
	 */
	if (was_online)
		urcu_qsbr_thread_offline();
	else
		cmm_smp_mb();

	/*
	 * Add ourselves to the gp_waiters queue of threads waiting for a
	 * grace period to be run. Proceed to perform the grace period only
	 * if we are the first thread added into the queue.
	 */
	if (urcu_wait_add(&gp_waiters, &wait) != 0) {
		/* Not first in queue: will be awakened by another thread. */
		urcu_adaptative_busy_wait(&wait);
		goto gp_end;
	}
	/* We won't need to wake ourselves up */
	urcu_wait_set_state(&wait, URCU_WAIT_RUNNING);

	mutex_lock(&rcu_gp_lock);

	/*
	 * Move all waiters into our local queue.
	 */
	urcu_move_waiters(&waiters, &gp_waiters);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/* Increment current G.P. */
	CMM_STORE_SHARED(urcu_qsbr_gp.ctr, urcu_qsbr_gp.ctr + URCU_QSBR_GP_CTR);

	/*
	 * Must commit urcu_qsbr_gp.ctr update to memory before waiting for
	 * quiescent state. Failure to do so could result in the writer
	 * waiting forever while new readers are always accessing data
	 * (no progress). Enforce compiler-order of store to urcu_qsbr_gp.ctr
	 * before load URCU_TLS(urcu_qsbr_reader).ctr.
	 */
	cmm_barrier();

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe the new count or be quiescent.
	 * wait_for_readers() can release and re-acquire rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	urcu_wake_all_waiters(&waiters);
gp_end:
	if (was_online)
		urcu_qsbr_thread_online();
	else
		cmm_smp_mb();
}
#endif /* !(CAA_BITS_PER_LONG < 64) */

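/*
 * Updater-side sketch (illustrative only; "gp_ptr", "new_ptr" and
 * "old_ptr" are hypothetical application-managed variables, with
 * concurrent updates serialized by the application):
 *
 *	old_ptr = gp_ptr;
 *	rcu_assign_pointer(gp_ptr, new_ptr);
 *	urcu_qsbr_synchronize_rcu();
 *	free(old_ptr);
 *
 * Once urcu_qsbr_synchronize_rcu() returns, no reader can still hold a
 * reference to old_ptr, so it is safe to reclaim.
 */
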
/*
 * Library wrappers to be used by non-LGPL-compatible source code.
 */

void urcu_qsbr_read_lock(void)
{
	_urcu_qsbr_read_lock();
}

void urcu_qsbr_read_unlock(void)
{
	_urcu_qsbr_read_unlock();
}

int urcu_qsbr_read_ongoing(void)
{
	return _urcu_qsbr_read_ongoing();
}

void urcu_qsbr_quiescent_state(void)
{
	_urcu_qsbr_quiescent_state();
}

void urcu_qsbr_thread_offline(void)
{
	_urcu_qsbr_thread_offline();
}

void urcu_qsbr_thread_online(void)
{
	_urcu_qsbr_thread_online();
}

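/*
 * Register the calling thread as a QSBR reader and bring it online.
 * Must be called before this thread's first read-side critical section.
 */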
void urcu_qsbr_register_thread(void)
{
	URCU_TLS(urcu_qsbr_reader).tid = pthread_self();
	urcu_posix_assert(URCU_TLS(urcu_qsbr_reader).ctr == 0);

	mutex_lock(&rcu_registry_lock);
	urcu_posix_assert(!URCU_TLS(urcu_qsbr_reader).registered);
	URCU_TLS(urcu_qsbr_reader).registered = 1;
	cds_list_add(&URCU_TLS(urcu_qsbr_reader).node, &registry);
	mutex_unlock(&rcu_registry_lock);
	_urcu_qsbr_thread_online();
}

void urcu_qsbr_unregister_thread(void)
{
	/*
	 * We have to take the thread offline, otherwise we end up
	 * deadlocking with a waiting writer.
	 */
	_urcu_qsbr_thread_offline();
	urcu_posix_assert(URCU_TLS(urcu_qsbr_reader).registered);
	URCU_TLS(urcu_qsbr_reader).registered = 0;
	mutex_lock(&rcu_registry_lock);
	cds_list_del(&URCU_TLS(urcu_qsbr_reader).node);
	mutex_unlock(&rcu_registry_lock);
}

void urcu_qsbr_exit(void)
{
	/*
	 * Assertion disabled because call_rcu threads are now rcu
	 * readers, and left running at exit.
	 * urcu_posix_assert(cds_list_empty(&registry));
	 */
	urcu_call_rcu_exit();
}

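/*
 * Instantiate the flavor descriptor through which generic liburcu code
 * dispatches to this flavor's entry points.
 */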
DEFINE_RCU_FLAVOR(rcu_flavor);

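/*
 * The call_rcu, defer and grace-period polling implementations are
 * instantiated once per flavor by including them at the end of each
 * flavor's source file.
 */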
#include "urcu-call-rcu-impl.h"
#include "urcu-defer-impl.h"
#include "urcu-poll-impl.h"