Use workqueue in rculfhash
urcu.git: src/urcu-call-rcu-impl.h
/*
 * urcu-call-rcu.c
 *
 * Userspace RCU library - batch memory reclamation with kernel API
 *
 * Copyright (c) 2010 Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <sys/time.h>
#include <unistd.h>
#include <sched.h>

#include "compat-getcpu.h"
#include "urcu/wfcqueue.h"
#include "urcu-call-rcu.h"
#include "urcu-pointer.h"
#include "urcu/list.h"
#include "urcu/futex.h"
#include "urcu/tls-compat.h"
#include "urcu/ref.h"
#include "urcu-die.h"

#define SET_AFFINITY_CHECK_PERIOD		(1U << 8)	/* 256 */
#define SET_AFFINITY_CHECK_PERIOD_MASK		(SET_AFFINITY_CHECK_PERIOD - 1)
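
/*
 * Worked example for the period mask: gp_count is incremented on each
 * call to set_thread_cpu_affinity(), and
 * (++gp_count & SET_AFFINITY_CHECK_PERIOD_MASK) is zero only when
 * gp_count reaches a multiple of 256, so the affinity re-check runs
 * once every SET_AFFINITY_CHECK_PERIOD calls.
 */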

/* Data structure that identifies a call_rcu thread. */

struct call_rcu_data {
	/*
	 * We do not align head on a different cache-line than tail
	 * mainly because call_rcu callback-invocation threads use
	 * batching ("splice") to get an entire list of callbacks, which
	 * effectively empties the queue, and requires touching the tail
	 * anyway.
	 */
	struct cds_wfcq_tail cbs_tail;
	struct cds_wfcq_head cbs_head;
	unsigned long flags;
	int32_t futex;
	unsigned long qlen; /* maintained for debugging. */
	pthread_t tid;
	int cpu_affinity;
	unsigned long gp_count;
	struct cds_list_head list;
} __attribute__((aligned(CAA_CACHE_LINE_SIZE)));

struct call_rcu_completion {
	int barrier_count;
	int32_t futex;
	struct urcu_ref ref;
};

struct call_rcu_completion_work {
	struct rcu_head head;
	struct call_rcu_completion *completion;
};

/*
 * List of all call_rcu_data structures to keep valgrind happy.
 * Protected by call_rcu_mutex.
 */

static CDS_LIST_HEAD(call_rcu_data_list);

/* Link a thread using call_rcu() to its call_rcu thread. */

static DEFINE_URCU_TLS(struct call_rcu_data *, thread_call_rcu_data);

/*
 * Guard call_rcu thread creation and atfork handlers.
 */
static pthread_mutex_t call_rcu_mutex = PTHREAD_MUTEX_INITIALIZER;

/* If a given thread does not have its own call_rcu thread, this is default. */

static struct call_rcu_data *default_call_rcu_data;

static struct urcu_atfork *registered_rculfhash_atfork;
static unsigned long registered_rculfhash_atfork_refcount;

/*
 * If the sched_getcpu() and sysconf(_SC_NPROCESSORS_CONF) calls are
 * available, then we can have call_rcu threads assigned to individual
 * CPUs rather than only to specific threads.
 */

#if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID))

/*
 * Pointer to array of pointers to per-CPU call_rcu_data structures
 * and # CPUs. per_cpu_call_rcu_data is a RCU-protected pointer to an
 * array of RCU-protected pointers to call_rcu_data. call_rcu acts as a
 * RCU read-side and reads per_cpu_call_rcu_data and the per-cpu pointer
 * without mutex. The call_rcu_mutex protects updates.
 */

static struct call_rcu_data **per_cpu_call_rcu_data;
static long maxcpus;

static void maxcpus_reset(void)
{
	maxcpus = 0;
}

/* Allocate the array if it has not already been allocated. */

static void alloc_cpu_call_rcu_data(void)
{
	struct call_rcu_data **p;
	static int warned = 0;

	if (maxcpus != 0)
		return;
	maxcpus = sysconf(_SC_NPROCESSORS_CONF);
	if (maxcpus <= 0) {
		return;
	}
	p = malloc(maxcpus * sizeof(*per_cpu_call_rcu_data));
	if (p != NULL) {
		memset(p, '\0', maxcpus * sizeof(*per_cpu_call_rcu_data));
		rcu_set_pointer(&per_cpu_call_rcu_data, p);
	} else {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
	}
}
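
/*
 * Note on publication: the rcu_set_pointer() above pairs with the
 * rcu_dereference() calls in get_cpu_call_rcu_data(), so readers that
 * traverse per_cpu_call_rcu_data without holding call_rcu_mutex only
 * see the array once it has been fully zero-initialized.
 */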

#else /* #if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID)) */

/*
 * per_cpu_call_rcu_data should be constant, but some functions below, used both
 * for cases where cpu number is available and not available, assume it is not
 * constant.
 */
static struct call_rcu_data **per_cpu_call_rcu_data = NULL;
static const long maxcpus = -1;

static void maxcpus_reset(void)
{
}

static void alloc_cpu_call_rcu_data(void)
{
}

#endif /* #else #if defined(HAVE_SYSCONF) && (defined(HAVE_SCHED_GETCPU) || defined(HAVE_GETCPUID)) */

/* Acquire the specified pthread mutex. */

static void call_rcu_lock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_lock(pmp);
	if (ret)
		urcu_die(ret);
}

/* Release the specified pthread mutex. */

static void call_rcu_unlock(pthread_mutex_t *pmp)
{
	int ret;

	ret = pthread_mutex_unlock(pmp);
	if (ret)
		urcu_die(ret);
}

/*
 * Periodically retry setting CPU affinity if we migrate.
 * Losing affinity can be caused by CPU hotunplug/hotplug, or by
 * cpuset(7).
 */
#if HAVE_SCHED_SETAFFINITY
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	cpu_set_t mask;
	int ret;

	if (crdp->cpu_affinity < 0)
		return 0;
	if (++crdp->gp_count & SET_AFFINITY_CHECK_PERIOD_MASK)
		return 0;
	if (urcu_sched_getcpu() == crdp->cpu_affinity)
		return 0;

	CPU_ZERO(&mask);
	CPU_SET(crdp->cpu_affinity, &mask);
#if SCHED_SETAFFINITY_ARGS == 2
	ret = sched_setaffinity(0, &mask);
#else
	ret = sched_setaffinity(0, sizeof(mask), &mask);
#endif
	/*
	 * EINVAL is fine: can be caused by hotunplugged CPUs, or by
	 * cpuset(7). This is why we should always retry if we detect
	 * migration.
	 */
	if (ret && errno == EINVAL) {
		ret = 0;
		errno = 0;
	}
	return ret;
}
#else
static
int set_thread_cpu_affinity(struct call_rcu_data *crdp)
{
	return 0;
}
#endif

static void call_rcu_wait(struct call_rcu_data *crdp)
{
	/* Read call_rcu list before read futex */
	cmm_smp_mb();
	if (uatomic_read(&crdp->futex) != -1)
		return;
	while (futex_async(&crdp->futex, FUTEX_WAIT, -1,
			NULL, NULL, 0)) {
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}

static void call_rcu_wake_up(struct call_rcu_data *crdp)
{
	/* Write to call_rcu list before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&crdp->futex) == -1)) {
		uatomic_set(&crdp->futex, 0);
		if (futex_async(&crdp->futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}
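
/*
 * Note on the futex handshake above, summarizing call_rcu_wait(),
 * call_rcu_wake_up() and the non-RT path of call_rcu_thread(): the
 * call_rcu thread decrements its futex to -1 before sleeping, and only
 * sleeps if the value is still -1 when FUTEX_WAIT runs. A waker that
 * observes -1 resets the futex to 0 and issues FUTEX_WAKE, so a wakeup
 * racing with the decrement is never lost.
 */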

static void call_rcu_completion_wait(struct call_rcu_completion *completion)
{
	/* Read completion barrier count before read futex */
	cmm_smp_mb();
	if (uatomic_read(&completion->futex) != -1)
		return;
	while (futex_async(&completion->futex, FUTEX_WAIT, -1,
			NULL, NULL, 0)) {
		switch (errno) {
		case EWOULDBLOCK:
			/* Value already changed. */
			return;
		case EINTR:
			/* Retry if interrupted by signal. */
			break;	/* Get out of switch. */
		default:
			/* Unexpected error. */
			urcu_die(errno);
		}
	}
}

static void call_rcu_completion_wake_up(struct call_rcu_completion *completion)
{
	/* Write to completion barrier count before reading/writing futex */
	cmm_smp_mb();
	if (caa_unlikely(uatomic_read(&completion->futex) == -1)) {
		uatomic_set(&completion->futex, 0);
		if (futex_async(&completion->futex, FUTEX_WAKE, 1,
				NULL, NULL, 0) < 0)
			urcu_die(errno);
	}
}

/* This is the code run by each call_rcu thread. */

static void *call_rcu_thread(void *arg)
{
	unsigned long cbcount;
	struct call_rcu_data *crdp = (struct call_rcu_data *) arg;
	int rt = !!(uatomic_read(&crdp->flags) & URCU_CALL_RCU_RT);

	if (set_thread_cpu_affinity(crdp))
		urcu_die(errno);

	/*
	 * If callbacks take a read-side lock, we need to be registered.
	 */
	rcu_register_thread();

	URCU_TLS(thread_call_rcu_data) = crdp;
	if (!rt) {
		uatomic_dec(&crdp->futex);
		/* Decrement futex before reading call_rcu list */
		cmm_smp_mb();
	}
	for (;;) {
		struct cds_wfcq_head cbs_tmp_head;
		struct cds_wfcq_tail cbs_tmp_tail;
		struct cds_wfcq_node *cbs, *cbs_tmp_n;
		enum cds_wfcq_ret splice_ret;

		if (set_thread_cpu_affinity(crdp))
			urcu_die(errno);

		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) {
			/*
			 * Pause requested. Become quiescent: remove
			 * ourself from all global lists, and don't
			 * process any callback. The callback lists may
			 * still be non-empty though.
			 */
			rcu_unregister_thread();
			cmm_smp_mb__before_uatomic_or();
			uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSED);
			while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSE) != 0)
				(void) poll(NULL, 0, 1);
			uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSED);
			cmm_smp_mb__after_uatomic_and();
			rcu_register_thread();
		}

		cds_wfcq_init(&cbs_tmp_head, &cbs_tmp_tail);
		splice_ret = __cds_wfcq_splice_blocking(&cbs_tmp_head,
			&cbs_tmp_tail, &crdp->cbs_head, &crdp->cbs_tail);
		assert(splice_ret != CDS_WFCQ_RET_WOULDBLOCK);
		assert(splice_ret != CDS_WFCQ_RET_DEST_NON_EMPTY);
		if (splice_ret != CDS_WFCQ_RET_SRC_EMPTY) {
			synchronize_rcu();
			cbcount = 0;
			__cds_wfcq_for_each_blocking_safe(&cbs_tmp_head,
					&cbs_tmp_tail, cbs, cbs_tmp_n) {
				struct rcu_head *rhp;

				rhp = caa_container_of(cbs,
					struct rcu_head, next);
				rhp->func(rhp);
				cbcount++;
			}
			uatomic_sub(&crdp->qlen, cbcount);
		}
		if (uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOP)
			break;
		rcu_thread_offline();
		if (!rt) {
			if (cds_wfcq_empty(&crdp->cbs_head,
					&crdp->cbs_tail)) {
				call_rcu_wait(crdp);
				(void) poll(NULL, 0, 10);
				uatomic_dec(&crdp->futex);
				/*
				 * Decrement futex before reading
				 * call_rcu list.
				 */
				cmm_smp_mb();
			} else {
				(void) poll(NULL, 0, 10);
			}
		} else {
			(void) poll(NULL, 0, 10);
		}
		rcu_thread_online();
	}
	if (!rt) {
		/*
		 * Read call_rcu list before write futex.
		 */
		cmm_smp_mb();
		uatomic_set(&crdp->futex, 0);
	}
	uatomic_or(&crdp->flags, URCU_CALL_RCU_STOPPED);
	rcu_unregister_thread();
	return NULL;
}

/*
 * Create both a call_rcu thread and the corresponding call_rcu_data
 * structure, linking the structure in as specified. Caller must hold
 * call_rcu_mutex.
 */

static void call_rcu_data_init(struct call_rcu_data **crdpp,
			       unsigned long flags,
			       int cpu_affinity)
{
	struct call_rcu_data *crdp;
	int ret;

	crdp = malloc(sizeof(*crdp));
	if (crdp == NULL)
		urcu_die(errno);
	memset(crdp, '\0', sizeof(*crdp));
	cds_wfcq_init(&crdp->cbs_head, &crdp->cbs_tail);
	crdp->qlen = 0;
	crdp->futex = 0;
	crdp->flags = flags;
	cds_list_add(&crdp->list, &call_rcu_data_list);
	crdp->cpu_affinity = cpu_affinity;
	crdp->gp_count = 0;
	cmm_smp_mb();  /* Structure initialized before pointer is planted. */
	*crdpp = crdp;
	ret = pthread_create(&crdp->tid, NULL, call_rcu_thread, crdp);
	if (ret)
		urcu_die(ret);
}
/*
 * Return a pointer to the call_rcu_data structure for the specified
 * CPU, returning NULL if there is none. We cannot automatically
 * create it because the platform we are running on might not define
 * urcu_sched_getcpu().
 *
 * The call to this function and use of the returned call_rcu_data
 * should be protected by RCU read-side lock.
 */

struct call_rcu_data *get_cpu_call_rcu_data(int cpu)
{
	static int warned = 0;
	struct call_rcu_data **pcpu_crdp;

	pcpu_crdp = rcu_dereference(per_cpu_call_rcu_data);
	if (pcpu_crdp == NULL)
		return NULL;
	if (!warned && maxcpus > 0 && (cpu < 0 || maxcpus <= cpu)) {
		fprintf(stderr, "[error] liburcu: get CPU # out of range\n");
		warned = 1;
	}
	if (cpu < 0 || maxcpus <= cpu)
		return NULL;
	return rcu_dereference(pcpu_crdp[cpu]);
}
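
/*
 * Example (illustrative sketch, not part of this file): looking up the
 * per-CPU call_rcu_data under the RCU read-side lock, as the comment
 * above requires. "cpu" is a caller-supplied index; the fallback to
 * the default structure mirrors what get_call_rcu_data() does.
 *
 *	struct call_rcu_data *crdp;
 *
 *	rcu_read_lock();
 *	crdp = get_cpu_call_rcu_data(cpu);
 *	if (crdp == NULL)
 *		crdp = get_default_call_rcu_data();
 *	... use crdp only within this read-side critical section ...
 *	rcu_read_unlock();
 */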

/*
 * Return the tid corresponding to the call_rcu thread whose
 * call_rcu_data structure is specified.
 */

pthread_t get_call_rcu_thread(struct call_rcu_data *crdp)
{
	return crdp->tid;
}

/*
 * Create a call_rcu_data structure (with thread) and return a pointer.
 */

static struct call_rcu_data *__create_call_rcu_data(unsigned long flags,
						     int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_data_init(&crdp, flags, cpu_affinity);
	return crdp;
}

struct call_rcu_data *create_call_rcu_data(unsigned long flags,
					   int cpu_affinity)
{
	struct call_rcu_data *crdp;

	call_rcu_lock(&call_rcu_mutex);
	crdp = __create_call_rcu_data(flags, cpu_affinity);
	call_rcu_unlock(&call_rcu_mutex);
	return crdp;
}

/*
 * Set the specified CPU to use the specified call_rcu_data structure.
 *
 * Use NULL to remove a CPU's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_cpu_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 */

int set_cpu_call_rcu_data(int cpu, struct call_rcu_data *crdp)
{
	static int warned = 0;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	if (cpu < 0 || maxcpus <= cpu) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: set CPU # out of range\n");
			warned = 1;
		}
		call_rcu_unlock(&call_rcu_mutex);
		errno = EINVAL;
		return -EINVAL;
	}

	if (per_cpu_call_rcu_data == NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = ENOMEM;
		return -ENOMEM;
	}

	if (per_cpu_call_rcu_data[cpu] != NULL && crdp != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		errno = EEXIST;
		return -EEXIST;
	}

	rcu_set_pointer(&per_cpu_call_rcu_data[cpu], crdp);
	call_rcu_unlock(&call_rcu_mutex);
	return 0;
}
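
/*
 * Example (illustrative sketch, not part of this file): removing a
 * CPU's call_rcu_data while respecting the grace-period requirement
 * stated above. "cpu" is a caller-supplied index.
 *
 *	struct call_rcu_data *old;
 *
 *	rcu_read_lock();
 *	old = get_cpu_call_rcu_data(cpu);
 *	rcu_read_unlock();
 *	if (set_cpu_call_rcu_data(cpu, NULL))
 *		... handle error ...
 *	synchronize_rcu();	(wait for readers of the old pointer)
 *	call_rcu_data_free(old);
 */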

/*
 * Return a pointer to the default call_rcu_data structure, creating
 * one if need be. Because we never free call_rcu_data structures,
 * we don't need to be in an RCU read-side critical section.
 */

struct call_rcu_data *get_default_call_rcu_data(void)
{
	if (default_call_rcu_data != NULL)
		return rcu_dereference(default_call_rcu_data);
	call_rcu_lock(&call_rcu_mutex);
	if (default_call_rcu_data != NULL) {
		call_rcu_unlock(&call_rcu_mutex);
		return default_call_rcu_data;
	}
	call_rcu_data_init(&default_call_rcu_data, 0, -1);
	call_rcu_unlock(&call_rcu_mutex);
	return default_call_rcu_data;
}

/*
 * Return the call_rcu_data structure that applies to the currently
 * running thread. Any call_rcu_data structure assigned specifically
 * to this thread has first priority, followed by any call_rcu_data
 * structure assigned to the CPU on which the thread is running,
 * followed by the default call_rcu_data structure. If there is not
 * yet a default call_rcu_data structure, one will be created.
 *
 * Calls to this function and use of the returned call_rcu_data should
 * be protected by RCU read-side lock.
 */
struct call_rcu_data *get_call_rcu_data(void)
{
	struct call_rcu_data *crd;

	if (URCU_TLS(thread_call_rcu_data) != NULL)
		return URCU_TLS(thread_call_rcu_data);

	if (maxcpus > 0) {
		crd = get_cpu_call_rcu_data(urcu_sched_getcpu());
		if (crd)
			return crd;
	}

	return get_default_call_rcu_data();
}

/*
 * Return a pointer to this task's call_rcu_data if there is one.
 */

struct call_rcu_data *get_thread_call_rcu_data(void)
{
	return URCU_TLS(thread_call_rcu_data);
}

/*
 * Set this task's call_rcu_data structure as specified, regardless
 * of whether or not this task already had one. (This allows switching
 * to and from real-time call_rcu threads, for example.)
 *
 * Use NULL to remove a thread's call_rcu_data structure, but it is
 * the caller's responsibility to dispose of the removed structure.
 * Use get_thread_call_rcu_data() to obtain a pointer to the old structure
 * (prior to NULLing it out, of course).
 */

void set_thread_call_rcu_data(struct call_rcu_data *crdp)
{
	URCU_TLS(thread_call_rcu_data) = crdp;
}
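
/*
 * Example (illustrative sketch, not part of this file): giving the
 * current thread its own real-time call_rcu worker, e.g. to avoid
 * sharing a worker with non-RT threads. Disposal of the worker is
 * sketched after call_rcu_data_free() below.
 *
 *	struct call_rcu_data *crdp;
 *
 *	crdp = create_call_rcu_data(URCU_CALL_RCU_RT, -1);
 *	set_thread_call_rcu_data(crdp);
 *	... this thread's call_rcu() now enqueues onto the RT worker ...
 */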

/*
 * Create a separate call_rcu thread for each CPU. This does not
 * replace a pre-existing call_rcu thread -- use the set_cpu_call_rcu_data()
 * function if you want that behavior. Should be paired with
 * free_all_cpu_call_rcu_data() to tear down these call_rcu worker
 * threads.
 */

int create_all_cpu_call_rcu_data(unsigned long flags)
{
	int i;
	struct call_rcu_data *crdp;
	int ret;

	call_rcu_lock(&call_rcu_mutex);
	alloc_cpu_call_rcu_data();
	call_rcu_unlock(&call_rcu_mutex);
	if (maxcpus <= 0) {
		errno = EINVAL;
		return -EINVAL;
	}
	if (per_cpu_call_rcu_data == NULL) {
		errno = ENOMEM;
		return -ENOMEM;
	}
	for (i = 0; i < maxcpus; i++) {
		call_rcu_lock(&call_rcu_mutex);
		if (get_cpu_call_rcu_data(i)) {
			call_rcu_unlock(&call_rcu_mutex);
			continue;
		}
		crdp = __create_call_rcu_data(flags, i);
		if (crdp == NULL) {
			call_rcu_unlock(&call_rcu_mutex);
			errno = ENOMEM;
			return -ENOMEM;
		}
		call_rcu_unlock(&call_rcu_mutex);
		if ((ret = set_cpu_call_rcu_data(i, crdp)) != 0) {
			call_rcu_data_free(crdp);

			/* it has been created by other thread */
			if (ret == -EEXIST)
				continue;

			return ret;
		}
	}
	return 0;
}
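
/*
 * Example (illustrative sketch, not part of this file): per-CPU worker
 * setup at application start, paired with teardown on exit. The
 * rcu_barrier() call is optional and merely lets already queued
 * callbacks finish before the workers are torn down.
 *
 *	if (create_all_cpu_call_rcu_data(0))
 *		... fall back to the default call_rcu thread ...
 *	...
 *	rcu_barrier();
 *	free_all_cpu_call_rcu_data();
 */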

/*
 * Wake up the call_rcu thread corresponding to the specified
 * call_rcu_data structure.
 */
static void wake_call_rcu_thread(struct call_rcu_data *crdp)
{
	if (!(_CMM_LOAD_SHARED(crdp->flags) & URCU_CALL_RCU_RT))
		call_rcu_wake_up(crdp);
}

static void _call_rcu(struct rcu_head *head,
		      void (*func)(struct rcu_head *head),
		      struct call_rcu_data *crdp)
{
	cds_wfcq_node_init(&head->next);
	head->func = func;
	cds_wfcq_enqueue(&crdp->cbs_head, &crdp->cbs_tail, &head->next);
	uatomic_inc(&crdp->qlen);
	wake_call_rcu_thread(crdp);
}
/*
 * Schedule a function to be invoked after a following grace period.
 * This is the only function that must be called -- the others are
 * only present to allow applications to tune their use of RCU for
 * maximum performance.
 *
 * Note that unless a call_rcu thread has already been created, the
 * first invocation of call_rcu() will create one. So, if you need
 * the first invocation of call_rcu() to be fast, make sure to create
 * a call_rcu thread first. One way to accomplish this is
 * "get_call_rcu_data();", and another is create_all_cpu_call_rcu_data().
 *
 * call_rcu must be called by registered RCU read-side threads.
 */
void call_rcu(struct rcu_head *head,
	      void (*func)(struct rcu_head *head))
{
	struct call_rcu_data *crdp;

	/* Holding rcu read-side lock across use of per-cpu crdp */
	_rcu_read_lock();
	crdp = get_call_rcu_data();
	_call_rcu(head, func, crdp);
	_rcu_read_unlock();
}
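
/*
 * Example (illustrative sketch, not part of this file): deferred free
 * of a caller-defined structure. "struct foo", foo_free_cb() and the
 * unpublishing step are hypothetical; the pattern of embedding
 * struct rcu_head and using caa_container_of() in the callback is the
 * intended usage of call_rcu().
 *
 *	struct foo {
 *		struct rcu_head rcu;
 *		int value;
 *	};
 *
 *	static void foo_free_cb(struct rcu_head *head)
 *	{
 *		struct foo *p = caa_container_of(head, struct foo, rcu);
 *
 *		free(p);
 *	}
 *
 *	(from a registered reader thread, after unpublishing p)
 *	call_rcu(&p->rcu, foo_free_cb);
 */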

/*
 * Free up the specified call_rcu_data structure, terminating the
 * associated call_rcu thread. The caller must have previously
 * removed the call_rcu_data structure from per-thread or per-CPU
 * usage. For example, set_cpu_call_rcu_data(cpu, NULL) for per-CPU
 * call_rcu_data structures or set_thread_call_rcu_data(NULL) for
 * per-thread call_rcu_data structures.
 *
 * We silently refuse to free up the default call_rcu_data structure
 * because that is where we put any leftover callbacks. Note that
 * the possibility of self-spawning callbacks makes it impossible
 * to execute all the callbacks in finite time without putting any
 * newly spawned callbacks somewhere else. The "somewhere else" of
 * last resort is the default call_rcu_data structure.
 *
 * We also silently refuse to free NULL pointers. This simplifies
 * the calling code.
 *
 * The caller must wait for a grace-period to pass between return from
 * set_cpu_call_rcu_data() and call to call_rcu_data_free() passing the
 * previous call rcu data as argument.
 *
 * Note: introducing __cds_wfcq_splice_blocking() in this function fixed
 * a list corruption bug in the 0.7.x series. The equivalent fix
 * appeared in 0.6.8 for the stable-0.6 branch.
 */
void call_rcu_data_free(struct call_rcu_data *crdp)
{
	if (crdp == NULL || crdp == default_call_rcu_data) {
		return;
	}
	if ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_STOP);
		wake_call_rcu_thread(crdp);
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_STOPPED) == 0)
			(void) poll(NULL, 0, 1);
	}
	if (!cds_wfcq_empty(&crdp->cbs_head, &crdp->cbs_tail)) {
		/* Create default call rcu data if need be */
		(void) get_default_call_rcu_data();
		__cds_wfcq_splice_blocking(&default_call_rcu_data->cbs_head,
			&default_call_rcu_data->cbs_tail,
			&crdp->cbs_head, &crdp->cbs_tail);
		uatomic_add(&default_call_rcu_data->qlen,
			    uatomic_read(&crdp->qlen));
		wake_call_rcu_thread(default_call_rcu_data);
	}

	call_rcu_lock(&call_rcu_mutex);
	cds_list_del(&crdp->list);
	call_rcu_unlock(&call_rcu_mutex);

	free(crdp);
}
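
/*
 * Example (illustrative sketch, not part of this file): disposing of a
 * call_rcu worker previously assigned to the current thread, as
 * mentioned in the comment above. Pending callbacks, if any, are
 * migrated to the default worker by call_rcu_data_free() itself.
 *
 *	struct call_rcu_data *crdp = get_thread_call_rcu_data();
 *
 *	set_thread_call_rcu_data(NULL);
 *	call_rcu_data_free(crdp);
 */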

/*
 * Clean up all the per-CPU call_rcu threads.
 */
void free_all_cpu_call_rcu_data(void)
{
	int cpu;
	struct call_rcu_data **crdp;
	static int warned = 0;

	if (maxcpus <= 0)
		return;

	crdp = malloc(sizeof(*crdp) * maxcpus);
	if (!crdp) {
		if (!warned) {
			fprintf(stderr, "[error] liburcu: unable to allocate per-CPU pointer array\n");
		}
		warned = 1;
		return;
	}

	for (cpu = 0; cpu < maxcpus; cpu++) {
		crdp[cpu] = get_cpu_call_rcu_data(cpu);
		if (crdp[cpu] == NULL)
			continue;
		set_cpu_call_rcu_data(cpu, NULL);
	}
	/*
	 * Wait for call_rcu sites acting as RCU readers of the
	 * call_rcu_data to become quiescent.
	 */
	synchronize_rcu();
	for (cpu = 0; cpu < maxcpus; cpu++) {
		if (crdp[cpu] == NULL)
			continue;
		call_rcu_data_free(crdp[cpu]);
	}
	free(crdp);
}

static
void free_completion(struct urcu_ref *ref)
{
	struct call_rcu_completion *completion;

	completion = caa_container_of(ref, struct call_rcu_completion, ref);
	free(completion);
}

static
void _rcu_barrier_complete(struct rcu_head *head)
{
	struct call_rcu_completion_work *work;
	struct call_rcu_completion *completion;

	work = caa_container_of(head, struct call_rcu_completion_work, head);
	completion = work->completion;
	if (!uatomic_sub_return(&completion->barrier_count, 1))
		call_rcu_completion_wake_up(completion);
	urcu_ref_put(&completion->ref, free_completion);
	free(work);
}

/*
 * Wait for all in-flight call_rcu callbacks to complete execution.
 */
void rcu_barrier(void)
{
	struct call_rcu_data *crdp;
	struct call_rcu_completion *completion;
	int count = 0;
	int was_online;

	/* Put in offline state in QSBR. */
	was_online = _rcu_read_ongoing();
	if (was_online)
		rcu_thread_offline();
	/*
	 * Calling a rcu_barrier() within a RCU read-side critical
	 * section is an error.
	 */
	if (_rcu_read_ongoing()) {
		static int warned = 0;

		if (!warned) {
			fprintf(stderr, "[error] liburcu: rcu_barrier() called from within RCU read-side critical section.\n");
		}
		warned = 1;
		goto online;
	}

	completion = calloc(sizeof(*completion), 1);
	if (!completion)
		urcu_die(errno);

	call_rcu_lock(&call_rcu_mutex);
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		count++;

	/* Referenced by rcu_barrier() and each call_rcu thread. */
	urcu_ref_set(&completion->ref, count + 1);
	completion->barrier_count = count;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		struct call_rcu_completion_work *work;

		work = calloc(sizeof(*work), 1);
		if (!work)
			urcu_die(errno);
		work->completion = completion;
		_call_rcu(&work->head, _rcu_barrier_complete, crdp);
	}
	call_rcu_unlock(&call_rcu_mutex);

	/* Wait for them */
	for (;;) {
		uatomic_dec(&completion->futex);
		/* Decrement futex before reading barrier_count */
		cmm_smp_mb();
		if (!uatomic_read(&completion->barrier_count))
			break;
		call_rcu_completion_wait(completion);
	}

	urcu_ref_put(&completion->ref, free_completion);

online:
	if (was_online)
		rcu_thread_online();
}
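
/*
 * Example (illustrative sketch, not part of this file): making sure all
 * previously queued callbacks have executed before tearing down state
 * they might touch, e.g. before unloading a shared object or freeing a
 * callback pool. "pool", "my_pool_free_cb", "p" and my_pool_destroy()
 * are hypothetical.
 *
 *	... many call_rcu(&p->rcu, my_pool_free_cb) invocations ...
 *	rcu_barrier();		(all queued callbacks have now run)
 *	my_pool_destroy(pool);
 */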

/*
 * Acquire the call_rcu_mutex in order to ensure that the child sees
 * all of the call_rcu() data structures in a consistent state. Ensure
 * that all call_rcu threads are in a quiescent state across fork.
 * Suitable for pthread_atfork() and friends.
 */
void call_rcu_before_fork(void)
{
	struct call_rcu_data *crdp;
	struct urcu_atfork *atfork;

	call_rcu_lock(&call_rcu_mutex);

	atfork = registered_rculfhash_atfork;
	if (atfork)
		atfork->before_fork(atfork->priv);

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		uatomic_or(&crdp->flags, URCU_CALL_RCU_PAUSE);
		cmm_smp_mb__after_uatomic_or();
		wake_call_rcu_thread(crdp);
	}
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) == 0)
			(void) poll(NULL, 0, 1);
	}
}

/*
 * Clean up call_rcu data structures in the parent of a successful fork()
 * that is not followed by exec() in the child. Suitable for
 * pthread_atfork() and friends.
 */
void call_rcu_after_fork_parent(void)
{
	struct call_rcu_data *crdp;
	struct urcu_atfork *atfork;

	cds_list_for_each_entry(crdp, &call_rcu_data_list, list)
		uatomic_and(&crdp->flags, ~URCU_CALL_RCU_PAUSE);
	cds_list_for_each_entry(crdp, &call_rcu_data_list, list) {
		while ((uatomic_read(&crdp->flags) & URCU_CALL_RCU_PAUSED) != 0)
			(void) poll(NULL, 0, 1);
	}
	atfork = registered_rculfhash_atfork;
	if (atfork)
		atfork->after_fork_parent(atfork->priv);
	call_rcu_unlock(&call_rcu_mutex);
}

/*
 * Clean up call_rcu data structures in the child of a successful fork()
 * that is not followed by exec(). Suitable for pthread_atfork() and
 * friends.
 */
void call_rcu_after_fork_child(void)
{
	struct call_rcu_data *crdp, *next;
	struct urcu_atfork *atfork;

	/* Release the mutex. */
	call_rcu_unlock(&call_rcu_mutex);

	atfork = registered_rculfhash_atfork;
	if (atfork)
		atfork->after_fork_child(atfork->priv);

	/* Do nothing when call_rcu() has not been used */
	if (cds_list_empty(&call_rcu_data_list))
		return;

	/*
	 * Allocate a new default call_rcu_data structure in order
	 * to get a working call_rcu thread to go with it.
	 */
	default_call_rcu_data = NULL;
	(void)get_default_call_rcu_data();

	/* Cleanup call_rcu_data pointers before use */
	maxcpus_reset();
	free(per_cpu_call_rcu_data);
	rcu_set_pointer(&per_cpu_call_rcu_data, NULL);
	URCU_TLS(thread_call_rcu_data) = NULL;

	/*
	 * Dispose of all of the rest of the call_rcu_data structures.
	 * Leftover call_rcu callbacks will be merged into the new
	 * default call_rcu thread queue.
	 */
	cds_list_for_each_entry_safe(crdp, next, &call_rcu_data_list, list) {
		if (crdp == default_call_rcu_data)
			continue;
		uatomic_set(&crdp->flags, URCU_CALL_RCU_STOPPED);
		call_rcu_data_free(crdp);
	}
}
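
/*
 * Example (illustrative sketch, not part of this file): wiring the
 * three handlers above into fork() handling for an application that
 * forks without exec(), as the comments above suggest.
 *
 *	ret = pthread_atfork(call_rcu_before_fork,
 *			call_rcu_after_fork_parent,
 *			call_rcu_after_fork_child);
 *	if (ret)
 *		... handle error ...
 */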

void urcu_register_rculfhash_atfork(struct urcu_atfork *atfork)
{
	call_rcu_lock(&call_rcu_mutex);
	if (registered_rculfhash_atfork_refcount++)
		goto end;
	registered_rculfhash_atfork = atfork;
end:
	call_rcu_unlock(&call_rcu_mutex);
}

void urcu_unregister_rculfhash_atfork(struct urcu_atfork *atfork)
{
	call_rcu_lock(&call_rcu_mutex);
	if (--registered_rculfhash_atfork_refcount)
		goto end;
	registered_rculfhash_atfork = NULL;
end:
	call_rcu_unlock(&call_rcu_mutex);
}