/*
 * SPDX-License-Identifier: LGPL-2.1-or-later
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 *
 * Userspace RCU library for LTTng-UST, derived from liburcu "bulletproof" version.
 */

#define _LGPL_SOURCE
#include <stdio.h>
#include <pthread.h>
#include <signal.h>
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <poll.h>
#include <unistd.h>
#include <stdbool.h>
#include <sys/mman.h>

#include <urcu/arch.h>
#include <urcu/wfcqueue.h>
#include <lttng/urcu/static/urcu-ust.h>
#include <lttng/urcu/pointer.h>
#include <urcu/tls-compat.h>

/* Do not #define _LGPL_SOURCE to ensure we can emit the wrapper symbols */
#undef _LGPL_SOURCE
#include <lttng/urcu/urcu-ust.h>
#define _LGPL_SOURCE

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS	MAP_ANON
#endif

#ifdef __linux__
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	return mremap(old_address, old_size, new_size, flags);
}
#else

#define MREMAP_MAYMOVE	1
#define MREMAP_FIXED	2

/*
 * mremap wrapper for non-Linux systems not allowing MAYMOVE.
 * This is not generic.
 */
static
void *mremap_wrapper(void *old_address, size_t old_size,
		size_t new_size, int flags)
{
	assert(!(flags & MREMAP_MAYMOVE));

	return MAP_FAILED;
}
#endif
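
/*
 * Note: registry chunks handed out below are referenced by per-thread
 * TLS pointers, so their addresses must never change. This is why the
 * non-Linux fallback above refuses MREMAP_MAYMOVE and returns
 * MAP_FAILED, which forces expand_arena() to allocate a fresh chunk
 * rather than move an existing one.
 */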

/* Sleep delay in ms */
#define RCU_SLEEP_DELAY_MS	10
#define INIT_NR_THREADS		8
#define ARENA_INIT_ALLOC		\
	sizeof(struct registry_chunk)	\
	+ INIT_NR_THREADS * sizeof(struct lttng_ust_urcu_reader)

/*
 * Active attempts to check for reader quiescent state before calling
 * sleep().
 */
#define RCU_QS_ACTIVE_ATTEMPTS 100

static
int lttng_ust_urcu_refcount;

/* If the headers do not support the membarrier system call, fall back on smp_mb. */
#ifdef __NR_membarrier
# define membarrier(...)	syscall(__NR_membarrier, __VA_ARGS__)
#else
# define membarrier(...)	-ENOSYS
#endif

enum membarrier_cmd {
	MEMBARRIER_CMD_QUERY				= 0,
	MEMBARRIER_CMD_SHARED				= (1 << 0),
	/* reserved for MEMBARRIER_CMD_SHARED_EXPEDITED (1 << 1) */
	/* reserved for MEMBARRIER_CMD_PRIVATE (1 << 2) */
	MEMBARRIER_CMD_PRIVATE_EXPEDITED		= (1 << 3),
	MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED	= (1 << 4),
};
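
/*
 * Note: the membarrier_cmd values above mirror the kernel UAPI in
 * <linux/membarrier.h>. They are redefined locally so the library still
 * builds against older kernel headers that lack them.
 */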

static
void __attribute__((constructor)) _lttng_ust_urcu_init(void);
static
void __attribute__((destructor)) lttng_ust_urcu_exit(void);

#ifndef CONFIG_RCU_FORCE_SYS_MEMBARRIER
int lttng_ust_urcu_has_sys_membarrier;
#endif

/*
 * rcu_gp_lock ensures mutual exclusion between threads calling
 * synchronize_rcu().
 */
static pthread_mutex_t rcu_gp_lock = PTHREAD_MUTEX_INITIALIZER;
/*
 * rcu_registry_lock ensures mutual exclusion between threads
 * registering and unregistering themselves to/from the registry, and
 * with threads reading that registry from synchronize_rcu(). However,
 * this lock is not held all the way through the grace period: it is
 * sporadically released between iterations over the registry.
 * rcu_registry_lock may nest inside rcu_gp_lock.
 */
static pthread_mutex_t rcu_registry_lock = PTHREAD_MUTEX_INITIALIZER;

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static int initialized;

static pthread_key_t lttng_ust_urcu_key;

struct lttng_ust_urcu_gp lttng_ust_urcu_gp = { .ctr = LTTNG_UST_URCU_GP_COUNT };

/*
 * Pointer to registry elements. Written to only by each individual reader. Read
 * by both the reader and the writers.
 */
DEFINE_URCU_TLS(struct lttng_ust_urcu_reader *, lttng_ust_urcu_reader);

static CDS_LIST_HEAD(registry);

struct registry_chunk {
	size_t data_len;		/* data length */
	size_t used;			/* amount of data used */
	struct cds_list_head node;	/* chunk_list node */
	char data[];
};

struct registry_arena {
	struct cds_list_head chunk_list;
};

static struct registry_arena registry_arena = {
	.chunk_list = CDS_LIST_HEAD_INIT(registry_arena.chunk_list),
};
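
/*
 * Note: the arena is a list of mmap()'d chunks, each laid out as a
 * registry_chunk header immediately followed by an array of
 * lttng_ust_urcu_reader slots in the flexible "data" member:
 *
 *	[ registry_chunk | reader 0 | reader 1 | ... | reader N-1 ]
 *
 * A slot is in use when its "alloc" flag is set. data_len counts only
 * the slot array, not the header.
 */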

/* Saved fork signal mask, protected by rcu_gp_lock */
static sigset_t saved_fork_signal_mask;

static void mutex_lock(pthread_mutex_t *mutex)
{
	int ret;

#ifndef DISTRUST_SIGNALS_EXTREME
	ret = pthread_mutex_lock(mutex);
	if (ret)
		abort();
#else /* #ifndef DISTRUST_SIGNALS_EXTREME */
	while ((ret = pthread_mutex_trylock(mutex)) != 0) {
		if (ret != EBUSY && ret != EINTR)
			abort();
		poll(NULL, 0, 10);
	}
#endif /* #else #ifndef DISTRUST_SIGNALS_EXTREME */
}

static void mutex_unlock(pthread_mutex_t *mutex)
{
	int ret;

	ret = pthread_mutex_unlock(mutex);
	if (ret)
		abort();
}

static void smp_mb_master(void)
{
	if (caa_likely(lttng_ust_urcu_has_sys_membarrier)) {
		if (membarrier(MEMBARRIER_CMD_PRIVATE_EXPEDITED, 0))
			abort();
	} else {
		cmm_smp_mb();
	}
}
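
/*
 * Note: smp_mb_master() is the writer-side half of an asymmetric
 * barrier pairing. When sys_membarrier() is available, the expedited
 * command forces every running thread of the process through a full
 * barrier, letting the read-side fast path (in the static header) pair
 * with it using only compiler barriers; otherwise the writer falls back
 * on a plain cmm_smp_mb() and readers pay for their own barriers.
 */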

/*
 * Always called with rcu_registry lock held. Releases this lock between
 * iterations and grabs it again. Holds the lock when it returns.
 */
static void wait_for_readers(struct cds_list_head *input_readers,
		struct cds_list_head *cur_snap_readers,
		struct cds_list_head *qsreaders)
{
	unsigned int wait_loops = 0;
	struct lttng_ust_urcu_reader *index, *tmp;

	/*
	 * Wait for each thread URCU_TLS(lttng_ust_urcu_reader).ctr to either
	 * indicate quiescence (not nested), or observe the current
	 * rcu_gp.ctr value.
	 */
	for (;;) {
		if (wait_loops < RCU_QS_ACTIVE_ATTEMPTS)
			wait_loops++;

		cds_list_for_each_entry_safe(index, tmp, input_readers, node) {
			switch (lttng_ust_urcu_reader_state(&index->ctr)) {
			case LTTNG_UST_URCU_READER_ACTIVE_CURRENT:
				if (cur_snap_readers) {
					cds_list_move(&index->node,
						cur_snap_readers);
					break;
				}
				/* Fall-through */
			case LTTNG_UST_URCU_READER_INACTIVE:
				cds_list_move(&index->node, qsreaders);
				break;
			case LTTNG_UST_URCU_READER_ACTIVE_OLD:
				/*
				 * Old snapshot. Leaving node in
				 * input_readers will make us busy-loop
				 * until the snapshot becomes current or
				 * the reader becomes inactive.
				 */
				break;
			}
		}

		if (cds_list_empty(input_readers)) {
			break;
		} else {
			/* Temporarily unlock the registry lock. */
			mutex_unlock(&rcu_registry_lock);
			if (wait_loops >= RCU_QS_ACTIVE_ATTEMPTS)
				(void) poll(NULL, 0, RCU_SLEEP_DELAY_MS);
			else
				caa_cpu_relax();
			/* Re-lock the registry lock before the next loop. */
			mutex_lock(&rcu_registry_lock);
		}
	}
}
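
/*
 * Note: the wait strategy above is adaptive. The first
 * RCU_QS_ACTIVE_ATTEMPTS (100) iterations spin with caa_cpu_relax();
 * after that the writer sleeps RCU_SLEEP_DELAY_MS (10 ms) per
 * iteration, so a long-running reader cannot make the writer burn CPU
 * indefinitely.
 */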

void lttng_ust_urcu_synchronize_rcu(void)
{
	CDS_LIST_HEAD(cur_snap_readers);
	CDS_LIST_HEAD(qsreaders);
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);

	mutex_lock(&rcu_gp_lock);

	mutex_lock(&rcu_registry_lock);

	if (cds_list_empty(&registry))
		goto out;

	/* All threads should read qparity before accessing data structure
	 * where new ptr points to. */
	/* Write new ptr before changing the qparity */
	smp_mb_master();

	/*
	 * Wait for readers to observe original parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&registry, &cur_snap_readers, &qsreaders);

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/* Switch parity: 0 -> 1, 1 -> 0 */
	CMM_STORE_SHARED(lttng_ust_urcu_gp.ctr,
		lttng_ust_urcu_gp.ctr ^ LTTNG_UST_URCU_GP_CTR_PHASE);

	/*
	 * Must commit qparity update to memory before waiting for other parity
	 * quiescent state. Failure to do so could result in the writer waiting
	 * forever while new readers are always accessing data (no progress).
	 * Ensured by CMM_STORE_SHARED and CMM_LOAD_SHARED.
	 */

	/*
	 * Adding a cmm_smp_mb() which is _not_ formally required, but makes the
	 * model easier to understand. It does not have a big performance impact
	 * anyway, given this is the write-side.
	 */
	cmm_smp_mb();

	/*
	 * Wait for readers to observe new parity or be quiescent.
	 * wait_for_readers() can release and grab again rcu_registry_lock
	 * internally.
	 */
	wait_for_readers(&cur_snap_readers, NULL, &qsreaders);

	/*
	 * Put quiescent reader list back into registry.
	 */
	cds_list_splice(&qsreaders, &registry);

	/*
	 * Finish waiting for reader threads before letting the old ptr be
	 * freed.
	 */
	smp_mb_master();
out:
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
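
/*
 * Example: typical writer-side usage pairing with the function above.
 * Illustrative only; "gp", "old", "new" and use of free() are
 * placeholders, and the assign helper is assumed to come from
 * <lttng/urcu/pointer.h>:
 *
 *	old = gp;
 *	lttng_ust_rcu_assign_pointer(gp, new);
 *	lttng_ust_urcu_synchronize_rcu();
 *	free(old);
 */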

/*
 * library wrappers to be used by non-LGPL compatible source code.
 */

void lttng_ust_urcu_read_lock(void)
{
	_lttng_ust_urcu_read_lock();
}

void lttng_ust_urcu_read_unlock(void)
{
	_lttng_ust_urcu_read_unlock();
}

int lttng_ust_urcu_read_ongoing(void)
{
	return _lttng_ust_urcu_read_ongoing();
}
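
/*
 * Example: a minimal read-side critical section using these wrappers.
 * Illustrative only; "gp", "struct foo" and use() are placeholders:
 *
 *	struct foo *p;
 *
 *	lttng_ust_urcu_read_lock();
 *	p = lttng_ust_rcu_dereference(gp);
 *	if (p)
 *		use(p);
 *	lttng_ust_urcu_read_unlock();
 */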

/*
 * Only grow for now. If empty, allocate an ARENA_INIT_ALLOC-sized chunk.
 * Else, try expanding the last chunk. If this fails, allocate a new
 * chunk twice as big as the last chunk.
 * Memory used by chunks _never_ moves. A chunk could theoretically be
 * freed when all "used" slots are released, but we don't do it at this
 * point.
 */
static
void expand_arena(struct registry_arena *arena)
{
	struct registry_chunk *new_chunk, *last_chunk;
	size_t old_chunk_len, new_chunk_len;

	/* No chunk. */
	if (cds_list_empty(&arena->chunk_list)) {
		assert(ARENA_INIT_ALLOC >=
			sizeof(struct registry_chunk)
			+ sizeof(struct lttng_ust_urcu_reader));
		new_chunk_len = ARENA_INIT_ALLOC;
		new_chunk = (struct registry_chunk *) mmap(NULL,
			new_chunk_len,
			PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE,
			-1, 0);
		if (new_chunk == MAP_FAILED)
			abort();
		memset(new_chunk, 0, new_chunk_len);
		new_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
		return;		/* We're done. */
	}

	/* Try expanding last chunk. */
	last_chunk = cds_list_entry(arena->chunk_list.prev,
		struct registry_chunk, node);
	old_chunk_len =
		last_chunk->data_len + sizeof(struct registry_chunk);
	new_chunk_len = old_chunk_len << 1;

	/* Don't allow memory mapping to move, just expand. */
	new_chunk = mremap_wrapper(last_chunk, old_chunk_len,
		new_chunk_len, 0);
	if (new_chunk != MAP_FAILED) {
		/* Should not have moved. */
		assert(new_chunk == last_chunk);
		memset((char *) last_chunk + old_chunk_len, 0,
			new_chunk_len - old_chunk_len);
		last_chunk->data_len =
			new_chunk_len - sizeof(struct registry_chunk);
		return;		/* We're done. */
	}

	/* Remap did not succeed, we need to add a new chunk. */
	new_chunk = (struct registry_chunk *) mmap(NULL,
		new_chunk_len,
		PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE,
		-1, 0);
	if (new_chunk == MAP_FAILED)
		abort();
	memset(new_chunk, 0, new_chunk_len);
	new_chunk->data_len =
		new_chunk_len - sizeof(struct registry_chunk);
	cds_list_add_tail(&new_chunk->node, &arena->chunk_list);
}
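
/*
 * Note: a worked growth sequence. With INIT_NR_THREADS == 8, the first
 * chunk holds 8 reader slots plus the header. Each expansion doubles
 * the byte size, in place when mremap succeeds or via a fresh mmap
 * otherwise, so capacity grows roughly as 8, 16, 32, ... reader slots
 * without any existing slot ever changing address.
 */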

static
struct lttng_ust_urcu_reader *arena_alloc(struct registry_arena *arena)
{
	struct registry_chunk *chunk;
	struct lttng_ust_urcu_reader *rcu_reader_reg;
	int expand_done = 0;	/* Only allow one expansion per alloc. */
	size_t len = sizeof(struct lttng_ust_urcu_reader);

retry:
	cds_list_for_each_entry(chunk, &arena->chunk_list, node) {
		if (chunk->data_len - chunk->used < len)
			continue;
		/* Find spot */
		for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc) {
				rcu_reader_reg->alloc = 1;
				chunk->used += len;
				return rcu_reader_reg;
			}
		}
	}

	if (!expand_done) {
		expand_arena(arena);
		expand_done = 1;
		goto retry;
	}

	return NULL;
}
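
/*
 * Note: allocation is a first-fit linear scan over all chunks and
 * slots. That is O(number of registered threads), which is acceptable
 * because it runs only once per thread registration, under
 * rcu_registry_lock.
 */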

/* Called with signals off and mutex locked */
static
void add_thread(void)
{
	struct lttng_ust_urcu_reader *rcu_reader_reg;
	int ret;

	rcu_reader_reg = arena_alloc(&registry_arena);
	if (!rcu_reader_reg)
		abort();
	ret = pthread_setspecific(lttng_ust_urcu_key, rcu_reader_reg);
	if (ret)
		abort();

	/* Add to registry */
	rcu_reader_reg->tid = pthread_self();
	assert(rcu_reader_reg->ctr == 0);
	cds_list_add(&rcu_reader_reg->node, &registry);
	/*
	 * Reader threads are pointing to the reader registry. This is
	 * why its memory should never be relocated.
	 */
	URCU_TLS(lttng_ust_urcu_reader) = rcu_reader_reg;
}

/* Called with mutex locked */
static
void cleanup_thread(struct registry_chunk *chunk,
		struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	rcu_reader_reg->ctr = 0;
	cds_list_del(&rcu_reader_reg->node);
	rcu_reader_reg->tid = 0;
	rcu_reader_reg->alloc = 0;
	chunk->used -= sizeof(struct lttng_ust_urcu_reader);
}

static
struct registry_chunk *find_chunk(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	struct registry_chunk *chunk;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		if (rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[0])
			continue;
		if (rcu_reader_reg >= (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len])
			continue;
		return chunk;
	}
	return NULL;
}

/* Called with signals off and mutex locked */
static
void remove_thread(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	cleanup_thread(find_chunk(rcu_reader_reg), rcu_reader_reg);
	URCU_TLS(lttng_ust_urcu_reader) = NULL;
}

/* Disable signals, take mutex, add to registry */
void lttng_ust_urcu_register(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	/*
	 * Check if a signal concurrently registered our thread since
	 * the check in rcu_read_lock().
	 */
	if (URCU_TLS(lttng_ust_urcu_reader))
		goto end;

	/*
	 * Take care of early registration before lttng_ust_urcu constructor.
	 */
	_lttng_ust_urcu_init();

	mutex_lock(&rcu_registry_lock);
	add_thread();
	mutex_unlock(&rcu_registry_lock);
end:
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
}

void lttng_ust_urcu_register_thread(void)
{
	if (caa_unlikely(!URCU_TLS(lttng_ust_urcu_reader)))
		lttng_ust_urcu_register(); /* If not yet registered. */
}
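
/*
 * Note: this lazy registration is the "bulletproof" part of the design.
 * The read-lock fast path checks the TLS reader pointer and falls back
 * on lttng_ust_urcu_register() on first use, so threads never need an
 * explicit registration step before entering a read-side critical
 * section.
 */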

/* Disable signals, take mutex, remove from registry */
static
void lttng_ust_urcu_unregister(struct lttng_ust_urcu_reader *rcu_reader_reg)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	if (ret)
		abort();
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	if (ret)
		abort();

	mutex_lock(&rcu_registry_lock);
	remove_thread(rcu_reader_reg);
	mutex_unlock(&rcu_registry_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	if (ret)
		abort();
	lttng_ust_urcu_exit();
}

/*
 * Remove thread from the registry when it exits, and flag it as
 * destroyed so garbage collection can take care of it.
 */
static
void lttng_ust_urcu_thread_exit_notifier(void *rcu_key)
{
	lttng_ust_urcu_unregister(rcu_key);
}

#ifdef CONFIG_RCU_FORCE_SYS_MEMBARRIER
static
void lttng_ust_urcu_sys_membarrier_status(bool available)
{
	if (!available)
		abort();
}
#else
static
void lttng_ust_urcu_sys_membarrier_status(bool available)
{
	if (!available)
		return;
	lttng_ust_urcu_has_sys_membarrier = 1;
}
#endif

static
void lttng_ust_urcu_sys_membarrier_init(void)
{
	bool available = false;
	int mask;

	mask = membarrier(MEMBARRIER_CMD_QUERY, 0);
	if (mask >= 0) {
		if (mask & MEMBARRIER_CMD_PRIVATE_EXPEDITED) {
			if (membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0))
				abort();
			available = true;
		}
	}
	lttng_ust_urcu_sys_membarrier_status(available);
}
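
/*
 * Note: the kernel requires a process to issue
 * MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED once before it may use
 * MEMBARRIER_CMD_PRIVATE_EXPEDITED, which is why registration happens
 * here at init time rather than in smp_mb_master().
 */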

static
void _lttng_ust_urcu_init(void)
{
	mutex_lock(&init_lock);
	if (!lttng_ust_urcu_refcount++) {
		int ret;

		ret = pthread_key_create(&lttng_ust_urcu_key,
			lttng_ust_urcu_thread_exit_notifier);
		if (ret)
			abort();
		lttng_ust_urcu_sys_membarrier_init();
		initialized = 1;
	}
	mutex_unlock(&init_lock);
}

static
void lttng_ust_urcu_exit(void)
{
	mutex_lock(&init_lock);
	if (!--lttng_ust_urcu_refcount) {
		struct registry_chunk *chunk, *tmp;
		int ret;

		cds_list_for_each_entry_safe(chunk, tmp,
				&registry_arena.chunk_list, node) {
			munmap((void *) chunk, chunk->data_len
				+ sizeof(struct registry_chunk));
		}
		CDS_INIT_LIST_HEAD(&registry_arena.chunk_list);
		ret = pthread_key_delete(lttng_ust_urcu_key);
		if (ret)
			abort();
	}
	mutex_unlock(&init_lock);
}

/*
 * Holding the rcu_gp_lock and rcu_registry_lock across fork will make
 * sure the fork() caller does not race with a concurrent thread
 * executing with any of those locks held. This ensures that the
 * registry and data protected by rcu_gp_lock are in a coherent state in
 * the child.
 */
void lttng_ust_urcu_before_fork(void)
{
	sigset_t newmask, oldmask;
	int ret;

	ret = sigfillset(&newmask);
	assert(!ret);
	ret = pthread_sigmask(SIG_BLOCK, &newmask, &oldmask);
	assert(!ret);
	mutex_lock(&rcu_gp_lock);
	mutex_lock(&rcu_registry_lock);
	saved_fork_signal_mask = oldmask;
}

void lttng_ust_urcu_after_fork_parent(void)
{
	sigset_t oldmask;
	int ret;

	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}

/*
 * Prune all entries from registry except our own thread. Fits the Linux
 * fork behavior. Called with rcu_gp_lock and rcu_registry_lock held.
 */
static
void lttng_ust_urcu_prune_registry(void)
{
	struct registry_chunk *chunk;
	struct lttng_ust_urcu_reader *rcu_reader_reg;

	cds_list_for_each_entry(chunk, &registry_arena.chunk_list, node) {
		for (rcu_reader_reg = (struct lttng_ust_urcu_reader *) &chunk->data[0];
				rcu_reader_reg < (struct lttng_ust_urcu_reader *) &chunk->data[chunk->data_len];
				rcu_reader_reg++) {
			if (!rcu_reader_reg->alloc)
				continue;
			if (rcu_reader_reg->tid == pthread_self())
				continue;
			cleanup_thread(chunk, rcu_reader_reg);
		}
	}
}

void lttng_ust_urcu_after_fork_child(void)
{
	sigset_t oldmask;
	int ret;

	lttng_ust_urcu_prune_registry();
	oldmask = saved_fork_signal_mask;
	mutex_unlock(&rcu_registry_lock);
	mutex_unlock(&rcu_gp_lock);
	ret = pthread_sigmask(SIG_SETMASK, &oldmask, NULL);
	assert(!ret);
}
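
/*
 * Example: an application (or a library constructor) would typically
 * wire the three fork hooks above through pthread_atfork(); sketch
 * only, error handling policy is up to the caller:
 *
 *	ret = pthread_atfork(lttng_ust_urcu_before_fork,
 *		lttng_ust_urcu_after_fork_parent,
 *		lttng_ust_urcu_after_fork_child);
 *	if (ret)
 *		abort();
 */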