/*
 * SPDX-License-Identifier: LGPL-2.1-only
 *
 * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Ring buffer wait-free buffer synchronization. Producer-consumer and flight
 * recorder (overwrite) modes. See thesis:
 *
 * Desnoyers, Mathieu (2009), "Low-Impact Operating System Tracing", Ph.D.
 * dissertation, Ecole Polytechnique de Montreal.
 * http://www.lttng.org/pub/thesis/desnoyers-dissertation-2009-12.pdf
 *
 * - Algorithm presentation in Chapter 5:
 *   "Lockless Multi-Core High-Throughput Buffering".
 * - Algorithm formal verification in Section 8.6:
 *   "Formal verification of LTTng"
 *
 * Author:
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Inspired from LTT and RelayFS:
 *  Karim Yaghmour <karim@opersys.com>
 *  Tom Zanussi <zanussi@us.ibm.com>
 *  Bob Wisniewski <bob@watson.ibm.com>
 * And from K42:
 *  Bob Wisniewski <bob@watson.ibm.com>
 *
 * Buffer reader semantics:
 *
 * - get_subbuf_size
 * - while buffer is not finalized and empty
 *   - get_subbuf
 *     - if return value != 0, continue
 *   - splice one subbuffer worth of data to a pipe
 *   - splice the data from pipe to disk/network
 *   - put_subbuf
 */
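
/*
 * Illustrative sketch (not part of the original file): the reader protocol
 * described above, expressed with the consumer-side API defined later in
 * this file. The driver function itself is hypothetical, the splice step is
 * elided, and a real reader would wait on the wakeup pipe instead of
 * spinning on -EAGAIN.
 */
#if 0
static int rb_consume_example(struct lttng_ust_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);
	unsigned long consumed, produced;
	int ret;

	for (;;) {
		/* Snapshot the producer and consumer positions. */
		ret = lib_ring_buffer_snapshot(buf, &consumed, &produced, handle);
		if (ret == -ENODATA)
			return 0;	/* Finalized and empty: done. */
		if (ret == -EAGAIN)
			continue;	/* Empty: a real reader waits here. */
		/* Get exclusive read access to one sub-buffer. */
		if (lib_ring_buffer_get_subbuf(buf, consumed, handle))
			continue;
		/* ... splice one sub-buffer worth of data to pipe/disk ... */
		lib_ring_buffer_put_subbuf(buf, handle);
		/* Push the consumed counter past the sub-buffer just read. */
		lib_ring_buffer_move_consumer(buf, subbuf_align(consumed, chan),
				handle);
	}
}
#endif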

#define _LGPL_SOURCE
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <signal.h>
#include <time.h>
#include <stdbool.h>
#include <stdint.h>
#include <urcu/compiler.h>
#include <urcu/ref.h>
#include <urcu/tls-compat.h>
#include <poll.h>
#include "common/macros.h"

#include <lttng/ust-utils.h>
#include <lttng/ust-ringbuffer-context.h>

#include "common/smp.h"
#include "ringbuffer-config.h"
#include "vatomic.h"
#include "backend.h"
#include "frontend.h"
#include "shm.h"
#include "rb-init.h"
#include "common/compat/errno.h"	/* For ENODATA */

/* Print DBG() messages about events lost only every 1048576 hits */
#define DBG_PRINT_NR_LOST	(1UL << 20)
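
/*
 * Illustrative sketch (not part of the original file): how a power-of-two
 * threshold like DBG_PRINT_NR_LOST can rate-limit diagnostics, so only one
 * DBG() line is printed per 1048576 lost-event increments. The helper and
 * counter below are hypothetical, not this file's actual usage.
 */
#if 0
static void dbg_note_lost_event(unsigned long *nr_lost)
{
	/* (1UL << 20) is a power of two, so the modulo reduces to a mask. */
	if (!(++(*nr_lost) & (DBG_PRINT_NR_LOST - 1)))
		DBG("%lu events lost so far\n", *nr_lost);
}
#endif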

#define LTTNG_UST_RB_SIG_FLUSH		SIGRTMIN
#define LTTNG_UST_RB_SIG_READ		SIGRTMIN + 1
#define LTTNG_UST_RB_SIG_TEARDOWN	SIGRTMIN + 2
#define CLOCKID				CLOCK_MONOTONIC
#define LTTNG_UST_RING_BUFFER_GET_RETRY		10
#define LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS	10
#define RETRY_DELAY_MS			100	/* 100 ms. */

/*
 * Non-static to ensure the compiler does not optimize away the xor.
 */
uint8_t lttng_crash_magic_xor[]
	__attribute__((visibility("hidden")));
uint8_t lttng_crash_magic_xor[] = RB_CRASH_DUMP_ABI_MAGIC_XOR;

/*
 * Use POSIX SHM: shm_open(3) and shm_unlink(3).
 * close(2) to close the fd returned by shm_open.
 * shm_unlink releases the shared memory object name.
 * ftruncate(2) sets the size of the memory object.
 * mmap/munmap maps the shared memory object to a virtual address in the
 * calling process (should be done both in libust and consumer).
 * See shm_overview(7) for details.
 * Pass file descriptor returned by shm_open(3) to ltt-sessiond through
 * a UNIX socket.
 *
 * Since we don't need to access the object using its name, we can
 * immediately shm_unlink(3) it, and only keep the handle with its file
 * descriptor.
 */
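
/*
 * Illustrative sketch (not part of the original file): the shm_open(3)
 * sequence described above, in isolation. The object name and error
 * handling are hypothetical; the real code allocates through the
 * shm_object table.
 */
#if 0
static void *shm_example_map(size_t len)
{
	void *mem = NULL;
	int shm_fd;

	shm_fd = shm_open("/lttng-ust-example", O_CREAT | O_EXCL | O_RDWR, 0600);
	if (shm_fd < 0)
		return NULL;
	/* Release the name right away: the fd keeps the object alive. */
	shm_unlink("/lttng-ust-example");
	/* Set the size of the memory object. */
	if (!ftruncate(shm_fd, len))
		/* Map it (done in both libust and the consumer). */
		mem = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
				shm_fd, 0);
	/* The fd would then be passed to the consumer over a UNIX socket. */
	close(shm_fd);
	return mem;
}
#endif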

/*
 * Internal structure representing offsets to use at a sub-buffer switch.
 */
struct switch_offsets {
	unsigned long begin, end, old;
	size_t pre_header_padding, size;
	unsigned int switch_new_start:1, switch_new_end:1, switch_old_start:1,
		     switch_old_end:1;
};

DEFINE_URCU_TLS(unsigned int, lib_ring_buffer_nesting);

/*
 * wakeup_fd_mutex protects wakeup fd use by timer from concurrent
 * close.
 */
static pthread_mutex_t wakeup_fd_mutex = PTHREAD_MUTEX_INITIALIZER;

static
void lib_ring_buffer_print_errors(struct lttng_ust_ring_buffer_channel *chan,
				struct lttng_ust_ring_buffer *buf, int cpu,
				struct lttng_ust_shm_handle *handle);

/*
 * Handle the timer teardown race against memory free of private data by
 * having all ring buffer signals handled by a single thread, which provides
 * a synchronization point between the handling of each signal.
 * Protected by the lock within the structure.
 */
struct timer_signal_data {
	pthread_t tid;	/* thread id managing signals */
	int setup_done;
	int qs_done;
	pthread_mutex_t lock;
};

static struct timer_signal_data timer_signal = {
	.tid = 0,
	.setup_done = 0,
	.qs_done = 0,
	.lock = PTHREAD_MUTEX_INITIALIZER,
};

static bool lttng_ust_allow_blocking;

void lttng_ust_ringbuffer_set_allow_blocking(void)
{
	lttng_ust_allow_blocking = true;
}

/* Get blocking timeout, in ms */
static int lttng_ust_ringbuffer_get_timeout(struct lttng_ust_ring_buffer_channel *chan)
{
	if (!lttng_ust_allow_blocking)
		return 0;
	return chan->u.s.blocking_timeout_ms;
}
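
/*
 * Illustrative sketch (not part of the original file): the value returned
 * above follows poll(2) timeout conventions (0: do not block, -1: block
 * forever, > 0: block up to that many milliseconds), so it can be fed
 * directly to a poll(2)-style wait. The helper and wait_fd parameter below
 * are hypothetical.
 */
#if 0
static int blocking_wait_example(struct lttng_ust_ring_buffer_channel *chan,
		int wait_fd)
{
	struct pollfd fds = { .fd = wait_fd, .events = POLLIN };

	/* 0: return immediately; -1: wait forever; > 0: wait up to N ms. */
	return poll(&fds, 1, lttng_ust_ringbuffer_get_timeout(chan));
}
#endif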

/**
 * lib_ring_buffer_reset - Reset ring buffer to initial values.
 * @buf: Ring buffer.
 *
 * Effectively empty the ring buffer. Should be called when the buffer is not
 * used for writing. The ring buffer can be opened for reading, but the reader
 * should not be using the iterator concurrently with reset. The previous
 * current iterator record is reset.
 */
void lib_ring_buffer_reset(struct lttng_ust_ring_buffer *buf,
			   struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_ring_buffer_channel *chan;
	const struct lttng_ust_ring_buffer_config *config;
	unsigned int i;

	chan = shmp(handle, buf->backend.chan);
	if (!chan)
		return;
	config = &chan->backend.config;
	/*
	 * Reset iterator first. It will put the subbuffer if it currently holds
	 * it.
	 */
	v_set(config, &buf->offset, 0);
	for (i = 0; i < chan->backend.num_subbuf; i++) {
		struct commit_counters_hot *cc_hot;
		struct commit_counters_cold *cc_cold;
		uint64_t *ts_end;

		cc_hot = shmp_index(handle, buf->commit_hot, i);
		if (!cc_hot)
			return;
		cc_cold = shmp_index(handle, buf->commit_cold, i);
		if (!cc_cold)
			return;
		ts_end = shmp_index(handle, buf->ts_end, i);
		if (!ts_end)
			return;
		v_set(config, &cc_hot->cc, 0);
		v_set(config, &cc_hot->seq, 0);
		v_set(config, &cc_cold->cc_sb, 0);
		*ts_end = 0;
	}
	uatomic_set(&buf->consumed, 0);
	uatomic_set(&buf->record_disabled, 0);
	v_set(config, &buf->last_tsc, 0);
	lib_ring_buffer_backend_reset(&buf->backend, handle);
	/* Don't reset number of active readers */
	v_set(config, &buf->records_lost_full, 0);
	v_set(config, &buf->records_lost_wrap, 0);
	v_set(config, &buf->records_lost_big, 0);
	v_set(config, &buf->records_count, 0);
	v_set(config, &buf->records_overrun, 0);
	buf->finalized = 0;
}

/**
 * channel_reset - Reset channel to initial values.
 * @chan: Channel.
 *
 * Effectively empty the channel. Should be called when the channel is not used
 * for writing. The channel can be opened for reading, but the reader should not
 * be using the iterator concurrently with reset. The previous current iterator
 * record is reset.
 */
void channel_reset(struct lttng_ust_ring_buffer_channel *chan)
{
	/*
	 * Reset iterators first. Will put the subbuffer if held for reading.
	 */
	uatomic_set(&chan->record_disabled, 0);
	/* Don't reset commit_count_mask, still valid */
	channel_backend_reset(&chan->backend);
	/* Don't reset switch/read timer interval */
	/* Don't reset notifiers and notifier enable bits */
	/* Don't reset reader reference count */
}

static
void init_crash_abi(const struct lttng_ust_ring_buffer_config *config,
		struct lttng_crash_abi *crash_abi,
		struct lttng_ust_ring_buffer *buf,
		struct channel_backend *chanb,
		struct shm_object *shmobj,
		struct lttng_ust_shm_handle *handle)
{
	int i;

	for (i = 0; i < RB_CRASH_DUMP_ABI_MAGIC_LEN; i++)
		crash_abi->magic[i] = lttng_crash_magic_xor[i] ^ 0xFF;
	crash_abi->mmap_length = shmobj->memory_map_size;
	crash_abi->endian = RB_CRASH_ENDIAN;
	crash_abi->major = RB_CRASH_DUMP_ABI_MAJOR;
	crash_abi->minor = RB_CRASH_DUMP_ABI_MINOR;
	crash_abi->word_size = sizeof(unsigned long);
	crash_abi->layout_type = LTTNG_CRASH_TYPE_UST;

	/* Offset of fields */
	crash_abi->offset.prod_offset =
		(uint32_t) ((char *) &buf->offset - (char *) buf);
	crash_abi->offset.consumed_offset =
		(uint32_t) ((char *) &buf->consumed - (char *) buf);
	crash_abi->offset.commit_hot_array =
		(uint32_t) ((char *) shmp(handle, buf->commit_hot) - (char *) buf);
	crash_abi->offset.commit_hot_seq =
		offsetof(struct commit_counters_hot, seq);
	crash_abi->offset.buf_wsb_array =
		(uint32_t) ((char *) shmp(handle, buf->backend.buf_wsb) - (char *) buf);
	crash_abi->offset.buf_wsb_id =
		offsetof(struct lttng_ust_ring_buffer_backend_subbuffer, id);
	crash_abi->offset.sb_array =
		(uint32_t) ((char *) shmp(handle, buf->backend.array) - (char *) buf);
	crash_abi->offset.sb_array_shmp_offset =
		offsetof(struct lttng_ust_ring_buffer_backend_pages_shmp,
			shmp._ref.offset);
	crash_abi->offset.sb_backend_p_offset =
		offsetof(struct lttng_ust_ring_buffer_backend_pages,
			p._ref.offset);

	/* Field length */
	crash_abi->length.prod_offset = sizeof(buf->offset);
	crash_abi->length.consumed_offset = sizeof(buf->consumed);
	crash_abi->length.commit_hot_seq =
		sizeof(((struct commit_counters_hot *) NULL)->seq);
	crash_abi->length.buf_wsb_id =
		sizeof(((struct lttng_ust_ring_buffer_backend_subbuffer *) NULL)->id);
	crash_abi->length.sb_array_shmp_offset =
		sizeof(((struct lttng_ust_ring_buffer_backend_pages_shmp *) NULL)->shmp._ref.offset);
	crash_abi->length.sb_backend_p_offset =
		sizeof(((struct lttng_ust_ring_buffer_backend_pages *) NULL)->p._ref.offset);

	/* Array stride */
	crash_abi->stride.commit_hot_array =
		sizeof(struct commit_counters_hot);
	crash_abi->stride.buf_wsb_array =
		sizeof(struct lttng_ust_ring_buffer_backend_subbuffer);
	crash_abi->stride.sb_array =
		sizeof(struct lttng_ust_ring_buffer_backend_pages_shmp);

	/* Buffer constants */
	crash_abi->buf_size = chanb->buf_size;
	crash_abi->subbuf_size = chanb->subbuf_size;
	crash_abi->num_subbuf = chanb->num_subbuf;
	crash_abi->mode = (uint32_t) chanb->config.mode;

	if (config->cb.content_size_field) {
		size_t offset, length;

		config->cb.content_size_field(config, &offset, &length);
		crash_abi->offset.content_size = offset;
		crash_abi->length.content_size = length;
	} else {
		crash_abi->offset.content_size = 0;
		crash_abi->length.content_size = 0;
	}
	if (config->cb.packet_size_field) {
		size_t offset, length;

		config->cb.packet_size_field(config, &offset, &length);
		crash_abi->offset.packet_size = offset;
		crash_abi->length.packet_size = length;
	} else {
		crash_abi->offset.packet_size = 0;
		crash_abi->length.packet_size = 0;
	}
}
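
/*
 * Illustrative sketch (not part of the original file): the magic bytes are
 * stored XOR'ed with 0xFF above, so a post-mortem reader can validate a
 * mapped buffer by undoing the XOR. The helper below is hypothetical, not
 * the actual crash-reader implementation.
 */
#if 0
static bool rb_crash_magic_matches(const struct lttng_crash_abi *abi)
{
	int i;

	for (i = 0; i < RB_CRASH_DUMP_ABI_MAGIC_LEN; i++) {
		/* Undo the ^ 0xFF applied by init_crash_abi(). */
		if ((uint8_t) (abi->magic[i] ^ 0xFF) != lttng_crash_magic_xor[i])
			return false;
	}
	return true;
}
#endif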

/*
 * Must be called under cpu hotplug protection.
 */
int lib_ring_buffer_create(struct lttng_ust_ring_buffer *buf,
			   struct channel_backend *chanb, int cpu,
			   struct lttng_ust_shm_handle *handle,
			   struct shm_object *shmobj)
{
	const struct lttng_ust_ring_buffer_config *config = &chanb->config;
	struct lttng_ust_ring_buffer_channel *chan = caa_container_of(chanb,
			struct lttng_ust_ring_buffer_channel, backend);
	struct lttng_ust_ring_buffer_backend_subbuffer *wsb;
	struct lttng_ust_ring_buffer_channel *shmp_chan;
	struct commit_counters_hot *cc_hot;
	void *priv = channel_get_private_config(chan);
	size_t subbuf_header_size;
	uint64_t tsc;
	int ret;

	/* Test for cpu hotplug */
	if (buf->backend.allocated)
		return 0;

	align_shm(shmobj, __alignof__(struct commit_counters_hot));
	set_shmp(buf->commit_hot,
		 zalloc_shm(shmobj,
			sizeof(struct commit_counters_hot) * chan->backend.num_subbuf));
	if (!shmp(handle, buf->commit_hot)) {
		return -ENOMEM;
	}

	align_shm(shmobj, __alignof__(struct commit_counters_cold));
	set_shmp(buf->commit_cold,
		 zalloc_shm(shmobj,
			sizeof(struct commit_counters_cold) * chan->backend.num_subbuf));
	if (!shmp(handle, buf->commit_cold)) {
		ret = -ENOMEM;
		goto free_commit;
	}

	align_shm(shmobj, __alignof__(uint64_t));
	set_shmp(buf->ts_end,
		 zalloc_shm(shmobj,
			sizeof(uint64_t) * chan->backend.num_subbuf));
	if (!shmp(handle, buf->ts_end)) {
		ret = -ENOMEM;
		goto free_commit_cold;
	}

	ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend,
			cpu, handle, shmobj);
	if (ret) {
		goto free_init;
	}

	/*
	 * Write the subbuffer header for first subbuffer so we know the total
	 * duration of data gathering.
	 */
	subbuf_header_size = config->cb.subbuffer_header_size();
	v_set(config, &buf->offset, subbuf_header_size);
	wsb = shmp_index(handle, buf->backend.buf_wsb, 0);
	if (!wsb) {
		ret = -EPERM;
		goto free_chanbuf;
	}
	subbuffer_id_clear_noref(config, &wsb->id);
	shmp_chan = shmp(handle, buf->backend.chan);
	if (!shmp_chan) {
		ret = -EPERM;
		goto free_chanbuf;
	}
	tsc = config->cb.ring_buffer_clock_read(shmp_chan);
	config->cb.buffer_begin(buf, tsc, 0, handle);
	cc_hot = shmp_index(handle, buf->commit_hot, 0);
	if (!cc_hot) {
		ret = -EPERM;
		goto free_chanbuf;
	}
	v_add(config, subbuf_header_size, &cc_hot->cc);
	v_add(config, subbuf_header_size, &cc_hot->seq);

	if (config->cb.buffer_create) {
		ret = config->cb.buffer_create(buf, priv, cpu, chanb->name, handle);
		if (ret)
			goto free_chanbuf;
	}

	init_crash_abi(config, &buf->crash_abi, buf, chanb, shmobj, handle);

	buf->backend.allocated = 1;
	return 0;

	/* Error handling */
free_init:
	/* ts_end will be freed by shm teardown */
free_commit_cold:
	/* commit_cold will be freed by shm teardown */
free_commit:
	/* commit_hot will be freed by shm teardown */
free_chanbuf:
	return ret;
}

static
void lib_ring_buffer_channel_switch_timer(int sig __attribute__((unused)),
		siginfo_t *si, void *uc __attribute__((unused)))
{
	const struct lttng_ust_ring_buffer_config *config;
	struct lttng_ust_shm_handle *handle;
	struct lttng_ust_ring_buffer_channel *chan;
	int cpu;

	assert(CMM_LOAD_SHARED(timer_signal.tid) == pthread_self());

	chan = si->si_value.sival_ptr;
	handle = chan->handle;
	config = &chan->backend.config;

	DBG("Switch timer for channel %p\n", chan);

	/*
	 * Only flush buffers periodically if readers are active.
	 */
	pthread_mutex_lock(&wakeup_fd_mutex);
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(cpu) {
			struct lttng_ust_ring_buffer *buf =
				shmp(handle, chan->backend.buf[cpu].shmp);

			if (!buf)
				goto end;
			if (uatomic_read(&buf->active_readers))
				lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE,
					chan->handle);
		}
	} else {
		struct lttng_ust_ring_buffer *buf =
			shmp(handle, chan->backend.buf[0].shmp);

		if (!buf)
			goto end;
		if (uatomic_read(&buf->active_readers))
			lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE,
				chan->handle);
	}
end:
	pthread_mutex_unlock(&wakeup_fd_mutex);
	return;
}

static
int lib_ring_buffer_poll_deliver(const struct lttng_ust_ring_buffer_config *config,
		struct lttng_ust_ring_buffer *buf,
		struct lttng_ust_ring_buffer_channel *chan,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long consumed_old, consumed_idx, commit_count, write_offset;
	struct commit_counters_cold *cc_cold;

	consumed_old = uatomic_read(&buf->consumed);
	consumed_idx = subbuf_index(consumed_old, chan);
	cc_cold = shmp_index(handle, buf->commit_cold, consumed_idx);
	if (!cc_cold)
		return 0;
	commit_count = v_read(config, &cc_cold->cc_sb);
	/*
	 * No memory barrier here, since we are only interested
	 * in a statistically correct polling result. The next poll will
	 * get the data if we are racing. The mb() that ensures correct
	 * memory order is in get_subbuf.
	 */
	write_offset = v_read(config, &buf->offset);

	/*
	 * Check that the subbuffer we are trying to consume has been
	 * already fully committed.
	 */

	if (((commit_count - chan->backend.subbuf_size)
	     & chan->commit_count_mask)
	    - (buf_trunc(consumed_old, chan)
	       >> chan->backend.num_subbuf_order)
	    != 0)
		return 0;

	/*
	 * Check that we are not about to read the same subbuffer in
	 * which the writer head is.
	 */
	if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_old, chan)
	    == 0)
		return 0;

	return 1;
}

static
void lib_ring_buffer_wakeup(struct lttng_ust_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle)
{
	int wakeup_fd = shm_get_wakeup_fd(handle, &buf->self._ref);
	sigset_t sigpipe_set, pending_set, old_set;
	int ret, sigpipe_was_pending = 0;

	if (wakeup_fd < 0)
		return;

	/*
	 * Wake-up the other end by writing a null byte in the pipe
	 * (non-blocking). Important note: Because writing into the
	 * pipe is non-blocking (and therefore we allow dropping wakeup
	 * data, as long as there is wakeup data present in the pipe
	 * buffer to wake up the consumer), the consumer should perform
	 * the following sequence for waiting:
	 * 1) empty the pipe (reads).
	 * 2) check if there is data in the buffer.
	 * 3) wait on the pipe (poll).
	 *
	 * Discard the SIGPIPE from write(), not disturbing any SIGPIPE
	 * that might be already pending. If a bogus SIGPIPE is sent to
	 * the entire process concurrently by a malicious user, it may
	 * be simply discarded.
	 */
	ret = sigemptyset(&pending_set);
	assert(!ret);
	/*
	 * sigpending returns the mask of signals that are _both_
	 * blocked for the thread _and_ pending for either the thread or
	 * the entire process.
	 */
	ret = sigpending(&pending_set);
	assert(!ret);
	sigpipe_was_pending = sigismember(&pending_set, SIGPIPE);
	/*
	 * If sigpipe was pending, it means it was already blocked, so
	 * no need to block it.
	 */
	if (!sigpipe_was_pending) {
		ret = sigemptyset(&sigpipe_set);
		assert(!ret);
		ret = sigaddset(&sigpipe_set, SIGPIPE);
		assert(!ret);
		ret = pthread_sigmask(SIG_BLOCK, &sigpipe_set, &old_set);
		assert(!ret);
	}
	do {
		ret = write(wakeup_fd, "", 1);
	} while (ret == -1L && errno == EINTR);
	if (ret == -1L && errno == EPIPE && !sigpipe_was_pending) {
		struct timespec timeout = { 0, 0 };
		do {
			ret = sigtimedwait(&sigpipe_set, NULL,
				&timeout);
		} while (ret == -1L && errno == EINTR);
	}
	if (!sigpipe_was_pending) {
		ret = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
		assert(!ret);
	}
}
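
/*
 * Illustrative sketch (not part of the original file): the consumer-side
 * wait sequence required by the non-blocking wakeup write above. The helper
 * and its buffer_has_data callback are hypothetical stand-ins for a real
 * emptiness check such as lib_ring_buffer_poll_deliver(); the wakeup fd is
 * assumed non-blocking.
 */
#if 0
static void consumer_wait_example(int wakeup_fd, bool (*buffer_has_data)(void))
{
	struct pollfd fds = { .fd = wakeup_fd, .events = POLLIN };
	char drain[64];

	for (;;) {
		/* 1) Empty the pipe: dropped wakeup bytes are fine. */
		while (read(wakeup_fd, drain, sizeof(drain)) > 0)
			;
		/*
		 * 2) Check for data before sleeping: a wakeup byte written
		 *    after this check stays in the pipe, so the poll below
		 *    cannot miss it.
		 */
		if (buffer_has_data())
			return;
		/* 3) Wait on the pipe. */
		(void) poll(&fds, 1, -1);
	}
}
#endif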

static
void lib_ring_buffer_channel_do_read(struct lttng_ust_ring_buffer_channel *chan)
{
	const struct lttng_ust_ring_buffer_config *config;
	struct lttng_ust_shm_handle *handle;
	int cpu;

	handle = chan->handle;
	config = &chan->backend.config;

	/*
	 * Only flush buffers periodically if readers are active.
	 */
	pthread_mutex_lock(&wakeup_fd_mutex);
	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(cpu) {
			struct lttng_ust_ring_buffer *buf =
				shmp(handle, chan->backend.buf[cpu].shmp);

			if (!buf)
				goto end;
			if (uatomic_read(&buf->active_readers)
			    && lib_ring_buffer_poll_deliver(config, buf,
					chan, handle)) {
				lib_ring_buffer_wakeup(buf, handle);
			}
		}
	} else {
		struct lttng_ust_ring_buffer *buf =
			shmp(handle, chan->backend.buf[0].shmp);

		if (!buf)
			goto end;
		if (uatomic_read(&buf->active_readers)
		    && lib_ring_buffer_poll_deliver(config, buf,
				chan, handle)) {
			lib_ring_buffer_wakeup(buf, handle);
		}
	}
end:
	pthread_mutex_unlock(&wakeup_fd_mutex);
}

static
void lib_ring_buffer_channel_read_timer(int sig __attribute__((unused)),
		siginfo_t *si, void *uc __attribute__((unused)))
{
	struct lttng_ust_ring_buffer_channel *chan;

	assert(CMM_LOAD_SHARED(timer_signal.tid) == pthread_self());
	chan = si->si_value.sival_ptr;
	DBG("Read timer for channel %p\n", chan);
	lib_ring_buffer_channel_do_read(chan);
	return;
}

static
void rb_setmask(sigset_t *mask)
{
	int ret;

	ret = sigemptyset(mask);
	if (ret) {
		PERROR("sigemptyset");
	}
	ret = sigaddset(mask, LTTNG_UST_RB_SIG_FLUSH);
	if (ret) {
		PERROR("sigaddset");
	}
	ret = sigaddset(mask, LTTNG_UST_RB_SIG_READ);
	if (ret) {
		PERROR("sigaddset");
	}
	ret = sigaddset(mask, LTTNG_UST_RB_SIG_TEARDOWN);
	if (ret) {
		PERROR("sigaddset");
	}
}

static
void *sig_thread(void *arg __attribute__((unused)))
{
	sigset_t mask;
	siginfo_t info;
	int signr;

	/* Only self thread will receive signal mask. */
	rb_setmask(&mask);
	CMM_STORE_SHARED(timer_signal.tid, pthread_self());

	for (;;) {
		signr = sigwaitinfo(&mask, &info);
		if (signr == -1) {
			if (errno != EINTR)
				PERROR("sigwaitinfo");
			continue;
		}
		if (signr == LTTNG_UST_RB_SIG_FLUSH) {
			lib_ring_buffer_channel_switch_timer(info.si_signo,
					&info, NULL);
		} else if (signr == LTTNG_UST_RB_SIG_READ) {
			lib_ring_buffer_channel_read_timer(info.si_signo,
					&info, NULL);
		} else if (signr == LTTNG_UST_RB_SIG_TEARDOWN) {
			cmm_smp_mb();
			CMM_STORE_SHARED(timer_signal.qs_done, 1);
			cmm_smp_mb();
		} else {
			ERR("Unexpected signal %d\n", info.si_signo);
		}
	}
	return NULL;
}

/*
 * Ensure only a single thread listens on the timer signal.
 */
static
void lib_ring_buffer_setup_timer_thread(void)
{
	pthread_t thread;
	int ret;

	pthread_mutex_lock(&timer_signal.lock);
	if (timer_signal.setup_done)
		goto end;

	ret = pthread_create(&thread, NULL, &sig_thread, NULL);
	if (ret) {
		errno = ret;
		PERROR("pthread_create");
	}
	ret = pthread_detach(thread);
	if (ret) {
		errno = ret;
		PERROR("pthread_detach");
	}
	timer_signal.setup_done = 1;
end:
	pthread_mutex_unlock(&timer_signal.lock);
}

/*
 * Wait for signal-handling thread quiescent state.
 */
static
void lib_ring_buffer_wait_signal_thread_qs(unsigned int signr)
{
	sigset_t pending_set;
	int ret;

	/*
	 * We need to be the only thread interacting with the thread
	 * that manages signals for teardown synchronization.
	 */
	pthread_mutex_lock(&timer_signal.lock);

	/*
	 * Ensure we don't have any signal queued for this channel.
	 */
	for (;;) {
		ret = sigemptyset(&pending_set);
		if (ret == -1) {
			PERROR("sigemptyset");
		}
		ret = sigpending(&pending_set);
		if (ret == -1) {
			PERROR("sigpending");
		}
		if (!sigismember(&pending_set, signr))
			break;
		caa_cpu_relax();
	}

	/*
	 * From this point, no new signal handler will be fired that
	 * would try to access "chan". However, we still need to wait
	 * for any currently executing handler to complete.
	 */
	cmm_smp_mb();
	CMM_STORE_SHARED(timer_signal.qs_done, 0);
	cmm_smp_mb();

	/*
	 * Kill with LTTNG_UST_RB_SIG_TEARDOWN, so the signal management
	 * thread wakes up.
	 */
	kill(getpid(), LTTNG_UST_RB_SIG_TEARDOWN);

	while (!CMM_LOAD_SHARED(timer_signal.qs_done))
		caa_cpu_relax();
	cmm_smp_mb();

	pthread_mutex_unlock(&timer_signal.lock);
}

static
void lib_ring_buffer_channel_switch_timer_start(struct lttng_ust_ring_buffer_channel *chan)
{
	struct sigevent sev;
	struct itimerspec its;
	int ret;

	if (!chan->switch_timer_interval || chan->switch_timer_enabled)
		return;

	chan->switch_timer_enabled = 1;

	lib_ring_buffer_setup_timer_thread();

	memset(&sev, 0, sizeof(sev));
	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = LTTNG_UST_RB_SIG_FLUSH;
	sev.sigev_value.sival_ptr = chan;
	ret = timer_create(CLOCKID, &sev, &chan->switch_timer);
	if (ret == -1) {
		PERROR("timer_create");
	}

	its.it_value.tv_sec = chan->switch_timer_interval / 1000000;
	its.it_value.tv_nsec = (chan->switch_timer_interval % 1000000) * 1000;
	its.it_interval.tv_sec = its.it_value.tv_sec;
	its.it_interval.tv_nsec = its.it_value.tv_nsec;

	ret = timer_settime(chan->switch_timer, 0, &its, NULL);
	if (ret == -1) {
		PERROR("timer_settime");
	}
}

static
void lib_ring_buffer_channel_switch_timer_stop(struct lttng_ust_ring_buffer_channel *chan)
{
	int ret;

	if (!chan->switch_timer_interval || !chan->switch_timer_enabled)
		return;

	ret = timer_delete(chan->switch_timer);
	if (ret == -1) {
		PERROR("timer_delete");
	}

	lib_ring_buffer_wait_signal_thread_qs(LTTNG_UST_RB_SIG_FLUSH);

	chan->switch_timer = 0;
	chan->switch_timer_enabled = 0;
}

static
void lib_ring_buffer_channel_read_timer_start(struct lttng_ust_ring_buffer_channel *chan)
{
	const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
	struct sigevent sev;
	struct itimerspec its;
	int ret;

	if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
			|| !chan->read_timer_interval || chan->read_timer_enabled)
		return;

	chan->read_timer_enabled = 1;

	lib_ring_buffer_setup_timer_thread();

	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = LTTNG_UST_RB_SIG_READ;
	sev.sigev_value.sival_ptr = chan;
	ret = timer_create(CLOCKID, &sev, &chan->read_timer);
	if (ret == -1) {
		PERROR("timer_create");
	}

	its.it_value.tv_sec = chan->read_timer_interval / 1000000;
	its.it_value.tv_nsec = (chan->read_timer_interval % 1000000) * 1000;
	its.it_interval.tv_sec = its.it_value.tv_sec;
	its.it_interval.tv_nsec = its.it_value.tv_nsec;

	ret = timer_settime(chan->read_timer, 0, &its, NULL);
	if (ret == -1) {
		PERROR("timer_settime");
	}
}

static
void lib_ring_buffer_channel_read_timer_stop(struct lttng_ust_ring_buffer_channel *chan)
{
	const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
	int ret;

	if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
			|| !chan->read_timer_interval || !chan->read_timer_enabled)
		return;

	ret = timer_delete(chan->read_timer);
	if (ret == -1) {
		PERROR("timer_delete");
	}

	/*
	 * Do one more check to catch data that has been written in the last
	 * timer period.
	 */
	lib_ring_buffer_channel_do_read(chan);

	lib_ring_buffer_wait_signal_thread_qs(LTTNG_UST_RB_SIG_READ);

	chan->read_timer = 0;
	chan->read_timer_enabled = 0;
}

static void channel_unregister_notifiers(struct lttng_ust_ring_buffer_channel *chan,
			   struct lttng_ust_shm_handle *handle __attribute__((unused)))
{
	lib_ring_buffer_channel_switch_timer_stop(chan);
	lib_ring_buffer_channel_read_timer_stop(chan);
}

static void channel_print_errors(struct lttng_ust_ring_buffer_channel *chan,
		struct lttng_ust_shm_handle *handle)
{
	const struct lttng_ust_ring_buffer_config *config =
			&chan->backend.config;
	int cpu;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
		for_each_possible_cpu(cpu) {
			struct lttng_ust_ring_buffer *buf =
				shmp(handle, chan->backend.buf[cpu].shmp);
			if (buf)
				lib_ring_buffer_print_errors(chan, buf, cpu, handle);
		}
	} else {
		struct lttng_ust_ring_buffer *buf =
			shmp(handle, chan->backend.buf[0].shmp);

		if (buf)
			lib_ring_buffer_print_errors(chan, buf, -1, handle);
	}
}

static void channel_free(struct lttng_ust_ring_buffer_channel *chan,
		struct lttng_ust_shm_handle *handle,
		int consumer)
{
	channel_backend_free(&chan->backend, handle);
	/* chan is freed by shm teardown */
	shm_object_table_destroy(handle->table, consumer);
	free(handle);
}

/**
 * channel_create - Create channel.
 * @config: ring buffer instance configuration
 * @name: name of the channel
 * @priv_data_align: alignment, in bytes, of the private data area. (config)
 * @priv_data_size: length, in bytes, of the private data area. (config)
 * @priv_data_init: initialization data for private data. (config)
 * @priv: local private data (memory owned by caller)
 * @buf_addr: pointer to the beginning of the preallocated buffer contiguous
 *            address mapping. It is used only by RING_BUFFER_STATIC
 *            configuration. It can be set to NULL for other backends.
 * @subbuf_size: subbuffer size
 * @num_subbuf: number of subbuffers
 * @switch_timer_interval: Time interval (in us) to fill sub-buffers with
 *                         padding to let readers get those sub-buffers.
 *                         Used for live streaming.
 * @read_timer_interval: Time interval (in us) to wake up pending readers.
 * @stream_fds: array of stream file descriptors.
 * @nr_stream_fds: number of file descriptors in array.
 *
 * Holds cpu hotplug.
 * Returns NULL on failure.
 */
struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_ring_buffer_config *config,
		const char *name,
		size_t priv_data_align,
		size_t priv_data_size,
		void *priv_data_init,
		void *priv,
		void *buf_addr __attribute__((unused)), size_t subbuf_size,
		size_t num_subbuf, unsigned int switch_timer_interval,
		unsigned int read_timer_interval,
		const int *stream_fds, int nr_stream_fds,
		int64_t blocking_timeout)
{
	int ret;
	size_t shmsize, chansize;
	struct lttng_ust_ring_buffer_channel *chan;
	struct lttng_ust_shm_handle *handle;
	struct shm_object *shmobj;
	unsigned int nr_streams;
	int64_t blocking_timeout_ms;

	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
		nr_streams = num_possible_cpus();
	else
		nr_streams = 1;

	if (nr_stream_fds != nr_streams)
		return NULL;

	if (blocking_timeout < -1) {
		return NULL;
	}
	/* usec to msec */
	if (blocking_timeout == -1) {
		blocking_timeout_ms = -1;
	} else {
		blocking_timeout_ms = blocking_timeout / 1000;
		if (blocking_timeout_ms != (int32_t) blocking_timeout_ms) {
			return NULL;
		}
	}

	if (lib_ring_buffer_check_config(config, switch_timer_interval,
			read_timer_interval))
		return NULL;

	handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
	if (!handle)
		return NULL;

	/* Allocate table for channel + per-cpu buffers */
	handle->table = shm_object_table_create(1 + num_possible_cpus());
	if (!handle->table)
		goto error_table_alloc;

	/* Calculate the shm allocation layout */
	shmsize = sizeof(struct lttng_ust_ring_buffer_channel);
	shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_ring_buffer_shmp));
	shmsize += sizeof(struct lttng_ust_ring_buffer_shmp) * nr_streams;
	chansize = shmsize;
	if (priv_data_align)
		shmsize += lttng_ust_offset_align(shmsize, priv_data_align);
	shmsize += priv_data_size;

	/* Allocate normal memory for channel (not shared) */
	shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM,
			-1, -1);
	if (!shmobj)
		goto error_append;
	/* struct lttng_ust_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
	set_shmp(handle->chan, zalloc_shm(shmobj, chansize));
	assert(handle->chan._ref.index == 0);
	assert(handle->chan._ref.offset == 0);
	chan = shmp(handle, handle->chan);
	if (!chan)
		goto error_append;
	chan->nr_streams = nr_streams;

	/* space for private data */
	if (priv_data_size) {
		void *priv_config;

		DECLARE_SHMP(void, priv_data_alloc);

		align_shm(shmobj, priv_data_align);
		chan->priv_data_offset = shmobj->allocated_len;
		set_shmp(priv_data_alloc, zalloc_shm(shmobj, priv_data_size));
		if (!shmp(handle, priv_data_alloc))
			goto error_append;
		priv_config = channel_get_private_config(chan);
		memcpy(priv_config, priv_data_init, priv_data_size);
	} else {
		chan->priv_data_offset = -1;
	}

	chan->u.s.blocking_timeout_ms = (int32_t) blocking_timeout_ms;

	channel_set_private(chan, priv);

	ret = channel_backend_init(&chan->backend, name, config,
			subbuf_size, num_subbuf, handle,
			stream_fds);
	if (ret)
		goto error_backend_init;

	chan->handle = handle;
	chan->commit_count_mask = (~0UL >> chan->backend.num_subbuf_order);

	chan->switch_timer_interval = switch_timer_interval;
	chan->read_timer_interval = read_timer_interval;
	lib_ring_buffer_channel_switch_timer_start(chan);
	lib_ring_buffer_channel_read_timer_start(chan);

	return handle;

error_backend_init:
error_append:
	shm_object_table_destroy(handle->table, 1);
error_table_alloc:
	free(handle);
	return NULL;
}

struct lttng_ust_shm_handle *channel_handle_create(void *data,
		uint64_t memory_map_size,
		int wakeup_fd)
{
	struct lttng_ust_shm_handle *handle;
	struct shm_object *object;

	handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
	if (!handle)
		return NULL;

	/* Allocate table for channel + per-cpu buffers */
	handle->table = shm_object_table_create(1 + num_possible_cpus());
	if (!handle->table)
		goto error_table_alloc;
	/* Add channel object */
	object = shm_object_table_append_mem(handle->table, data,
			memory_map_size, wakeup_fd);
	if (!object)
		goto error_table_object;
	/* struct lttng_ust_ring_buffer_channel is at object 0, offset 0 (hardcoded) */
	handle->chan._ref.index = 0;
	handle->chan._ref.offset = 0;
	return handle;

error_table_object:
	shm_object_table_destroy(handle->table, 0);
error_table_alloc:
	free(handle);
	return NULL;
}

int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
		int shm_fd, int wakeup_fd, uint32_t stream_nr,
		uint64_t memory_map_size)
{
	struct shm_object *object;

	/* Add stream object */
	object = shm_object_table_append_shm(handle->table,
			shm_fd, wakeup_fd, stream_nr,
			memory_map_size);
	if (!object)
		return -EINVAL;
	return 0;
}

unsigned int channel_handle_get_nr_streams(struct lttng_ust_shm_handle *handle)
{
	assert(handle->table);
	return handle->table->allocated_len - 1;
}

static
void channel_release(struct lttng_ust_ring_buffer_channel *chan, struct lttng_ust_shm_handle *handle,
		int consumer)
{
	channel_free(chan, handle, consumer);
}

/**
 * channel_destroy - Finalize, wait for q.s. and destroy channel.
 * @chan: channel to destroy
 *
 * Holds cpu hotplug.
 * Call "destroy" callback, finalize channels, decrement the channel
 * reference count. Note that when readers have completed data
 * consumption of finalized channels, get_subbuf() will return -ENODATA.
 * They should release their handle at that point.
 */
void channel_destroy(struct lttng_ust_ring_buffer_channel *chan, struct lttng_ust_shm_handle *handle,
		int consumer)
{
	if (consumer) {
		/*
		 * Note: the consumer takes care of finalizing and
		 * switching the buffers.
		 */
		channel_unregister_notifiers(chan, handle);
		/*
		 * The consumer prints errors.
		 */
		channel_print_errors(chan, handle);
	}

	/*
	 * sessiond/consumer are keeping a reference on the shm file
	 * descriptor directly. No need to refcount.
	 */
	channel_release(chan, handle, consumer);
	return;
}

struct lttng_ust_ring_buffer *channel_get_ring_buffer(
		const struct lttng_ust_ring_buffer_config *config,
		struct lttng_ust_ring_buffer_channel *chan, int cpu,
		struct lttng_ust_shm_handle *handle,
		int *shm_fd, int *wait_fd,
		int *wakeup_fd,
		uint64_t *memory_map_size,
		void **memory_map_addr)
{
	struct shm_ref *ref;

	if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
		cpu = 0;
	} else {
		if (cpu >= num_possible_cpus())
			return NULL;
	}
	ref = &chan->backend.buf[cpu].shmp._ref;
	*shm_fd = shm_get_shm_fd(handle, ref);
	*wait_fd = shm_get_wait_fd(handle, ref);
	*wakeup_fd = shm_get_wakeup_fd(handle, ref);
	if (shm_get_shm_size(handle, ref, memory_map_size))
		return NULL;
	*memory_map_addr = handle->table->objects[ref->index].memory_map;
	return shmp(handle, chan->backend.buf[cpu].shmp);
}

int ring_buffer_channel_close_wait_fd(
		const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
		struct lttng_ust_ring_buffer_channel *chan __attribute__((unused)),
		struct lttng_ust_shm_handle *handle)
{
	struct shm_ref *ref;

	ref = &handle->chan._ref;
	return shm_close_wait_fd(handle, ref);
}

int ring_buffer_channel_close_wakeup_fd(
		const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
		struct lttng_ust_ring_buffer_channel *chan __attribute__((unused)),
		struct lttng_ust_shm_handle *handle)
{
	struct shm_ref *ref;

	ref = &handle->chan._ref;
	return shm_close_wakeup_fd(handle, ref);
}

int ring_buffer_stream_close_wait_fd(const struct lttng_ust_ring_buffer_config *config,
		struct lttng_ust_ring_buffer_channel *chan,
		struct lttng_ust_shm_handle *handle,
		int cpu)
{
	struct shm_ref *ref;

	if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
		cpu = 0;
	} else {
		if (cpu >= num_possible_cpus())
			return -EINVAL;
	}
	ref = &chan->backend.buf[cpu].shmp._ref;
	return shm_close_wait_fd(handle, ref);
}

int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_ring_buffer_config *config,
		struct lttng_ust_ring_buffer_channel *chan,
		struct lttng_ust_shm_handle *handle,
		int cpu)
{
	struct shm_ref *ref;
	int ret;

	if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
		cpu = 0;
	} else {
		if (cpu >= num_possible_cpus())
			return -EINVAL;
	}
	ref = &chan->backend.buf[cpu].shmp._ref;
	pthread_mutex_lock(&wakeup_fd_mutex);
	ret = shm_close_wakeup_fd(handle, ref);
	pthread_mutex_unlock(&wakeup_fd_mutex);
	return ret;
}

int lib_ring_buffer_open_read(struct lttng_ust_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle __attribute__((unused)))
{
	if (uatomic_cmpxchg(&buf->active_readers, 0, 1) != 0)
		return -EBUSY;
	cmm_smp_mb();
	return 0;
}

void lib_ring_buffer_release_read(struct lttng_ust_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_ring_buffer_channel *chan = shmp(handle, buf->backend.chan);

	if (!chan)
		return;
	CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
	cmm_smp_mb();
	uatomic_dec(&buf->active_readers);
}

/**
 * lib_ring_buffer_snapshot - save subbuffer position snapshot (for read)
 * @buf: ring buffer
 * @consumed: consumed count indicating the position where to read
 * @produced: produced count, indicates position when to stop reading
 *
 * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
 * data to read at consumed position, or 0 if the get operation succeeds.
 */

int lib_ring_buffer_snapshot(struct lttng_ust_ring_buffer *buf,
		unsigned long *consumed, unsigned long *produced,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_ring_buffer_channel *chan;
	const struct lttng_ust_ring_buffer_config *config;
	unsigned long consumed_cur, write_offset;
	int finalized;

	chan = shmp(handle, buf->backend.chan);
	if (!chan)
		return -EPERM;
	config = &chan->backend.config;
	finalized = CMM_ACCESS_ONCE(buf->finalized);
	/*
	 * Read finalized before counters.
	 */
	cmm_smp_rmb();
	consumed_cur = uatomic_read(&buf->consumed);
	/*
	 * No need to issue a memory barrier between consumed count read and
	 * write offset read, because consumed count can only change
	 * concurrently in overwrite mode, and we keep a sequence counter
	 * identifier derived from the write offset to check we are getting
	 * the same sub-buffer we are expecting (the sub-buffers are atomically
	 * "tagged" upon writes, tags are checked upon read).
	 */
	write_offset = v_read(config, &buf->offset);

	/*
	 * Check that we are not about to read the same subbuffer in
	 * which the writer head is.
	 */
	if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_cur, chan)
	    == 0)
		goto nodata;

	*consumed = consumed_cur;
	*produced = subbuf_trunc(write_offset, chan);

	return 0;

nodata:
	/*
	 * The memory barriers __wait_event()/wake_up_interruptible() take care
	 * of "raw_spin_is_locked" memory ordering.
	 */
	if (finalized)
		return -ENODATA;
	else
		return -EAGAIN;
}

/**
 * Performs the same function as lib_ring_buffer_snapshot(), but the positions
 * are saved regardless of whether the consumed and produced positions are
 * in the same subbuffer.
 * @buf: ring buffer
 * @consumed: consumed byte count indicating the last position read
 * @produced: produced byte count indicating the last position written
 *
 * This function is meant to provide information on the exact producer and
 * consumer positions without regard for the "snapshot" feature.
 */
int lib_ring_buffer_snapshot_sample_positions(
		struct lttng_ust_ring_buffer *buf,
		unsigned long *consumed, unsigned long *produced,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_ring_buffer_channel *chan;
	const struct lttng_ust_ring_buffer_config *config;

	chan = shmp(handle, buf->backend.chan);
	if (!chan)
		return -EPERM;
	config = &chan->backend.config;
	cmm_smp_rmb();
	*consumed = uatomic_read(&buf->consumed);
	/*
	 * No need to issue a memory barrier between consumed count read and
	 * write offset read, because consumed count can only change
	 * concurrently in overwrite mode, and we keep a sequence counter
	 * identifier derived from the write offset to check we are getting
	 * the same sub-buffer we are expecting (the sub-buffers are atomically
	 * "tagged" upon writes, tags are checked upon read).
	 */
	*produced = v_read(config, &buf->offset);
	return 0;
}

/**
 * lib_ring_buffer_move_consumer - move consumed counter forward
 * @buf: ring buffer
 * @consumed_new: new consumed count value
 */
void lib_ring_buffer_move_consumer(struct lttng_ust_ring_buffer *buf,
		unsigned long consumed_new,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_ring_buffer_backend *bufb = &buf->backend;
	struct lttng_ust_ring_buffer_channel *chan;
	unsigned long consumed;

	chan = shmp(handle, bufb->chan);
	if (!chan)
		return;
	CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);

	/*
	 * Only push the consumed value forward.
	 * If the consumed cmpxchg fails, this is because we have been pushed by
	 * the writer in flight recorder mode.
	 */
	consumed = uatomic_read(&buf->consumed);
	while ((long) consumed - (long) consumed_new < 0)
		consumed = uatomic_cmpxchg(&buf->consumed, consumed,
				consumed_new);
}

/**
 * lib_ring_buffer_get_subbuf - get exclusive access to subbuffer for reading
 * @buf: ring buffer
 * @consumed: consumed count indicating the position where to read
 *
 * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
 * data to read at consumed position, or 0 if the get operation succeeds.
 */
int lib_ring_buffer_get_subbuf(struct lttng_ust_ring_buffer *buf,
		unsigned long consumed,
		struct lttng_ust_shm_handle *handle)
{
	struct lttng_ust_ring_buffer_channel *chan;
	const struct lttng_ust_ring_buffer_config *config;
	unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
	int ret, finalized, nr_retry = LTTNG_UST_RING_BUFFER_GET_RETRY;
	struct commit_counters_cold *cc_cold;

	chan = shmp(handle, buf->backend.chan);
	if (!chan)
		return -EPERM;
	config = &chan->backend.config;
retry:
	finalized = CMM_ACCESS_ONCE(buf->finalized);
	/*
	 * Read finalized before counters.
	 */
	cmm_smp_rmb();
	consumed_cur = uatomic_read(&buf->consumed);
	consumed_idx = subbuf_index(consumed, chan);
	cc_cold = shmp_index(handle, buf->commit_cold, consumed_idx);
	if (!cc_cold)
		return -EPERM;
	commit_count = v_read(config, &cc_cold->cc_sb);
	/*
	 * Make sure we read the commit count before reading the buffer
	 * data and the write offset. Correct consumed offset ordering
	 * wrt commit count is ensured by the use of cmpxchg to update
	 * the consumed offset.
	 */
	/*
	 * Local rmb to match the remote wmb to read the commit count
	 * before the buffer data and the write offset.
	 */
	cmm_smp_rmb();

	write_offset = v_read(config, &buf->offset);

	/*
	 * Check that the buffer we are getting is after or at consumed_cur
	 * position.
	 */
	if ((long) subbuf_trunc(consumed, chan)
	    - (long) subbuf_trunc(consumed_cur, chan) < 0)
		goto nodata;

	/*
	 * Check that the subbuffer we are trying to consume has been
	 * already fully committed. There are a few causes that can make
	 * this unavailability situation occur:
	 *
	 * Temporary (short-term) situation:
	 * - Application is running on a different CPU, between reserve
	 *   and commit ring buffer operations,
	 * - Application is preempted between reserve and commit ring
	 *   buffer operations,
	 *
	 * Long-term situation:
	 * - Application is stopped (SIGSTOP) between reserve and commit
	 *   ring buffer operations. Could eventually be resumed by
	 *   SIGCONT.
	 * - Application is killed (SIGTERM, SIGINT, SIGKILL) between
	 *   reserve and commit ring buffer operation.
	 *
	 * From a consumer perspective, handling short-term
	 * unavailability situations is performed by retrying a few
	 * times after a delay. Handling long-term unavailability
	 * situations is handled by failing to get the sub-buffer.
	 *
	 * In all of those situations, if the application is taking a
	 * long time to perform its commit after ring buffer space
	 * reservation, we can end up in a situation where the producer
	 * will fill the ring buffer and try to write into the same
	 * sub-buffer again (which has a missing commit). This is
	 * handled by the producer in the sub-buffer switch handling
	 * code of the reserve routine by detecting unbalanced
	 * reserve/commit counters and discarding all further events
	 * until the situation is resolved in those situations. Two
	 * scenarios can occur:
	 *
	 * 1) The application causing the reserve/commit counters to be
	 *    unbalanced has been terminated. In this situation, all
	 *    further events will be discarded in the buffers, and no
	 *    further buffer data will be readable by the consumer
	 *    daemon. Tearing down the UST tracing session and starting
	 *    anew is a work-around for those situations. Note that this
	 *    only affects per-UID tracing. In per-PID tracing, the
	 *    application vanishes with the termination, and therefore
	 *    no more data needs to be written to the buffers.
	 * 2) The application causing the unbalance has been delayed for
	 *    a long time, but will eventually try to increment the
	 *    commit counter after eventually writing to the sub-buffer.
	 *    This situation can cause events to be discarded until the
	 *    application resumes its operations.
	 */
	if (((commit_count - chan->backend.subbuf_size)
	     & chan->commit_count_mask)
	    - (buf_trunc(consumed, chan)
	       >> chan->backend.num_subbuf_order)
	    != 0) {
		if (nr_retry-- > 0) {
			if (nr_retry <= (LTTNG_UST_RING_BUFFER_GET_RETRY >> 1))
				(void) poll(NULL, 0, LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS);
			goto retry;
		} else {
			goto nodata;
		}
	}

	/*
	 * Check that we are not about to read the same subbuffer in
	 * which the writer head is.
	 */
	if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed, chan)
	    == 0)
		goto nodata;

	/*
	 * Failure to get the subbuffer causes a busy-loop retry without going
	 * to a wait queue. These are caused by short-lived race windows where
	 * the writer is getting access to a subbuffer we were trying to get
	 * access to. Also checks that the "consumed" buffer count we are
	 * looking for matches the one contained in the subbuffer id.
	 *
	 * The short-lived race window described here can be affected by
	 * application signals and preemption, thus requiring to bound
	 * the loop to a maximum number of retries.
	 */
	ret = update_read_sb_index(config, &buf->backend, &chan->backend,
			consumed_idx, buf_trunc_val(consumed, chan),
			handle);
	if (ret) {
		if (nr_retry-- > 0) {
			if (nr_retry <= (LTTNG_UST_RING_BUFFER_GET_RETRY >> 1))
				(void) poll(NULL, 0, LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS);
			goto retry;
		} else {
			goto nodata;
		}
	}
	subbuffer_id_clear_noref(config, &buf->backend.buf_rsb.id);

	buf->get_subbuf_consumed = consumed;
	buf->get_subbuf = 1;

	return 0;

nodata:
	/*
	 * The memory barriers __wait_event()/wake_up_interruptible() take care
	 * of "raw_spin_is_locked" memory ordering.
	 */
	if (finalized)
		return -ENODATA;
	else
		return -EAGAIN;
}
1583
1584 /**
1585 * lib_ring_buffer_put_subbuf - release exclusive subbuffer access
1586 * @buf: ring buffer
1587 */
1588 void lib_ring_buffer_put_subbuf(struct lttng_ust_ring_buffer *buf,
1589 struct lttng_ust_shm_handle *handle)
1590 {
1591 struct lttng_ust_ring_buffer_backend *bufb = &buf->backend;
1592 struct lttng_ust_ring_buffer_channel *chan;
1593 const struct lttng_ust_ring_buffer_config *config;
1594 unsigned long sb_bindex, consumed_idx, consumed;
1595 struct lttng_ust_ring_buffer_backend_pages_shmp *rpages;
1596 struct lttng_ust_ring_buffer_backend_pages *backend_pages;
1597
1598 chan = shmp(handle, bufb->chan);
1599 if (!chan)
1600 return;
1601 config = &chan->backend.config;
1602 CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
1603
1604 if (!buf->get_subbuf) {
1605 /*
1606 * Reader puts a subbuffer it did not get.
1607 */
1608 CHAN_WARN_ON(chan, 1);
1609 return;
1610 }
1611 consumed = buf->get_subbuf_consumed;
1612 buf->get_subbuf = 0;
1613
1614 /*
1615 * Clear the records_unread counter. (overruns counter)
1616 * Can still be non-zero if a file reader simply grabbed the data
1617 * without using iterators.
1618 * Can be below zero if an iterator is used on a snapshot more than
1619 * once.
1620 */
1621 sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
1622 rpages = shmp_index(handle, bufb->array, sb_bindex);
1623 if (!rpages)
1624 return;
1625 backend_pages = shmp(handle, rpages->shmp);
1626 if (!backend_pages)
1627 return;
1628 v_add(config, v_read(config, &backend_pages->records_unread),
1629 &bufb->records_read);
1630 v_set(config, &backend_pages->records_unread, 0);
1631 CHAN_WARN_ON(chan, config->mode == RING_BUFFER_OVERWRITE
1632 && subbuffer_id_is_noref(config, bufb->buf_rsb.id));
1633 subbuffer_id_set_noref(config, &bufb->buf_rsb.id);
1634
1635 /*
1636 * Exchange the reader subbuffer with the one we put in its place in the
1637 * writer subbuffer table. Expect the original consumed count. If
1638 * update_read_sb_index fails, this is because the writer updated the
1639 * subbuffer concurrently. We should therefore keep the subbuffer we
1640 * currently have: it has become invalid to try reading this sub-buffer
1641 * consumed count value anyway.
1642 */
1643 consumed_idx = subbuf_index(consumed, chan);
1644 update_read_sb_index(config, &buf->backend, &chan->backend,
1645 consumed_idx, buf_trunc_val(consumed, chan),
1646 handle);
1647 /*
1648 * update_read_sb_index return value ignored. Don't exchange sub-buffer
1649 * if the writer concurrently updated it.
1650 */
1651 }
1652
1653 /*
1654 * cons_offset is an iterator on all subbuffer offsets between the reader
1655 * position and the writer position. (inclusive)
1656 */
1657 static
1658 void lib_ring_buffer_print_subbuffer_errors(struct lttng_ust_ring_buffer *buf,
1659 struct lttng_ust_ring_buffer_channel *chan,
1660 unsigned long cons_offset,
1661 int cpu,
1662 struct lttng_ust_shm_handle *handle)
1663 {
1664 const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
1665 unsigned long cons_idx, commit_count, commit_count_sb;
1666 struct commit_counters_hot *cc_hot;
1667 struct commit_counters_cold *cc_cold;
1668
1669 cons_idx = subbuf_index(cons_offset, chan);
1670 cc_hot = shmp_index(handle, buf->commit_hot, cons_idx);
1671 if (!cc_hot)
1672 return;
1673 cc_cold = shmp_index(handle, buf->commit_cold, cons_idx);
1674 if (!cc_cold)
1675 return;
1676 commit_count = v_read(config, &cc_hot->cc);
1677 commit_count_sb = v_read(config, &cc_cold->cc_sb);
1678
1679 if (subbuf_offset(commit_count, chan) != 0)
1680 DBG("ring buffer %s, cpu %d: "
1681 "commit count in subbuffer %lu,\n"
1682 "expecting multiples of %lu bytes\n"
1683 " [ %lu bytes committed, %lu bytes reader-visible ]\n",
1684 chan->backend.name, cpu, cons_idx,
1685 chan->backend.subbuf_size,
1686 commit_count, commit_count_sb);
1687
1688 DBG("ring buffer: %s, cpu %d: %lu bytes committed\n",
1689 chan->backend.name, cpu, commit_count);
1690 }
1691
1692 static
1693 void lib_ring_buffer_print_buffer_errors(struct lttng_ust_ring_buffer *buf,
1694 struct lttng_ust_ring_buffer_channel *chan,
1695 int cpu, struct lttng_ust_shm_handle *handle)
1696 {
1697 const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
1698 unsigned long write_offset, cons_offset;
1699
1700 /*
1701 * No need to order commit_count, write_offset and cons_offset reads
1702 * because we execute at teardown when no more writer nor reader
1703 * references are left.
1704 */
1705 write_offset = v_read(config, &buf->offset);
1706 cons_offset = uatomic_read(&buf->consumed);
1707 if (write_offset != cons_offset)
1708 DBG("ring buffer %s, cpu %d: "
1709 "non-consumed data\n"
1710 " [ %lu bytes written, %lu bytes read ]\n",
1711 chan->backend.name, cpu, write_offset, cons_offset);
1712
1713 for (cons_offset = uatomic_read(&buf->consumed);
1714 (long) (subbuf_trunc((unsigned long) v_read(config, &buf->offset),
1715 chan)
1716 - cons_offset) > 0;
1717 cons_offset = subbuf_align(cons_offset, chan))
1718 lib_ring_buffer_print_subbuffer_errors(buf, chan, cons_offset,
1719 cpu, handle);
1720 }
1721
1722 static
1723 void lib_ring_buffer_print_errors(struct lttng_ust_ring_buffer_channel *chan,
1724 struct lttng_ust_ring_buffer *buf, int cpu,
1725 struct lttng_ust_shm_handle *handle)
1726 {
1727 const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
1728
1729 if (!strcmp(chan->backend.name, "relay-metadata-mmap")) {
1730 DBG("ring buffer %s: %lu records written, "
1731 "%lu records overrun\n",
1732 chan->backend.name,
1733 v_read(config, &buf->records_count),
1734 v_read(config, &buf->records_overrun));
1735 } else {
1736 DBG("ring buffer %s, cpu %d: %lu records written, "
1737 "%lu records overrun\n",
1738 chan->backend.name, cpu,
1739 v_read(config, &buf->records_count),
1740 v_read(config, &buf->records_overrun));
1741
1742 if (v_read(config, &buf->records_lost_full)
1743 || v_read(config, &buf->records_lost_wrap)
1744 || v_read(config, &buf->records_lost_big))
1745 DBG("ring buffer %s, cpu %d: records were lost. Caused by:\n"
1746 " [ %lu buffer full, %lu nest buffer wrap-around, "
1747 "%lu event too big ]\n",
1748 chan->backend.name, cpu,
1749 v_read(config, &buf->records_lost_full),
1750 v_read(config, &buf->records_lost_wrap),
1751 v_read(config, &buf->records_lost_big));
1752 }
1753 lib_ring_buffer_print_buffer_errors(buf, chan, cpu, handle);
1754 }
1755
1756 /*
1757 * lib_ring_buffer_switch_old_start: Populate old subbuffer header.
1758 *
1759 * Only executed by SWITCH_FLUSH, which can be issued while tracing is
1760 * active or at buffer finalization (destroy).
1761 */
1762 static
1763 void lib_ring_buffer_switch_old_start(struct lttng_ust_ring_buffer *buf,
1764 struct lttng_ust_ring_buffer_channel *chan,
1765 struct switch_offsets *offsets,
1766 uint64_t tsc,
1767 struct lttng_ust_shm_handle *handle)
1768 {
1769 const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
1770 unsigned long oldidx = subbuf_index(offsets->old, chan);
1771 unsigned long commit_count;
1772 struct commit_counters_hot *cc_hot;
1773
1774 config->cb.buffer_begin(buf, tsc, oldidx, handle);
1775
1776 /*
1777 * Order all writes to buffer before the commit count update that will
1778 * determine that the subbuffer is full.
1779 */
1780 cmm_smp_wmb();
1781 cc_hot = shmp_index(handle, buf->commit_hot, oldidx);
1782 if (!cc_hot)
1783 return;
1784 v_add(config, config->cb.subbuffer_header_size(),
1785 &cc_hot->cc);
1786 commit_count = v_read(config, &cc_hot->cc);
1787 /* Check if the written buffer has to be delivered */
1788 lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
1789 commit_count, oldidx, handle, tsc);
1790 lib_ring_buffer_write_commit_counter(config, buf, chan,
1791 offsets->old + config->cb.subbuffer_header_size(),
1792 commit_count, handle, cc_hot);
1793 }
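
/*
 * Publication pattern used by the switch and commit paths above (a
 * minimal sketch, not part of the original file): the payload must be
 * globally visible before the commit count that publishes it.
 *
 *	copy payload into the sub-buffer;	// plain stores
 *	cmm_smp_wmb();				// order payload before count
 *	v_add(config, len, &cc_hot->cc);	// publish commit count
 *	commit_count = v_read(config, &cc_hot->cc);
 *	lib_ring_buffer_check_deliver(...);	// may deliver if full
 *
 * The reader side pairs with this barrier before it reads the payload.
 */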
1794
1795 /*
1796 * lib_ring_buffer_switch_old_end: switch old subbuffer
1797 *
1798 * Note: offset_old should never be 0 here. This is fine, because we never
1799 * perform a buffer switch on an empty subbuffer in SWITCH_ACTIVE mode. The
1800 * caller increments the offset_old value when doing a SWITCH_FLUSH on an
1801 * empty subbuffer.
1802 */
1803 static
1804 void lib_ring_buffer_switch_old_end(struct lttng_ust_ring_buffer *buf,
1805 struct lttng_ust_ring_buffer_channel *chan,
1806 struct switch_offsets *offsets,
1807 uint64_t tsc,
1808 struct lttng_ust_shm_handle *handle)
1809 {
1810 const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
1811 unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
1812 unsigned long commit_count, padding_size, data_size;
1813 struct commit_counters_hot *cc_hot;
1814 uint64_t *ts_end;
1815
1816 data_size = subbuf_offset(offsets->old - 1, chan) + 1;
1817 padding_size = chan->backend.subbuf_size - data_size;
1818 subbuffer_set_data_size(config, &buf->backend, oldidx, data_size,
1819 handle);
1820
1821 ts_end = shmp_index(handle, buf->ts_end, oldidx);
1822 if (!ts_end)
1823 return;
1824 /*
1825 * This is the last space reservation in that sub-buffer before
1826 * it gets delivered. This provides exclusive access to write to
1827 * this sub-buffer's ts_end. There are also no concurrent
1828 * readers of that ts_end because delivery of that sub-buffer is
1829 * postponed until the commit counter is incremented for the
1830 * current space reservation.
1831 */
1832 *ts_end = tsc;
1833
1834 /*
1835 * Order all writes to buffer and store to ts_end before the commit
1836 * count update that will determine that the subbuffer is full.
1837 */
1838 cmm_smp_wmb();
1839 cc_hot = shmp_index(handle, buf->commit_hot, oldidx);
1840 if (!cc_hot)
1841 return;
1842 v_add(config, padding_size, &cc_hot->cc);
1843 commit_count = v_read(config, &cc_hot->cc);
1844 lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
1845 commit_count, oldidx, handle, tsc);
1846 lib_ring_buffer_write_commit_counter(config, buf, chan,
1847 offsets->old + padding_size, commit_count, handle,
1848 cc_hot);
1849 }
1850
1851 /*
1852 * lib_ring_buffer_switch_new_start: Populate new subbuffer.
1853 *
1854 * This code can be executed out of order: beware, writers may already have
1855 * written to the sub-buffer before this code gets executed. The commit makes
1856 * sure that this code is executed before the delivery of this sub-buffer.
1857 */
1858 static
1859 void lib_ring_buffer_switch_new_start(struct lttng_ust_ring_buffer *buf,
1860 struct lttng_ust_ring_buffer_channel *chan,
1861 struct switch_offsets *offsets,
1862 uint64_t tsc,
1863 struct lttng_ust_shm_handle *handle)
1864 {
1865 const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
1866 unsigned long beginidx = subbuf_index(offsets->begin, chan);
1867 unsigned long commit_count;
1868 struct commit_counters_hot *cc_hot;
1869
1870 config->cb.buffer_begin(buf, tsc, beginidx, handle);
1871
1872 /*
1873 * Order all writes to buffer before the commit count update that will
1874 * determine that the subbuffer is full.
1875 */
1876 cmm_smp_wmb();
1877 cc_hot = shmp_index(handle, buf->commit_hot, beginidx);
1878 if (!cc_hot)
1879 return;
1880 v_add(config, config->cb.subbuffer_header_size(), &cc_hot->cc);
1881 commit_count = v_read(config, &cc_hot->cc);
1882 /* Check if the written buffer has to be delivered */
1883 lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
1884 commit_count, beginidx, handle, tsc);
1885 lib_ring_buffer_write_commit_counter(config, buf, chan,
1886 offsets->begin + config->cb.subbuffer_header_size(),
1887 commit_count, handle, cc_hot);
1888 }
1889
1890 /*
1891 * lib_ring_buffer_switch_new_end: finish switching current subbuffer
1892 *
1893 * Calls subbuffer_set_data_size() to set the data size of the current
1894 * sub-buffer. We do not need to perform check_deliver nor commit here,
1895 * since this task will be done by the "commit" of the event for which
1896 * we are currently doing the space reservation.
1897 */
1898 static
1899 void lib_ring_buffer_switch_new_end(struct lttng_ust_ring_buffer *buf,
1900 struct lttng_ust_ring_buffer_channel *chan,
1901 struct switch_offsets *offsets,
1902 uint64_t tsc,
1903 struct lttng_ust_shm_handle *handle)
1904 {
1905 const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
1906 unsigned long endidx, data_size;
1907 uint64_t *ts_end;
1908
1909 endidx = subbuf_index(offsets->end - 1, chan);
1910 data_size = subbuf_offset(offsets->end - 1, chan) + 1;
1911 subbuffer_set_data_size(config, &buf->backend, endidx, data_size,
1912 handle);
1913 ts_end = shmp_index(handle, buf->ts_end, endidx);
1914 if (!ts_end)
1915 return;
1916 /*
1917 * This is the last space reservation in that sub-buffer before
1918 * it gets delivered. This provides exclusive access to write to
1919 * this sub-buffer's ts_end. There are also no concurrent
1920 * readers of that ts_end because delivery of that sub-buffer is
1921 * postponed until the commit counter is incremented for the
1922 * current space reservation.
1923 */
1924 *ts_end = tsc;
1925 }
1926
1927 /*
1928 * Returns :
1929 * 0 if ok
1930 * !0 if execution must be aborted.
1931 */
1932 static
1933 int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
1934 struct lttng_ust_ring_buffer *buf,
1935 struct lttng_ust_ring_buffer_channel *chan,
1936 struct switch_offsets *offsets,
1937 uint64_t *tsc,
1938 struct lttng_ust_shm_handle *handle)
1939 {
1940 const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
1941 unsigned long off, reserve_commit_diff;
1942
1943 offsets->begin = v_read(config, &buf->offset);
1944 offsets->old = offsets->begin;
1945 offsets->switch_old_start = 0;
1946 off = subbuf_offset(offsets->begin, chan);
1947
1948 *tsc = config->cb.ring_buffer_clock_read(chan);
1949
1950 /*
1951 * Ensure we flush the header of an empty subbuffer when doing the
1952 * finalize (SWITCH_FLUSH). This ensures that we end up knowing the
1953 * total data gathering duration even if there were no records saved
1954 * after the last buffer switch.
1955 * In SWITCH_ACTIVE mode, switch the buffer when it contains events.
1956 * SWITCH_ACTIVE only flushes the current subbuffer, dealing with end of
1957 * subbuffer header as appropriate.
1958 * The next record that reserves space will be responsible for
1959 * populating the following subbuffer header. We choose not to populate
1960 * the next subbuffer header here because we want to be able to use
1961 * SWITCH_ACTIVE for periodic buffer flush, which must
1962 * guarantee that all the buffer content (records and header
1963 * timestamps) are visible to the reader. This is required for
1964 * quiescence guarantees for the fusion merge.
1965 */
1966 if (mode != SWITCH_FLUSH && !off)
1967 return -1; /* we do not have to switch : buffer is empty */
1968
1969 if (caa_unlikely(off == 0)) {
1970 unsigned long sb_index, commit_count;
1971 struct commit_counters_cold *cc_cold;
1972
1973 /*
1974 * We are performing a SWITCH_FLUSH. There may be concurrent
1975 * writes into the buffer if e.g. invoked while performing a
1976 * snapshot on an active trace.
1977 *
1978 * If the client does not save any header information
1979 * (sub-buffer header size == 0), don't switch empty subbuffer
1980 * on finalize, because it is invalid to deliver a completely
1981 * empty subbuffer.
1982 */
1983 if (!config->cb.subbuffer_header_size())
1984 return -1;
1985
1986 /* Test new buffer integrity */
1987 sb_index = subbuf_index(offsets->begin, chan);
1988 cc_cold = shmp_index(handle, buf->commit_cold, sb_index);
1989 if (!cc_cold)
1990 return -1;
1991 commit_count = v_read(config, &cc_cold->cc_sb);
1992 reserve_commit_diff =
1993 (buf_trunc(offsets->begin, chan)
1994 >> chan->backend.num_subbuf_order)
1995 - (commit_count & chan->commit_count_mask);
1996 if (caa_likely(reserve_commit_diff == 0)) {
1997 /* Next subbuffer not being written to. */
1998 if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
1999 subbuf_trunc(offsets->begin, chan)
2000 - subbuf_trunc((unsigned long)
2001 uatomic_read(&buf->consumed), chan)
2002 >= chan->backend.buf_size)) {
2003 /*
2004 * We do not overwrite non-consumed buffers
2005 * and the buffer is full: don't switch.
2006 */
2007 return -1;
2008 } else {
2009 /*
2010 * Next subbuffer not being written to, and we
2011 * are either in overwrite mode or the buffer is
2012 * not full. It's safe to write in this new
2013 * subbuffer.
2014 */
2015 }
2016 } else {
2017 /*
2018 * Next subbuffer reserve offset does not match the
2019 * commit offset. Don't perform switch in
2020 * producer-consumer and overwrite mode. Caused by
2021 * either a writer OOPS or too many nested writes over a
2022 * reserve/commit pair.
2023 */
2024 return -1;
2025 }
2026
2027 /*
2028 * Need to write the subbuffer start header on finalize.
2029 */
2030 offsets->switch_old_start = 1;
2031 }
2032 offsets->begin = subbuf_align(offsets->begin, chan);
2033 /* Note: old points to the next subbuf at offset 0 */
2034 offsets->end = offsets->begin;
2035 return 0;
2036 }
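
/*
 * Worked example for reserve_commit_diff above (illustrative numbers,
 * not part of the original file): with 4 sub-buffers
 * (num_subbuf_order == 2) of 4096 bytes each,
 * buf_trunc(offsets->begin, chan) >> 2 yields subbuf_size bytes per
 * completed lap of the whole buffer. After one full lap,
 * offsets->begin == 16384 gives 16384 >> 2 == 4096, which matches a
 * cc_sb of 4096 for a fully committed sub-buffer: the difference is 0
 * and the switch may proceed. A non-zero difference means the
 * sub-buffer still has uncommitted reservations.
 */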
2037
2038 /*
2039 * Force a sub-buffer switch. This operation is completely reentrant : can be
2040 * called while tracing is active with absolutely no lock held.
2041 *
2042 * For RING_BUFFER_SYNC_PER_CPU ring buffers, as a v_cmpxchg is used for
2043 * some atomic operations, this function must be called from the CPU
2044 * which owns the buffer for an ACTIVE flush. However, for
2045 * RING_BUFFER_SYNC_GLOBAL ring buffers, this function can be called
2046 * from any CPU.
2047 */
2048 void lib_ring_buffer_switch_slow(struct lttng_ust_ring_buffer *buf, enum switch_mode mode,
2049 struct lttng_ust_shm_handle *handle)
2050 {
2051 struct lttng_ust_ring_buffer_channel *chan;
2052 const struct lttng_ust_ring_buffer_config *config;
2053 struct switch_offsets offsets;
2054 unsigned long oldidx;
2055 uint64_t tsc;
2056
2057 chan = shmp(handle, buf->backend.chan);
2058 if (!chan)
2059 return;
2060 config = &chan->backend.config;
2061
2062 offsets.size = 0;
2063
2064 /*
2065 * Perform retryable operations.
2066 */
2067 do {
2068 if (lib_ring_buffer_try_switch_slow(mode, buf, chan, &offsets,
2069 &tsc, handle))
2070 return; /* Switch not needed */
2071 } while (v_cmpxchg(config, &buf->offset, offsets.old, offsets.end)
2072 != offsets.old);
2073
2074 /*
2075 * Atomically update last_tsc. This update races against concurrent
2076 * atomic updates, but the race will always cause supplementary full TSC
2077 * records, never the opposite (missing a full TSC record when it would
2078 * be needed).
2079 */
2080 save_last_tsc(config, buf, tsc);
2081
2082 /*
2083 * Push the reader if necessary
2084 */
2085 lib_ring_buffer_reserve_push_reader(buf, chan, offsets.old);
2086
2087 oldidx = subbuf_index(offsets.old, chan);
2088 lib_ring_buffer_clear_noref(config, &buf->backend, oldidx, handle);
2089
2090 /*
2091 * May need to populate header start on SWITCH_FLUSH.
2092 */
2093 if (offsets.switch_old_start) {
2094 lib_ring_buffer_switch_old_start(buf, chan, &offsets, tsc, handle);
2095 offsets.old += config->cb.subbuffer_header_size();
2096 }
2097
2098 /*
2099 * Switch old subbuffer.
2100 */
2101 lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc, handle);
2102 }
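
/*
 * Usage sketch (illustrative, not part of the original file): teardown
 * or snapshot paths force the current sub-buffer out so readers can
 * consume the most recent events:
 *
 *	lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH, handle);
 *
 * SWITCH_FLUSH also switches an empty sub-buffer, populating its
 * header so end-of-trace timing is preserved, whereas SWITCH_ACTIVE
 * returns early when there is nothing to flush.
 */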
2103
2104 static
2105 bool handle_blocking_retry(int *timeout_left_ms)
2106 {
2107 int timeout = *timeout_left_ms, delay;
2108
2109 if (caa_likely(!timeout))
2110 return false; /* Do not retry, discard event. */
2111 if (timeout < 0) /* Wait forever. */
2112 delay = RETRY_DELAY_MS;
2113 else
2114 delay = min_t(int, timeout, RETRY_DELAY_MS);
2115 (void) poll(NULL, 0, delay);
2116 if (timeout > 0)
2117 *timeout_left_ms -= delay;
2118 return true; /* Retry. */
2119 }
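
/*
 * Behaviour of the retry helper above for a configured blocking
 * timeout T in milliseconds (summary, not part of the original file):
 *
 *	T == 0: never block, the event is discarded immediately;
 *	T  < 0: block forever, sleeping RETRY_DELAY_MS per attempt;
 *	T  > 0: block up to T ms total, decrementing *timeout_left_ms
 *		by the delay actually slept on each attempt.
 */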
2120
2121 /*
2122 * Returns :
2123 * 0 if ok
2124 * -ENOSPC if event size is too large for packet.
2125 * -ENOBUFS if there is currently not enough space in buffer for the event.
2126 * -EIO if data cannot be written into the buffer for any other reason.
2127 */
2128 static
2129 int lib_ring_buffer_try_reserve_slow(struct lttng_ust_ring_buffer *buf,
2130 struct lttng_ust_ring_buffer_channel *chan,
2131 struct switch_offsets *offsets,
2132 struct lttng_ust_ring_buffer_ctx *ctx,
2133 void *client_ctx)
2134 {
2135 struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
2136 const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
2137 struct lttng_ust_shm_handle *handle = chan->handle;
2138 unsigned long reserve_commit_diff, offset_cmp;
2139 int timeout_left_ms = lttng_ust_ringbuffer_get_timeout(chan);
2140
2141 retry:
2142 offsets->begin = offset_cmp = v_read(config, &buf->offset);
2143 offsets->old = offsets->begin;
2144 offsets->switch_new_start = 0;
2145 offsets->switch_new_end = 0;
2146 offsets->switch_old_end = 0;
2147 offsets->pre_header_padding = 0;
2148
2149 ctx_private->tsc = config->cb.ring_buffer_clock_read(chan);
2150 if ((int64_t) ctx_private->tsc == -EIO)
2151 return -EIO;
2152
2153 if (last_tsc_overflow(config, buf, ctx_private->tsc))
2154 ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
2155
2156 if (caa_unlikely(subbuf_offset(offsets->begin, chan) == 0)) {
2157 offsets->switch_new_start = 1; /* For offsets->begin */
2158 } else {
2159 offsets->size = config->cb.record_header_size(config, chan,
2160 offsets->begin,
2161 &offsets->pre_header_padding,
2162 ctx, client_ctx);
2163 offsets->size +=
2164 lttng_ust_ring_buffer_align(offsets->begin + offsets->size,
2165 ctx->largest_align)
2166 + ctx->data_size;
2167 if (caa_unlikely(subbuf_offset(offsets->begin, chan) +
2168 offsets->size > chan->backend.subbuf_size)) {
2169 offsets->switch_old_end = 1; /* For offsets->old */
2170 offsets->switch_new_start = 1; /* For offsets->begin */
2171 }
2172 }
2173 if (caa_unlikely(offsets->switch_new_start)) {
2174 unsigned long sb_index, commit_count;
2175 struct commit_counters_cold *cc_cold;
2176
2177 /*
2178 * We are typically not filling the previous buffer completely.
2179 */
2180 if (caa_likely(offsets->switch_old_end))
2181 offsets->begin = subbuf_align(offsets->begin, chan);
2182 offsets->begin = offsets->begin
2183 + config->cb.subbuffer_header_size();
2184 /* Test new buffer integrity */
2185 sb_index = subbuf_index(offsets->begin, chan);
2186 /*
2187 * Read buf->offset before buf->commit_cold[sb_index].cc_sb.
2188 * lib_ring_buffer_check_deliver() has the matching
2189 * memory barriers required around commit_cold cc_sb
2190 * updates to ensure reserve and commit counter updates
2191 * are not seen reordered when updated by another CPU.
2192 */
2193 cmm_smp_rmb();
2194 cc_cold = shmp_index(handle, buf->commit_cold, sb_index);
2195 if (!cc_cold)
2196 return -EIO;
2197 commit_count = v_read(config, &cc_cold->cc_sb);
2198 /* Read buf->commit_cold[sb_index].cc_sb before buf->offset. */
2199 cmm_smp_rmb();
2200 if (caa_unlikely(offset_cmp != v_read(config, &buf->offset))) {
2201 /*
2202 * The reserve counter has been concurrently updated
2203 * while we read the commit counter. This means the
2204 * commit counter we read might not match buf->offset
2205 * due to concurrent update. We therefore need to retry.
2206 */
2207 goto retry;
2208 }
2209 reserve_commit_diff =
2210 (buf_trunc(offsets->begin, chan)
2211 >> chan->backend.num_subbuf_order)
2212 - (commit_count & chan->commit_count_mask);
2213 if (caa_likely(reserve_commit_diff == 0)) {
2214 /* Next subbuffer not being written to. */
2215 if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
2216 subbuf_trunc(offsets->begin, chan)
2217 - subbuf_trunc((unsigned long)
2218 uatomic_read(&buf->consumed), chan)
2219 >= chan->backend.buf_size)) {
2220 unsigned long nr_lost;
2221
2222 if (handle_blocking_retry(&timeout_left_ms))
2223 goto retry;
2224
2225 /*
2226 * We do not overwrite non-consumed buffers
2227 * and the buffer is full: the record is lost.
2228 */
2229 nr_lost = v_read(config, &buf->records_lost_full);
2230 v_inc(config, &buf->records_lost_full);
2231 if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0) {
2232 DBG("%lu or more records lost in (%s:%d) (buffer full)\n",
2233 nr_lost + 1, chan->backend.name,
2234 buf->backend.cpu);
2235 }
2236 return -ENOBUFS;
2237 } else {
2238 /*
2239 * Next subbuffer not being written to, and we
2240 * are either in overwrite mode or the buffer is
2241 * not full. It's safe to write in this new
2242 * subbuffer.
2243 */
2244 }
2245 } else {
2246 unsigned long nr_lost;
2247
2248 /*
2249 * Next subbuffer reserve offset does not match the
2250 * commit offset, and this did not involve update to the
2251 * reserve counter. Drop record in producer-consumer and
2252 * overwrite mode. Caused by either a writer OOPS or too
2253 * many nested writes over a reserve/commit pair.
2254 */
2255 nr_lost = v_read(config, &buf->records_lost_wrap);
2256 v_inc(config, &buf->records_lost_wrap);
2257 if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0) {
2258 DBG("%lu or more records lost in (%s:%d) (wrap-around)\n",
2259 nr_lost + 1, chan->backend.name,
2260 buf->backend.cpu);
2261 }
2262 return -EIO;
2263 }
2264 offsets->size =
2265 config->cb.record_header_size(config, chan,
2266 offsets->begin,
2267 &offsets->pre_header_padding,
2268 ctx, client_ctx);
2269 offsets->size +=
2270 lttng_ust_ring_buffer_align(offsets->begin + offsets->size,
2271 ctx->largest_align)
2272 + ctx->data_size;
2273 if (caa_unlikely(subbuf_offset(offsets->begin, chan)
2274 + offsets->size > chan->backend.subbuf_size)) {
2275 unsigned long nr_lost;
2276
2277 /*
2278 * Record too big for subbuffers, report error, don't
2279 * complete the sub-buffer switch.
2280 */
2281 nr_lost = v_read(config, &buf->records_lost_big);
2282 v_inc(config, &buf->records_lost_big);
2283 if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0) {
2284 DBG("%lu or more records lost in (%s:%d) record size "
2285 " of %zu bytes is too large for buffer\n",
2286 nr_lost + 1, chan->backend.name,
2287 buf->backend.cpu, offsets->size);
2288 }
2289 return -ENOSPC;
2290 } else {
2291 /*
2292 * We just made a successful buffer switch and the
2293 * record fits in the new subbuffer. Let's write.
2294 */
2295 }
2296 } else {
2297 /*
2298 * Record fits in the current buffer and we are not on a switch
2299 * boundary. It's safe to write.
2300 */
2301 }
2302 offsets->end = offsets->begin + offsets->size;
2303
2304 if (caa_unlikely(subbuf_offset(offsets->end, chan) == 0)) {
2305 /*
2306 * The offset_end will fall at the very beginning of the next
2307 * subbuffer.
2308 */
2309 offsets->switch_new_end = 1; /* For offsets->begin */
2310 }
2311 return 0;
2312 }
2313
2314 /**
2315 * lib_ring_buffer_reserve_slow - Atomic slot reservation in a buffer.
2316 * @ctx: ring buffer context.
2317 *
2318 * Return : -ENOBUFS if not enough space, -ENOSPC if event size too large,
2319 * -EIO for other errors, else returns 0.
2320 * It will take care of sub-buffer switching.
2321 */
2322 int lib_ring_buffer_reserve_slow(struct lttng_ust_ring_buffer_ctx *ctx,
2323 void *client_ctx)
2324 {
2325 struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
2326 struct lttng_ust_ring_buffer_channel *chan = ctx_private->chan;
2327 struct lttng_ust_shm_handle *handle = chan->handle;
2328 const struct lttng_ust_ring_buffer_config *config = &chan->backend.config;
2329 struct lttng_ust_ring_buffer *buf;
2330 struct switch_offsets offsets;
2331 int ret;
2332
2333 if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
2334 buf = shmp(handle, chan->backend.buf[ctx_private->reserve_cpu].shmp);
2335 else
2336 buf = shmp(handle, chan->backend.buf[0].shmp);
2337 if (!buf)
2338 return -EIO;
2339 ctx_private->buf = buf;
2340
2341 offsets.size = 0;
2342
2343 do {
2344 ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
2345 ctx, client_ctx);
2346 if (caa_unlikely(ret))
2347 return ret;
2348 } while (caa_unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
2349 offsets.end)
2350 != offsets.old));
2351
2352 /*
2353 * Atomically update last_tsc. This update races against concurrent
2354 * atomic updates, but the race will always cause supplementary full TSC
2355 * records, never the opposite (missing a full TSC record when it would
2356 * be needed).
2357 */
2358 save_last_tsc(config, buf, ctx_private->tsc);
2359
2360 /*
2361 * Push the reader if necessary
2362 */
2363 lib_ring_buffer_reserve_push_reader(buf, chan, offsets.end - 1);
2364
2365 /*
2366 * Clear noref flag for this subbuffer.
2367 */
2368 lib_ring_buffer_clear_noref(config, &buf->backend,
2369 subbuf_index(offsets.end - 1, chan),
2370 handle);
2371
2372 /*
2373 * Switch old subbuffer if needed.
2374 */
2375 if (caa_unlikely(offsets.switch_old_end)) {
2376 lib_ring_buffer_clear_noref(config, &buf->backend,
2377 subbuf_index(offsets.old - 1, chan),
2378 handle);
2379 lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx_private->tsc, handle);
2380 }
2381
2382 /*
2383 * Populate new subbuffer.
2384 */
2385 if (caa_unlikely(offsets.switch_new_start))
2386 lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx_private->tsc, handle);
2387
2388 if (caa_unlikely(offsets.switch_new_end))
2389 lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx_private->tsc, handle);
2390
2391 ctx_private->slot_size = offsets.size;
2392 ctx_private->pre_offset = offsets.begin;
2393 ctx_private->buf_offset = offsets.begin + offsets.pre_header_padding;
2394 return 0;
2395 }
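
/*
 * Client-side sketch (illustrative; assumes the reserve/commit
 * fast-path wrappers from the frontend API, with hypothetical payload
 * handling):
 *
 *	struct lttng_ust_ring_buffer_ctx ctx;
 *
 *	// initialize ctx with record data_size and largest_align, then:
 *	if (lib_ring_buffer_reserve(config, &ctx, client_ctx) == 0) {
 *		// write the payload at the reserved buf_offset
 *		lib_ring_buffer_commit(config, &ctx);
 *	}
 *	// on -ENOBUFS, -ENOSPC or -EIO the record is dropped, and the
 *	// matching records_lost_* counter has typically been incremented.
 */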
2396
2397 static
2398 void lib_ring_buffer_vmcore_check_deliver(const struct lttng_ust_ring_buffer_config *config,
2399 struct lttng_ust_ring_buffer *buf,
2400 unsigned long commit_count,
2401 unsigned long idx,
2402 struct lttng_ust_shm_handle *handle)
2403 {
2404 struct commit_counters_hot *cc_hot;
2405
2406 if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
2407 return;
2408 cc_hot = shmp_index(handle, buf->commit_hot, idx);
2409 if (!cc_hot)
2410 return;
2411 v_set(config, &cc_hot->seq, commit_count);
2412 }
2413
2414 /*
2415 * The ring buffer can count events recorded and overwritten per buffer,
2416 * but it is disabled by default due to its performance overhead.
2417 */
2418 #ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
2419 static
2420 void deliver_count_events(const struct lttng_ust_ring_buffer_config *config,
2421 struct lttng_ust_ring_buffer *buf,
2422 unsigned long idx,
2423 struct lttng_ust_shm_handle *handle)
2424 {
2425 v_add(config, subbuffer_get_records_count(config,
2426 &buf->backend, idx, handle),
2427 &buf->records_count);
2428 v_add(config, subbuffer_count_records_overrun(config,
2429 &buf->backend, idx, handle),
2430 &buf->records_overrun);
2431 }
2432 #else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
2433 static
2434 void deliver_count_events(
2435 const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
2436 struct lttng_ust_ring_buffer *buf __attribute__((unused)),
2437 unsigned long idx __attribute__((unused)),
2438 struct lttng_ust_shm_handle *handle __attribute__((unused)))
2439 {
2440 }
2441 #endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
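
/*
 * The accounting above is compiled in by defining the macro at build
 * time, e.g. (illustrative, assuming an autotools-style build):
 *
 *	./configure CFLAGS="-O2 -DLTTNG_RING_BUFFER_COUNT_EVENTS"
 */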
2442
2443 void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_ring_buffer_config *config,
2444 struct lttng_ust_ring_buffer *buf,
2445 struct lttng_ust_ring_buffer_channel *chan,
2446 unsigned long offset,
2447 unsigned long commit_count,
2448 unsigned long idx,
2449 struct lttng_ust_shm_handle *handle,
2450 uint64_t tsc __attribute__((unused)))
2451 {
2452 unsigned long old_commit_count = commit_count
2453 - chan->backend.subbuf_size;
2454 struct commit_counters_cold *cc_cold;
2455
2456 /*
2457 * If we succeed at updating cc_sb below, we are the subbuffer
2458 * writer delivering the subbuffer. This deals with concurrent
2459 * updates of the "cc" value without adding an add_return atomic
2460 * operation to the fast path.
2461 *
2462 * We are doing the delivery in two steps:
2463 * - First, we cmpxchg() cc_sb to the new value
2464 * old_commit_count + 1. This ensures that we are the only
2465 * subbuffer user successfully filling the subbuffer, but we
2466 * do _not_ set the cc_sb value to "commit_count" yet.
2467 * Therefore, other writers that would wrap around the ring
2468 * buffer and try to start writing to our subbuffer would
2469 * have to drop records, because it would appear as
2470 * non-filled.
2471 * We therefore have exclusive access to the subbuffer control
2472 * structures. This mutual exclusion with other writers is
2473 * crucially important to perform record overruns count in
2474 * flight recorder mode locklessly.
2475 * - When we are ready to release the subbuffer (either for
2476 * reading or for overrun by other writers), we simply set the
2477 * cc_sb value to "commit_count" and perform delivery.
2478 *
2479 * The subbuffer size is at least 2 bytes (minimum size: 1 page).
2480 * This guarantees that old_commit_count + 1 != commit_count.
2481 */
2482
2483 /*
2484 * Order prior updates to reserve count prior to the
2485 * commit_cold cc_sb update.
2486 */
2487 cmm_smp_wmb();
2488 cc_cold = shmp_index(handle, buf->commit_cold, idx);
2489 if (!cc_cold)
2490 return;
2491 if (caa_likely(v_cmpxchg(config, &cc_cold->cc_sb,
2492 old_commit_count, old_commit_count + 1)
2493 == old_commit_count)) {
2494 uint64_t *ts_end;
2495
2496 /*
2497 * Start of exclusive subbuffer access. We are
2498 * guaranteed to be the last writer in this subbuffer
2499 * and any other writer trying to access this subbuffer
2500 * in this state is required to drop records.
2501 *
2502 * We can read the ts_end for the current sub-buffer
2503 * which has been saved by the very last space
2504 * reservation for the current sub-buffer.
2505 *
2506 * Order increment of commit counter before reading ts_end.
2507 */
2508 cmm_smp_mb();
2509 ts_end = shmp_index(handle, buf->ts_end, idx);
2510 if (!ts_end)
2511 return;
2512 deliver_count_events(config, buf, idx, handle);
2513 config->cb.buffer_end(buf, *ts_end, idx,
2514 lib_ring_buffer_get_data_size(config,
2515 buf,
2516 idx,
2517 handle),
2518 handle);
2519
2520 /*
2521 * Increment the packet counter while we have exclusive
2522 * access.
2523 */
2524 subbuffer_inc_packet_count(config, &buf->backend, idx, handle);
2525
2526 /*
2527 * Set noref flag and offset for this subbuffer id.
2528 * Contains a memory barrier that ensures counter stores
2529 * are ordered before set noref and offset.
2530 */
2531 lib_ring_buffer_set_noref_offset(config, &buf->backend, idx,
2532 buf_trunc_val(offset, chan), handle);
2533
2534 /*
2535 * Order set_noref and record counter updates before the
2536 * end of subbuffer exclusive access. Orders with
2537 * respect to writers coming into the subbuffer after
2538 * wrap around, and also order wrt concurrent readers.
2539 */
2540 cmm_smp_mb();
2541 /* End of exclusive subbuffer access */
2542 v_set(config, &cc_cold->cc_sb, commit_count);
2543 /*
2544 * Order later updates to reserve count after
2545 * the commit cold cc_sb update.
2546 */
2547 cmm_smp_wmb();
2548 lib_ring_buffer_vmcore_check_deliver(config, buf,
2549 commit_count, idx, handle);
2550
2551 /*
2552 * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
2553 */
2554 if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
2555 && uatomic_read(&buf->active_readers)
2556 && lib_ring_buffer_poll_deliver(config, buf, chan, handle)) {
2557 lib_ring_buffer_wakeup(buf, handle);
2558 }
2559 }
2560 }
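
/*
 * Worked example of the two-step delivery above (illustrative
 * numbers, not part of the original file): with subbuf_size == 4096,
 * the first delivery of a sub-buffer sees commit_count == 4096 and
 * old_commit_count == 0. The delivering writer cmpxchg()es cc_sb from
 * 0 to 1, a value that cannot collide with the final commit count
 * since sub-buffers are at least 2 bytes, thereby claiming exclusive
 * access; once the end-of-buffer callback and counter updates are
 * done, storing cc_sb = 4096 releases the sub-buffer to readers and
 * to writers wrapping around.
 */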
2561
2562 /*
2563 * Force a read of the TLS variables (implies TLS allocation at dlopen time).
2564 */
2565 void lttng_ringbuffer_alloc_tls(void)
2566 {
2567 asm volatile ("" : : "m" (URCU_TLS(lib_ring_buffer_nesting)));
2568 }
2569
2570 void lib_ringbuffer_signal_init(void)
2571 {
2572 sigset_t mask;
2573 int ret;
2574
2575 /*
2576 * Block the signals for the entire process, so that only our
2577 * dedicated thread processes them.
2578 */
2579 rb_setmask(&mask);
2580 ret = pthread_sigmask(SIG_BLOCK, &mask, NULL);
2581 if (ret) {
2582 errno = ret;
2583 PERROR("pthread_sigmask");
2584 }
2585 }
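
/*
 * Companion sketch (hypothetical, not part of the original file): with
 * the ring buffer signals blocked process-wide as above, one dedicated
 * thread can consume them synchronously with sigwait(3):
 *
 *	static void *rb_signal_thread(void *arg __attribute__((unused)))
 *	{
 *		sigset_t mask;
 *		int sig;
 *
 *		rb_setmask(&mask);
 *		for (;;) {
 *			if (!sigwait(&mask, &sig))
 *				handle_rb_signal(sig);	// hypothetical handler
 *		}
 *		return NULL;
 *	}
 */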