X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=libringbuffer%2Fring_buffer_frontend.c;h=45c0659bfafc8bcdb83077babd00e06068d5bf74;hb=ba7025296f76b11cc8547f5c1000df9b5744b529;hp=0b4b89f7306fee1b77bf4e9b44c9ec48e47af643;hpb=e095d8031307428069e549360284388e06683293;p=lttng-ust.git diff --git a/libringbuffer/ring_buffer_frontend.c b/libringbuffer/ring_buffer_frontend.c index 0b4b89f7..45c0659b 100644 --- a/libringbuffer/ring_buffer_frontend.c +++ b/libringbuffer/ring_buffer_frontend.c @@ -1,7 +1,22 @@ /* * ring_buffer_frontend.c * - * (C) Copyright 2005-2010 - Mathieu Desnoyers + * Copyright (C) 2005-2012 Mathieu Desnoyers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; only + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + * * * Ring buffer wait-free buffer synchronization. Producer-consumer and flight * recorder (overwrite) modes. See thesis: @@ -34,26 +49,48 @@ * - splice one subbuffer worth of data to a pipe * - splice the data from pipe to disk/network * - put_subbuf - * - * Dual LGPL v2.1/GPL v2 license. */ +#define _LGPL_SOURCE #include #include #include +#include #include +#include +#include +#include +#include #include #include +#include +#include +#include #include "smp.h" -#include +#include +#include "vatomic.h" #include "backend.h" #include "frontend.h" #include "shm.h" +#include "rb-init.h" +#include "../liblttng-ust/compat.h" /* For ENODATA */ -#ifndef max -#define max(a, b) ((a) > (b) ? (a) : (b)) -#endif +/* Print DBG() messages about events lost only every 1048576 hits */ +#define DBG_PRINT_NR_LOST (1UL << 20) + +#define LTTNG_UST_RB_SIG_FLUSH SIGRTMIN +#define LTTNG_UST_RB_SIG_READ SIGRTMIN + 1 +#define LTTNG_UST_RB_SIG_TEARDOWN SIGRTMIN + 2 +#define CLOCKID CLOCK_MONOTONIC +#define LTTNG_UST_RING_BUFFER_GET_RETRY 10 +#define LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS 10 +#define RETRY_DELAY_MS 100 /* 100 ms. */ + +/* + * Non-static to ensure the compiler does not optimize away the xor. + */ +uint8_t lttng_crash_magic_xor[] = RB_CRASH_DUMP_ABI_MAGIC_XOR; /* * Use POSIX SHM: shm_open(3) and shm_unlink(3). @@ -81,26 +118,52 @@ struct switch_offsets { switch_old_end:1; }; -__thread unsigned int lib_ring_buffer_nesting; +DEFINE_URCU_TLS(unsigned int, lib_ring_buffer_nesting); + +/* + * wakeup_fd_mutex protects wakeup fd use by timer from concurrent + * close. + */ +static pthread_mutex_t wakeup_fd_mutex = PTHREAD_MUTEX_INITIALIZER; static void lib_ring_buffer_print_errors(struct channel *chan, - struct lib_ring_buffer *buf, int cpu, - struct shm_handle *handle); + struct lttng_ust_lib_ring_buffer *buf, int cpu, + struct lttng_ust_shm_handle *handle); /* - * Must be called under cpu hotplug protection. + * Handle timer teardown race wrt memory free of private data by + * ring buffer signals are handled by a single thread, which permits + * a synchronization point between handling of each signal. + * Protected by the lock within the structure. 
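
The timer_signal_data singleton defined just below anchors a classic once-only setup pattern: the first channel that needs a timer spawns the signal-handling thread under the lock; later callers observe setup_done and return. A minimal stand-alone sketch of that half of the scheme, using the same mutex-plus-flag idiom (names here are illustrative, not the library's):

        #include <pthread.h>
        #include <stdbool.h>

        struct sig_thread_state {
                pthread_t tid;
                bool setup_done;
                pthread_mutex_t lock;
        };

        static struct sig_thread_state sig_state = {
                .lock = PTHREAD_MUTEX_INITIALIZER,
        };

        static void *sig_loop(void *arg)
        {
                (void) arg;
                for (;;) {
                        /* sigwaitinfo() dispatch loop would live here. */
                }
                return NULL;
        }

        /* Spawn the signal-handling thread exactly once, even with racing callers. */
        static void setup_sig_thread_once(void)
        {
                pthread_mutex_lock(&sig_state.lock);
                if (!sig_state.setup_done) {
                        if (!pthread_create(&sig_state.tid, NULL, sig_loop, NULL)) {
                                (void) pthread_detach(sig_state.tid);
                                sig_state.setup_done = true;
                        }
                }
                pthread_mutex_unlock(&sig_state.lock);
        }

Every timer-start path later in this file funnels through an equivalent of setup_sig_thread_once(), which is why only one thread ever waits on the ring-buffer signals.
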
*/ -void lib_ring_buffer_free(struct lib_ring_buffer *buf, - struct shm_handle *handle) -{ - struct channel *chan = shmp(handle, buf->backend.chan); +struct timer_signal_data { + pthread_t tid; /* thread id managing signals */ + int setup_done; + int qs_done; + pthread_mutex_t lock; +}; - lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu, handle); - /* buf->commit_hot will be freed by shm teardown */ - /* buf->commit_cold will be freed by shm teardown */ +static struct timer_signal_data timer_signal = { + .tid = 0, + .setup_done = 0, + .qs_done = 0, + .lock = PTHREAD_MUTEX_INITIALIZER, +}; - lib_ring_buffer_backend_free(&buf->backend); +static bool lttng_ust_allow_blocking; + +void lttng_ust_ringbuffer_set_allow_blocking(void) +{ + lttng_ust_allow_blocking = true; +} + +/* Get blocking timeout, in ms */ +static int lttng_ust_ringbuffer_get_timeout(struct channel *chan) +{ + if (!lttng_ust_allow_blocking) + return 0; + return chan->u.s.blocking_timeout_ms; } /** @@ -112,22 +175,40 @@ void lib_ring_buffer_free(struct lib_ring_buffer *buf, * should not be using the iterator concurrently with reset. The previous * current iterator record is reset. */ -void lib_ring_buffer_reset(struct lib_ring_buffer *buf, - struct shm_handle *handle) +void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf, + struct lttng_ust_shm_handle *handle) { - struct channel *chan = shmp(handle, buf->backend.chan); - const struct lib_ring_buffer_config *config = &chan->backend.config; + struct channel *chan; + const struct lttng_ust_lib_ring_buffer_config *config; unsigned int i; + chan = shmp(handle, buf->backend.chan); + if (!chan) + return; + config = &chan->backend.config; /* * Reset iterator first. It will put the subbuffer if it currently holds * it. */ v_set(config, &buf->offset, 0); for (i = 0; i < chan->backend.num_subbuf; i++) { - v_set(config, &shmp_index(handle, buf->commit_hot, i)->cc, 0); - v_set(config, &shmp_index(handle, buf->commit_hot, i)->seq, 0); - v_set(config, &shmp_index(handle, buf->commit_cold, i)->cc_sb, 0); + struct commit_counters_hot *cc_hot; + struct commit_counters_cold *cc_cold; + uint64_t *ts_end; + + cc_hot = shmp_index(handle, buf->commit_hot, i); + if (!cc_hot) + return; + cc_cold = shmp_index(handle, buf->commit_cold, i); + if (!cc_cold) + return; + ts_end = shmp_index(handle, buf->ts_end, i); + if (!ts_end) + return; + v_set(config, &cc_hot->cc, 0); + v_set(config, &cc_hot->seq, 0); + v_set(config, &cc_cold->cc_sb, 0); + *ts_end = 0; } uatomic_set(&buf->consumed, 0); uatomic_set(&buf->record_disabled, 0); @@ -164,38 +245,123 @@ void channel_reset(struct channel *chan) /* Don't reset reader reference count */ } +static +void init_crash_abi(const struct lttng_ust_lib_ring_buffer_config *config, + struct lttng_crash_abi *crash_abi, + struct lttng_ust_lib_ring_buffer *buf, + struct channel_backend *chanb, + struct shm_object *shmobj, + struct lttng_ust_shm_handle *handle) +{ + int i; + + for (i = 0; i < RB_CRASH_DUMP_ABI_MAGIC_LEN; i++) + crash_abi->magic[i] = lttng_crash_magic_xor[i] ^ 0xFF; + crash_abi->mmap_length = shmobj->memory_map_size; + crash_abi->endian = RB_CRASH_ENDIAN; + crash_abi->major = RB_CRASH_DUMP_ABI_MAJOR; + crash_abi->minor = RB_CRASH_DUMP_ABI_MINOR; + crash_abi->word_size = sizeof(unsigned long); + crash_abi->layout_type = LTTNG_CRASH_TYPE_UST; + + /* Offset of fields */ + crash_abi->offset.prod_offset = + (uint32_t) ((char *) &buf->offset - (char *) buf); + crash_abi->offset.consumed_offset = + (uint32_t) ((char *) &buf->consumed - (char *) buf); 
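
The magic initialization at the top of init_crash_abi() above stores the ABI signature XOR-ed with 0xFF and decodes it at buffer-creation time, so the cleartext magic exists in the mapped buffers but never as a literal byte pattern in the executable; a crash-dump scanner can then key on it without false positives from the binary itself. A toy stand-alone illustration of the trick (the four byte values are invented for the example):

        #include <stdint.h>
        #include <stdio.h>

        /* Stored pre-XORed: the decoded signature never lands in the binary. */
        static const uint8_t magic_xor[4] = {
                0xAA ^ 0xFF, 0x55 ^ 0xFF, 0xC0 ^ 0xFF, 0xDE ^ 0xFF,
        };

        int main(void)
        {
                uint8_t magic[4];
                int i;

                for (i = 0; i < 4; i++)
                        magic[i] = magic_xor[i] ^ 0xFF;        /* decode at run time */
                printf("magic: %02x %02x %02x %02x\n",
                        magic[0], magic[1], magic[2], magic[3]);
                return 0;
        }
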
+ crash_abi->offset.commit_hot_array = + (uint32_t) ((char *) shmp(handle, buf->commit_hot) - (char *) buf); + crash_abi->offset.commit_hot_seq = + offsetof(struct commit_counters_hot, seq); + crash_abi->offset.buf_wsb_array = + (uint32_t) ((char *) shmp(handle, buf->backend.buf_wsb) - (char *) buf); + crash_abi->offset.buf_wsb_id = + offsetof(struct lttng_ust_lib_ring_buffer_backend_subbuffer, id); + crash_abi->offset.sb_array = + (uint32_t) ((char *) shmp(handle, buf->backend.array) - (char *) buf); + crash_abi->offset.sb_array_shmp_offset = + offsetof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp, + shmp._ref.offset); + crash_abi->offset.sb_backend_p_offset = + offsetof(struct lttng_ust_lib_ring_buffer_backend_pages, + p._ref.offset); + + /* Field length */ + crash_abi->length.prod_offset = sizeof(buf->offset); + crash_abi->length.consumed_offset = sizeof(buf->consumed); + crash_abi->length.commit_hot_seq = + sizeof(((struct commit_counters_hot *) NULL)->seq); + crash_abi->length.buf_wsb_id = + sizeof(((struct lttng_ust_lib_ring_buffer_backend_subbuffer *) NULL)->id); + crash_abi->length.sb_array_shmp_offset = + sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages_shmp *) NULL)->shmp._ref.offset); + crash_abi->length.sb_backend_p_offset = + sizeof(((struct lttng_ust_lib_ring_buffer_backend_pages *) NULL)->p._ref.offset); + + /* Array stride */ + crash_abi->stride.commit_hot_array = + sizeof(struct commit_counters_hot); + crash_abi->stride.buf_wsb_array = + sizeof(struct lttng_ust_lib_ring_buffer_backend_subbuffer); + crash_abi->stride.sb_array = + sizeof(struct lttng_ust_lib_ring_buffer_backend_pages_shmp); + + /* Buffer constants */ + crash_abi->buf_size = chanb->buf_size; + crash_abi->subbuf_size = chanb->subbuf_size; + crash_abi->num_subbuf = chanb->num_subbuf; + crash_abi->mode = (uint32_t) chanb->config.mode; + + if (config->cb.content_size_field) { + size_t offset, length; + + config->cb.content_size_field(config, &offset, &length); + crash_abi->offset.content_size = offset; + crash_abi->length.content_size = length; + } else { + crash_abi->offset.content_size = 0; + crash_abi->length.content_size = 0; + } + if (config->cb.packet_size_field) { + size_t offset, length; + + config->cb.packet_size_field(config, &offset, &length); + crash_abi->offset.packet_size = offset; + crash_abi->length.packet_size = length; + } else { + crash_abi->offset.packet_size = 0; + crash_abi->length.packet_size = 0; + } +} + /* * Must be called under cpu hotplug protection. 
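
init_crash_abi() boils down to one idea: record, for every field a post-mortem tool must read, its byte offset, length and array stride relative to the buffer base, so the tool can walk a raw memory image without compiling against the library's headers. A reduced sketch of the same self-describing-layout idea (struct names invented for the example):

        #include <stddef.h>
        #include <stdint.h>
        #include <stdio.h>

        struct record { uint64_t seq; uint32_t len; };
        struct buf { unsigned long offset; struct record recs[8]; };

        /* Layout description a post-mortem tool can apply to a raw dump. */
        struct layout {
                uint32_t prod_offset;   /* byte offset of buf.offset */
                uint32_t recs_offset;   /* byte offset of buf.recs[] */
                uint32_t rec_stride;    /* sizeof one array element */
                uint32_t seq_offset;    /* offset of seq inside a record */
        };

        static void describe(struct layout *l)
        {
                l->prod_offset = (uint32_t) offsetof(struct buf, offset);
                l->recs_offset = (uint32_t) offsetof(struct buf, recs);
                l->rec_stride = (uint32_t) sizeof(struct record);
                l->seq_offset = (uint32_t) offsetof(struct record, seq);
        }

        int main(void)
        {
                struct layout l;

                describe(&l);
                printf("prod@%u recs@%u stride %u seq@%u\n",
                        (unsigned) l.prod_offset, (unsigned) l.recs_offset,
                        (unsigned) l.rec_stride, (unsigned) l.seq_offset);
                return 0;
        }

A reader then computes base + recs_offset + i * rec_stride + seq_offset instead of dereferencing typed pointers.
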
*/ -int lib_ring_buffer_create(struct lib_ring_buffer *buf, +int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf, struct channel_backend *chanb, int cpu, - struct shm_handle *handle, + struct lttng_ust_shm_handle *handle, struct shm_object *shmobj) { - const struct lib_ring_buffer_config *config = &chanb->config; + const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config; struct channel *chan = caa_container_of(chanb, struct channel, backend); - void *priv = chanb->priv; - unsigned int num_subbuf; + struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb; + struct channel *shmp_chan; + struct commit_counters_hot *cc_hot; + void *priv = channel_get_private(chan); size_t subbuf_header_size; - u64 tsc; + uint64_t tsc; int ret; /* Test for cpu hotplug */ if (buf->backend.allocated) return 0; - ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend, - cpu, handle, shmobj); - if (ret) - return ret; - align_shm(shmobj, __alignof__(struct commit_counters_hot)); set_shmp(buf->commit_hot, zalloc_shm(shmobj, sizeof(struct commit_counters_hot) * chan->backend.num_subbuf)); if (!shmp(handle, buf->commit_hot)) { - ret = -ENOMEM; - goto free_chanbuf; + return -ENOMEM; } align_shm(shmobj, __alignof__(struct commit_counters_cold)); @@ -207,8 +373,21 @@ int lib_ring_buffer_create(struct lib_ring_buffer *buf, goto free_commit; } - num_subbuf = chan->backend.num_subbuf; - //init_waitqueue_head(&buf->read_wait); + align_shm(shmobj, __alignof__(uint64_t)); + set_shmp(buf->ts_end, + zalloc_shm(shmobj, + sizeof(uint64_t) * chan->backend.num_subbuf)); + if (!shmp(handle, buf->ts_end)) { + ret = -ENOMEM; + goto free_commit_cold; + } + + + ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend, + cpu, handle, shmobj); + if (ret) { + goto free_init; + } /* * Write the subbuffer header for first subbuffer so we know the total @@ -216,194 +395,553 @@ int lib_ring_buffer_create(struct lib_ring_buffer *buf, */ subbuf_header_size = config->cb.subbuffer_header_size(); v_set(config, &buf->offset, subbuf_header_size); - subbuffer_id_clear_noref(config, &shmp_index(handle, buf->backend.buf_wsb, 0)->id); - tsc = config->cb.ring_buffer_clock_read(shmp(handle, buf->backend.chan)); + wsb = shmp_index(handle, buf->backend.buf_wsb, 0); + if (!wsb) { + ret = -EPERM; + goto free_chanbuf; + } + subbuffer_id_clear_noref(config, &wsb->id); + shmp_chan = shmp(handle, buf->backend.chan); + if (!shmp_chan) { + ret = -EPERM; + goto free_chanbuf; + } + tsc = config->cb.ring_buffer_clock_read(shmp_chan); config->cb.buffer_begin(buf, tsc, 0, handle); - v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->cc); + cc_hot = shmp_index(handle, buf->commit_hot, 0); + if (!cc_hot) { + ret = -EPERM; + goto free_chanbuf; + } + v_add(config, subbuf_header_size, &cc_hot->cc); + v_add(config, subbuf_header_size, &cc_hot->seq); if (config->cb.buffer_create) { ret = config->cb.buffer_create(buf, priv, cpu, chanb->name, handle); if (ret) - goto free_init; + goto free_chanbuf; } + + init_crash_abi(config, &buf->crash_abi, buf, chanb, shmobj, handle); + buf->backend.allocated = 1; return 0; /* Error handling */ free_init: + /* ts_end will be freed by shm teardown */ +free_commit_cold: /* commit_cold will be freed by shm teardown */ free_commit: /* commit_hot will be freed by shm teardown */ free_chanbuf: - lib_ring_buffer_backend_free(&buf->backend); return ret; } -#if 0 -static void switch_buffer_timer(unsigned long data) +static +void lib_ring_buffer_channel_switch_timer(int sig, 
siginfo_t *si, void *uc) { - struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data; - struct channel *chan = shmp(handle, buf->backend.chan); - const struct lib_ring_buffer_config *config = &chan->backend.config; + const struct lttng_ust_lib_ring_buffer_config *config; + struct lttng_ust_shm_handle *handle; + struct channel *chan; + int cpu; + + assert(CMM_LOAD_SHARED(timer_signal.tid) == pthread_self()); + + chan = si->si_value.sival_ptr; + handle = chan->handle; + config = &chan->backend.config; + + DBG("Switch timer for channel %p\n", chan); /* * Only flush buffers periodically if readers are active. */ - if (uatomic_read(&buf->active_readers) || uatomic_read(&buf->active_shadow_readers)) - lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE, handle); - - //TODO timers - //if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) - // mod_timer_pinned(&buf->switch_timer, - // jiffies + chan->switch_timer_interval); - //else - // mod_timer(&buf->switch_timer, - // jiffies + chan->switch_timer_interval); + pthread_mutex_lock(&wakeup_fd_mutex); + if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) { + for_each_possible_cpu(cpu) { + struct lttng_ust_lib_ring_buffer *buf = + shmp(handle, chan->backend.buf[cpu].shmp); + + if (!buf) + goto end; + if (uatomic_read(&buf->active_readers)) + lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE, + chan->handle); + } + } else { + struct lttng_ust_lib_ring_buffer *buf = + shmp(handle, chan->backend.buf[0].shmp); + + if (!buf) + goto end; + if (uatomic_read(&buf->active_readers)) + lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE, + chan->handle); + } +end: + pthread_mutex_unlock(&wakeup_fd_mutex); + return; } -#endif //0 -static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf, - struct shm_handle *handle) +static +int lib_ring_buffer_poll_deliver(const struct lttng_ust_lib_ring_buffer_config *config, + struct lttng_ust_lib_ring_buffer *buf, + struct channel *chan, + struct lttng_ust_shm_handle *handle) { - struct channel *chan = shmp(handle, buf->backend.chan); - const struct lib_ring_buffer_config *config = &chan->backend.config; + unsigned long consumed_old, consumed_idx, commit_count, write_offset; + struct commit_counters_cold *cc_cold; - if (!chan->switch_timer_interval || buf->switch_timer_enabled) - return; - //TODO - //init_timer(&buf->switch_timer); - //buf->switch_timer.function = switch_buffer_timer; - //buf->switch_timer.expires = jiffies + chan->switch_timer_interval; - //buf->switch_timer.data = (unsigned long)buf; - //if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) - // add_timer_on(&buf->switch_timer, buf->backend.cpu); - //else - // add_timer(&buf->switch_timer); - buf->switch_timer_enabled = 1; + consumed_old = uatomic_read(&buf->consumed); + consumed_idx = subbuf_index(consumed_old, chan); + cc_cold = shmp_index(handle, buf->commit_cold, consumed_idx); + if (!cc_cold) + return 0; + commit_count = v_read(config, &cc_cold->cc_sb); + /* + * No memory barrier here, since we are only interested + * in a statistically correct polling result. The next poll will + * get the data is we are racing. The mb() that ensures correct + * memory order is in get_subbuf. + */ + write_offset = v_read(config, &buf->offset); + + /* + * Check that the subbuffer we are trying to consume has been + * already fully committed. 
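
The switch-timer handler above recovers its channel from si->si_value.sival_ptr: the pointer is attached when the timer is created, so one process-wide RT signal can serve any number of channels. A self-contained sketch of that round trip, assuming a sigwaitinfo()-style receiver as this file uses (link with -lrt on older glibc):

        #include <signal.h>
        #include <stdio.h>
        #include <string.h>
        #include <time.h>

        struct chan { const char *name; };

        int main(void)
        {
                struct chan c = { "chan0" };
                struct sigevent sev;
                struct itimerspec its = { { 1, 0 }, { 1, 0 } };  /* 1 s, periodic */
                sigset_t mask;
                siginfo_t info;
                timer_t id;

                sigemptyset(&mask);
                sigaddset(&mask, SIGRTMIN);
                sigprocmask(SIG_BLOCK, &mask, NULL);    /* deliver via sigwaitinfo */

                memset(&sev, 0, sizeof(sev));
                sev.sigev_notify = SIGEV_SIGNAL;
                sev.sigev_signo = SIGRTMIN;
                sev.sigev_value.sival_ptr = &c;         /* payload: which channel */
                timer_create(CLOCK_MONOTONIC, &sev, &id);
                timer_settime(id, 0, &its, NULL);

                if (sigwaitinfo(&mask, &info) == SIGRTMIN) {
                        struct chan *hit = info.si_value.sival_ptr;
                        printf("timer fired for %s\n", hit->name);
                }
                timer_delete(id);
                return 0;
        }
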
+ */ + + if (((commit_count - chan->backend.subbuf_size) + & chan->commit_count_mask) + - (buf_trunc(consumed_old, chan) + >> chan->backend.num_subbuf_order) + != 0) + return 0; + + /* + * Check that we are not about to read the same subbuffer in + * which the writer head is. + */ + if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_old, chan) + == 0) + return 0; + + return 1; } -static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf, - struct shm_handle *handle) +static +void lib_ring_buffer_wakeup(struct lttng_ust_lib_ring_buffer *buf, + struct lttng_ust_shm_handle *handle) { - struct channel *chan = shmp(handle, buf->backend.chan); + int wakeup_fd = shm_get_wakeup_fd(handle, &buf->self._ref); + sigset_t sigpipe_set, pending_set, old_set; + int ret, sigpipe_was_pending = 0; - if (!chan->switch_timer_interval || !buf->switch_timer_enabled) + if (wakeup_fd < 0) return; - //TODO - //del_timer_sync(&buf->switch_timer); - buf->switch_timer_enabled = 0; + /* + * Wake-up the other end by writing a null byte in the pipe + * (non-blocking). Important note: Because writing into the + * pipe is non-blocking (and therefore we allow dropping wakeup + * data, as long as there is wakeup data present in the pipe + * buffer to wake up the consumer), the consumer should perform + * the following sequence for waiting: + * 1) empty the pipe (reads). + * 2) check if there is data in the buffer. + * 3) wait on the pipe (poll). + * + * Discard the SIGPIPE from write(), not disturbing any SIGPIPE + * that might be already pending. If a bogus SIGPIPE is sent to + * the entire process concurrently by a malicious user, it may + * be simply discarded. + */ + ret = sigemptyset(&pending_set); + assert(!ret); + /* + * sigpending returns the mask of signals that are _both_ + * blocked for the thread _and_ pending for either the thread or + * the entire process. + */ + ret = sigpending(&pending_set); + assert(!ret); + sigpipe_was_pending = sigismember(&pending_set, SIGPIPE); + /* + * If sigpipe was pending, it means it was already blocked, so + * no need to block it. + */ + if (!sigpipe_was_pending) { + ret = sigemptyset(&sigpipe_set); + assert(!ret); + ret = sigaddset(&sigpipe_set, SIGPIPE); + assert(!ret); + ret = pthread_sigmask(SIG_BLOCK, &sigpipe_set, &old_set); + assert(!ret); + } + do { + ret = write(wakeup_fd, "", 1); + } while (ret == -1L && errno == EINTR); + if (ret == -1L && errno == EPIPE && !sigpipe_was_pending) { + struct timespec timeout = { 0, 0 }; + do { + ret = sigtimedwait(&sigpipe_set, NULL, + &timeout); + } while (ret == -1L && errno == EINTR); + } + if (!sigpipe_was_pending) { + ret = pthread_sigmask(SIG_SETMASK, &old_set, NULL); + assert(!ret); + } +} + +static +void lib_ring_buffer_channel_do_read(struct channel *chan) +{ + const struct lttng_ust_lib_ring_buffer_config *config; + struct lttng_ust_shm_handle *handle; + int cpu; + + handle = chan->handle; + config = &chan->backend.config; + + /* + * Only flush buffers periodically if readers are active. 
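
The wakeup path in lib_ring_buffer_wakeup() above has one delicate point: if the consumer already closed the read side of the pipe, write() raises SIGPIPE, which would kill the traced application. The sequence is: block SIGPIPE, attempt the write, reap any SIGPIPE the write generated with a zero-timeout sigtimedwait(), restore the mask. A condensed sketch (the real code additionally leaves a SIGPIPE that was already pending before the write untouched):

        #include <errno.h>
        #include <pthread.h>
        #include <signal.h>
        #include <time.h>
        #include <unistd.h>

        /* Write one wakeup byte to a pipe without risking death by SIGPIPE. */
        static void wakeup_write(int fd)
        {
                sigset_t pipe_set, old_set;
                ssize_t ret;

                sigemptyset(&pipe_set);
                sigaddset(&pipe_set, SIGPIPE);
                pthread_sigmask(SIG_BLOCK, &pipe_set, &old_set);

                do {
                        ret = write(fd, "", 1);
                } while (ret == -1 && errno == EINTR);

                if (ret == -1 && errno == EPIPE) {
                        struct timespec timeout = { 0, 0 };

                        /* Reap the SIGPIPE this write generated, without blocking. */
                        while (sigtimedwait(&pipe_set, NULL, &timeout) == -1
                                        && errno == EINTR)
                                ;
                }
                pthread_sigmask(SIG_SETMASK, &old_set, NULL);
        }

        int main(void)
        {
                int fds[2];

                pipe(fds);
                close(fds[0]);          /* simulate a vanished consumer */
                wakeup_write(fds[1]);   /* would be fatal without the masking */
                return 0;
        }
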
+ */ + pthread_mutex_lock(&wakeup_fd_mutex); + if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) { + for_each_possible_cpu(cpu) { + struct lttng_ust_lib_ring_buffer *buf = + shmp(handle, chan->backend.buf[cpu].shmp); + + if (!buf) + goto end; + if (uatomic_read(&buf->active_readers) + && lib_ring_buffer_poll_deliver(config, buf, + chan, handle)) { + lib_ring_buffer_wakeup(buf, handle); + } + } + } else { + struct lttng_ust_lib_ring_buffer *buf = + shmp(handle, chan->backend.buf[0].shmp); + + if (!buf) + goto end; + if (uatomic_read(&buf->active_readers) + && lib_ring_buffer_poll_deliver(config, buf, + chan, handle)) { + lib_ring_buffer_wakeup(buf, handle); + } + } +end: + pthread_mutex_unlock(&wakeup_fd_mutex); +} + +static +void lib_ring_buffer_channel_read_timer(int sig, siginfo_t *si, void *uc) +{ + struct channel *chan; + + assert(CMM_LOAD_SHARED(timer_signal.tid) == pthread_self()); + chan = si->si_value.sival_ptr; + DBG("Read timer for channel %p\n", chan); + lib_ring_buffer_channel_do_read(chan); + return; +} + +static +void rb_setmask(sigset_t *mask) +{ + int ret; + + ret = sigemptyset(mask); + if (ret) { + PERROR("sigemptyset"); + } + ret = sigaddset(mask, LTTNG_UST_RB_SIG_FLUSH); + if (ret) { + PERROR("sigaddset"); + } + ret = sigaddset(mask, LTTNG_UST_RB_SIG_READ); + if (ret) { + PERROR("sigaddset"); + } + ret = sigaddset(mask, LTTNG_UST_RB_SIG_TEARDOWN); + if (ret) { + PERROR("sigaddset"); + } +} + +static +void *sig_thread(void *arg) +{ + sigset_t mask; + siginfo_t info; + int signr; + + /* Only self thread will receive signal mask. */ + rb_setmask(&mask); + CMM_STORE_SHARED(timer_signal.tid, pthread_self()); + + for (;;) { + signr = sigwaitinfo(&mask, &info); + if (signr == -1) { + if (errno != EINTR) + PERROR("sigwaitinfo"); + continue; + } + if (signr == LTTNG_UST_RB_SIG_FLUSH) { + lib_ring_buffer_channel_switch_timer(info.si_signo, + &info, NULL); + } else if (signr == LTTNG_UST_RB_SIG_READ) { + lib_ring_buffer_channel_read_timer(info.si_signo, + &info, NULL); + } else if (signr == LTTNG_UST_RB_SIG_TEARDOWN) { + cmm_smp_mb(); + CMM_STORE_SHARED(timer_signal.qs_done, 1); + cmm_smp_mb(); + } else { + ERR("Unexptected signal %d\n", info.si_signo); + } + } + return NULL; } -#if 0 /* - * Polling timer to check the channels for data. + * Ensure only a single thread listens on the timer signal. 
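
sig_thread() above is the whole signal story: every ring-buffer signal is consumed synchronously by this single loop, which turns asynchronous signal handling into ordinary sequential code. A minimal stand-alone version of such a dispatch loop (two invented signal roles, flush and teardown):

        #include <signal.h>
        #include <stdio.h>

        static void *dispatch_loop(void *arg)
        {
                sigset_t mask;
                siginfo_t info;
                int signr;
                const int sig_flush = SIGRTMIN, sig_teardown = SIGRTMIN + 1;

                (void) arg;
                sigemptyset(&mask);
                sigaddset(&mask, sig_flush);
                sigaddset(&mask, sig_teardown);
                for (;;) {
                        signr = sigwaitinfo(&mask, &info);
                        if (signr == -1)
                                continue;               /* e.g. EINTR */
                        if (signr == sig_flush)
                                printf("flush channel %p\n", info.si_value.sival_ptr);
                        else if (signr == sig_teardown)
                                break;                  /* quiescence ack goes here */
                }
                return NULL;
        }

For this to work, the signals must be blocked in every other thread before the timers are armed; otherwise a process-directed RT signal may be delivered elsewhere and take the default (fatal) action.
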
*/ -static void read_buffer_timer(unsigned long data) +static +void lib_ring_buffer_setup_timer_thread(void) { - struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data; - struct channel *chan = shmp(handle, buf->backend.chan); - const struct lib_ring_buffer_config *config = &chan->backend.config; + pthread_t thread; + int ret; - CHAN_WARN_ON(chan, !buf->backend.allocated); + pthread_mutex_lock(&timer_signal.lock); + if (timer_signal.setup_done) + goto end; - if (uatomic_read(&buf->active_readers) || uatomic_read(&buf->active_shadow_readers)) - && lib_ring_buffer_poll_deliver(config, buf, chan)) { - //TODO - //wake_up_interruptible(&buf->read_wait); - //wake_up_interruptible(&chan->read_wait); + ret = pthread_create(&thread, NULL, &sig_thread, NULL); + if (ret) { + errno = ret; + PERROR("pthread_create"); } + ret = pthread_detach(thread); + if (ret) { + errno = ret; + PERROR("pthread_detach"); + } + timer_signal.setup_done = 1; +end: + pthread_mutex_unlock(&timer_signal.lock); +} - //TODO - //if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) - // mod_timer_pinned(&buf->read_timer, - // jiffies + chan->read_timer_interval); - //else - // mod_timer(&buf->read_timer, - // jiffies + chan->read_timer_interval); +/* + * Wait for signal-handling thread quiescent state. + */ +static +void lib_ring_buffer_wait_signal_thread_qs(unsigned int signr) +{ + sigset_t pending_set; + int ret; + + /* + * We need to be the only thread interacting with the thread + * that manages signals for teardown synchronization. + */ + pthread_mutex_lock(&timer_signal.lock); + + /* + * Ensure we don't have any signal queued for this channel. + */ + for (;;) { + ret = sigemptyset(&pending_set); + if (ret == -1) { + PERROR("sigemptyset"); + } + ret = sigpending(&pending_set); + if (ret == -1) { + PERROR("sigpending"); + } + if (!sigismember(&pending_set, signr)) + break; + caa_cpu_relax(); + } + + /* + * From this point, no new signal handler will be fired that + * would try to access "chan". However, we still need to wait + * for any currently executing handler to complete. + */ + cmm_smp_mb(); + CMM_STORE_SHARED(timer_signal.qs_done, 0); + cmm_smp_mb(); + + /* + * Kill with LTTNG_UST_RB_SIG_TEARDOWN, so signal management + * thread wakes up. 
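
The rendezvous being set up here (and completed by the kill() and spin just below) reduces to a flag protocol: clear qs_done, poke the handler thread, spin until the handler sets qs_done back, after which no handler invocation started before the poke can still reference the channel. A compressed sketch using C11 atomics in place of liburcu's CMM_STORE_SHARED()/cmm_smp_mb() (my substitution: seq_cst over-approximates the explicit barriers in the real code):

        #include <stdatomic.h>
        #include <stdbool.h>

        static atomic_bool qs_done;

        /* Handler thread, on receiving the teardown signal. */
        static void ack_teardown(void)
        {
                atomic_store(&qs_done, true);
        }

        /* Teardown side: after this returns, the handler thread has passed
         * a synchronization point and no longer touches our channel. */
        static void wait_quiescent(void (*poke_handler_thread)(void))
        {
                atomic_store(&qs_done, false);
                poke_handler_thread();  /* e.g. kill(getpid(), SIG_TEARDOWN) */
                while (!atomic_load(&qs_done))
                        ;               /* real code inserts caa_cpu_relax() */
        }
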
+ */ + kill(getpid(), LTTNG_UST_RB_SIG_TEARDOWN); + + while (!CMM_LOAD_SHARED(timer_signal.qs_done)) + caa_cpu_relax(); + cmm_smp_mb(); + + pthread_mutex_unlock(&timer_signal.lock); } -#endif //0 -static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf, - struct shm_handle *handle) +static +void lib_ring_buffer_channel_switch_timer_start(struct channel *chan) { - struct channel *chan = shmp(handle, buf->backend.chan); - const struct lib_ring_buffer_config *config = &chan->backend.config; + struct sigevent sev; + struct itimerspec its; + int ret; + + if (!chan->switch_timer_interval || chan->switch_timer_enabled) + return; + + chan->switch_timer_enabled = 1; + + lib_ring_buffer_setup_timer_thread(); + + memset(&sev, 0, sizeof(sev)); + sev.sigev_notify = SIGEV_SIGNAL; + sev.sigev_signo = LTTNG_UST_RB_SIG_FLUSH; + sev.sigev_value.sival_ptr = chan; + ret = timer_create(CLOCKID, &sev, &chan->switch_timer); + if (ret == -1) { + PERROR("timer_create"); + } + + its.it_value.tv_sec = chan->switch_timer_interval / 1000000; + its.it_value.tv_nsec = (chan->switch_timer_interval % 1000000) * 1000; + its.it_interval.tv_sec = its.it_value.tv_sec; + its.it_interval.tv_nsec = its.it_value.tv_nsec; + + ret = timer_settime(chan->switch_timer, 0, &its, NULL); + if (ret == -1) { + PERROR("timer_settime"); + } +} + +static +void lib_ring_buffer_channel_switch_timer_stop(struct channel *chan) +{ + int ret; + + if (!chan->switch_timer_interval || !chan->switch_timer_enabled) + return; + + ret = timer_delete(chan->switch_timer); + if (ret == -1) { + PERROR("timer_delete"); + } + + lib_ring_buffer_wait_signal_thread_qs(LTTNG_UST_RB_SIG_FLUSH); + + chan->switch_timer = 0; + chan->switch_timer_enabled = 0; +} + +static +void lib_ring_buffer_channel_read_timer_start(struct channel *chan) +{ + const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config; + struct sigevent sev; + struct itimerspec its; + int ret; if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER - || !chan->read_timer_interval - || buf->read_timer_enabled) + || !chan->read_timer_interval || chan->read_timer_enabled) return; - //TODO - //init_timer(&buf->read_timer); - //buf->read_timer.function = read_buffer_timer; - //buf->read_timer.expires = jiffies + chan->read_timer_interval; - //buf->read_timer.data = (unsigned long)buf; - - //if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) - // add_timer_on(&buf->read_timer, buf->backend.cpu); - //else - // add_timer(&buf->read_timer); - buf->read_timer_enabled = 1; + chan->read_timer_enabled = 1; + + lib_ring_buffer_setup_timer_thread(); + + sev.sigev_notify = SIGEV_SIGNAL; + sev.sigev_signo = LTTNG_UST_RB_SIG_READ; + sev.sigev_value.sival_ptr = chan; + ret = timer_create(CLOCKID, &sev, &chan->read_timer); + if (ret == -1) { + PERROR("timer_create"); + } + + its.it_value.tv_sec = chan->read_timer_interval / 1000000; + its.it_value.tv_nsec = (chan->read_timer_interval % 1000000) * 1000; + its.it_interval.tv_sec = its.it_value.tv_sec; + its.it_interval.tv_nsec = its.it_value.tv_nsec; + + ret = timer_settime(chan->read_timer, 0, &its, NULL); + if (ret == -1) { + PERROR("timer_settime"); + } } -static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf, - struct shm_handle *handle) +static +void lib_ring_buffer_channel_read_timer_stop(struct channel *chan) { - struct channel *chan = shmp(handle, buf->backend.chan); - const struct lib_ring_buffer_config *config = &chan->backend.config; + const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config; + int 
ret; if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER - || !chan->read_timer_interval - || !buf->read_timer_enabled) + || !chan->read_timer_interval || !chan->read_timer_enabled) return; - //TODO - //del_timer_sync(&buf->read_timer); + ret = timer_delete(chan->read_timer); + if (ret == -1) { + PERROR("timer_delete"); + } + /* * do one more check to catch data that has been written in the last * timer period. */ - if (lib_ring_buffer_poll_deliver(config, buf, chan, handle)) { - //TODO - //wake_up_interruptible(&buf->read_wait); - //wake_up_interruptible(&chan->read_wait); - } - buf->read_timer_enabled = 0; + lib_ring_buffer_channel_do_read(chan); + + lib_ring_buffer_wait_signal_thread_qs(LTTNG_UST_RB_SIG_READ); + + chan->read_timer = 0; + chan->read_timer_enabled = 0; } static void channel_unregister_notifiers(struct channel *chan, - struct shm_handle *handle) + struct lttng_ust_shm_handle *handle) +{ + lib_ring_buffer_channel_switch_timer_stop(chan); + lib_ring_buffer_channel_read_timer_stop(chan); +} + +static void channel_print_errors(struct channel *chan, + struct lttng_ust_shm_handle *handle) { - const struct lib_ring_buffer_config *config = &chan->backend.config; + const struct lttng_ust_lib_ring_buffer_config *config = + &chan->backend.config; int cpu; if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) { for_each_possible_cpu(cpu) { - struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp); - - lib_ring_buffer_stop_switch_timer(buf, handle); - lib_ring_buffer_stop_read_timer(buf, handle); + struct lttng_ust_lib_ring_buffer *buf = + shmp(handle, chan->backend.buf[cpu].shmp); + if (buf) + lib_ring_buffer_print_errors(chan, buf, cpu, handle); } } else { - struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp); + struct lttng_ust_lib_ring_buffer *buf = + shmp(handle, chan->backend.buf[0].shmp); - lib_ring_buffer_stop_switch_timer(buf, handle); - lib_ring_buffer_stop_read_timer(buf, handle); + if (buf) + lib_ring_buffer_print_errors(chan, buf, -1, handle); } - //channel_backend_unregister_notifiers(&chan->backend); } -static void channel_free(struct channel *chan, struct shm_handle *handle, - int shadow) +static void channel_free(struct channel *chan, + struct lttng_ust_shm_handle *handle, + int consumer) { - int ret; - - if (!shadow) - channel_backend_free(&chan->backend, handle); + channel_backend_free(&chan->backend, handle); /* chan is freed by shm teardown */ - shm_object_table_destroy(handle->table); + shm_object_table_destroy(handle->table, consumer); free(handle); } @@ -411,7 +949,9 @@ static void channel_free(struct channel *chan, struct shm_handle *handle, * channel_create - Create channel. * @config: ring buffer instance configuration * @name: name of the channel - * @priv: ring buffer client private data + * @priv_data: ring buffer client private data area pointer (output) + * @priv_data_size: length, in bytes, of the private data area. + * @priv_data_init: initialization data for private data. * @buf_addr: pointer the the beginning of the preallocated buffer contiguous * address mapping. It is used only by RING_BUFFER_STATIC * configuration. It can be set to NULL for other backends. @@ -421,29 +961,58 @@ static void channel_free(struct channel *chan, struct shm_handle *handle, * padding to let readers get those sub-buffers. * Used for live streaming. * @read_timer_interval: Time interval (in us) to wake up pending readers. + * @stream_fds: array of stream file descriptors. + * @nr_stream_fds: number of file descriptors in array. 
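
Both @switch_timer_interval and @read_timer_interval are expressed in microseconds; the timer-start helpers above convert such an interval into the two timespec values timer_settime() consumes, and setting it_interval equal to it_value makes the timer periodic rather than one-shot. The conversion, isolated as a sketch:

        #include <stdint.h>
        #include <stdio.h>
        #include <time.h>

        /* A microsecond period becomes the timespec pair timer_settime() takes:
         * it_value arms the first expiry, it_interval re-arms it periodically. */
        static struct itimerspec usec_to_itimerspec(uint64_t usec)
        {
                struct itimerspec its;

                its.it_value.tv_sec = usec / 1000000;
                its.it_value.tv_nsec = (usec % 1000000) * 1000;
                its.it_interval = its.it_value;
                return its;
        }

        int main(void)
        {
                struct itimerspec its = usec_to_itimerspec(1500000);    /* 1.5 s */

                printf("%lld s + %ld ns\n", (long long) its.it_value.tv_sec,
                        its.it_value.tv_nsec);
                return 0;
        }
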
* * Holds cpu hotplug. * Returns NULL on failure. */ -struct shm_handle *channel_create(const struct lib_ring_buffer_config *config, - const char *name, void *priv, void *buf_addr, - size_t subbuf_size, +struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config, + const char *name, + void **priv_data, + size_t priv_data_align, + size_t priv_data_size, + void *priv_data_init, + void *buf_addr, size_t subbuf_size, size_t num_subbuf, unsigned int switch_timer_interval, unsigned int read_timer_interval, - int *shm_fd, int *wait_fd, uint64_t *memory_map_size) + const int *stream_fds, int nr_stream_fds, + int64_t blocking_timeout) { - int ret, cpu; - size_t shmsize; + int ret; + size_t shmsize, chansize; struct channel *chan; - struct shm_handle *handle; + struct lttng_ust_shm_handle *handle; struct shm_object *shmobj; - struct shm_ref *ref; + unsigned int nr_streams; + int64_t blocking_timeout_ms; + + if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) + nr_streams = num_possible_cpus(); + else + nr_streams = 1; + + if (nr_stream_fds != nr_streams) + return NULL; + + if (blocking_timeout < -1) { + return NULL; + } + /* usec to msec */ + if (blocking_timeout == -1) { + blocking_timeout_ms = -1; + } else { + blocking_timeout_ms = blocking_timeout / 1000; + if (blocking_timeout_ms != (int32_t) blocking_timeout_ms) { + return NULL; + } + } if (lib_ring_buffer_check_config(config, switch_timer_interval, read_timer_interval)) return NULL; - handle = zmalloc(sizeof(struct shm_handle)); + handle = zmalloc(sizeof(struct lttng_ust_shm_handle)); if (!handle) return NULL; @@ -454,68 +1023,78 @@ struct shm_handle *channel_create(const struct lib_ring_buffer_config *config, /* Calculate the shm allocation layout */ shmsize = sizeof(struct channel); - if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) - shmsize += sizeof(struct lib_ring_buffer_shmp) * num_possible_cpus(); - else - shmsize += sizeof(struct lib_ring_buffer_shmp); - - shmobj = shm_object_table_append(handle->table, shmsize); + shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_shmp)); + shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp) * nr_streams; + chansize = shmsize; + if (priv_data_align) + shmsize += lttng_ust_offset_align(shmsize, priv_data_align); + shmsize += priv_data_size; + + /* Allocate normal memory for channel (not shared) */ + shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM, + -1, -1); if (!shmobj) goto error_append; - set_shmp(handle->chan, zalloc_shm(shmobj, sizeof(struct channel))); + /* struct channel is at object 0, offset 0 (hardcoded) */ + set_shmp(handle->chan, zalloc_shm(shmobj, chansize)); + assert(handle->chan._ref.index == 0); + assert(handle->chan._ref.offset == 0); chan = shmp(handle, handle->chan); if (!chan) goto error_append; + chan->nr_streams = nr_streams; + + /* space for private data */ + if (priv_data_size) { + DECLARE_SHMP(void, priv_data_alloc); + + align_shm(shmobj, priv_data_align); + chan->priv_data_offset = shmobj->allocated_len; + set_shmp(priv_data_alloc, zalloc_shm(shmobj, priv_data_size)); + if (!shmp(handle, priv_data_alloc)) + goto error_append; + *priv_data = channel_get_private(chan); + memcpy(*priv_data, priv_data_init, priv_data_size); + } else { + chan->priv_data_offset = -1; + if (priv_data) + *priv_data = NULL; + } - ret = channel_backend_init(&chan->backend, name, config, priv, - subbuf_size, num_subbuf, handle); + chan->u.s.blocking_timeout_ms = (int32_t) blocking_timeout_ms; + + ret = 
channel_backend_init(&chan->backend, name, config, + subbuf_size, num_subbuf, handle, + stream_fds); if (ret) goto error_backend_init; + chan->handle = handle; chan->commit_count_mask = (~0UL >> chan->backend.num_subbuf_order); - //TODO - //chan->switch_timer_interval = usecs_to_jiffies(switch_timer_interval); - //chan->read_timer_interval = usecs_to_jiffies(read_timer_interval); - //TODO - //init_waitqueue_head(&chan->read_wait); - //init_waitqueue_head(&chan->hp_wait); - if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) { - /* - * In case of non-hotplug cpu, if the ring-buffer is allocated - * in early initcall, it will not be notified of secondary cpus. - * In that off case, we need to allocate for all possible cpus. - */ - for_each_possible_cpu(cpu) { - struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp); - lib_ring_buffer_start_switch_timer(buf, handle); - lib_ring_buffer_start_read_timer(buf, handle); - } - } else { - struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp); + chan->switch_timer_interval = switch_timer_interval; + chan->read_timer_interval = read_timer_interval; + lib_ring_buffer_channel_switch_timer_start(chan); + lib_ring_buffer_channel_read_timer_start(chan); - lib_ring_buffer_start_switch_timer(buf, handle); - lib_ring_buffer_start_read_timer(buf, handle); - } - ref = &handle->chan._ref; - shm_get_object_data(handle, ref, shm_fd, wait_fd, memory_map_size); return handle; error_backend_init: error_append: - shm_object_table_destroy(handle->table); + shm_object_table_destroy(handle->table, 1); error_table_alloc: free(handle); return NULL; } -struct shm_handle *channel_handle_create(int shm_fd, int wait_fd, - uint64_t memory_map_size) +struct lttng_ust_shm_handle *channel_handle_create(void *data, + uint64_t memory_map_size, + int wakeup_fd) { - struct shm_handle *handle; + struct lttng_ust_shm_handle *handle; struct shm_object *object; - handle = zmalloc(sizeof(struct shm_handle)); + handle = zmalloc(sizeof(struct lttng_ust_shm_handle)); if (!handle) return NULL; @@ -524,38 +1103,48 @@ struct shm_handle *channel_handle_create(int shm_fd, int wait_fd, if (!handle->table) goto error_table_alloc; /* Add channel object */ - object = shm_object_table_append_shadow(handle->table, - shm_fd, wait_fd, memory_map_size); + object = shm_object_table_append_mem(handle->table, data, + memory_map_size, wakeup_fd); if (!object) goto error_table_object; - + /* struct channel is at object 0, offset 0 (hardcoded) */ + handle->chan._ref.index = 0; + handle->chan._ref.offset = 0; return handle; error_table_object: - shm_object_table_destroy(handle->table); + shm_object_table_destroy(handle->table, 0); error_table_alloc: free(handle); return NULL; } -int channel_handle_add_stream(struct shm_handle *handle, - int shm_fd, int wait_fd, uint64_t memory_map_size) +int channel_handle_add_stream(struct lttng_ust_shm_handle *handle, + int shm_fd, int wakeup_fd, uint32_t stream_nr, + uint64_t memory_map_size) { struct shm_object *object; /* Add stream object */ - object = shm_object_table_append_shadow(handle->table, - shm_fd, wait_fd, memory_map_size); + object = shm_object_table_append_shm(handle->table, + shm_fd, wakeup_fd, stream_nr, + memory_map_size); if (!object) - return -1; + return -EINVAL; return 0; } +unsigned int channel_handle_get_nr_streams(struct lttng_ust_shm_handle *handle) +{ + assert(handle->table); + return handle->table->allocated_len - 1; +} + static -void channel_release(struct channel *chan, struct shm_handle *handle, - int shadow) +void 
channel_release(struct channel *chan, struct lttng_ust_shm_handle *handle, + int consumer) { - channel_free(chan, handle, shadow); + channel_free(chan, handle, consumer); } /** @@ -566,122 +1155,130 @@ void channel_release(struct channel *chan, struct shm_handle *handle, * Call "destroy" callback, finalize channels, decrement the channel * reference count. Note that when readers have completed data * consumption of finalized channels, get_subbuf() will return -ENODATA. - * They should release their handle at that point. Returns the private - * data pointer. + * They should release their handle at that point. */ -void *channel_destroy(struct channel *chan, struct shm_handle *handle, - int shadow) +void channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle, + int consumer) { - const struct lib_ring_buffer_config *config = &chan->backend.config; - void *priv; - int cpu; - - if (shadow) { - channel_release(chan, handle, shadow); - return NULL; - } - - channel_unregister_notifiers(chan, handle); - - if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) { - for_each_channel_cpu(cpu, chan) { - struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp); - - if (config->cb.buffer_finalize) - config->cb.buffer_finalize(buf, - chan->backend.priv, - cpu, handle); - if (buf->backend.allocated) - lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH, - handle); - /* - * Perform flush before writing to finalized. - */ - cmm_smp_wmb(); - CMM_ACCESS_ONCE(buf->finalized) = 1; - //wake_up_interruptible(&buf->read_wait); - } - } else { - struct lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp); - - if (config->cb.buffer_finalize) - config->cb.buffer_finalize(buf, chan->backend.priv, -1, handle); - if (buf->backend.allocated) - lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH, - handle); + if (consumer) { /* - * Perform flush before writing to finalized. + * Note: the consumer takes care of finalizing and + * switching the buffers. */ - cmm_smp_wmb(); - CMM_ACCESS_ONCE(buf->finalized) = 1; - //wake_up_interruptible(&buf->read_wait); + channel_unregister_notifiers(chan, handle); + /* + * The consumer prints errors. + */ + channel_print_errors(chan, handle); } - CMM_ACCESS_ONCE(chan->finalized) = 1; - //wake_up_interruptible(&chan->hp_wait); - //wake_up_interruptible(&chan->read_wait); + /* * sessiond/consumer are keeping a reference on the shm file * descriptor directly. No need to refcount. 
*/ - priv = chan->backend.priv; - channel_release(chan, handle, shadow); - return priv; + channel_release(chan, handle, consumer); + return; } -struct lib_ring_buffer *channel_get_ring_buffer( - const struct lib_ring_buffer_config *config, +struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer( + const struct lttng_ust_lib_ring_buffer_config *config, struct channel *chan, int cpu, - struct shm_handle *handle, + struct lttng_ust_shm_handle *handle, int *shm_fd, int *wait_fd, + int *wakeup_fd, uint64_t *memory_map_size) { struct shm_ref *ref; if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) { - ref = &chan->backend.buf[0].shmp._ref; - shm_get_object_data(handle, ref, shm_fd, wait_fd, - memory_map_size); - return shmp(handle, chan->backend.buf[0].shmp); + cpu = 0; } else { if (cpu >= num_possible_cpus()) return NULL; - ref = &chan->backend.buf[cpu].shmp._ref; - shm_get_object_data(handle, ref, shm_fd, wait_fd, - memory_map_size); - return shmp(handle, chan->backend.buf[cpu].shmp); } + ref = &chan->backend.buf[cpu].shmp._ref; + *shm_fd = shm_get_shm_fd(handle, ref); + *wait_fd = shm_get_wait_fd(handle, ref); + *wakeup_fd = shm_get_wakeup_fd(handle, ref); + if (shm_get_shm_size(handle, ref, memory_map_size)) + return NULL; + return shmp(handle, chan->backend.buf[cpu].shmp); } -int lib_ring_buffer_open_read(struct lib_ring_buffer *buf, - struct shm_handle *handle, - int shadow) +int ring_buffer_channel_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config, + struct channel *chan, + struct lttng_ust_shm_handle *handle) { - struct channel *chan = shmp(handle, buf->backend.chan); + struct shm_ref *ref; - if (shadow) { - if (uatomic_cmpxchg(&buf->active_shadow_readers, 0, 1) != 0) - return -EBUSY; - cmm_smp_mb(); - return 0; + ref = &handle->chan._ref; + return shm_close_wait_fd(handle, ref); +} + +int ring_buffer_channel_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config, + struct channel *chan, + struct lttng_ust_shm_handle *handle) +{ + struct shm_ref *ref; + + ref = &handle->chan._ref; + return shm_close_wakeup_fd(handle, ref); +} + +int ring_buffer_stream_close_wait_fd(const struct lttng_ust_lib_ring_buffer_config *config, + struct channel *chan, + struct lttng_ust_shm_handle *handle, + int cpu) +{ + struct shm_ref *ref; + + if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) { + cpu = 0; + } else { + if (cpu >= num_possible_cpus()) + return -EINVAL; + } + ref = &chan->backend.buf[cpu].shmp._ref; + return shm_close_wait_fd(handle, ref); +} + +int ring_buffer_stream_close_wakeup_fd(const struct lttng_ust_lib_ring_buffer_config *config, + struct channel *chan, + struct lttng_ust_shm_handle *handle, + int cpu) +{ + struct shm_ref *ref; + int ret; + + if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) { + cpu = 0; + } else { + if (cpu >= num_possible_cpus()) + return -EINVAL; } + ref = &chan->backend.buf[cpu].shmp._ref; + pthread_mutex_lock(&wakeup_fd_mutex); + ret = shm_close_wakeup_fd(handle, ref); + pthread_mutex_unlock(&wakeup_fd_mutex); + return ret; +} + +int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf, + struct lttng_ust_shm_handle *handle) +{ if (uatomic_cmpxchg(&buf->active_readers, 0, 1) != 0) return -EBUSY; cmm_smp_mb(); return 0; } -void lib_ring_buffer_release_read(struct lib_ring_buffer *buf, - struct shm_handle *handle, - int shadow) +void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf, + struct lttng_ust_shm_handle *handle) { struct channel *chan = shmp(handle, buf->backend.chan); - if (shadow) { - CHAN_WARN_ON(chan, 
uatomic_read(&buf->active_shadow_readers) != 1); - cmm_smp_mb(); - uatomic_dec(&buf->active_shadow_readers); + if (!chan) return; - } CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1); cmm_smp_mb(); uatomic_dec(&buf->active_readers); @@ -697,15 +1294,19 @@ void lib_ring_buffer_release_read(struct lib_ring_buffer *buf, * data to read at consumed position, or 0 if the get operation succeeds. */ -int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf, +int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf, unsigned long *consumed, unsigned long *produced, - struct shm_handle *handle) + struct lttng_ust_shm_handle *handle) { - struct channel *chan = shmp(handle, buf->backend.chan); - const struct lib_ring_buffer_config *config = &chan->backend.config; + struct channel *chan; + const struct lttng_ust_lib_ring_buffer_config *config; unsigned long consumed_cur, write_offset; int finalized; + chan = shmp(handle, buf->backend.chan); + if (!chan) + return -EPERM; + config = &chan->backend.config; finalized = CMM_ACCESS_ONCE(buf->finalized); /* * Read finalized before counters. @@ -747,20 +1348,59 @@ nodata: } /** - * lib_ring_buffer_put_snapshot - move consumed counter forward + * Performs the same function as lib_ring_buffer_snapshot(), but the positions + * are saved regardless of whether the consumed and produced positions are + * in the same subbuffer. + * @buf: ring buffer + * @consumed: consumed byte count indicating the last position read + * @produced: produced byte count indicating the last position written + * + * This function is meant to provide information on the exact producer and + * consumer positions without regard for the "snapshot" feature. + */ +int lib_ring_buffer_snapshot_sample_positions( + struct lttng_ust_lib_ring_buffer *buf, + unsigned long *consumed, unsigned long *produced, + struct lttng_ust_shm_handle *handle) +{ + struct channel *chan; + const struct lttng_ust_lib_ring_buffer_config *config; + + chan = shmp(handle, buf->backend.chan); + if (!chan) + return -EPERM; + config = &chan->backend.config; + cmm_smp_rmb(); + *consumed = uatomic_read(&buf->consumed); + /* + * No need to issue a memory barrier between consumed count read and + * write offset read, because consumed count can only change + * concurrently in overwrite mode, and we keep a sequence counter + * identifier derived from the write offset to check we are getting + * the same sub-buffer we are expecting (the sub-buffers are atomically + * "tagged" upon writes, tags are checked upon read). + */ + *produced = v_read(config, &buf->offset); + return 0; +} + +/** + * lib_ring_buffer_move_consumer - move consumed counter forward * @buf: ring buffer * @consumed_new: new consumed count value */ -void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf, +void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf, unsigned long consumed_new, - struct shm_handle *handle) + struct lttng_ust_shm_handle *handle) { - struct lib_ring_buffer_backend *bufb = &buf->backend; - struct channel *chan = shmp(handle, bufb->chan); + struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend; + struct channel *chan; unsigned long consumed; - CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1 - && uatomic_read(&buf->active_shadow_readers) != 1); + chan = shmp(handle, bufb->chan); + if (!chan) + return; + CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1); /* * Only push the consumed value forward. 
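
lib_ring_buffer_open_read() above enforces a single concurrent reader with one compare-and-swap on active_readers: only the 0 -> 1 transition wins, and every other caller gets -EBUSY without blocking. The same guard with C11 atomics standing in for liburcu's uatomic_cmpxchg() (an assumption of equivalent semantics for this use):

        #include <errno.h>
        #include <stdatomic.h>

        static atomic_int active_readers;

        /* At most one reader may attach: only the 0 -> 1 CAS succeeds. */
        static int open_read(void)
        {
                int expected = 0;

                if (!atomic_compare_exchange_strong(&active_readers, &expected, 1))
                        return -EBUSY;
                /* The real code issues a full barrier here (cmm_smp_mb()), so
                 * reads after a successful open see a consistent buffer state. */
                return 0;
        }

        static void release_read(void)
        {
                atomic_fetch_sub(&active_readers, 1);
        }
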
@@ -781,16 +1421,20 @@ void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf, * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no * data to read at consumed position, or 0 if the get operation succeeds. */ -int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf, +int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf, unsigned long consumed, - struct shm_handle *handle) + struct lttng_ust_shm_handle *handle) { - struct channel *chan = shmp(handle, buf->backend.chan); - const struct lib_ring_buffer_config *config = &chan->backend.config; + struct channel *chan; + const struct lttng_ust_lib_ring_buffer_config *config; unsigned long consumed_cur, consumed_idx, commit_count, write_offset; - int ret; - int finalized; + int ret, finalized, nr_retry = LTTNG_UST_RING_BUFFER_GET_RETRY; + struct commit_counters_cold *cc_cold; + chan = shmp(handle, buf->backend.chan); + if (!chan) + return -EPERM; + config = &chan->backend.config; retry: finalized = CMM_ACCESS_ONCE(buf->finalized); /* @@ -799,7 +1443,10 @@ retry: cmm_smp_rmb(); consumed_cur = uatomic_read(&buf->consumed); consumed_idx = subbuf_index(consumed, chan); - commit_count = v_read(config, &shmp_index(handle, buf->commit_cold, consumed_idx)->cc_sb); + cc_cold = shmp_index(handle, buf->commit_cold, consumed_idx); + if (!cc_cold) + return -EPERM; + commit_count = v_read(config, &cc_cold->cc_sb); /* * Make sure we read the commit count before reading the buffer * data and the write offset. Correct consumed offset ordering @@ -824,20 +1471,72 @@ retry: /* * Check that the subbuffer we are trying to consume has been - * already fully committed. + * already fully committed. There are a few causes that can make + * this unavailability situation occur: + * + * Temporary (short-term) situation: + * - Application is running on a different CPU, between reserve + * and commit ring buffer operations, + * - Application is preempted between reserve and commit ring + * buffer operations, + * + * Long-term situation: + * - Application is stopped (SIGSTOP) between reserve and commit + * ring buffer operations. Could eventually be resumed by + * SIGCONT. + * - Application is killed (SIGTERM, SIGINT, SIGKILL) between + * reserve and commit ring buffer operation. + * + * From a consumer perspective, handling short-term + * unavailability situations is performed by retrying a few + * times after a delay. Handling long-term unavailability + * situations is handled by failing to get the sub-buffer. + * + * In all of those situations, if the application is taking a + * long time to perform its commit after ring buffer space + * reservation, we can end up in a situation where the producer + * will fill the ring buffer and try to write into the same + * sub-buffer again (which has a missing commit). This is + * handled by the producer in the sub-buffer switch handling + * code of the reserve routine by detecting unbalanced + * reserve/commit counters and discarding all further events + * until the situation is resolved in those situations. Two + * scenarios can occur: + * + * 1) The application causing the reserve/commit counters to be + * unbalanced has been terminated. In this situation, all + * further events will be discarded in the buffers, and no + * further buffer data will be readable by the consumer + * daemon. Tearing down the UST tracing session and starting + * anew is a work-around for those situations. Note that this + * only affects per-UID tracing. 
In per-PID tracing, the + * application vanishes with the termination, and therefore + * no more data needs to be written to the buffers. + * 2) The application causing the unbalance has been delayed for + * a long time, but will eventually try to increment the + * commit counter after eventually writing to the sub-buffer. + * This situation can cause events to be discarded until the + * application resumes its operations. */ if (((commit_count - chan->backend.subbuf_size) & chan->commit_count_mask) - - (buf_trunc(consumed_cur, chan) + - (buf_trunc(consumed, chan) >> chan->backend.num_subbuf_order) - != 0) - goto nodata; + != 0) { + if (nr_retry-- > 0) { + if (nr_retry <= (LTTNG_UST_RING_BUFFER_GET_RETRY >> 1)) + (void) poll(NULL, 0, LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS); + goto retry; + } else { + goto nodata; + } + } /* * Check that we are not about to read the same subbuffer in * which the writer head is. */ - if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_cur, chan) + if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed, chan) == 0) goto nodata; @@ -847,12 +1546,23 @@ retry: * the writer is getting access to a subbuffer we were trying to get * access to. Also checks that the "consumed" buffer count we are * looking for matches the one contained in the subbuffer id. + * + * The short-lived race window described here can be affected by + * application signals and preemption, thus requiring to bound + * the loop to a maximum number of retry. */ ret = update_read_sb_index(config, &buf->backend, &chan->backend, consumed_idx, buf_trunc_val(consumed, chan), handle); - if (ret) - goto retry; + if (ret) { + if (nr_retry-- > 0) { + if (nr_retry <= (LTTNG_UST_RING_BUFFER_GET_RETRY >> 1)) + (void) poll(NULL, 0, LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS); + goto retry; + } else { + goto nodata; + } + } subbuffer_id_clear_noref(config, &buf->backend.buf_rsb.id); buf->get_subbuf_consumed = consumed; @@ -875,16 +1585,21 @@ nodata: * lib_ring_buffer_put_subbuf - release exclusive subbuffer access * @buf: ring buffer */ -void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf, - struct shm_handle *handle) +void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf, + struct lttng_ust_shm_handle *handle) { - struct lib_ring_buffer_backend *bufb = &buf->backend; - struct channel *chan = shmp(handle, bufb->chan); - const struct lib_ring_buffer_config *config = &chan->backend.config; - unsigned long read_sb_bindex, consumed_idx, consumed; + struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend; + struct channel *chan; + const struct lttng_ust_lib_ring_buffer_config *config; + unsigned long sb_bindex, consumed_idx, consumed; + struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages; + struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages; - CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1 - && uatomic_read(&buf->active_shadow_readers) != 1); + chan = shmp(handle, bufb->chan); + if (!chan) + return; + config = &chan->backend.config; + CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1); if (!buf->get_subbuf) { /* @@ -903,11 +1618,16 @@ void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf, * Can be below zero if an iterator is used on a snapshot more than * once. 
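
The get_subbuf() path above funnels both failure modes (sub-buffer not fully committed, lost update_read_sb_index() race) into the same bounded-retry loop: pure spinning through the first half of the retry budget, a small sleep via an empty poll() for the second half, then definitive failure. The skeleton, extracted as a sketch (constant names shortened; the callback is invented for illustration):

        #include <poll.h>

        #define GET_RETRY       10      /* LTTNG_UST_RING_BUFFER_GET_RETRY */
        #define RETRY_DELAY_MS  10      /* LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS */

        /* Retry a racy operation a bounded number of times: spin for the
         * first half of the budget, sleep (poll() with no fds) for the
         * second half, then fail definitively. */
        static int bounded_retry(int (*try_once)(void *), void *arg)
        {
                int nr_retry = GET_RETRY;

                for (;;) {
                        if (!try_once(arg))
                                return 0;
                        if (nr_retry-- <= 0)
                                return -1;      /* caller maps this to nodata */
                        if (nr_retry <= (GET_RETRY >> 1))
                                (void) poll(NULL, 0, RETRY_DELAY_MS);
                }
        }
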
*/ - read_sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id); - v_add(config, v_read(config, - &shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread), - &bufb->records_read); - v_set(config, &shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread, 0); + sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id); + rpages = shmp_index(handle, bufb->array, sb_bindex); + if (!rpages) + return; + backend_pages = shmp(handle, rpages->shmp); + if (!backend_pages) + return; + v_add(config, v_read(config, &backend_pages->records_unread), + &bufb->records_read); + v_set(config, &backend_pages->records_unread, 0); CHAN_WARN_ON(chan, config->mode == RING_BUFFER_OVERWRITE && subbuffer_id_is_noref(config, bufb->buf_rsb.id)); subbuffer_id_set_noref(config, &bufb->buf_rsb.id); @@ -935,21 +1655,29 @@ void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf, * position and the writer position. (inclusive) */ static -void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf, +void lib_ring_buffer_print_subbuffer_errors(struct lttng_ust_lib_ring_buffer *buf, struct channel *chan, unsigned long cons_offset, int cpu, - struct shm_handle *handle) + struct lttng_ust_shm_handle *handle) { - const struct lib_ring_buffer_config *config = &chan->backend.config; + const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config; unsigned long cons_idx, commit_count, commit_count_sb; + struct commit_counters_hot *cc_hot; + struct commit_counters_cold *cc_cold; cons_idx = subbuf_index(cons_offset, chan); - commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, cons_idx)->cc); - commit_count_sb = v_read(config, &shmp_index(handle, buf->commit_cold, cons_idx)->cc_sb); + cc_hot = shmp_index(handle, buf->commit_hot, cons_idx); + if (!cc_hot) + return; + cc_cold = shmp_index(handle, buf->commit_cold, cons_idx); + if (!cc_cold) + return; + commit_count = v_read(config, &cc_hot->cc); + commit_count_sb = v_read(config, &cc_cold->cc_sb); if (subbuf_offset(commit_count, chan) != 0) - ERRMSG("ring buffer %s, cpu %d: " + DBG("ring buffer %s, cpu %d: " "commit count in subbuffer %lu,\n" "expecting multiples of %lu bytes\n" " [ %lu bytes committed, %lu bytes reader-visible ]\n", @@ -957,25 +1685,19 @@ void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf, chan->backend.subbuf_size, commit_count, commit_count_sb); - ERRMSG("ring buffer: %s, cpu %d: %lu bytes committed\n", + DBG("ring buffer: %s, cpu %d: %lu bytes committed\n", chan->backend.name, cpu, commit_count); } static -void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf, +void lib_ring_buffer_print_buffer_errors(struct lttng_ust_lib_ring_buffer *buf, struct channel *chan, void *priv, int cpu, - struct shm_handle *handle) + struct lttng_ust_shm_handle *handle) { - const struct lib_ring_buffer_config *config = &chan->backend.config; + const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config; unsigned long write_offset, cons_offset; - /* - * Can be called in the error path of allocation when - * trans_channel_data is not yet set. 
- */ - if (!chan) - return; /* * No need to order commit_count, write_offset and cons_offset reads * because we execute at teardown when no more writer nor reader @@ -984,7 +1706,7 @@ void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf, write_offset = v_read(config, &buf->offset); cons_offset = uatomic_read(&buf->consumed); if (write_offset != cons_offset) - ERRMSG("ring buffer %s, cpu %d: " + DBG("ring buffer %s, cpu %d: " "non-consumed data\n" " [ %lu bytes written, %lu bytes read ]\n", chan->backend.name, cpu, write_offset, cons_offset); @@ -1000,47 +1722,56 @@ void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf, static void lib_ring_buffer_print_errors(struct channel *chan, - struct lib_ring_buffer *buf, int cpu, - struct shm_handle *handle) + struct lttng_ust_lib_ring_buffer *buf, int cpu, + struct lttng_ust_shm_handle *handle) { - const struct lib_ring_buffer_config *config = &chan->backend.config; - void *priv = chan->backend.priv; - - ERRMSG("ring buffer %s, cpu %d: %lu records written, " - "%lu records overrun\n", - chan->backend.name, cpu, - v_read(config, &buf->records_count), - v_read(config, &buf->records_overrun)); - - if (v_read(config, &buf->records_lost_full) - || v_read(config, &buf->records_lost_wrap) - || v_read(config, &buf->records_lost_big)) - ERRMSG("ring buffer %s, cpu %d: records were lost. Caused by:\n" - " [ %lu buffer full, %lu nest buffer wrap-around, " - "%lu event too big ]\n", - chan->backend.name, cpu, - v_read(config, &buf->records_lost_full), - v_read(config, &buf->records_lost_wrap), - v_read(config, &buf->records_lost_big)); - + const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config; + void *priv = channel_get_private(chan); + + if (!strcmp(chan->backend.name, "relay-metadata-mmap")) { + DBG("ring buffer %s: %lu records written, " + "%lu records overrun\n", + chan->backend.name, + v_read(config, &buf->records_count), + v_read(config, &buf->records_overrun)); + } else { + DBG("ring buffer %s, cpu %d: %lu records written, " + "%lu records overrun\n", + chan->backend.name, cpu, + v_read(config, &buf->records_count), + v_read(config, &buf->records_overrun)); + + if (v_read(config, &buf->records_lost_full) + || v_read(config, &buf->records_lost_wrap) + || v_read(config, &buf->records_lost_big)) + DBG("ring buffer %s, cpu %d: records were lost. Caused by:\n" + " [ %lu buffer full, %lu nest buffer wrap-around, " + "%lu event too big ]\n", + chan->backend.name, cpu, + v_read(config, &buf->records_lost_full), + v_read(config, &buf->records_lost_wrap), + v_read(config, &buf->records_lost_big)); + } lib_ring_buffer_print_buffer_errors(buf, chan, priv, cpu, handle); } /* * lib_ring_buffer_switch_old_start: Populate old subbuffer header. * - * Only executed when the buffer is finalized, in SWITCH_FLUSH. + * Only executed by SWITCH_FLUSH, which can be issued while tracing is + * active or at buffer finalization (destroy). 
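+ *
+ * Hedged usage sketch: a flush on a live buffer ultimately boils down
+ * to
+ *
+ *   lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH, handle);
+ *
+ * and reaches this helper when the sub-buffer being closed still needs
+ * its start header populated.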
*/ static -void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf, +void lib_ring_buffer_switch_old_start(struct lttng_ust_lib_ring_buffer *buf, struct channel *chan, struct switch_offsets *offsets, - u64 tsc, - struct shm_handle *handle) + uint64_t tsc, + struct lttng_ust_shm_handle *handle) { - const struct lib_ring_buffer_config *config = &chan->backend.config; + const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config; unsigned long oldidx = subbuf_index(offsets->old, chan); unsigned long commit_count; + struct commit_counters_hot *cc_hot; config->cb.buffer_begin(buf, tsc, oldidx, handle); @@ -1049,16 +1780,18 @@ void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf, * determine that the subbuffer is full. */ cmm_smp_wmb(); + cc_hot = shmp_index(handle, buf->commit_hot, oldidx); + if (!cc_hot) + return; v_add(config, config->cb.subbuffer_header_size(), - &shmp_index(handle, buf->commit_hot, oldidx)->cc); - commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc); + &cc_hot->cc); + commit_count = v_read(config, &cc_hot->cc); /* Check if the written buffer has to be delivered */ lib_ring_buffer_check_deliver(config, buf, chan, offsets->old, - commit_count, oldidx, handle); - lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx, - offsets->old, commit_count, - config->cb.subbuffer_header_size(), - handle); + commit_count, oldidx, handle, tsc); + lib_ring_buffer_write_commit_counter(config, buf, chan, + offsets->old + config->cb.subbuffer_header_size(), + commit_count, handle, cc_hot); } /* @@ -1070,33 +1803,51 @@ void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf, * subbuffer. */ static -void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf, +void lib_ring_buffer_switch_old_end(struct lttng_ust_lib_ring_buffer *buf, struct channel *chan, struct switch_offsets *offsets, - u64 tsc, - struct shm_handle *handle) + uint64_t tsc, + struct lttng_ust_shm_handle *handle) { - const struct lib_ring_buffer_config *config = &chan->backend.config; + const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config; unsigned long oldidx = subbuf_index(offsets->old - 1, chan); unsigned long commit_count, padding_size, data_size; + struct commit_counters_hot *cc_hot; + uint64_t *ts_end; data_size = subbuf_offset(offsets->old - 1, chan) + 1; padding_size = chan->backend.subbuf_size - data_size; subbuffer_set_data_size(config, &buf->backend, oldidx, data_size, handle); + ts_end = shmp_index(handle, buf->ts_end, oldidx); + if (!ts_end) + return; /* - * Order all writes to buffer before the commit count update that will - * determine that the subbuffer is full. + * This is the last space reservation in that sub-buffer before + * it gets delivered. This provides exclusive access to write to + * this sub-buffer's ts_end. There are also no concurrent + * readers of that ts_end because delivery of that sub-buffer is + * postponed until the commit counter is incremented for the + * current space reservation. + */ + *ts_end = tsc; + + /* + * Order all writes to buffer and store to ts_end before the commit + * count update that will determine that the subbuffer is full. 
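+ *
+ * Illustrative ordering sketch (writer on the left; the matching
+ * reader-side ordering is only sketched here, it lives in the read
+ * path):
+ *
+ *   store payload and ts_end       load commit count
+ *   cmm_smp_wmb()                  cmm_smp_rmb()
+ *   v_add(..., &cc_hot->cc)        load payload and ts_end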
*/ cmm_smp_wmb(); - v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, oldidx)->cc); - commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc); + cc_hot = shmp_index(handle, buf->commit_hot, oldidx); + if (!cc_hot) + return; + v_add(config, padding_size, &cc_hot->cc); + commit_count = v_read(config, &cc_hot->cc); lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1, - commit_count, oldidx, handle); - lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx, - offsets->old, commit_count, - padding_size, handle); + commit_count, oldidx, handle, tsc); + lib_ring_buffer_write_commit_counter(config, buf, chan, + offsets->old + padding_size, commit_count, handle, + cc_hot); } /* @@ -1107,15 +1858,16 @@ void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf, * that this code is executed before the deliver of this sub-buffer. */ static -void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf, +void lib_ring_buffer_switch_new_start(struct lttng_ust_lib_ring_buffer *buf, struct channel *chan, struct switch_offsets *offsets, - u64 tsc, - struct shm_handle *handle) + uint64_t tsc, + struct lttng_ust_shm_handle *handle) { - const struct lib_ring_buffer_config *config = &chan->backend.config; + const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config; unsigned long beginidx = subbuf_index(offsets->begin, chan); unsigned long commit_count; + struct commit_counters_hot *cc_hot; config->cb.buffer_begin(buf, tsc, beginidx, handle); @@ -1124,52 +1876,54 @@ void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf, * determine that the subbuffer is full. */ cmm_smp_wmb(); - v_add(config, config->cb.subbuffer_header_size(), - &shmp_index(handle, buf->commit_hot, beginidx)->cc); - commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, beginidx)->cc); + cc_hot = shmp_index(handle, buf->commit_hot, beginidx); + if (!cc_hot) + return; + v_add(config, config->cb.subbuffer_header_size(), &cc_hot->cc); + commit_count = v_read(config, &cc_hot->cc); /* Check if the written buffer has to be delivered */ lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin, - commit_count, beginidx, handle); - lib_ring_buffer_write_commit_counter(config, buf, chan, beginidx, - offsets->begin, commit_count, - config->cb.subbuffer_header_size(), - handle); + commit_count, beginidx, handle, tsc); + lib_ring_buffer_write_commit_counter(config, buf, chan, + offsets->begin + config->cb.subbuffer_header_size(), + commit_count, handle, cc_hot); } /* * lib_ring_buffer_switch_new_end: finish switching current subbuffer * - * The only remaining threads could be the ones with pending commits. They will - * have to do the deliver themselves. + * Calls subbuffer_set_data_size() to set the data size of the current + * sub-buffer. We do not need to perform check_deliver nor commit here, + * since this task will be done by the "commit" of the event for which + * we are currently doing the space reservation. 
*/ static -void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf, +void lib_ring_buffer_switch_new_end(struct lttng_ust_lib_ring_buffer *buf, struct channel *chan, struct switch_offsets *offsets, - u64 tsc, - struct shm_handle *handle) + uint64_t tsc, + struct lttng_ust_shm_handle *handle) { - const struct lib_ring_buffer_config *config = &chan->backend.config; - unsigned long endidx = subbuf_index(offsets->end - 1, chan); - unsigned long commit_count, padding_size, data_size; + const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config; + unsigned long endidx, data_size; + uint64_t *ts_end; + endidx = subbuf_index(offsets->end - 1, chan); data_size = subbuf_offset(offsets->end - 1, chan) + 1; - padding_size = chan->backend.subbuf_size - data_size; subbuffer_set_data_size(config, &buf->backend, endidx, data_size, handle); - + ts_end = shmp_index(handle, buf->ts_end, endidx); + if (!ts_end) + return; /* - * Order all writes to buffer before the commit count update that will - * determine that the subbuffer is full. + * This is the last space reservation in that sub-buffer before + * it gets delivered. This provides exclusive access to write to + * this sub-buffer's ts_end. There are also no concurrent + * readers of that ts_end because delivery of that sub-buffer is + * postponed until the commit counter is incremented for the + * current space reservation. */ - cmm_smp_wmb(); - v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, endidx)->cc); - commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc); - lib_ring_buffer_check_deliver(config, buf, chan, offsets->end - 1, - commit_count, endidx, handle); - lib_ring_buffer_write_commit_counter(config, buf, chan, endidx, - offsets->end, commit_count, - padding_size, handle); + *ts_end = tsc; } /* @@ -1179,13 +1933,14 @@ void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf, */ static int lib_ring_buffer_try_switch_slow(enum switch_mode mode, - struct lib_ring_buffer *buf, + struct lttng_ust_lib_ring_buffer *buf, struct channel *chan, struct switch_offsets *offsets, - u64 *tsc) + uint64_t *tsc, + struct lttng_ust_shm_handle *handle) { - const struct lib_ring_buffer_config *config = &chan->backend.config; - unsigned long off; + const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config; + unsigned long off, reserve_commit_diff; offsets->begin = v_read(config, &buf->offset); offsets->old = offsets->begin; @@ -1210,23 +1965,73 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode, * timestamps) are visible to the reader. This is required for * quiescence guarantees for the fusion merge. */ - if (mode == SWITCH_FLUSH || off > 0) { - if (unlikely(off == 0)) { - /* - * The client does not save any header information. - * Don't switch empty subbuffer on finalize, because it - * is invalid to deliver a completely empty subbuffer. - */ - if (!config->cb.subbuffer_header_size()) + if (mode != SWITCH_FLUSH && !off) + return -1; /* we do not have to switch : buffer is empty */ + + if (caa_unlikely(off == 0)) { + unsigned long sb_index, commit_count; + struct commit_counters_cold *cc_cold; + + /* + * We are performing a SWITCH_FLUSH. There may be concurrent + * writes into the buffer if e.g. invoked while performing a + * snapshot on an active trace. + * + * If the client does not save any header information + * (sub-buffer header size == 0), don't switch empty subbuffer + * on finalize, because it is invalid to deliver a completely + * empty subbuffer. 
+ */ + if (!config->cb.subbuffer_header_size()) + return -1; + + /* Test new buffer integrity */ + sb_index = subbuf_index(offsets->begin, chan); + cc_cold = shmp_index(handle, buf->commit_cold, sb_index); + if (!cc_cold) + return -1; + commit_count = v_read(config, &cc_cold->cc_sb); + reserve_commit_diff = + (buf_trunc(offsets->begin, chan) + >> chan->backend.num_subbuf_order) + - (commit_count & chan->commit_count_mask); + if (caa_likely(reserve_commit_diff == 0)) { + /* Next subbuffer not being written to. */ + if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE && + subbuf_trunc(offsets->begin, chan) + - subbuf_trunc((unsigned long) + uatomic_read(&buf->consumed), chan) + >= chan->backend.buf_size)) { + /* + * We do not overwrite non consumed buffers + * and we are full : don't switch. + */ return -1; + } else { + /* + * Next subbuffer not being written to, and we + * are either in overwrite mode or the buffer is + * not full. It's safe to write in this new + * subbuffer. + */ + } + } else { /* - * Need to write the subbuffer start header on finalize. + * Next subbuffer reserve offset does not match the + * commit offset. Don't perform switch in + * producer-consumer and overwrite mode. Caused by + * either a writer OOPS or too many nested writes over a + * reserve/commit pair. */ - offsets->switch_old_start = 1; + return -1; } - offsets->begin = subbuf_align(offsets->begin, chan); - } else - return -1; /* we do not have to switch : buffer is empty */ + + /* + * Need to write the subbuffer start header on finalize. + */ + offsets->switch_old_start = 1; + } + offsets->begin = subbuf_align(offsets->begin, chan); /* Note: old points to the next subbuf at offset 0 */ offsets->end = offsets->begin; return 0; @@ -1236,18 +2041,25 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode, * Force a sub-buffer switch. This operation is completely reentrant : can be * called while tracing is active with absolutely no lock held. * - * Note, however, that as a v_cmpxchg is used for some atomic - * operations, this function must be called from the CPU which owns the buffer - * for a ACTIVE flush. + * For RING_BUFFER_SYNC_PER_CPU ring buffers, as a v_cmpxchg is used for + * some atomic operations, this function must be called from the CPU + * which owns the buffer for a ACTIVE flush. However, for + * RING_BUFFER_SYNC_GLOBAL ring buffers, this function can be called + * from any CPU. 
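+ *
+ * Hedged illustration (run_on_cpu() is a hypothetical helper, not part
+ * of this API): a PER_CPU ACTIVE flush would have to be driven as
+ *
+ *   run_on_cpu(buf->backend.cpu, flush_cb, buf);
+ *
+ * whereas with SYNC_GLOBAL a direct call is valid from any CPU:
+ *
+ *   lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE, handle);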
*/ -void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode, - struct shm_handle *handle) +void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode, + struct lttng_ust_shm_handle *handle) { - struct channel *chan = shmp(handle, buf->backend.chan); - const struct lib_ring_buffer_config *config = &chan->backend.config; + struct channel *chan; + const struct lttng_ust_lib_ring_buffer_config *config; struct switch_offsets offsets; unsigned long oldidx; - u64 tsc; + uint64_t tsc; + + chan = shmp(handle, buf->backend.chan); + if (!chan) + return; + config = &chan->backend.config; offsets.size = 0; @@ -1256,7 +2068,7 @@ void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode m */ do { if (lib_ring_buffer_try_switch_slow(mode, buf, chan, &offsets, - &tsc)) + &tsc, handle)) return; /* Switch not needed */ } while (v_cmpxchg(config, &buf->offset, offsets.old, offsets.end) != offsets.old); @@ -1291,6 +2103,23 @@ void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode m lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc, handle); } +static +bool handle_blocking_retry(int *timeout_left_ms) +{ + int timeout = *timeout_left_ms, delay; + + if (caa_likely(!timeout)) + return false; /* Do not retry, discard event. */ + if (timeout < 0) /* Wait forever. */ + delay = RETRY_DELAY_MS; + else + delay = min_t(int, timeout, RETRY_DELAY_MS); + (void) poll(NULL, 0, delay); + if (timeout > 0) + *timeout_left_ms -= delay; + return true; /* Retry. */ +} + /* * Returns : * 0 if ok @@ -1299,16 +2128,19 @@ void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode m * -EIO if data cannot be written into the buffer for any other reason. */ static -int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf, +int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf, struct channel *chan, struct switch_offsets *offsets, - struct lib_ring_buffer_ctx *ctx) + struct lttng_ust_lib_ring_buffer_ctx *ctx, + void *client_ctx) { - const struct lib_ring_buffer_config *config = &chan->backend.config; - struct shm_handle *handle = ctx->handle; - unsigned long reserve_commit_diff; + const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config; + struct lttng_ust_shm_handle *handle = ctx->handle; + unsigned long reserve_commit_diff, offset_cmp; + int timeout_left_ms = lttng_ust_ringbuffer_get_timeout(chan); - offsets->begin = v_read(config, &buf->offset); +retry: + offsets->begin = offset_cmp = v_read(config, &buf->offset); offsets->old = offsets->begin; offsets->switch_new_start = 0; offsets->switch_new_end = 0; @@ -1322,53 +2154,86 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf, if (last_tsc_overflow(config, buf, ctx->tsc)) ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC; - if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) { + if (caa_unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) { offsets->switch_new_start = 1; /* For offsets->begin */ } else { offsets->size = config->cb.record_header_size(config, chan, offsets->begin, &offsets->pre_header_padding, - ctx); + ctx, client_ctx); offsets->size += lib_ring_buffer_align(offsets->begin + offsets->size, ctx->largest_align) + ctx->data_size; - if (unlikely(subbuf_offset(offsets->begin, chan) + + if (caa_unlikely(subbuf_offset(offsets->begin, chan) + offsets->size > chan->backend.subbuf_size)) { offsets->switch_old_end = 1; /* For offsets->old */ offsets->switch_new_start = 1; /* For 
offsets->begin */ } } - if (unlikely(offsets->switch_new_start)) { - unsigned long sb_index; + if (caa_unlikely(offsets->switch_new_start)) { + unsigned long sb_index, commit_count; + struct commit_counters_cold *cc_cold; /* * We are typically not filling the previous buffer completely. */ - if (likely(offsets->switch_old_end)) + if (caa_likely(offsets->switch_old_end)) offsets->begin = subbuf_align(offsets->begin, chan); offsets->begin = offsets->begin + config->cb.subbuffer_header_size(); /* Test new buffer integrity */ sb_index = subbuf_index(offsets->begin, chan); + /* + * Read buf->offset before buf->commit_cold[sb_index].cc_sb. + * lib_ring_buffer_check_deliver() has the matching + * memory barriers required around commit_cold cc_sb + * updates to ensure reserve and commit counter updates + * are not seen reordered when updated by another CPU. + */ + cmm_smp_rmb(); + cc_cold = shmp_index(handle, buf->commit_cold, sb_index); + if (!cc_cold) + return -1; + commit_count = v_read(config, &cc_cold->cc_sb); + /* Read buf->commit_cold[sb_index].cc_sb before buf->offset. */ + cmm_smp_rmb(); + if (caa_unlikely(offset_cmp != v_read(config, &buf->offset))) { + /* + * The reserve counter have been concurrently updated + * while we read the commit counter. This means the + * commit counter we read might not match buf->offset + * due to concurrent update. We therefore need to retry. + */ + goto retry; + } reserve_commit_diff = (buf_trunc(offsets->begin, chan) >> chan->backend.num_subbuf_order) - - ((unsigned long) v_read(config, - &shmp_index(handle, buf->commit_cold, sb_index)->cc_sb) - & chan->commit_count_mask); - if (likely(reserve_commit_diff == 0)) { + - (commit_count & chan->commit_count_mask); + if (caa_likely(reserve_commit_diff == 0)) { /* Next subbuffer not being written to. */ - if (unlikely(config->mode != RING_BUFFER_OVERWRITE && + if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE && subbuf_trunc(offsets->begin, chan) - subbuf_trunc((unsigned long) uatomic_read(&buf->consumed), chan) >= chan->backend.buf_size)) { + unsigned long nr_lost; + + if (handle_blocking_retry(&timeout_left_ms)) + goto retry; + /* * We do not overwrite non consumed buffers * and we are full : record is lost. */ + nr_lost = v_read(config, &buf->records_lost_full); v_inc(config, &buf->records_lost_full); + if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0) { + DBG("%lu or more records lost in (%s:%d) (buffer full)\n", + nr_lost + 1, chan->backend.name, + buf->backend.cpu); + } return -ENOBUFS; } else { /* @@ -1379,31 +2244,49 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf, */ } } else { + unsigned long nr_lost; + /* * Next subbuffer reserve offset does not match the - * commit offset. Drop record in producer-consumer and + * commit offset, and this did not involve update to the + * reserve counter. Drop record in producer-consumer and * overwrite mode. Caused by either a writer OOPS or too * many nested writes over a reserve/commit pair. 
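 *
 * (Nesting example, for illustration: a signal handler that itself
 * records an event, delivered between another event's reserve and
 * commit on the same buffer, stacks a second reserve/commit pair;
 * enough such nesting over one sub-buffer triggers this case.)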
*/ + nr_lost = v_read(config, &buf->records_lost_wrap); v_inc(config, &buf->records_lost_wrap); + if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0) { + DBG("%lu or more records lost in (%s:%d) (wrap-around)\n", + nr_lost + 1, chan->backend.name, + buf->backend.cpu); + } return -EIO; } offsets->size = config->cb.record_header_size(config, chan, offsets->begin, &offsets->pre_header_padding, - ctx); + ctx, client_ctx); offsets->size += lib_ring_buffer_align(offsets->begin + offsets->size, ctx->largest_align) + ctx->data_size; - if (unlikely(subbuf_offset(offsets->begin, chan) + if (caa_unlikely(subbuf_offset(offsets->begin, chan) + offsets->size > chan->backend.subbuf_size)) { + unsigned long nr_lost; + /* * Record too big for subbuffers, report error, don't * complete the sub-buffer switch. */ + nr_lost = v_read(config, &buf->records_lost_big); v_inc(config, &buf->records_lost_big); + if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0) { + DBG("%lu or more records lost in (%s:%d) record size " + " of %zu bytes is too large for buffer\n", + nr_lost + 1, chan->backend.name, + buf->backend.cpu, offsets->size); + } return -ENOSPC; } else { /* @@ -1419,7 +2302,7 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf, } offsets->end = offsets->begin + offsets->size; - if (unlikely(subbuf_offset(offsets->end, chan) == 0)) { + if (caa_unlikely(subbuf_offset(offsets->end, chan) == 0)) { /* * The offset_end will fall at the very beginning of the next * subbuffer. @@ -1437,12 +2320,13 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf, * -EIO for other errors, else returns 0. * It will take care of sub-buffer switching. */ -int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx) +int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx, + void *client_ctx) { struct channel *chan = ctx->chan; - struct shm_handle *handle = ctx->handle; - const struct lib_ring_buffer_config *config = &chan->backend.config; - struct lib_ring_buffer *buf; + struct lttng_ust_shm_handle *handle = ctx->handle; + const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config; + struct lttng_ust_lib_ring_buffer *buf; struct switch_offsets offsets; int ret; @@ -1450,16 +2334,18 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx) buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp); else buf = shmp(handle, chan->backend.buf[0].shmp); + if (!buf) + return -EIO; ctx->buf = buf; offsets.size = 0; do { ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets, - ctx); - if (unlikely(ret)) + ctx, client_ctx); + if (caa_unlikely(ret)) return ret; - } while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old, + } while (caa_unlikely(v_cmpxchg(config, &buf->offset, offsets.old, offsets.end) != offsets.old)); @@ -1486,7 +2372,7 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx) /* * Switch old subbuffer if needed. */ - if (unlikely(offsets.switch_old_end)) { + if (caa_unlikely(offsets.switch_old_end)) { lib_ring_buffer_clear_noref(config, &buf->backend, subbuf_index(offsets.old - 1, chan), handle); @@ -1496,10 +2382,10 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx) /* * Populate new subbuffer. 
*/ - if (unlikely(offsets.switch_new_start)) + if (caa_unlikely(offsets.switch_new_start)) lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc, handle); - if (unlikely(offsets.switch_new_end)) + if (caa_unlikely(offsets.switch_new_end)) lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc, handle); ctx->slot_size = offsets.size; @@ -1507,3 +2393,192 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx) ctx->buf_offset = offsets.begin + offsets.pre_header_padding; return 0; } + +static +void lib_ring_buffer_vmcore_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config, + struct lttng_ust_lib_ring_buffer *buf, + unsigned long commit_count, + unsigned long idx, + struct lttng_ust_shm_handle *handle) +{ + struct commit_counters_hot *cc_hot; + + if (config->oops != RING_BUFFER_OOPS_CONSISTENCY) + return; + cc_hot = shmp_index(handle, buf->commit_hot, idx); + if (!cc_hot) + return; + v_set(config, &cc_hot->seq, commit_count); +} + +/* + * The ring buffer can count events recorded and overwritten per buffer, + * but it is disabled by default due to its performance overhead. + */ +#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS +static +void deliver_count_events(const struct lttng_ust_lib_ring_buffer_config *config, + struct lttng_ust_lib_ring_buffer *buf, + unsigned long idx, + struct lttng_ust_shm_handle *handle) +{ + v_add(config, subbuffer_get_records_count(config, + &buf->backend, idx, handle), + &buf->records_count); + v_add(config, subbuffer_count_records_overrun(config, + &buf->backend, idx, handle), + &buf->records_overrun); +} +#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */ +static +void deliver_count_events(const struct lttng_ust_lib_ring_buffer_config *config, + struct lttng_ust_lib_ring_buffer *buf, + unsigned long idx, + struct lttng_ust_shm_handle *handle) +{ +} +#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */ + +void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config, + struct lttng_ust_lib_ring_buffer *buf, + struct channel *chan, + unsigned long offset, + unsigned long commit_count, + unsigned long idx, + struct lttng_ust_shm_handle *handle, + uint64_t tsc) +{ + unsigned long old_commit_count = commit_count + - chan->backend.subbuf_size; + struct commit_counters_cold *cc_cold; + + /* + * If we succeeded at updating cc_sb below, we are the subbuffer + * writer delivering the subbuffer. Deals with concurrent + * updates of the "cc" value without adding a add_return atomic + * operation to the fast path. + * + * We are doing the delivery in two steps: + * - First, we cmpxchg() cc_sb to the new value + * old_commit_count + 1. This ensures that we are the only + * subbuffer user successfully filling the subbuffer, but we + * do _not_ set the cc_sb value to "commit_count" yet. + * Therefore, other writers that would wrap around the ring + * buffer and try to start writing to our subbuffer would + * have to drop records, because it would appear as + * non-filled. + * We therefore have exclusive access to the subbuffer control + * structures. This mutual exclusion with other writers is + * crucially important to perform record overruns count in + * flight recorder mode locklessly. + * - When we are ready to release the subbuffer (either for + * reading or for overrun by other writers), we simply set the + * cc_sb value to "commit_count" and perform delivery. + * + * The subbuffer size is least 2 bytes (minimum size: 1 page). + * This guarantees that old_commit_count + 1 != commit_count. 
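+ *
+ * Sketch of the resulting cc_sb transitions for one sub-buffer:
+ *
+ *   cc_sb == old_commit_count          still being filled
+ *       -- cmpxchg by the delivering writer -->
+ *   cc_sb == old_commit_count + 1      exclusive delivery access
+ *       -- v_set at end of delivery -->
+ *   cc_sb == commit_count              delivered, reader-visible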
+ */ + + /* + * Order prior updates to reserve count prior to the + * commit_cold cc_sb update. + */ + cmm_smp_wmb(); + cc_cold = shmp_index(handle, buf->commit_cold, idx); + if (!cc_cold) + return; + if (caa_likely(v_cmpxchg(config, &cc_cold->cc_sb, + old_commit_count, old_commit_count + 1) + == old_commit_count)) { + uint64_t *ts_end; + + /* + * Start of exclusive subbuffer access. We are + * guaranteed to be the last writer in this subbuffer + * and any other writer trying to access this subbuffer + * in this state is required to drop records. + * + * We can read the ts_end for the current sub-buffer + * which has been saved by the very last space + * reservation for the current sub-buffer. + * + * Order increment of commit counter before reading ts_end. + */ + cmm_smp_mb(); + ts_end = shmp_index(handle, buf->ts_end, idx); + if (!ts_end) + return; + deliver_count_events(config, buf, idx, handle); + config->cb.buffer_end(buf, *ts_end, idx, + lib_ring_buffer_get_data_size(config, + buf, + idx, + handle), + handle); + + /* + * Increment the packet counter while we have exclusive + * access. + */ + subbuffer_inc_packet_count(config, &buf->backend, idx, handle); + + /* + * Set noref flag and offset for this subbuffer id. + * Contains a memory barrier that ensures counter stores + * are ordered before set noref and offset. + */ + lib_ring_buffer_set_noref_offset(config, &buf->backend, idx, + buf_trunc_val(offset, chan), handle); + + /* + * Order set_noref and record counter updates before the + * end of subbuffer exclusive access. Orders with + * respect to writers coming into the subbuffer after + * wrap around, and also order wrt concurrent readers. + */ + cmm_smp_mb(); + /* End of exclusive subbuffer access */ + v_set(config, &cc_cold->cc_sb, commit_count); + /* + * Order later updates to reserve count after + * the commit cold cc_sb update. + */ + cmm_smp_wmb(); + lib_ring_buffer_vmcore_check_deliver(config, buf, + commit_count, idx, handle); + + /* + * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free. + */ + if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER + && uatomic_read(&buf->active_readers) + && lib_ring_buffer_poll_deliver(config, buf, chan, handle)) { + lib_ring_buffer_wakeup(buf, handle); + } + } +} + +/* + * Force a read (imply TLS fixup for dlopen) of TLS variables. + */ +void lttng_fixup_ringbuffer_tls(void) +{ + asm volatile ("" : : "m" (URCU_TLS(lib_ring_buffer_nesting))); +} + +void lib_ringbuffer_signal_init(void) +{ + sigset_t mask; + int ret; + + /* + * Block signal for entire process, so only our thread processes + * it. + */ + rb_setmask(&mask); + ret = pthread_sigmask(SIG_BLOCK, &mask, NULL); + if (ret) { + errno = ret; + PERROR("pthread_sigmask"); + } +}
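For readers unfamiliar with the signal-handling pattern that
lib_ringbuffer_signal_init() relies on, here is a standalone sketch of
the same "block process-wide, handle in one dedicated thread" idiom.
It is illustrative only: rb_setmask() is not shown in this patch
excerpt, so the mask is rebuilt from the LTTNG_UST_RB_SIG_* values
(SIGRTMIN, SIGRTMIN + 1, SIGRTMIN + 2) defined above.

	#include <pthread.h>
	#include <signal.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		sigset_t mask;
		int ret;

		/* Same signals as LTTNG_UST_RB_SIG_{FLUSH,READ,TEARDOWN}. */
		sigemptyset(&mask);
		sigaddset(&mask, SIGRTMIN);
		sigaddset(&mask, SIGRTMIN + 1);
		sigaddset(&mask, SIGRTMIN + 2);

		/*
		 * Block before creating any thread, so that every thread
		 * inherits the mask and only a dedicated handler thread
		 * (using sigwaitinfo() on the same set) processes them.
		 */
		ret = pthread_sigmask(SIG_BLOCK, &mask, NULL);
		if (ret) {
			fprintf(stderr, "pthread_sigmask: %s\n", strerror(ret));
			return 1;
		}
		return 0;
	}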