X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=libringbuffer%2Ffrontend_internal.h;h=c8a54e96a83cd52ed1f29877c1b98cd1871ced4e;hb=071dec4386ff4c89d8b0f92ab1a8dbf079abd50c;hp=4ada183791c2a4f4253bb10baf1cff36b4d97214;hpb=e185cf4b895d5d9a040d74be1dfa4d004eff04b7;p=lttng-ust.git

diff --git a/libringbuffer/frontend_internal.h b/libringbuffer/frontend_internal.h
index 4ada1837..c8a54e96 100644
--- a/libringbuffer/frontend_internal.h
+++ b/libringbuffer/frontend_internal.h
@@ -1,45 +1,27 @@
-#ifndef _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
-#define _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
-
 /*
- * libringbuffer/frontend_internal.h
- *
- * Ring Buffer Library Synchronization Header (internal helpers).
+ * SPDX-License-Identifier: (LGPL-2.1-only or GPL-2.0-only)
  *
  * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; only
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- *
- * Author:
- *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Ring Buffer Library Synchronization Header (internal helpers).
  *
  * See ring_buffer_frontend.c for more information on wait-free algorithms.
- *
- * Dual LGPL v2.1/GPL v2 license.
  */
 
+#ifndef _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
+#define _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
+
 #include <urcu/compiler.h>
 #include <urcu/uatomic.h>
 #include <signal.h>
+#include <stdint.h>
 #include <pthread.h>
 
 #include <lttng/ringbuffer-config.h>
 #include "backend_types.h"
 #include "frontend_types.h"
 #include "shm.h"
+#include "ust-helper.h"
 
 /* Buffer offset macros */
 
@@ -157,14 +139,27 @@ int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
 }
 #endif
 
+LTTNG_HIDDEN
 extern
-int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx);
+int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+		void *client_ctx);
 
+LTTNG_HIDDEN
 extern
 void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf,
 				 enum switch_mode mode,
 				 struct lttng_ust_shm_handle *handle);
 
+LTTNG_HIDDEN
+void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config,
+				   struct lttng_ust_lib_ring_buffer *buf,
+				   struct channel *chan,
+				   unsigned long offset,
+				   unsigned long commit_count,
+				   unsigned long idx,
+				   struct lttng_ust_shm_handle *handle,
+				   uint64_t tsc);
+
 /* Buffer write helpers */
 
 static inline
@@ -195,58 +190,37 @@ void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
 			      consumed_new) != consumed_old));
 }
 
+/*
+ * Move consumed position to the beginning of subbuffer in which the
+ * write offset is. Should only be used on ring buffers that are not
+ * actively being written into, because clear_reader does not take into
+ * account the commit counters when moving the consumed position, which
+ * can make concurrent trace producers or consumers observe consumed
+ * position further than the write offset, which breaks ring buffer
+ * algorithm guarantees.
+ */
 static inline
-void lib_ring_buffer_vmcore_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
-					  struct lttng_ust_lib_ring_buffer *buf,
-					  unsigned long commit_count,
-					  unsigned long idx,
-					  struct lttng_ust_shm_handle *handle)
-{
-	if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
-		v_set(config, &shmp_index(handle, buf->commit_hot, idx)->seq, commit_count);
-}
-
-static inline
-int lib_ring_buffer_poll_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
-				 struct lttng_ust_lib_ring_buffer *buf,
-				 struct channel *chan,
-				 struct lttng_ust_shm_handle *handle)
+void lib_ring_buffer_clear_reader(struct lttng_ust_lib_ring_buffer *buf,
+				  struct lttng_ust_shm_handle *handle)
 {
-	unsigned long consumed_old, consumed_idx, commit_count, write_offset;
-
-	consumed_old = uatomic_read(&buf->consumed);
-	consumed_idx = subbuf_index(consumed_old, chan);
-	commit_count = v_read(config, &shmp_index(handle, buf->commit_cold, consumed_idx)->cc_sb);
-	/*
-	 * No memory barrier here, since we are only interested
-	 * in a statistically correct polling result. The next poll will
-	 * get the data is we are racing. The mb() that ensures correct
-	 * memory order is in get_subbuf.
-	 */
-	write_offset = v_read(config, &buf->offset);
-
-	/*
-	 * Check that the subbuffer we are trying to consume has been
-	 * already fully committed.
-	 */
+	struct channel *chan;
+	const struct lttng_ust_lib_ring_buffer_config *config;
+	unsigned long offset, consumed_old, consumed_new;
 
-	if (((commit_count - chan->backend.subbuf_size)
-	     & chan->commit_count_mask)
-	    - (buf_trunc(consumed_old, chan)
-	       >> chan->backend.num_subbuf_order)
-	    != 0)
-		return 0;
-
-	/*
-	 * Check that we are not about to read the same subbuffer in
-	 * which the writer head is.
-	 */
-	if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_old, chan)
-	    == 0)
-		return 0;
-
-	return 1;
+	chan = shmp(handle, buf->backend.chan);
+	if (!chan)
+		return;
+	config = &chan->backend.config;
+
+	do {
+		offset = v_read(config, &buf->offset);
+		consumed_old = uatomic_read(&buf->consumed);
+		CHAN_WARN_ON(chan, (long) (subbuf_trunc(offset, chan)
+				- subbuf_trunc(consumed_old, chan))
+				< 0);
+		consumed_new = subbuf_trunc(offset, chan);
+	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
+			consumed_new) != consumed_old));
 }
 
 static inline
@@ -278,6 +252,7 @@ int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_con
 				     struct lttng_ust_shm_handle *handle)
 {
 	unsigned long offset, idx, commit_count;
+	struct commit_counters_hot *cc_hot;
 
 	CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
 	CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);
@@ -293,78 +268,23 @@ int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_con
 	do {
 		offset = v_read(config, &buf->offset);
 		idx = subbuf_index(offset, chan);
-		commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->cc);
+		cc_hot = shmp_index(handle, buf->commit_hot, idx);
+		if (caa_unlikely(!cc_hot))
+			return 0;
+		commit_count = v_read(config, &cc_hot->cc);
 	} while (offset != v_read(config, &buf->offset));
 
 	return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
 		     - (commit_count & chan->commit_count_mask) == 0);
 }
 
-static inline
-void lib_ring_buffer_wakeup(struct lttng_ust_lib_ring_buffer *buf,
-			    struct lttng_ust_shm_handle *handle)
-{
-	int wakeup_fd = shm_get_wakeup_fd(handle, &buf->self._ref);
-	sigset_t sigpipe_set, pending_set, old_set;
-	int ret, sigpipe_was_pending = 0;
-
-	if (wakeup_fd < 0)
-		return;
-
-	/*
-	 * Wake-up the other end by writing a null byte in the pipe
-	 * (non-blocking). Important note: Because writing into the
-	 * pipe is non-blocking (and therefore we allow dropping wakeup
-	 * data, as long as there is wakeup data present in the pipe
-	 * buffer to wake up the consumer), the consumer should perform
-	 * the following sequence for waiting:
-	 * 1) empty the pipe (reads).
-	 * 2) check if there is data in the buffer.
-	 * 3) wait on the pipe (poll).
-	 *
-	 * Discard the SIGPIPE from write(), not disturbing any SIGPIPE
-	 * that might be already pending. If a bogus SIGPIPE is sent to
-	 * the entire process concurrently by a malicious user, it may
-	 * be simply discarded.
-	 */
-	ret = sigemptyset(&pending_set);
-	assert(!ret);
-	/*
-	 * sigpending returns the mask of signals that are _both_
-	 * blocked for the thread _and_ pending for either the thread or
-	 * the entire process.
-	 */
-	ret = sigpending(&pending_set);
-	assert(!ret);
-	sigpipe_was_pending = sigismember(&pending_set, SIGPIPE);
-	/*
-	 * If sigpipe was pending, it means it was already blocked, so
-	 * no need to block it.
-	 */
-	if (!sigpipe_was_pending) {
-		ret = sigemptyset(&sigpipe_set);
-		assert(!ret);
-		ret = sigaddset(&sigpipe_set, SIGPIPE);
-		assert(!ret);
-		ret = pthread_sigmask(SIG_BLOCK, &sigpipe_set, &old_set);
-		assert(!ret);
-	}
-	do {
-		ret = write(wakeup_fd, "", 1);
-	} while (ret == -1L && errno == EINTR);
-	if (ret == -1L && errno == EPIPE && !sigpipe_was_pending) {
-		struct timespec timeout = { 0, 0 };
-		do {
-			ret = sigtimedwait(&sigpipe_set, NULL,
-				&timeout);
-		} while (ret == -1L && errno == EINTR);
-	}
-	if (!sigpipe_was_pending) {
-		ret = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
-		assert(!ret);
-	}
-}
-
+/*
+ * Receive end of subbuffer TSC as parameter. It has been read in the
+ * space reservation loop of either reserve or switch, which ensures it
+ * progresses monotonically with event records in the buffer. Therefore,
+ * it ensures that the end timestamp of a subbuffer is <= begin
+ * timestamp of the following subbuffers.
+ */
 static inline
 void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
 				   struct lttng_ust_lib_ring_buffer *buf,
@@ -372,110 +292,17 @@ void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config
 				   unsigned long offset,
 				   unsigned long commit_count,
 				   unsigned long idx,
-				   struct lttng_ust_shm_handle *handle)
+				   struct lttng_ust_shm_handle *handle,
+				   uint64_t tsc)
 {
 	unsigned long old_commit_count = commit_count
 					 - chan->backend.subbuf_size;
-	uint64_t tsc;
 
 	/* Check if all commits have been done */
 	if (caa_unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
-		     - (old_commit_count & chan->commit_count_mask) == 0)) {
-		/*
-		 * If we succeeded at updating cc_sb below, we are the subbuffer
-		 * writer delivering the subbuffer. Deals with concurrent
-		 * updates of the "cc" value without adding a add_return atomic
-		 * operation to the fast path.
-		 *
-		 * We are doing the delivery in two steps:
-		 * - First, we cmpxchg() cc_sb to the new value
-		 *   old_commit_count + 1. This ensures that we are the only
-		 *   subbuffer user successfully filling the subbuffer, but we
-		 *   do _not_ set the cc_sb value to "commit_count" yet.
-		 *   Therefore, other writers that would wrap around the ring
-		 *   buffer and try to start writing to our subbuffer would
-		 *   have to drop records, because it would appear as
-		 *   non-filled.
-		 *   We therefore have exclusive access to the subbuffer control
-		 *   structures. This mutual exclusion with other writers is
-		 *   crucially important to perform record overruns count in
-		 *   flight recorder mode locklessly.
-		 * - When we are ready to release the subbuffer (either for
-		 *   reading or for overrun by other writers), we simply set the
-		 *   cc_sb value to "commit_count" and perform delivery.
-		 *
-		 * The subbuffer size is least 2 bytes (minimum size: 1 page).
-		 * This guarantees that old_commit_count + 1 != commit_count.
-		 */
-
-		/*
-		 * Order prior updates to reserve count prior to the
-		 * commit_cold cc_sb update.
-		 */
-		cmm_smp_wmb();
-		if (caa_likely(v_cmpxchg(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
-					 old_commit_count, old_commit_count + 1)
-			   == old_commit_count)) {
-			/*
-			 * Start of exclusive subbuffer access. We are
-			 * guaranteed to be the last writer in this subbuffer
-			 * and any other writer trying to access this subbuffer
-			 * in this state is required to drop records.
-			 */
-			tsc = config->cb.ring_buffer_clock_read(chan);
-			v_add(config,
-			      subbuffer_get_records_count(config,
-							  &buf->backend,
-							  idx, handle),
-			      &buf->records_count);
-			v_add(config,
-			      subbuffer_count_records_overrun(config,
-							      &buf->backend,
-							      idx, handle),
-			      &buf->records_overrun);
-			config->cb.buffer_end(buf, tsc, idx,
-					      lib_ring_buffer_get_data_size(config,
-									buf,
-									idx,
-									handle),
-					      handle);
-
-			/*
-			 * Set noref flag and offset for this subbuffer id.
-			 * Contains a memory barrier that ensures counter stores
-			 * are ordered before set noref and offset.
-			 */
-			lib_ring_buffer_set_noref_offset(config, &buf->backend, idx,
-							 buf_trunc_val(offset, chan), handle);
-
-			/*
-			 * Order set_noref and record counter updates before the
-			 * end of subbuffer exclusive access. Orders with
-			 * respect to writers coming into the subbuffer after
-			 * wrap around, and also order wrt concurrent readers.
-			 */
-			cmm_smp_mb();
-			/* End of exclusive subbuffer access */
-			v_set(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
-			      commit_count);
-			/*
-			 * Order later updates to reserve count after
-			 * the commit cold cc_sb update.
-			 */
-			cmm_smp_wmb();
-			lib_ring_buffer_vmcore_check_deliver(config, buf,
-						 commit_count, idx, handle);
-
-			/*
-			 * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
-			 */
-			if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
-			    && uatomic_read(&buf->active_readers)
-			    && lib_ring_buffer_poll_deliver(config, buf, chan, handle)) {
-				lib_ring_buffer_wakeup(buf, handle);
-			}
-		}
-	}
+		     - (old_commit_count & chan->commit_count_mask) == 0))
+		lib_ring_buffer_check_deliver_slow(config, buf, chan, offset,
+			commit_count, idx, handle, tsc);
 }
 
 /*
@@ -490,42 +317,41 @@ static inline
 void lib_ring_buffer_write_commit_counter(const struct lttng_ust_lib_ring_buffer_config *config,
 					  struct lttng_ust_lib_ring_buffer *buf,
 					  struct channel *chan,
-					  unsigned long idx,
 					  unsigned long buf_offset,
 					  unsigned long commit_count,
-					  size_t slot_size,
-					  struct lttng_ust_shm_handle *handle)
+					  struct lttng_ust_shm_handle *handle,
+					  struct commit_counters_hot *cc_hot)
 {
-	unsigned long offset, commit_seq_old;
+	unsigned long commit_seq_old;
 
 	if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
 		return;
 
-	offset = buf_offset + slot_size;
-
 	/*
 	 * subbuf_offset includes commit_count_mask. We can simply
 	 * compare the offsets within the subbuffer without caring about
 	 * buffer full/empty mismatch because offset is never zero here
 	 * (subbuffer header and record headers have non-zero length).
 	 */
-	if (caa_unlikely(subbuf_offset(offset - commit_count, chan)))
+	if (caa_unlikely(subbuf_offset(buf_offset - commit_count, chan)))
 		return;
 
-	commit_seq_old = v_read(config, &shmp_index(handle, buf->commit_hot, idx)->seq);
-	while ((long) (commit_seq_old - commit_count) < 0)
-		commit_seq_old = v_cmpxchg(config, &shmp_index(handle, buf->commit_hot, idx)->seq,
-					   commit_seq_old, commit_count);
+	commit_seq_old = v_read(config, &cc_hot->seq);
+	if (caa_likely((long) (commit_seq_old - commit_count) < 0))
+		v_set(config, &cc_hot->seq, commit_count);
 }
 
+LTTNG_HIDDEN
 extern int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
 				  struct channel_backend *chanb, int cpu,
 				  struct lttng_ust_shm_handle *handle,
 				  struct shm_object *shmobj);
+LTTNG_HIDDEN
 extern void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
 				 struct lttng_ust_shm_handle *handle);
 
 /* Keep track of trap nesting inside ring buffer code */
+LTTNG_HIDDEN
 extern DECLARE_URCU_TLS(unsigned int, lib_ring_buffer_nesting);
 
 #endif /* _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H */
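
Note on the wakeup pipe: the comment deleted above from lib_ring_buffer_wakeup() spells out the wait protocol the consumer side is expected to follow, since wakeup bytes may be dropped by the non-blocking producer write: 1) empty the pipe (reads), 2) check if there is data in the buffer, 3) wait on the pipe (poll). The sketch below is not part of this patch and not lttng-ust API; it is a minimal illustration of that sequence using plain POSIX calls, assuming the read end of the wakeup pipe has been set O_NONBLOCK, and buffer_has_data() is a hypothetical placeholder for the consumer's "is a sub-buffer ready" check.

#include <errno.h>
#include <poll.h>
#include <stdbool.h>
#include <unistd.h>

extern bool buffer_has_data(void);	/* hypothetical readiness check */

/*
 * Consumer-side wait following the sequence described in the removed
 * comment: drain the pipe, re-check the buffer, then poll. Dropping
 * extra wakeup bytes is harmless as long as at least one byte remains
 * queued whenever data is pending.
 */
static void wait_for_data(int wakeup_fd)
{
	char drain[64];
	struct pollfd pfd = { .fd = wakeup_fd, .events = POLLIN };

	for (;;) {
		/* 1) Empty the pipe (reads); fd is assumed non-blocking. */
		while (read(wakeup_fd, drain, sizeof(drain)) > 0)
			continue;
		/* 2) Check for data after draining to avoid a lost wakeup. */
		if (buffer_has_data())
			return;
		/* 3) Wait on the pipe until the producer writes a byte. */
		if (poll(&pfd, 1, -1) < 0 && errno != EINTR)
			return;
	}
}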