-#ifndef _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
-#define _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
-
/*
- * libringbuffer/frontend_internal.h
- *
- * Ring Buffer Library Synchronization Header (internal helpers).
+ * SPDX-License-Identifier: (LGPL-2.1-only or GPL-2.0-only)
*
* Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; only
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- *
- * Author:
- * Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Ring Buffer Library Synchronization Header (internal helpers).
*
* See ring_buffer_frontend.c for more information on wait-free algorithms.
- *
- * Dual LGPL v2.1/GPL v2 license.
*/
+#ifndef _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
+#define _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
+
#include <urcu/compiler.h>
#include <urcu/tls-compat.h>
#include <signal.h>
+#include <stdint.h>
#include <pthread.h>
#include <lttng/ringbuffer-config.h>
#endif
extern
-int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx);
+int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx);
extern
void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf,
consumed_new) != consumed_old));
}
+/*
+ * Move consumed position to the beginning of subbuffer in which the
+ * write offset is. Should only be used on ring buffers that are not
+ * actively being written into, because clear_reader does not take into
+ * account the commit counters when moving the consumed position, which
+ * can make concurrent trace producers or consumers observe consumed
+ * position further than the write offset, which breaks ring buffer
+ * algorithm guarantees.
+ */
+static inline
+void lib_ring_buffer_clear_reader(struct lttng_ust_lib_ring_buffer *buf,
+		struct lttng_ust_shm_handle *handle)
+{
+	struct channel *chan;
+	const struct lttng_ust_lib_ring_buffer_config *config;
+	unsigned long offset, consumed_old, consumed_new;
+
+	/* Resolve the channel through the shm handle; silently bail out
+	 * if the back-reference cannot be mapped. */
+	chan = shmp(handle, buf->backend.chan);
+	if (!chan)
+		return;
+	config = &chan->backend.config;
+
+	/*
+	 * Retry loop: snapshot the write offset and the consumed position,
+	 * then publish the new consumed position (start of the sub-buffer
+	 * containing the write offset) with a cmpxchg. Restart if a
+	 * concurrent reader moved buf->consumed between the read and the
+	 * cmpxchg.
+	 */
+	do {
+		offset = v_read(config, &buf->offset);
+		consumed_old = uatomic_read(&buf->consumed);
+		/* Consumed position must never be past the write offset
+		 * (signed sub-buffer-granularity comparison). */
+		CHAN_WARN_ON(chan, (long) (subbuf_trunc(offset, chan)
+			   - subbuf_trunc(consumed_old, chan))
+			   < 0);
+		consumed_new = subbuf_trunc(offset, chan);
+	} while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
+			consumed_new) != consumed_old));
+}
+
static inline
int lib_ring_buffer_pending_data(const struct lttng_ust_lib_ring_buffer_config *config,
struct lttng_ust_lib_ring_buffer *buf,
struct lttng_ust_shm_handle *handle)
{
unsigned long offset, idx, commit_count;
- struct commit_counters_hot *cc_hot = shmp_index(handle, buf->commit_hot, idx);
+ struct commit_counters_hot *cc_hot;
CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);
- if (caa_unlikely(!cc_hot))
- return 0;
-
/*
* Read offset and commit count in a loop so they are both read
* atomically wrt interrupts. By deal with interrupt concurrency by
do {
offset = v_read(config, &buf->offset);
idx = subbuf_index(offset, chan);
+ cc_hot = shmp_index(handle, buf->commit_hot, idx);
+ if (caa_unlikely(!cc_hot))
+ return 0;
commit_count = v_read(config, &cc_hot->cc);
} while (offset != v_read(config, &buf->offset));