#include <urcu/compiler.h>
+#include "common/getcpu.h"
#include "frontend.h"
/**
* Returns a nesting level >= 0 on success, -EPERM on failure (nesting
* count too high).
*
- * asm volatile and "memory" clobber prevent the compiler from moving
+ * __asm__ __volatile__ and "memory" clobber prevent the compiler from moving
* instructions out of the ring buffer nesting count. This is required to ensure
* that probe side-effects which can cause recursion (e.g. unforeseen traps,
* divisions by 0, ...) are triggered within the incremented nesting count
* section.
*/
static inline
int lib_ring_buffer_nesting_inc(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)))
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)))
{
int nesting;
static inline
int lib_ring_buffer_nesting_count(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)))
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)))
{
return URCU_TLS(lib_ring_buffer_nesting);
}
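+/*
+ * A minimal usage sketch (hypothetical probe-side caller; the control flow
+ * is illustrative only, not part of this API's contract):
+ *
+ *	if (lib_ring_buffer_nesting_inc(config) < 0)
+ *		return;			// nesting count too high: drop the event
+ *	// ... reserve, write and commit the record ...
+ *	lib_ring_buffer_nesting_dec(config);
+ */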
static inline
void lib_ring_buffer_nesting_dec(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)))
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)))
{
cmm_barrier();
URCU_TLS(lib_ring_buffer_nesting)--; /* TLS */
* Returns 0 if the reservation succeeds, or 1 if the slow path must be taken.
*/
static inline
-int lib_ring_buffer_try_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+int lib_ring_buffer_try_reserve(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_ctx *ctx,
void *client_ctx,
unsigned long *o_begin, unsigned long *o_end,
unsigned long *o_old, size_t *before_hdr_pad)
{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
- struct lttng_ust_lib_ring_buffer *buf = ctx_private->buf;
+ struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_ring_buffer_channel *chan = ctx_private->chan;
+ struct lttng_ust_ring_buffer *buf = ctx_private->buf;
*o_begin = v_read(config, &buf->offset);
*o_old = *o_begin;
- ctx_private->tsc = lib_ring_buffer_clock_read(chan);
- if ((int64_t) ctx_private->tsc == -EIO)
+ ctx_private->timestamp = lib_ring_buffer_clock_read(chan);
+ if ((int64_t) ctx_private->timestamp == -EIO)
return 1;
/*
*/
//prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
- if (last_tsc_overflow(config, buf, ctx_private->tsc))
- ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+ if (last_timestamp_overflow(config, buf, ctx_private->timestamp))
+ ctx_private->rflags |= RING_BUFFER_RFLAG_FULL_TIMESTAMP;
if (caa_unlikely(subbuf_offset(*o_begin, chan) == 0))
return 1;
ctx_private->slot_size = record_header_size(config, chan, *o_begin,
before_hdr_pad, ctx, client_ctx);
ctx_private->slot_size +=
- lttng_ust_lib_ring_buffer_align(*o_begin + ctx_private->slot_size,
+ lttng_ust_ring_buffer_align(*o_begin + ctx_private->slot_size,
ctx->largest_align) + ctx->data_size;
if (caa_unlikely((subbuf_offset(*o_begin, chan) + ctx_private->slot_size)
> chan->backend.subbuf_size))
* @ctx: ring buffer context (input and output). Must already be initialized.
*
* Atomic wait-free slot reservation. The reserved space starts at the context
- * "pre_offset". Its length is "slot_size". The associated time-stamp is "tsc".
+ * "pre_offset". Its length is "slot_size". The associated time-stamp is
+ * "timestamp".
*
* Return :
* 0 on success.
*/
static inline
-int lib_ring_buffer_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
- struct lttng_ust_lib_ring_buffer_ctx *ctx,
+int lib_ring_buffer_reserve(const struct lttng_ust_ring_buffer_config *config,
+ struct lttng_ust_ring_buffer_ctx *ctx,
void *client_ctx)
{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
+ struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_ring_buffer_channel *chan = ctx_private->chan;
struct lttng_ust_shm_handle *handle = chan->handle;
- struct lttng_ust_lib_ring_buffer *buf;
+ struct lttng_ust_ring_buffer *buf;
unsigned long o_begin, o_end, o_old;
size_t before_hdr_pad = 0;
goto slow_path;
/*
- * Atomically update last_tsc. This update races against concurrent
- * atomic updates, but the race will always cause supplementary full TSC
- * record headers, never the opposite (missing a full TSC record header
- * when it would be needed).
+ * Atomically update last_timestamp. This update races against concurrent
+ * atomic updates, but the race will always cause supplementary full
+ * timestamp record headers, never the opposite (missing a full
+ * timestamp record header when it would be needed).
*/
- save_last_tsc(config, buf, ctx_private->tsc);
+ save_last_timestamp(config, buf, ctx_private->timestamp);
/*
* Push the reader if necessary
*/
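+/*
+ * A hedged sketch of the fast-path pairing (hypothetical caller; the
+ * payload write helpers between reserve and commit are elided):
+ *
+ *	if (lib_ring_buffer_reserve(config, ctx, client_ctx))
+ *		return;			// reservation failed (negative errno)
+ *	// write the record header and payload into the reserved slot
+ *	lib_ring_buffer_commit(config, ctx);
+ */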
static inline
void lib_ring_buffer_switch(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer *buf, enum switch_mode mode,
struct lttng_ust_shm_handle *handle)
{
lib_ring_buffer_switch_slow(buf, mode, handle);
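+/*
+ * Sketch (hypothetical use): force the current sub-buffer to be handed to
+ * the consumer, e.g. when flushing a stream:
+ *
+ *	lib_ring_buffer_switch(config, buf, SWITCH_FLUSH, handle);
+ */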
* specified sub-buffer, and delivers it if necessary.
*/
static inline
-void lib_ring_buffer_commit(const struct lttng_ust_lib_ring_buffer_config *config,
- const struct lttng_ust_lib_ring_buffer_ctx *ctx)
+void lib_ring_buffer_commit(const struct lttng_ust_ring_buffer_config *config,
+ const struct lttng_ust_ring_buffer_ctx *ctx)
{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct lttng_ust_lib_ring_buffer_channel *chan = ctx_private->chan;
+ struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_ring_buffer_channel *chan = ctx_private->chan;
struct lttng_ust_shm_handle *handle = chan->handle;
- struct lttng_ust_lib_ring_buffer *buf = ctx_private->buf;
+ struct lttng_ust_ring_buffer *buf = ctx_private->buf;
unsigned long offset_end = ctx_private->buf_offset;
unsigned long endidx = subbuf_index(offset_end - 1, chan);
unsigned long commit_count;
/*
* Must count record before incrementing the commit count.
*/
- subbuffer_count_record(config, ctx, &buf->backend, endidx, handle);
+ subbuffer_count_record(config, ctx);
/*
* Order all writes to buffer before the commit count update that will
commit_count = v_read(config, &cc_hot->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
- commit_count, endidx, handle, ctx_private->tsc);
+ commit_count, endidx, handle, ctx);
/*
* Update used size at each commit. It's needed only for extracting
* ring_buffer buffers from vmcore, after crash.
* Returns 0 upon success, -EPERM if the record cannot be discarded.
*/
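+/*
+ * A minimal sketch (hypothetical caller): take back a just-reserved record,
+ * e.g. when a late check rejects the event. If the discard is refused, the
+ * record must still be committed:
+ *
+ *	if (lib_ring_buffer_try_discard_reserve(config, ctx) < 0)
+ *		lib_ring_buffer_commit(config, ctx);	// discard refused
+ */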
static inline
-int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_lib_ring_buffer_config *config,
- const struct lttng_ust_lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_try_discard_reserve(const struct lttng_ust_ring_buffer_config *config,
+ const struct lttng_ust_ring_buffer_ctx *ctx)
{
- struct lttng_ust_lib_ring_buffer_ctx_private *ctx_private = ctx->priv;
- struct lttng_ust_lib_ring_buffer *buf = ctx_private->buf;
+ struct lttng_ust_ring_buffer_ctx_private *ctx_private = ctx->priv;
+ struct lttng_ust_ring_buffer *buf = ctx_private->buf;
unsigned long end_offset = ctx_private->pre_offset + ctx_private->slot_size;
/*
* We need to ensure that if the cmpxchg succeeds and discards the
- * record, the next record will record a full TSC, because it cannot
- * rely on the last_tsc associated with the discarded record to detect
- * overflows. The only way to ensure this is to set the last_tsc to 0
- * (assuming no 64-bit TSC overflow), which forces to write a 64-bit
+ * record, the next record will contain a full timestamp, because it cannot
+ * rely on the last_timestamp associated with the discarded record to detect
+ * overflows. The only way to ensure this is to set the last_timestamp to 0
+ * (assuming no 64-bit timestamp overflow), which forces writing a full 64-bit
* timestamp in the next record.
*
- * Note: if discard fails, we must leave the TSC in the record header.
- * It is needed to keep track of TSC overflows for the following
+ * Note: if discard fails, we must leave the timestamp in the record header.
+ * It is needed to keep track of timestamp overflows for the following
* records.
*/
- save_last_tsc(config, buf, 0ULL);
+ save_last_timestamp(config, buf, 0ULL);
if (caa_likely(v_cmpxchg(config, &buf->offset, end_offset, ctx_private->pre_offset)
!= end_offset))
static inline
void channel_record_disable(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan)
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer_channel *chan)
{
uatomic_inc(&chan->record_disabled);
}
static inline
void channel_record_enable(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer_channel *chan)
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer_channel *chan)
{
uatomic_dec(&chan->record_disabled);
}
static inline
void lib_ring_buffer_record_disable(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer *buf)
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer *buf)
{
uatomic_inc(&buf->record_disabled);
}
static inline
void lib_ring_buffer_record_enable(
- const struct lttng_ust_lib_ring_buffer_config *config __attribute__((unused)),
- struct lttng_ust_lib_ring_buffer *buf)
+ const struct lttng_ust_ring_buffer_config *config __attribute__((unused)),
+ struct lttng_ust_ring_buffer *buf)
{
uatomic_dec(&buf->record_disabled);
}
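+/*
+ * Example (hypothetical): temporarily block new reservations while
+ * reconfiguring. The disable state is a counter, so disable/enable pairs
+ * may nest:
+ *
+ *	channel_record_disable(config, chan);
+ *	// ... reservations on this channel now fail while disabled ...
+ *	channel_record_enable(config, chan);
+ *
+ * The per-buffer variants behave the same for a single ring buffer.
+ */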