Allow context length calculation to have side-effects (e.g. page faults)
which trigger event tracing by moving the calculation outside of the
buffer space reservation retry loop.
This also paves the way to have dynamically sized contexts, which
would expect to put their size on the internal stack. Note that the
context length calculation is performed *after* the event payload field
length calculation, so the stack needs to be used accordingly.
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
size_t (*record_header_size) (const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lib_ring_buffer_ctx *ctx);
+ struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx);
/* Slow path only, at subbuffer switch */
size_t (*subbuffer_header_size) (void);
/* Slow path only, at subbuffer switch */
size_t (*subbuffer_header_size) (void);
static inline
int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_ctx *ctx,
static inline
int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer_ctx *ctx,
unsigned long *o_begin, unsigned long *o_end,
unsigned long *o_old, size_t *before_hdr_pad)
{
unsigned long *o_begin, unsigned long *o_end,
unsigned long *o_old, size_t *before_hdr_pad)
{
return 1;
ctx->slot_size = record_header_size(config, chan, *o_begin,
return 1;
ctx->slot_size = record_header_size(config, chan, *o_begin,
+ before_hdr_pad, ctx, client_ctx);
ctx->slot_size +=
lib_ring_buffer_align(*o_begin + ctx->slot_size,
ctx->largest_align) + ctx->data_size;
ctx->slot_size +=
lib_ring_buffer_align(*o_begin + ctx->slot_size,
ctx->largest_align) + ctx->data_size;
static inline
int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
static inline
int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer_ctx *ctx)
+ struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
{
struct channel *chan = ctx->chan;
struct lib_ring_buffer *buf;
{
struct channel *chan = ctx->chan;
struct lib_ring_buffer *buf;
/*
* Perform retryable operations.
*/
/*
* Perform retryable operations.
*/
- if (unlikely(lib_ring_buffer_try_reserve(config, ctx, &o_begin,
+ if (unlikely(lib_ring_buffer_try_reserve(config, ctx, client_ctx, &o_begin,
&o_end, &o_old, &before_hdr_pad)))
goto slow_path;
&o_end, &o_old, &before_hdr_pad)))
goto slow_path;
ctx->buf_offset = o_begin + before_hdr_pad;
return 0;
slow_path:
ctx->buf_offset = o_begin + before_hdr_pad;
return 0;
slow_path:
- return lib_ring_buffer_reserve_slow(ctx);
+ return lib_ring_buffer_reserve_slow(ctx, client_ctx);
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx);
+int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx);
extern
void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
extern
void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
- struct lib_ring_buffer_ctx *ctx)
+ struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long reserve_commit_diff, offset_cmp;
{
const struct lib_ring_buffer_config *config = &chan->backend.config;
unsigned long reserve_commit_diff, offset_cmp;
offsets->size = config->cb.record_header_size(config, chan,
offsets->begin,
&offsets->pre_header_padding,
offsets->size = config->cb.record_header_size(config, chan,
offsets->begin,
&offsets->pre_header_padding,
offsets->size +=
lib_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
offsets->size +=
lib_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
config->cb.record_header_size(config, chan,
offsets->begin,
&offsets->pre_header_padding,
config->cb.record_header_size(config, chan,
offsets->begin,
&offsets->pre_header_padding,
offsets->size +=
lib_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
offsets->size +=
lib_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
* -EIO for other errors, else returns 0.
* It will take care of sub-buffer switching.
*/
* -EIO for other errors, else returns 0.
* It will take care of sub-buffer switching.
*/
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
{
struct channel *chan = ctx->chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
{
struct channel *chan = ctx->chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
do {
ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
do {
ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
if (unlikely(ret))
return ret;
} while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
if (unlikely(ret))
return ret;
} while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
+struct lttng_client_ctx {
+ size_t packet_context_len;
+ size_t event_context_len;
+};
static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
{
static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
{
-size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
+size_t ctx_get_aligned_size(size_t offset, struct lttng_ctx *ctx,
+ size_t ctx_len)
size_t orig_offset = offset;
if (likely(!ctx))
return 0;
offset += lib_ring_buffer_align(offset, ctx->largest_align);
size_t orig_offset = offset;
if (likely(!ctx))
return 0;
offset += lib_ring_buffer_align(offset, ctx->largest_align);
+ offset += ctx_len;
+ return offset - orig_offset;
+}
+
+static inline
+void ctx_get_struct_size(struct lttng_ctx *ctx, size_t *ctx_len)
+{
+ int i;
+ size_t offset = 0;
+
+ if (likely(!ctx)) {
+ *ctx_len = 0;
+ return;
+ }
for (i = 0; i < ctx->nr_fields; i++)
offset += ctx->fields[i].get_size(offset);
for (i = 0; i < ctx->nr_fields; i++)
offset += ctx->fields[i].get_size(offset);
- return offset - orig_offset;
size_t record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
size_t record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lib_ring_buffer_ctx *ctx)
+ struct lib_ring_buffer_ctx *ctx,
+ struct lttng_client_ctx *client_ctx)
{
struct lttng_channel *lttng_chan = channel_get_private(chan);
struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
{
struct lttng_channel *lttng_chan = channel_get_private(chan);
struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
padding = 0;
WARN_ON_ONCE(1);
}
padding = 0;
WARN_ON_ONCE(1);
}
- offset += ctx_get_size(offset, lttng_chan->ctx);
- offset += ctx_get_size(offset, event->ctx);
+ offset += ctx_get_aligned_size(offset, lttng_chan->ctx,
+ client_ctx->packet_context_len);
+ offset += ctx_get_aligned_size(offset, event->ctx,
+ client_ctx->event_context_len);
*pre_header_padding = padding;
return offset - orig_offset;
*pre_header_padding = padding;
return offset - orig_offset;
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lib_ring_buffer_ctx *ctx)
+ struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
{
return record_header_size(config, chan, offset,
{
return record_header_size(config, chan, offset,
- pre_header_padding, ctx);
+ pre_header_padding, ctx, client_ctx);
uint32_t event_id)
{
struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
uint32_t event_id)
{
struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+ struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
+ struct lttng_event *event = lttng_probe_ctx->event;
+ struct lttng_client_ctx client_ctx;
+ /* Compute internal size of context structures. */
+ ctx_get_struct_size(lttng_chan->ctx, &client_ctx.packet_context_len);
+ ctx_get_struct_size(event->ctx, &client_ctx.event_context_len);
+
cpu = lib_ring_buffer_get_cpu(&client_config);
if (unlikely(cpu < 0))
return -EPERM;
cpu = lib_ring_buffer_get_cpu(&client_config);
if (unlikely(cpu < 0))
return -EPERM;
- ret = lib_ring_buffer_reserve(&client_config, ctx);
+ ret = lib_ring_buffer_reserve(&client_config, ctx, &client_ctx);
if (unlikely(ret))
goto put;
lib_ring_buffer_backend_get_pages(&client_config, ctx,
if (unlikely(ret))
goto put;
lib_ring_buffer_backend_get_pages(&client_config, ctx,
size_t record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
size_t record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lib_ring_buffer_ctx *ctx)
+ struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lib_ring_buffer_ctx *ctx)
+ struct lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
- ret = lib_ring_buffer_reserve(&client_config, ctx);
+ ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
if (ret)
return ret;
lib_ring_buffer_backend_get_pages(&client_config, ctx,
if (ret)
return ret;
lib_ring_buffer_backend_get_pages(&client_config, ctx,