#define LTTNG_COMPACT_EVENT_BITS 5
#define LTTNG_COMPACT_TSC_BITS 27
+/*
+ * Whether application-provided ("app") context fields may be traced.
+ * Pre-2.8 probe ABI callers pass APP_CTX_DISABLED: app contexts are
+ * then sized/recorded with dummy handlers instead of their own
+ * callbacks (see ctx_get_struct_size() and ctx_record()).
+ */
+enum app_ctx_mode {
+ APP_CTX_DISABLED,
+ APP_CTX_ENABLED,
+};
+
/*
* Keep the natural field alignment for _each field_ within this structure if
* you ever add/remove a field from this header. Packed attribute is not used
} ctx;
};
+/*
+ * Per-event-reserve scratch state: context sizes pre-computed once by
+ * ctx_get_struct_size() (packet/channel context and event context, in
+ * bytes, excluding the leading largest_align padding) so the header
+ * size and record paths agree on the same lengths.
+ */
+struct lttng_client_ctx {
+ size_t packet_context_len;
+ size_t event_context_len;
+};
static inline uint64_t lib_ring_buffer_clock_read(struct channel *chan)
{
}
+/*
+ * Return the number of bytes ctx occupies when laid out starting at
+ * offset: padding to reach ctx->largest_align plus the pre-computed
+ * field length ctx_len.  Returns 0 for a NULL ctx.  The per-field
+ * summation formerly done here now lives in ctx_get_struct_size(),
+ * which supplies ctx_len.
+ */
static inline
-size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
+size_t ctx_get_aligned_size(size_t offset, struct lttng_ctx *ctx,
+ size_t ctx_len)
{
- int i;
size_t orig_offset = offset;
if (caa_likely(!ctx))
return 0;
offset += lib_ring_buffer_align(offset, ctx->largest_align);
- for (i = 0; i < ctx->nr_fields; i++)
- offset += ctx->fields[i].get_size(&ctx->fields[i], offset);
+ offset += ctx_len;
return offset - orig_offset;
}
+/*
+ * Compute the summed size of all fields of ctx (each field's get_size()
+ * applied at its running offset) and store it in *ctx_len; *ctx_len is
+ * set to 0 for a NULL ctx.  In APP_CTX_DISABLED mode, application
+ * context fields are sized with lttng_ust_dummy_get_size() so the size
+ * stays consistent with the dummy record path in ctx_record().
+ */
+static inline
+void ctx_get_struct_size(struct lttng_ctx *ctx, size_t *ctx_len,
+ enum app_ctx_mode mode)
+{
+ int i;
+ size_t offset = 0;
+
+ if (caa_likely(!ctx)) {
+ *ctx_len = 0;
+ return;
+ }
+ for (i = 0; i < ctx->nr_fields; i++) {
+ if (mode == APP_CTX_ENABLED) {
+ offset += ctx->fields[i].get_size(&ctx->fields[i], offset);
+ } else {
+ if (lttng_context_is_app(ctx->fields[i].event_field.name)) {
+ /*
+ * Before UST 2.8, we cannot use the
+ * application context, because we
+ * cannot trust that the handler used
+ * for get_size is the same used for
+ * ctx_record, which would result in
+ * corrupted traces when tracing
+ * concurrently with application context
+ * register/unregister.
+ */
+ offset += lttng_ust_dummy_get_size(&ctx->fields[i], offset);
+ } else {
+ offset += ctx->fields[i].get_size(&ctx->fields[i], offset);
+ }
+ }
+ }
+ *ctx_len = offset;
+}
+
+
+/*
+ * Serialize every field of ctx into the reserved ring-buffer slot
+ * (bufctx), after aligning on ctx->largest_align; no-op for a NULL ctx.
+ * In APP_CTX_DISABLED mode, application context fields are written with
+ * lttng_ust_dummy_record() — this must mirror the sizing decision made
+ * in ctx_get_struct_size() or the trace layout would be corrupted.
+ */
static inline
void ctx_record(struct lttng_ust_lib_ring_buffer_ctx *bufctx,
struct lttng_channel *chan,
- struct lttng_ctx *ctx)
+ struct lttng_ctx *ctx,
+ enum app_ctx_mode mode)
{
int i;
if (caa_likely(!ctx))
return;
lib_ring_buffer_align_ctx(bufctx, ctx->largest_align);
- for (i = 0; i < ctx->nr_fields; i++)
- ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
+ for (i = 0; i < ctx->nr_fields; i++) {
+ if (mode == APP_CTX_ENABLED) {
+ ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
+ } else {
+ if (lttng_context_is_app(ctx->fields[i].event_field.name)) {
+ /*
+ * Before UST 2.8, we cannot use the
+ * application context, because we
+ * cannot trust that the handler used
+ * for get_size is the same used for
+ * ctx_record, which would result in
+ * corrupted traces when tracing
+ * concurrently with application context
+ * register/unregister.
+ */
+ lttng_ust_dummy_record(&ctx->fields[i], bufctx, chan);
+ } else {
+ ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
+ }
+ }
+ }
}
/*
size_t record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lttng_ust_lib_ring_buffer_ctx *ctx)
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ struct lttng_client_ctx *client_ctx)
{
struct lttng_channel *lttng_chan = channel_get_private(chan);
struct lttng_event *event = ctx->priv;
+ struct lttng_stack_ctx *lttng_ctx = ctx->priv2;
size_t orig_offset = offset;
size_t padding;
padding = 0;
WARN_ON_ONCE(1);
}
- offset += ctx_get_size(offset, event->ctx);
- offset += ctx_get_size(offset, lttng_chan->ctx);
-
+ if (lttng_ctx) {
+ /* 2.8+ probe ABI. */
+ offset += ctx_get_aligned_size(offset, lttng_ctx->chan_ctx,
+ client_ctx->packet_context_len);
+ offset += ctx_get_aligned_size(offset, lttng_ctx->event_ctx,
+ client_ctx->event_context_len);
+ } else {
+ /* Pre 2.8 probe ABI. */
+ offset += ctx_get_aligned_size(offset, lttng_chan->ctx,
+ client_ctx->packet_context_len);
+ offset += ctx_get_aligned_size(offset, event->ctx,
+ client_ctx->event_context_len);
+ }
*pre_header_padding = padding;
return offset - orig_offset;
}
uint32_t event_id)
{
struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+ struct lttng_event *event = ctx->priv;
struct lttng_stack_ctx *lttng_ctx = ctx->priv2;
if (caa_unlikely(ctx->rflags))
WARN_ON_ONCE(1);
}
- ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx);
- ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx);
+ if (lttng_ctx) {
+ /* 2.8+ probe ABI. */
+ ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
+ ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx, APP_CTX_ENABLED);
+ } else {
+ /* Pre 2.8 probe ABI. */
+ ctx_record(ctx, lttng_chan, lttng_chan->ctx, APP_CTX_DISABLED);
+ ctx_record(ctx, lttng_chan, event->ctx, APP_CTX_DISABLED);
+ }
lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
return;
uint32_t event_id)
{
struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+ struct lttng_event *event = ctx->priv;
struct lttng_stack_ctx *lttng_ctx = ctx->priv2;
switch (lttng_chan->header_type) {
default:
WARN_ON_ONCE(1);
}
- ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx);
- ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx);
+ if (lttng_ctx) {
+ /* 2.8+ probe ABI. */
+ ctx_record(ctx, lttng_chan, lttng_ctx->chan_ctx, APP_CTX_ENABLED);
+ ctx_record(ctx, lttng_chan, lttng_ctx->event_ctx, APP_CTX_ENABLED);
+ } else {
+ /* Pre 2.8 probe ABI. */
+ ctx_record(ctx, lttng_chan, lttng_chan->ctx, APP_CTX_DISABLED);
+ ctx_record(ctx, lttng_chan, event->ctx, APP_CTX_DISABLED);
+ }
lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}
size_t client_record_header_size(const struct lttng_ust_lib_ring_buffer_config *config,
struct channel *chan, size_t offset,
size_t *pre_header_padding,
- struct lttng_ust_lib_ring_buffer_ctx *ctx)
+ struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx)
{
return record_header_size(config, chan, offset,
- pre_header_padding, ctx);
+ pre_header_padding, ctx, client_ctx);
}
/**
return 0;
}
+/*
+ * Ring-buffer client callback: fetch the stream instance id of buf's
+ * current packet header into *id.  Always returns 0 (success).
+ * NOTE(review): assumes client_packet_header() never returns NULL here
+ * — confirm against its definition.
+ */
+static int client_instance_id(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle,
+ uint64_t *id)
+{
+ struct packet_header *header;
+
+ header = client_packet_header(buf, handle);
+ *id = header->stream_instance_id;
+ return 0;
+}
+
+
static const
struct lttng_ust_client_lib_ring_buffer_client_cb client_cb = {
.parent = {
.stream_id = client_stream_id,
.current_timestamp = client_current_timestamp,
.sequence_number = client_sequence_number,
+ .instance_id = client_instance_id,
};
static const struct lttng_ust_lib_ring_buffer_config client_config = {
unsigned int read_timer_interval,
unsigned char *uuid,
uint32_t chan_id,
- const int *stream_fds, int nr_stream_fds)
+ const int *stream_fds, int nr_stream_fds,
+ int64_t blocking_timeout)
{
struct lttng_channel chan_priv_init;
struct lttng_ust_shm_handle *handle;
&chan_priv_init,
buf_addr, subbuf_size, num_subbuf,
switch_timer_interval, read_timer_interval,
- stream_fds, nr_stream_fds);
+ stream_fds, nr_stream_fds, blocking_timeout);
if (!handle)
return NULL;
lttng_chan = priv;
uint32_t event_id)
{
struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+ struct lttng_event *event = ctx->priv;
+ struct lttng_stack_ctx *lttng_ctx = ctx->priv2;
+ struct lttng_client_ctx client_ctx;
int ret, cpu;
+ /* Compute internal size of context structures. */
+
+ if (lttng_ctx) {
+ /* 2.8+ probe ABI. */
+ ctx_get_struct_size(lttng_ctx->chan_ctx, &client_ctx.packet_context_len,
+ APP_CTX_ENABLED);
+ ctx_get_struct_size(lttng_ctx->event_ctx, &client_ctx.event_context_len,
+ APP_CTX_ENABLED);
+ } else {
+ /* Pre 2.8 probe ABI. */
+ ctx_get_struct_size(lttng_chan->ctx, &client_ctx.packet_context_len,
+ APP_CTX_DISABLED);
+ ctx_get_struct_size(event->ctx, &client_ctx.event_context_len,
+ APP_CTX_DISABLED);
+ }
+
cpu = lib_ring_buffer_get_cpu(&client_config);
if (cpu < 0)
return -EPERM;
WARN_ON_ONCE(1);
}
- ret = lib_ring_buffer_reserve(&client_config, ctx);
- if (ret)
+ ret = lib_ring_buffer_reserve(&client_config, ctx, &client_ctx);
+ if (caa_unlikely(ret))
goto put;
+ if (caa_likely(ctx->ctx_len
+ >= sizeof(struct lttng_ust_lib_ring_buffer_ctx))) {
+ if (lib_ring_buffer_backend_get_pages(&client_config, ctx,
+ &ctx->backend_pages)) {
+ ret = -EPERM;
+ goto put;
+ }
+ }
lttng_write_event_header(&client_config, ctx, event_id);
return 0;
put: