From: Mathieu Desnoyers
Date: Wed, 28 Apr 2021 18:56:05 +0000 (-0400)
Subject: Refactoring: ring buffer context
X-Git-Tag: v2.13.0-rc2~66
X-Git-Url: https://git.lttng.org/?p=lttng-modules.git;a=commitdiff_plain;h=c7d9db61d9c4861b6f344af8f1471a42e00739a8

Refactoring: ring buffer context

Split the ring buffer context into:

- a public ring buffer context (initialized by the probe),
- a private context (initialized by the reserve callback).

Pass the event recorder rather than the channel as client_ctx for events
generated from instrumentation (calling the ring buffer client).

Signed-off-by: Mathieu Desnoyers
Change-Id: Iecd23f11c54da4ba58a97c46192f775a0a74bd85
---
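The user-visible API change, in short: probes no longer pass the channel and
a CPU number to the context initializer. A before/after sketch of a
probe-side call (argument shapes taken from the hunks below; surrounding
declarations elided):

/* Before: channel, payload size/alignment, cpu = -1, opaque private data. */
lib_ring_buffer_ctx_init(&ctx, chan->chan, sizeof(payload),
			 lttng_alignof(payload), -1, &lttng_probe_ctx);

/*
 * After: the event recorder is passed as typed client private data, and the
 * cpu argument is gone; the reserve callback picks the CPU and records it
 * in ctx.priv.reserve_cpu itself.
 */
lib_ring_buffer_ctx_init(&ctx, event_recorder, sizeof(payload),
			 lttng_alignof(payload), &lttng_probe_ctx);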
diff --git a/include/lttng/tracepoint-event-impl.h b/include/lttng/tracepoint-event-impl.h
index a8a438b2..dbb92b85 100644
--- a/include/lttng/tracepoint-event-impl.h
+++ b/include/lttng/tracepoint-event-impl.h
@@ -1092,8 +1092,8 @@ static void __event_probe__##_name(_data_proto)			\
 		goto __post;						\
 	}								\
 	__event_align = __event_get_align__##_name(_locvar_args);	\
-	lib_ring_buffer_ctx_init(&__ctx, __chan->chan, __event_len,	\
-				 __event_align, -1, &__lttng_probe_ctx); \
+	lib_ring_buffer_ctx_init(&__ctx, __event_recorder, __event_len, \
+				 __event_align, &__lttng_probe_ctx);	\
 	__ret = __chan->ops->event_reserve(&__ctx, __event_recorder->priv->id); \
 	if (__ret < 0)							\
 		goto __post;						\
diff --git a/include/ringbuffer/backend.h b/include/ringbuffer/backend.h
index a975c7ec..5ceb4f1a 100644
--- a/include/ringbuffer/backend.h
+++ b/include/ringbuffer/backend.h
@@ -75,10 +75,10 @@ void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
 		    struct lib_ring_buffer_ctx *ctx,
 		    const void *src, size_t len)
 {
-	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
-	struct channel_backend *chanb = &ctx->chan->backend;
+	struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+	struct channel_backend *chanb = &ctx->priv.chan->backend;
 	size_t index, pagecpy;
-	size_t offset = ctx->buf_offset;
+	size_t offset = ctx->priv.buf_offset;
 	struct lib_ring_buffer_backend_pages *backend_pages;
 
 	if (unlikely(!len))
@@ -95,7 +95,7 @@ void lib_ring_buffer_write(const struct lib_ring_buffer_config *config,
 			src, len);
 	else
 		_lib_ring_buffer_write(bufb, offset, src, len, 0);
-	ctx->buf_offset += len;
+	ctx->priv.buf_offset += len;
 }
 
 /**
@@ -116,10 +116,10 @@ void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
 		     struct lib_ring_buffer_ctx *ctx, int c, size_t len)
 {
-	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
-	struct channel_backend *chanb = &ctx->chan->backend;
+	struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+	struct channel_backend *chanb = &ctx->priv.chan->backend;
 	size_t index, pagecpy;
-	size_t offset = ctx->buf_offset;
+	size_t offset = ctx->priv.buf_offset;
 	struct lib_ring_buffer_backend_pages *backend_pages;
 
 	if (unlikely(!len))
@@ -135,7 +135,7 @@ void lib_ring_buffer_memset(const struct lib_ring_buffer_config *config,
 			c, len);
 	else
 		_lib_ring_buffer_memset(bufb, offset, c, len, 0);
-	ctx->buf_offset += len;
+	ctx->priv.buf_offset += len;
 }
 
 /*
@@ -213,10 +213,10 @@ void lib_ring_buffer_strcpy(const struct lib_ring_buffer_config *config,
 		    struct lib_ring_buffer_ctx *ctx,
 		    const char *src, size_t len, int pad)
 {
-	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
-	struct channel_backend *chanb = &ctx->chan->backend;
+	struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+	struct channel_backend *chanb = &ctx->priv.chan->backend;
 	size_t index, pagecpy;
-	size_t offset = ctx->buf_offset;
+	size_t offset = ctx->priv.buf_offset;
 	struct lib_ring_buffer_backend_pages *backend_pages;
 
 	if (unlikely(!len))
@@ -250,7 +250,7 @@ void lib_ring_buffer_strcpy(const struct lib_ring_buffer_config *config,
 	} else {
 		_lib_ring_buffer_strcpy(bufb, offset, src, len, 0, pad);
 	}
-	ctx->buf_offset += len;
+	ctx->priv.buf_offset += len;
 }
 
 /**
@@ -271,10 +271,10 @@ void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config
 		      struct lib_ring_buffer_ctx *ctx,
 		      const void __user *src, size_t len)
 {
-	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
-	struct channel_backend *chanb = &ctx->chan->backend;
+	struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+	struct channel_backend *chanb = &ctx->priv.chan->backend;
 	size_t index, pagecpy;
-	size_t offset = ctx->buf_offset;
+	size_t offset = ctx->priv.buf_offset;
 	struct lib_ring_buffer_backend_pages *backend_pages;
 	unsigned long ret;
 
@@ -302,7 +302,7 @@ void lib_ring_buffer_copy_from_user_inatomic(const struct lib_ring_buffer_config
 		_lib_ring_buffer_copy_from_user_inatomic(bufb, offset, src, len, 0);
 	}
 	pagefault_enable();
-	ctx->buf_offset += len;
+	ctx->priv.buf_offset += len;
 
 	return;
 
@@ -338,10 +338,10 @@ void lib_ring_buffer_strcpy_from_user_inatomic(const struct lib_ring_buffer_conf
 		struct lib_ring_buffer_ctx *ctx,
 		const void __user *src, size_t len, int pad)
 {
-	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
-	struct channel_backend *chanb = &ctx->chan->backend;
+	struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+	struct channel_backend *chanb = &ctx->priv.chan->backend;
 	size_t index, pagecpy;
-	size_t offset = ctx->buf_offset;
+	size_t offset = ctx->priv.buf_offset;
 	struct lib_ring_buffer_backend_pages *backend_pages;
 
 	if (unlikely(!len))
@@ -382,7 +382,7 @@ void lib_ring_buffer_strcpy_from_user_inatomic(const struct lib_ring_buffer_conf
 				len, 0, pad);
 	}
 	pagefault_enable();
-	ctx->buf_offset += len;
+	ctx->priv.buf_offset += len;
 
 	return;
 
diff --git a/include/ringbuffer/backend_internal.h b/include/ringbuffer/backend_internal.h
index fd24c674..8a93ab07 100644
--- a/include/ringbuffer/backend_internal.h
+++ b/include/ringbuffer/backend_internal.h
@@ -195,9 +195,9 @@ void lib_ring_buffer_backend_get_pages(const struct lib_ring_buffer_config *conf
 			struct lib_ring_buffer_ctx *ctx,
 			struct lib_ring_buffer_backend_pages **backend_pages)
 {
-	struct lib_ring_buffer_backend *bufb = &ctx->buf->backend;
-	struct channel_backend *chanb = &ctx->chan->backend;
-	size_t sbidx, offset = ctx->buf_offset;
+	struct lib_ring_buffer_backend *bufb = &ctx->priv.buf->backend;
+	struct channel_backend *chanb = &ctx->priv.chan->backend;
+	size_t sbidx, offset = ctx->priv.buf_offset;
 	unsigned long sb_bindex, id;
 	struct lib_ring_buffer_backend_pages *rpages;
 
@@ -206,7 +206,7 @@ void lib_ring_buffer_backend_get_pages(const struct lib_ring_buffer_config *conf
 	id = bufb->buf_wsb[sbidx].id;
 	sb_bindex = subbuffer_id_get_index(config, id);
 	rpages = bufb->array[sb_bindex];
-	CHAN_WARN_ON(ctx->chan,
+	CHAN_WARN_ON(ctx->priv.chan,
 		     config->mode == RING_BUFFER_OVERWRITE
 		     && subbuffer_id_is_noref(config, id));
 	*backend_pages = rpages;
@@ -218,7 +218,7 @@ struct lib_ring_buffer_backend_pages *
 lib_ring_buffer_get_backend_pages_from_ctx(const struct lib_ring_buffer_config *config,
 		struct lib_ring_buffer_ctx *ctx)
 {
-	return ctx->backend_pages;
+	return ctx->priv.backend_pages;
 }
 
 /*
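All of the write-side helpers above now keep their write cursor in
ctx->priv.buf_offset rather than in a public field; callers are unaffected
as long as they go through the API. A minimal serialization sketch using
those helpers (the two payload fields are hypothetical):

static void write_sample(const struct lib_ring_buffer_config *config,
			 struct lib_ring_buffer_ctx *ctx,
			 uint32_t seq, uint64_t ts)
{
	/* Each call advances ctx->priv.buf_offset internally. */
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(seq));
	lib_ring_buffer_write(config, ctx, &seq, sizeof(seq));
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(ts));
	lib_ring_buffer_write(config, ctx, &ts, sizeof(ts));
}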
diff --git a/include/ringbuffer/config.h b/include/ringbuffer/config.h
index 2019e14c..bc638f94 100644
--- a/include/ringbuffer/config.h
+++ b/include/ringbuffer/config.h
@@ -20,6 +20,7 @@ struct lib_ring_buffer;
 struct channel;
 struct lib_ring_buffer_config;
 struct lib_ring_buffer_ctx;
+struct lttng_kernel_ring_buffer_ctx_private;
 
 /*
  * Ring buffer client callbacks. Only used by slow path, never on fast path.
@@ -156,6 +157,40 @@ struct lib_ring_buffer_config {
 	struct lib_ring_buffer_client_cb cb;
 };
 
+/*
+ * ring buffer private context
+ *
+ * Private context passed to lib_ring_buffer_reserve(), lib_ring_buffer_commit(),
+ * lib_ring_buffer_try_discard_reserve(), lib_ring_buffer_align_ctx() and
+ * lib_ring_buffer_write().
+ *
+ * Get struct lttng_kernel_ring_buffer_ctx parent with container_of().
+ */
+
+struct lttng_kernel_ring_buffer_ctx_private {
+	/* input received by lib_ring_buffer_reserve(). */
+	struct channel *chan;		/* ring buffer channel */
+
+	/* output from lib_ring_buffer_reserve() */
+	int reserve_cpu;		/* processor id updated by the reserve */
+	size_t slot_size;		/* size of the reserved slot */
+	unsigned long buf_offset;	/* offset following the record header */
+	unsigned long pre_offset;	/*
+					 * Initial offset position _before_
+					 * the record is written. Positioned
+					 * prior to record header alignment
+					 * padding.
+					 */
+	u64 tsc;			/* time-stamp counter value */
+	unsigned int rflags;		/* reservation flags */
+
+	struct lib_ring_buffer *buf;	/*
+					 * buffer corresponding to processor id
+					 * for this channel
+					 */
+	struct lib_ring_buffer_backend_pages *backend_pages;
+};
+
 /*
  * ring buffer context
  *
@@ -164,57 +199,37 @@ struct lib_ring_buffer_config {
  * lib_ring_buffer_write().
  */
 struct lib_ring_buffer_ctx {
+	/* Private ring buffer context, set by reserve callback. */
+	struct lttng_kernel_ring_buffer_ctx_private priv;
+
 	/* input received by lib_ring_buffer_reserve(), saved here. */
-	struct channel *chan;		/* channel */
-	void *priv;			/* client private data */
+	void *client_priv;		/* Ring buffer client private data */
+
 	size_t data_size;		/* size of payload */
 	int largest_align;		/*
 					 * alignment of the largest element
 					 * in the payload
 					 */
-	int cpu;			/* processor id */
-
-	/* output from lib_ring_buffer_reserve() */
-	struct lib_ring_buffer *buf;	/*
-					 * buffer corresponding to processor id
-					 * for this channel
-					 */
-	size_t slot_size;		/* size of the reserved slot */
-	unsigned long buf_offset;	/* offset following the record header */
-	unsigned long pre_offset;	/*
-					 * Initial offset position _before_
-					 * the record is written. Positioned
-					 * prior to record header alignment
-					 * padding.
-					 */
-	u64 tsc;			/* time-stamp counter value */
-	unsigned int rflags;		/* reservation flags */
-	/* Cache backend pages pointer chasing. */
-	struct lib_ring_buffer_backend_pages *backend_pages;
+	struct lttng_probe_ctx *probe_ctx;	/* Probe context */
 };
 
 /**
  * lib_ring_buffer_ctx_init - initialize ring buffer context
  * @ctx: ring buffer context to initialize
- * @chan: channel
- * @priv: client private data
+ * @client_priv: client private data
  * @data_size: size of record data payload. It must be greater than 0.
  * @largest_align: largest alignment within data payload types
- * @cpu: processor id
  */
 static inline
 void lib_ring_buffer_ctx_init(struct lib_ring_buffer_ctx *ctx,
-			      struct channel *chan,
+			      void *client_priv,
 			      size_t data_size,
 			      int largest_align,
-			      int cpu, void *priv)
+			      struct lttng_probe_ctx *probe_ctx)
 {
-	ctx->chan = chan;
-	ctx->priv = priv;
+	ctx->client_priv = client_priv;
 	ctx->data_size = data_size;
 	ctx->largest_align = largest_align;
-	ctx->cpu = cpu;
-	ctx->rflags = 0;
-	ctx->backend_pages = NULL;
+	ctx->probe_ctx = probe_ctx;
 }
 
 /*
@@ -282,7 +297,7 @@ static inline
 void lib_ring_buffer_align_ctx(struct lib_ring_buffer_ctx *ctx,
 			       size_t alignment)
 {
-	ctx->buf_offset += lib_ring_buffer_align(ctx->buf_offset,
+	ctx->priv.buf_offset += lib_ring_buffer_align(ctx->priv.buf_offset,
 						 alignment);
 }
 
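As the comment on the new structure notes, code holding only the private
part can recover the enclosing context with container_of(). A minimal
sketch (the helper name is hypothetical; note that at this point in the
series the public structure is still named struct lib_ring_buffer_ctx):

static inline
struct lib_ring_buffer_ctx *ctx_from_priv(struct lttng_kernel_ring_buffer_ctx_private *priv)
{
	/* 'priv' is the first member of struct lib_ring_buffer_ctx. */
	return container_of(priv, struct lib_ring_buffer_ctx, priv);
}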
diff --git a/include/ringbuffer/frontend_api.h b/include/ringbuffer/frontend_api.h
index 3fa6c82f..1444e60a 100644
--- a/include/ringbuffer/frontend_api.h
+++ b/include/ringbuffer/frontend_api.h
@@ -75,13 +75,13 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
 				unsigned long *o_begin, unsigned long *o_end,
 				unsigned long *o_old, size_t *before_hdr_pad)
 {
-	struct channel *chan = ctx->chan;
-	struct lib_ring_buffer *buf = ctx->buf;
+	struct channel *chan = ctx->priv.chan;
+	struct lib_ring_buffer *buf = ctx->priv.buf;
 
 	*o_begin = v_read(config, &buf->offset);
 	*o_old = *o_begin;
 
-	ctx->tsc = lib_ring_buffer_clock_read(chan);
-	if ((int64_t) ctx->tsc == -EIO)
+	ctx->priv.tsc = lib_ring_buffer_clock_read(chan);
+	if ((int64_t) ctx->priv.tsc == -EIO)
 		return 1;
 
 	/*
@@ -91,18 +91,18 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
 	 */
 	prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
 
-	if (last_tsc_overflow(config, buf, ctx->tsc))
-		ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+	if (last_tsc_overflow(config, buf, ctx->priv.tsc))
+		ctx->priv.rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 
 	if (unlikely(subbuf_offset(*o_begin, chan) == 0))
 		return 1;
 
-	ctx->slot_size = record_header_size(config, chan, *o_begin,
+	ctx->priv.slot_size = record_header_size(config, chan, *o_begin,
 					    before_hdr_pad, ctx, client_ctx);
-	ctx->slot_size +=
-		lib_ring_buffer_align(*o_begin + ctx->slot_size,
+	ctx->priv.slot_size +=
+		lib_ring_buffer_align(*o_begin + ctx->priv.slot_size,
 				      ctx->largest_align) + ctx->data_size;
-	if (unlikely((subbuf_offset(*o_begin, chan) + ctx->slot_size)
+	if (unlikely((subbuf_offset(*o_begin, chan) + ctx->priv.slot_size)
 		     > chan->backend.subbuf_size))
 		return 1;
 
@@ -110,7 +110,7 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
 	 * Record fits in the current buffer and we are not on a switch
	 * boundary. It's safe to write.
 	 */
-	*o_end = *o_begin + ctx->slot_size;
+	*o_end = *o_begin + ctx->priv.slot_size;
 
 	if (unlikely((subbuf_offset(*o_end, chan)) == 0))
 		/*
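The slot-size computation above lays a reserved slot out as header-alignment
padding, record header, payload-alignment padding, then payload.
Schematically (same quantities as in lib_ring_buffer_try_reserve(); this
diagram is an interpretation, not part of the patch):

/*
 * o_begin                                              o_begin + slot_size
 * |<-before_hdr_pad->|<-header->|<-pad(largest_align)->|<---data_size--->|
 *
 * slot_size  = record_header_size(config, chan, o_begin,
 *                                 &before_hdr_pad, ctx, client_ctx);
 * slot_size += lib_ring_buffer_align(o_begin + slot_size, largest_align)
 *            + data_size;
 */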
@@ -143,7 +143,7 @@ int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
 			    struct lib_ring_buffer_ctx *ctx,
 			    void *client_ctx)
 {
-	struct channel *chan = ctx->chan;
+	struct channel *chan = ctx->priv.chan;
 	struct lib_ring_buffer *buf;
 	unsigned long o_begin, o_end, o_old;
 	size_t before_hdr_pad = 0;
@@ -152,12 +152,12 @@ int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
 		return -EAGAIN;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-		buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
+		buf = per_cpu_ptr(chan->backend.buf, ctx->priv.reserve_cpu);
 	else
 		buf = chan->backend.buf;
 	if (unlikely(atomic_read(&buf->record_disabled)))
 		return -EAGAIN;
-	ctx->buf = buf;
+	ctx->priv.buf = buf;
 
 	/*
 	 * Perform retryable operations.
@@ -166,7 +166,7 @@ int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
 				 &o_end, &o_old, &before_hdr_pad)))
 		goto slow_path;
 
-	if (unlikely(v_cmpxchg(config, &ctx->buf->offset, o_old, o_end)
+	if (unlikely(v_cmpxchg(config, &ctx->priv.buf->offset, o_old, o_end)
 		     != o_old))
 		goto slow_path;
 
@@ -176,21 +176,21 @@ int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
 	 * record headers, never the opposite (missing a full TSC record header
 	 * when it would be needed).
 	 */
-	save_last_tsc(config, ctx->buf, ctx->tsc);
+	save_last_tsc(config, ctx->priv.buf, ctx->priv.tsc);
 
 	/*
 	 * Push the reader if necessary
 	 */
-	lib_ring_buffer_reserve_push_reader(ctx->buf, chan, o_end - 1);
+	lib_ring_buffer_reserve_push_reader(ctx->priv.buf, chan, o_end - 1);
 
 	/*
 	 * Clear noref flag for this subbuffer.
 	 */
-	lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
+	lib_ring_buffer_clear_noref(config, &ctx->priv.buf->backend,
 				    subbuf_index(o_end - 1, chan));
 
-	ctx->pre_offset = o_begin;
-	ctx->buf_offset = o_begin + before_hdr_pad;
+	ctx->priv.pre_offset = o_begin;
+	ctx->priv.buf_offset = o_begin + before_hdr_pad;
 	return 0;
 slow_path:
 	return lib_ring_buffer_reserve_slow(ctx, client_ctx);
@@ -231,9 +231,9 @@ static inline
 void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
 			    const struct lib_ring_buffer_ctx *ctx)
 {
-	struct channel *chan = ctx->chan;
-	struct lib_ring_buffer *buf = ctx->buf;
-	unsigned long offset_end = ctx->buf_offset;
+	struct channel *chan = ctx->priv.chan;
+	struct lib_ring_buffer *buf = ctx->priv.buf;
+	unsigned long offset_end = ctx->priv.buf_offset;
 	unsigned long endidx = subbuf_index(offset_end - 1, chan);
 	unsigned long commit_count;
 	struct commit_counters_hot *cc_hot = &buf->commit_hot[endidx];
@@ -257,7 +257,7 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
 	} else
 		smp_wmb();
 
-	v_add(config, ctx->slot_size, &cc_hot->cc);
+	v_add(config, ctx->priv.slot_size, &cc_hot->cc);
 
 	/*
 	 * commit count read can race with concurrent OOO commit count updates.
@@ -280,7 +280,7 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
 	commit_count = v_read(config, &cc_hot->cc);
 
 	lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
-				      commit_count, endidx, ctx->tsc);
+				      commit_count, endidx, ctx->priv.tsc);
 	/*
 	 * Update used size at each commit. It's needed only for extracting
 	 * ring_buffer buffers from vmcore, after crash.
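End to end, the fast path seen from a ring buffer client keeps the same
shape; only where the bookkeeping lives has moved. A sketch of a full record
emission, modeled on the probe handlers later in this patch (error handling
shortened):

struct lib_ring_buffer_ctx ctx;
int ret;

lib_ring_buffer_ctx_init(&ctx, event_recorder, sizeof(payload),
			 lttng_alignof(payload), &lttng_probe_ctx);
ret = chan->ops->event_reserve(&ctx, event_recorder->priv->id);
if (ret < 0)
	return;		/* record dropped (buffer full or disabled) */
lib_ring_buffer_align_ctx(&ctx, lttng_alignof(payload));
chan->ops->event_write(&ctx, &payload, sizeof(payload));
chan->ops->event_commit(&ctx);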
@@ -303,8 +303,8 @@ static inline
 int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *config,
 					const struct lib_ring_buffer_ctx *ctx)
 {
-	struct lib_ring_buffer *buf = ctx->buf;
-	unsigned long end_offset = ctx->pre_offset + ctx->slot_size;
+	struct lib_ring_buffer *buf = ctx->priv.buf;
+	unsigned long end_offset = ctx->priv.pre_offset + ctx->priv.slot_size;
 
 	/*
 	 * We need to ensure that if the cmpxchg succeeds and discards the
@@ -320,7 +320,7 @@ int lib_ring_buffer_try_discard_reserve(const struct lib_ring_buffer_config *con
 	 */
 	save_last_tsc(config, buf, 0ULL);
 
-	if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->pre_offset)
+	if (likely(v_cmpxchg(config, &buf->offset, end_offset, ctx->priv.pre_offset)
 		   != end_offset))
 		return -EPERM;
 	else
diff --git a/src/lib/ringbuffer/ring_buffer_frontend.c b/src/lib/ringbuffer/ring_buffer_frontend.c
index 38ba05d9..d9e64dff 100644
--- a/src/lib/ringbuffer/ring_buffer_frontend.c
+++ b/src/lib/ringbuffer/ring_buffer_frontend.c
@@ -2011,14 +2011,14 @@ retry:
 	offsets->switch_old_end = 0;
 	offsets->pre_header_padding = 0;
 
-	ctx->tsc = config->cb.ring_buffer_clock_read(chan);
-	if ((int64_t) ctx->tsc == -EIO)
+	ctx->priv.tsc = config->cb.ring_buffer_clock_read(chan);
+	if ((int64_t) ctx->priv.tsc == -EIO)
 		return -EIO;
 
-	if (last_tsc_overflow(config, buf, ctx->tsc))
-		ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
+	if (last_tsc_overflow(config, buf, ctx->priv.tsc))
+		ctx->priv.rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 
-	if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
+	if (unlikely(subbuf_offset(offsets->begin, ctx->priv.chan) == 0)) {
 		offsets->switch_new_start = 1;	/* For offsets->begin */
 	} else {
 		offsets->size = config->cb.record_header_size(config, chan,
@@ -2175,13 +2175,13 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_lost_event_too_big);
 int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
 		void *client_ctx)
 {
-	struct channel *chan = ctx->chan;
+	struct channel *chan = ctx->priv.chan;
 	const struct lib_ring_buffer_config *config = &chan->backend.config;
 	struct lib_ring_buffer *buf;
 	struct switch_offsets offsets;
 	int ret;
 
-	ctx->buf = buf = get_current_buf(chan, ctx->cpu);
+	ctx->priv.buf = buf = get_current_buf(chan, ctx->priv.reserve_cpu);
 	offsets.size = 0;
 
 	do {
@@ -2199,7 +2199,7 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
 	 * records, never the opposite (missing a full TSC record when it would
 	 * be needed).
 	 */
-	save_last_tsc(config, buf, ctx->tsc);
+	save_last_tsc(config, buf, ctx->priv.tsc);
 
 	/*
 	 * Push the reader if necessary
@@ -2218,21 +2218,21 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx,
 	if (unlikely(offsets.switch_old_end)) {
 		lib_ring_buffer_clear_noref(config, &buf->backend,
 					    subbuf_index(offsets.old - 1, chan));
-		lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->tsc);
+		lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->priv.tsc);
 	}
 
 	/*
 	 * Populate new subbuffer.
 	 */
 	if (unlikely(offsets.switch_new_start))
-		lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc);
+		lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->priv.tsc);
 
 	if (unlikely(offsets.switch_new_end))
-		lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc);
+		lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->priv.tsc);
 
-	ctx->slot_size = offsets.size;
-	ctx->pre_offset = offsets.begin;
-	ctx->buf_offset = offsets.begin + offsets.pre_header_padding;
+	ctx->priv.slot_size = offsets.size;
+	ctx->priv.pre_offset = offsets.begin;
+	ctx->priv.buf_offset = offsets.begin + offsets.pre_header_padding;
 	return 0;
 }
 EXPORT_SYMBOL_GPL(lib_ring_buffer_reserve_slow);
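If a client fails between reserve and commit, it can roll the reservation
back; the discard path above now reads pre_offset and slot_size from the
private area. A sketch of that pattern, composed directly from the frontend
API (the serialize_payload() failure check is hypothetical):

ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
if (ret)
	return ret;
if (serialize_payload(ctx))	/* hypothetical mid-record failure */
	return lib_ring_buffer_try_discard_reserve(&client_config, ctx);
lib_ring_buffer_commit(&client_config, ctx);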
diff --git a/src/lttng-context-callstack-legacy-impl.h b/src/lttng-context-callstack-legacy-impl.h
index b74b966a..edd13cf5 100644
--- a/src/lttng-context-callstack-legacy-impl.h
+++ b/src/lttng-context-callstack-legacy-impl.h
@@ -107,7 +107,7 @@ struct stack_trace *stack_trace_context(struct lttng_kernel_ctx_field *field,
 	 * Do not gather the userspace callstack context when the event was
 	 * triggered by the userspace callstack context saving mechanism.
 	 */
-	cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);
+	cs_user_nesting = per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu);
 
 	if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
 		return NULL;
@@ -119,8 +119,8 @@ struct stack_trace *stack_trace_context(struct lttng_kernel_ctx_field *field,
 	 * max nesting is checked in lib_ring_buffer_get_cpu().
 	 * Check it again as a safety net.
 	 */
-	cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
-	buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
+	cs = per_cpu_ptr(fdata->cs_percpu, ctx->priv.reserve_cpu);
+	buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->priv.reserve_cpu) - 1;
 	if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
 		return NULL;
 
@@ -163,13 +163,13 @@ size_t lttng_callstack_sequence_get_size(size_t offset, struct lttng_kernel_ctx_
 	trace->nr_entries = 0;
 
 	if (fdata->mode == CALLSTACK_USER)
-		++per_cpu(callstack_user_nesting, ctx->cpu);
+		++per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu);
 
 	/* do the real work and reserve space */
 	cs_types[fdata->mode].save_func(trace);
 
 	if (fdata->mode == CALLSTACK_USER)
-		per_cpu(callstack_user_nesting, ctx->cpu)--;
+		per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu)--;
 
 	/*
 	 * Remove final ULONG_MAX delimiter. If we cannot find it, add
diff --git a/src/lttng-context-callstack-stackwalk-impl.h b/src/lttng-context-callstack-stackwalk-impl.h
index 7c452491..a7c5a062 100644
--- a/src/lttng-context-callstack-stackwalk-impl.h
+++ b/src/lttng-context-callstack-stackwalk-impl.h
@@ -109,7 +109,7 @@ struct lttng_stack_trace *stack_trace_context(struct lttng_kernel_ctx_field *fie
 	 * Do not gather the userspace callstack context when the event was
 	 * triggered by the userspace callstack context saving mechanism.
 	 */
-	cs_user_nesting = per_cpu(callstack_user_nesting, ctx->cpu);
+	cs_user_nesting = per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu);
 
 	if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
 		return NULL;
@@ -121,8 +121,8 @@ struct lttng_stack_trace *stack_trace_context(struct lttng_kernel_ctx_field *fie
 	 * max nesting is checked in lib_ring_buffer_get_cpu().
 	 * Check it again as a safety net.
 	 */
-	cs = per_cpu_ptr(fdata->cs_percpu, ctx->cpu);
-	buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->cpu) - 1;
+	cs = per_cpu_ptr(fdata->cs_percpu, ctx->priv.reserve_cpu);
+	buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->priv.reserve_cpu) - 1;
 	if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
 		return NULL;
 
@@ -171,11 +171,11 @@ size_t lttng_callstack_sequence_get_size(size_t offset, struct lttng_kernel_ctx_
 					MAX_ENTRIES, 0);
 		break;
 	case CALLSTACK_USER:
-		++per_cpu(callstack_user_nesting, ctx->cpu);
+		++per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu);
 		/* do the real work and reserve space */
 		trace->nr_entries = save_func_user(trace->entries,
 						MAX_ENTRIES);
-		per_cpu(callstack_user_nesting, ctx->cpu)--;
+		per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu)--;
 		break;
 	default:
 		WARN_ON_ONCE(1);
diff --git a/src/lttng-context-cpu-id.c b/src/lttng-context-cpu-id.c
index 47c4aa20..7ae79a64 100644
--- a/src/lttng-context-cpu-id.c
+++ b/src/lttng-context-cpu-id.c
@@ -33,7 +33,7 @@ void cpu_id_record(struct lttng_kernel_ctx_field *field,
 {
 	int cpu;
 
-	cpu = ctx->cpu;
+	cpu = ctx->priv.reserve_cpu;
 	lib_ring_buffer_align_ctx(ctx, lttng_alignof(cpu));
 	chan->ops->event_write(ctx, &cpu, sizeof(cpu));
 }
diff --git a/src/lttng-context-interruptible.c b/src/lttng-context-interruptible.c
index 5cc64b2b..1c78d59b 100644
--- a/src/lttng-context-interruptible.c
+++ b/src/lttng-context-interruptible.c
@@ -36,7 +36,7 @@ void interruptible_record(struct lttng_kernel_ctx_field *field,
 		struct lib_ring_buffer_ctx *ctx,
 		struct lttng_channel *chan)
 {
-	struct lttng_probe_ctx *lttng_probe_ctx = ctx->priv;
+	struct lttng_probe_ctx *lttng_probe_ctx = ctx->probe_ctx;
 	int8_t interruptible = lttng_probe_ctx->interruptible;
 
 	lib_ring_buffer_align_ctx(ctx, lttng_alignof(interruptible));
diff --git a/src/lttng-context-perf-counters.c b/src/lttng-context-perf-counters.c
index e637bb75..53578475 100644
--- a/src/lttng-context-perf-counters.c
+++ b/src/lttng-context-perf-counters.c
@@ -38,7 +38,7 @@ void perf_counter_record(struct lttng_kernel_ctx_field *field,
 	struct perf_event *event;
 	uint64_t value;
 
-	event = perf_field->e[ctx->cpu];
+	event = perf_field->e[ctx->priv.reserve_cpu];
 	if (likely(event)) {
 		if (unlikely(event->state == PERF_EVENT_STATE_ERROR)) {
 			value = 0;
diff --git a/src/lttng-event-notifier-notification.c b/src/lttng-event-notifier-notification.c
index ce5a670f..e5bd172a 100644
--- a/src/lttng-event-notifier-notification.c
+++ b/src/lttng-event-notifier-notification.c
@@ -401,7 +401,7 @@ void notification_send(struct lttng_event_notifier_notification *notif,
 	kernel_notif.capture_buf_size = capture_buffer_content_len;
 
 	lib_ring_buffer_ctx_init(&ctx, event_notifier_group->chan, reserve_size,
-			lttng_alignof(kernel_notif), -1, NULL);
+			lttng_alignof(kernel_notif), NULL);
 	ret = event_notifier_group->ops->event_reserve(&ctx, 0);
 	if (ret < 0) {
 		record_error(event_notifier);
diff --git a/src/lttng-events.c b/src/lttng-events.c
index 51c5cb39..4c266466 100644
--- a/src/lttng-events.c
+++ b/src/lttng-events.c
@@ -2882,7 +2882,7 @@ int lttng_metadata_output_channel(struct lttng_metadata_stream *stream,
 			stream->transport->ops.packet_avail_size(chan),
 			len);
 	lib_ring_buffer_ctx_init(&ctx, chan, reserve_len,
-			sizeof(char), -1, NULL);
+			sizeof(char), NULL);
 	/*
 	 * If reservation failed, return an error to the caller.
 	 */
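For context callbacks the split leaves two distinct handles: the CPU chosen
at reserve time (ctx->priv.reserve_cpu, formerly ctx->cpu) and the typed
probe state (ctx->probe_ctx, formerly smuggled through the void *priv
pointer). A combined sketch modeled on cpu_id_record() and
interruptible_record() above (the callback itself is hypothetical):

static
void example_record(struct lttng_kernel_ctx_field *field,
		struct lib_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	int cpu = ctx->priv.reserve_cpu;
	int8_t interruptible = ctx->probe_ctx->interruptible;

	lib_ring_buffer_align_ctx(ctx, lttng_alignof(cpu));
	chan->ops->event_write(ctx, &cpu, sizeof(cpu));
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(interruptible));
	chan->ops->event_write(ctx, &interruptible, sizeof(interruptible));
}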
diff --git a/src/lttng-ring-buffer-client.h b/src/lttng-ring-buffer-client.h
index 26034e92..ff404368 100644
--- a/src/lttng-ring-buffer-client.h
+++ b/src/lttng-ring-buffer-client.h
@@ -149,7 +149,7 @@ size_t record_header_size(const struct lib_ring_buffer_config *config,
 	case 1:	/* compact */
 		padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
 		offset += padding;
-		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+		if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
 			offset += sizeof(uint32_t);	/* id and timestamp */
 		} else {
 			/* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
@@ -165,7 +165,7 @@ size_t record_header_size(const struct lib_ring_buffer_config *config,
 		padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
 		offset += padding;
 		offset += sizeof(uint16_t);
-		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+		if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
 			offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
 			offset += sizeof(uint32_t);	/* timestamp */
 		} else {
@@ -207,9 +207,9 @@ void lttng_write_event_header(const struct lib_ring_buffer_config *config,
 			    struct lib_ring_buffer_ctx *ctx,
 			    uint32_t event_id)
 {
-	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+	struct lttng_channel *lttng_chan = channel_get_private(ctx->priv.chan);
 
-	if (unlikely(ctx->rflags))
+	if (unlikely(ctx->priv.rflags))
 		goto slow_path;
 
 	switch (lttng_chan->header_type) {
@@ -224,13 +224,13 @@ void lttng_write_event_header(const struct lib_ring_buffer_config *config,
 		bt_bitfield_write(&id_time, uint32_t,
 				LTTNG_COMPACT_EVENT_BITS,
 				LTTNG_COMPACT_TSC_BITS,
-				ctx->tsc);
+				ctx->priv.tsc);
 		lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
 		break;
 	}
 	case 2:	/* large */
 	{
-		uint32_t timestamp = (uint32_t) ctx->tsc;
+		uint32_t timestamp = (uint32_t) ctx->priv.tsc;
 		uint16_t id = event_id;
 
 		lib_ring_buffer_write(config, ctx, &id, sizeof(id));
@@ -256,11 +256,11 @@ void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
 				 struct lib_ring_buffer_ctx *ctx,
 				 uint32_t event_id)
 {
-	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+	struct lttng_channel *lttng_chan = channel_get_private(ctx->priv.chan);
 
 	switch (lttng_chan->header_type) {
 	case 1:	/* compact */
-		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+		if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
 			uint32_t id_time = 0;
 
 			bt_bitfield_write(&id_time, uint32_t,
@@ -269,11 +269,11 @@ void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
 					event_id);
 			bt_bitfield_write(&id_time, uint32_t,
 					LTTNG_COMPACT_EVENT_BITS,
-					LTTNG_COMPACT_TSC_BITS, ctx->tsc);
+					LTTNG_COMPACT_TSC_BITS, ctx->priv.tsc);
 			lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
 		} else {
 			uint8_t id = 0;
-			uint64_t timestamp = ctx->tsc;
+			uint64_t timestamp = ctx->priv.tsc;
 
 			bt_bitfield_write(&id, uint8_t,
					0,
@@ -289,8 +289,8 @@ void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
 		break;
 	case 2:	/* large */
 	{
-		if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
-			uint32_t timestamp = (uint32_t) ctx->tsc;
+		if (!(ctx->priv.rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
+			uint32_t timestamp = (uint32_t) ctx->priv.tsc;
 			uint16_t id = event_id;
 
 			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
@@ -298,7 +298,7 @@ void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
 			lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
 		} else {
 			uint16_t id = 65535;
-			uint64_t timestamp = ctx->tsc;
+			uint64_t timestamp = ctx->priv.tsc;
 
 			lib_ring_buffer_write(config, ctx, &id, sizeof(id));
 			/* Align extended struct on largest member */
@@ -607,14 +607,17 @@ static
 int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx,
 		      uint32_t event_id)
 {
-	struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
+	struct lttng_kernel_event_recorder *event_recorder = ctx->client_priv;
+	struct lttng_channel *lttng_chan = event_recorder->chan;
 	struct lttng_client_ctx client_ctx;
 	int ret, cpu;
 
 	cpu = lib_ring_buffer_get_cpu(&client_config);
 	if (unlikely(cpu < 0))
 		return -EPERM;
-	ctx->cpu = cpu;
+	memset(&ctx->priv, 0, sizeof(ctx->priv));
+	ctx->priv.chan = lttng_chan->chan;
+	ctx->priv.reserve_cpu = cpu;
 
 	/* Compute internal size of context structures. */
 	ctx_get_struct_size(lttng_chan->ctx, &client_ctx.packet_context_len,
 		lttng_chan, ctx);
 
@@ -622,11 +625,11 @@ int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx,
 	switch (lttng_chan->header_type) {
 	case 1:	/* compact */
 		if (event_id > 30)
-			ctx->rflags |= LTTNG_RFLAG_EXTENDED;
+			ctx->priv.rflags |= LTTNG_RFLAG_EXTENDED;
 		break;
 	case 2:	/* large */
 		if (event_id > 65534)
-			ctx->rflags |= LTTNG_RFLAG_EXTENDED;
+			ctx->priv.rflags |= LTTNG_RFLAG_EXTENDED;
 		break;
 	default:
 		WARN_ON_ONCE(1);
@@ -636,7 +639,7 @@ int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx,
 	if (unlikely(ret))
 		goto put;
 	lib_ring_buffer_backend_get_pages(&client_config, ctx,
-			&ctx->backend_pages);
+			&ctx->priv.backend_pages);
 	lttng_write_event_header(&client_config, ctx, event_id);
 	return 0;
 put:
diff --git a/src/lttng-ring-buffer-event-notifier-client.h b/src/lttng-ring-buffer-event-notifier-client.h
index b5e91c55..993c96da 100644
--- a/src/lttng-ring-buffer-event-notifier-client.h
+++ b/src/lttng-ring-buffer-event-notifier-client.h
@@ -303,13 +303,17 @@ void lttng_write_event_notifier_header(const struct lib_ring_buffer_config *conf
 static
 int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
 {
+	struct channel *chan = ctx->client_priv;
 	int ret;
 
+	memset(&ctx->priv, 0, sizeof(ctx->priv));
+	ctx->priv.chan = chan;
+
 	ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
 	if (ret)
 		return ret;
 	lib_ring_buffer_backend_get_pages(&client_config, ctx,
-			&ctx->backend_pages);
+			&ctx->priv.backend_pages);
 	lttng_write_event_notifier_header(&client_config, ctx);
 	return 0;
diff --git a/src/lttng-ring-buffer-metadata-client.h b/src/lttng-ring-buffer-metadata-client.h
index 6fa0c2b1..7e418001 100644
--- a/src/lttng-ring-buffer-metadata-client.h
+++ b/src/lttng-ring-buffer-metadata-client.h
@@ -302,13 +302,17 @@ void lttng_buffer_read_close(struct lib_ring_buffer *buf)
 static
 int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx, uint32_t event_id)
 {
+	struct channel *chan = ctx->client_priv;
 	int ret;
 
+	memset(&ctx->priv, 0, sizeof(ctx->priv));
+	ctx->priv.chan = chan;
+
 	ret = lib_ring_buffer_reserve(&client_config, ctx, NULL);
 	if (ret)
 		return ret;
 	lib_ring_buffer_backend_get_pages(&client_config, ctx,
-			&ctx->backend_pages);
+			&ctx->priv.backend_pages);
 	return 0;
 }
diff --git a/src/probes/lttng-kprobes.c b/src/probes/lttng-kprobes.c
index 238441f5..39de7d3d 100644
--- a/src/probes/lttng-kprobes.c
+++ b/src/probes/lttng-kprobes.c
@@ -63,8 +63,8 @@ int lttng_kprobes_event_handler_pre(struct kprobe *p, struct pt_regs *regs)
 		struct lib_ring_buffer_ctx ctx;
 		int ret;
 
-		lib_ring_buffer_ctx_init(&ctx, chan->chan, sizeof(data),
-					 lttng_alignof(data), -1, &lttng_probe_ctx);
+		lib_ring_buffer_ctx_init(&ctx, event_recorder, sizeof(data),
+					 lttng_alignof(data), &lttng_probe_ctx);
 		ret = chan->ops->event_reserve(&ctx, event_recorder->priv->id);
 		if (ret < 0)
 			return 0;
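Taken together, the reserve callbacks above fix the ownership contract: the
caller fills only the public fields, and every reserve callback starts by
zeroing and populating the private area. Summarized as a sketch (per-cpu
detail applies to the event client only):

/*
 * caller (probe / metadata / notifier)   reserve callback
 * -------------------------------------  ----------------------------------
 * lib_ring_buffer_ctx_init() sets:       lttng_event_reserve() sets:
 *   ctx->client_priv                       memset(&ctx->priv, 0, ...)
 *   ctx->data_size                         ctx->priv.chan
 *   ctx->largest_align                     ctx->priv.reserve_cpu (if per-cpu)
 *   ctx->probe_ctx                         then lib_ring_buffer_reserve()
 *                                          fills the remaining priv fields.
 */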
diff --git a/src/probes/lttng-kretprobes.c b/src/probes/lttng-kretprobes.c
index 378a0e83..03561703 100644
--- a/src/probes/lttng-kretprobes.c
+++ b/src/probes/lttng-kretprobes.c
@@ -81,8 +81,8 @@ int _lttng_kretprobes_handler(struct kretprobe_instance *krpi,
 	payload.ip = (unsigned long) lttng_get_kretprobe(krpi)->kp.addr;
 	payload.parent_ip = (unsigned long) krpi->ret_addr;
 
-	lib_ring_buffer_ctx_init(&ctx, chan->chan, sizeof(payload),
-				 lttng_alignof(payload), -1, &lttng_probe_ctx);
+	lib_ring_buffer_ctx_init(&ctx, event_recorder, sizeof(payload),
+				 lttng_alignof(payload), &lttng_probe_ctx);
 	ret = chan->ops->event_reserve(&ctx, event_recorder->priv->id);
 	if (ret < 0)
 		return 0;
diff --git a/src/probes/lttng-uprobes.c b/src/probes/lttng-uprobes.c
index 233813f0..20865889 100644
--- a/src/probes/lttng-uprobes.c
+++ b/src/probes/lttng-uprobes.c
@@ -68,8 +68,8 @@ int lttng_uprobes_event_handler_pre(struct uprobe_consumer *uc, struct pt_regs *
 		struct lib_ring_buffer_ctx ctx;
 		int ret;
 
-		lib_ring_buffer_ctx_init(&ctx, chan->chan,
-				sizeof(payload), lttng_alignof(payload), -1, &lttng_probe_ctx);
+		lib_ring_buffer_ctx_init(&ctx, event_recorder,
+				sizeof(payload), lttng_alignof(payload), &lttng_probe_ctx);
 		ret = chan->ops->event_reserve(&ctx, event_recorder->priv->id);
 		if (ret < 0)