/* Keep track of nesting inside userspace callstack context code */
DEFINE_PER_CPU(int, callstack_user_nesting);
+/*
+ * Note: these callbacks expect to be invoked with preemption disabled across
+ * get_size and record, due to their use of a per-cpu stack.
+ */
static
-struct stack_trace *stack_trace_context(struct lttng_kernel_ctx_field *field,
- struct lib_ring_buffer_ctx *ctx)
+struct stack_trace *stack_trace_context(struct field_data *fdata, int cpu)
{
int buffer_nesting, cs_user_nesting;
struct lttng_cs *cs;
- struct field_data *fdata = field->priv;
/*
* Do not gather the userspace callstack context when the event was
* triggered by the userspace callstack context saving mechanism.
*/
- cs_user_nesting = per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu);
+ cs_user_nesting = per_cpu(callstack_user_nesting, cpu);
if (fdata->mode == CALLSTACK_USER && cs_user_nesting >= 1)
return NULL;
/*
* max nesting is checked in lib_ring_buffer_get_cpu().
* Check it again as a safety net.
*/
- cs = per_cpu_ptr(fdata->cs_percpu, ctx->priv.reserve_cpu);
- buffer_nesting = per_cpu(lib_ring_buffer_nesting, ctx->priv.reserve_cpu) - 1;
+ cs = per_cpu_ptr(fdata->cs_percpu, cpu);
+ buffer_nesting = per_cpu(lib_ring_buffer_nesting, cpu) - 1;
if (buffer_nesting >= RING_BUFFER_MAX_NESTING)
return NULL;
}
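/*
 * Illustrative sketch, not part of the patch: the recursion guard used above
 * in a self-contained userspace form. A per-thread counter stands in for the
 * per-cpu callstack_user_nesting variable, and every name below is
 * hypothetical; only the guard pattern itself is taken from the code above.
 */
#include <stdio.h>

static __thread int user_cs_nesting;

static void emit_event(void);

/* Saving the user callstack can itself trigger event emission (for example
 * through a fault on the user stack pages); the nested attempt must not try
 * to collect another user callstack. */
static void save_user_callstack(void)
{
	emit_event();	/* re-entry while the callstack is being saved */
	printf("user callstack saved\n");
}

static void emit_event(void)
{
	if (user_cs_nesting >= 1)
		return;	/* nested: skip user callstack collection */
	user_cs_nesting++;
	save_user_callstack();
	user_cs_nesting--;
}

int main(void)
{
	emit_event();
	return 0;
}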
static
-size_t lttng_callstack_length_get_size(size_t offset, struct lttng_kernel_ctx_field *field,
- struct lib_ring_buffer_ctx *ctx,
- struct lttng_channel *chan)
+size_t lttng_callstack_length_get_size(void *priv, struct lttng_kernel_probe_ctx *probe_ctx, size_t offset)
{
size_t orig_offset = offset;
/*
* In order to reserve the correct size, the callstack is computed. The
* resulting callstack is saved to be accessed in the record step.
*/
static
-size_t lttng_callstack_sequence_get_size(size_t offset, struct lttng_kernel_ctx_field *field,
- struct lib_ring_buffer_ctx *ctx,
- struct lttng_channel *chan)
+size_t lttng_callstack_sequence_get_size(void *priv, struct lttng_kernel_probe_ctx *probe_ctx, size_t offset)
{
struct stack_trace *trace;
- struct field_data *fdata = field->priv;
+ struct field_data *fdata = (struct field_data *) priv;
size_t orig_offset = offset;
+ int cpu = smp_processor_id();
/* do not write data if no space is available */
- trace = stack_trace_context(field, ctx);
+ trace = stack_trace_context(fdata, cpu);
if (unlikely(!trace)) {
offset += lib_ring_buffer_align(offset, lttng_alignof(unsigned long));
return offset - orig_offset;
}
trace->nr_entries = 0;
if (fdata->mode == CALLSTACK_USER)
- ++per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu);
+ ++per_cpu(callstack_user_nesting, cpu);
/* do the real work and reserve space */
cs_types[fdata->mode].save_func(trace);
if (fdata->mode == CALLSTACK_USER)
- per_cpu(callstack_user_nesting, ctx->priv.reserve_cpu)--;
+ per_cpu(callstack_user_nesting, cpu)--;
/*
* Remove final ULONG_MAX delimiter. If we cannot find it, add
* our own delimiter in the record step to show that the stack is incomplete.
*/
}
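/*
 * Illustrative sketch, not part of the patch: the space the sequence field
 * consumes, consistent with what lttng_callstack_sequence_record writes
 * below (nr_entries addresses plus one ULONG_MAX delimiter when the trace
 * filled up). The helper names are hypothetical and padding is reduced to
 * natural alignment of unsigned long.
 */
#include <stddef.h>

/* Round v up to align, which must be a power of two. */
static size_t align_up(size_t v, size_t align)
{
	return (v + align - 1) & ~(align - 1);
}

static size_t callstack_sequence_size(size_t offset, unsigned int nr_entries,
		int stack_is_full)
{
	size_t start = align_up(offset, sizeof(unsigned long));
	size_t end = start + (size_t) nr_entries * sizeof(unsigned long);

	if (stack_is_full)
		end += sizeof(unsigned long);	/* room for the added delimiter */
	return end - offset;
}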
static
-void lttng_callstack_length_record(struct lttng_kernel_ctx_field *field,
- struct lib_ring_buffer_ctx *ctx,
- struct lttng_channel *chan)
+void lttng_callstack_length_record(void *priv, struct lttng_kernel_probe_ctx *probe_ctx,
+ struct lttng_kernel_ring_buffer_ctx *ctx,
+ struct lttng_kernel_channel_buffer *chan)
{
- struct stack_trace *trace = stack_trace_context(field, ctx);
+ int cpu = ctx->priv.reserve_cpu;
+ struct field_data *fdata = (struct field_data *) priv;
+ struct stack_trace *trace = stack_trace_context(fdata, cpu);
unsigned int nr_seq_entries;
- lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned int));
if (unlikely(!trace)) {
nr_seq_entries = 0;
} else {
nr_seq_entries = trace->nr_entries;
if (trace->nr_entries == trace->max_entries)
nr_seq_entries++;
}
- chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int));
+ chan->ops->event_write(ctx, &nr_seq_entries, sizeof(unsigned int), lttng_alignof(unsigned int));
}
static
-void lttng_callstack_sequence_record(struct lttng_kernel_ctx_field *field,
- struct lib_ring_buffer_ctx *ctx,
- struct lttng_channel *chan)
+void lttng_callstack_sequence_record(void *priv, struct lttng_kernel_probe_ctx *probe_ctx,
+ struct lttng_kernel_ring_buffer_ctx *ctx,
+ struct lttng_kernel_channel_buffer *chan)
{
- struct stack_trace *trace = stack_trace_context(field, ctx);
+ int cpu = ctx->priv.reserve_cpu;
+ struct field_data *fdata = (struct field_data *) priv;
+ struct stack_trace *trace = stack_trace_context(fdata, cpu);
unsigned int nr_seq_entries;
- lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
if (unlikely(!trace)) {
+ lib_ring_buffer_align_ctx(ctx, lttng_alignof(unsigned long));
return;
}
nr_seq_entries = trace->nr_entries;
if (trace->nr_entries == trace->max_entries)
nr_seq_entries++;
chan->ops->event_write(ctx, trace->entries,
- sizeof(unsigned long) * trace->nr_entries);
+ sizeof(unsigned long) * trace->nr_entries, lttng_alignof(unsigned long));
/* Add our own ULONG_MAX delimiter to show incomplete stack. */
if (trace->nr_entries == trace->max_entries) {
unsigned long delim = ULONG_MAX;
- chan->ops->event_write(ctx, &delim, sizeof(unsigned long));
+ chan->ops->event_write(ctx, &delim, sizeof(unsigned long), 1);
}
}
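/*
 * Illustrative sketch, not part of the patch: how a consumer could interpret
 * the two fields emitted above, an unsigned int count followed by that many
 * unsigned long addresses, the last one being ULONG_MAX when the stack was
 * truncated. Names are hypothetical; native byte order is assumed and the
 * alignment padding between the count and the addresses is ignored.
 */
#include <limits.h>
#include <stdio.h>
#include <string.h>

static void decode_callstack_field(const unsigned char *buf, size_t len)
{
	unsigned int nr_seq_entries;
	size_t pos = 0;

	if (len < sizeof(nr_seq_entries))
		return;
	memcpy(&nr_seq_entries, buf + pos, sizeof(nr_seq_entries));
	pos += sizeof(nr_seq_entries);

	for (unsigned int i = 0; i < nr_seq_entries; i++) {
		unsigned long addr;

		if (pos + sizeof(addr) > len)
			break;
		memcpy(&addr, buf + pos, sizeof(addr));
		pos += sizeof(addr);
		if (addr == ULONG_MAX)
			printf("stack truncated after %u frames\n", i);
		else
			printf("frame %u: 0x%lx\n", i, addr);
	}
}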