Rename struct lib_ring_buffer_ctx to struct lttng_kernel_ring_buffer_ctx
[lttng-modules.git] / src / lttng-context-ppid.c
/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-context-ppid.c
 *
 * LTTng PPID context.
 *
 * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <lttng/events.h>
#include <lttng/events-internal.h>
#include <ringbuffer/frontend_types.h>
#include <wrapper/vmalloc.h>
#include <lttng/tracer.h>

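/*
 * Compute the space reserved for the "ppid" payload: alignment padding
 * for pid_t followed by the pid_t value itself.
 */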
static
size_t ppid_get_size(void *priv, struct lttng_kernel_probe_ctx *probe_ctx, size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
	size += sizeof(pid_t);
	return size;
}

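/*
 * Record callback: sample the parent TGID of the current task under the
 * RCU read-side lock, align the ring buffer context and write the value.
 */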
static
void ppid_record(void *priv, struct lttng_kernel_probe_ctx *probe_ctx,
		struct lttng_kernel_ring_buffer_ctx *ctx,
		struct lttng_channel *chan)
{
	pid_t ppid;

	/*
	 * TODO: when we eventually add RCU subsystem instrumentation,
	 * taking the rcu read lock here will trigger RCU tracing
	 * recursively. We should modify the kernel synchronization so
	 * it synchronizes both for RCU and RCU sched, and rely on
	 * rcu_read_lock_sched_notrace.
	 */
	rcu_read_lock();
	ppid = task_tgid_nr(current->real_parent);
	rcu_read_unlock();
	lib_ring_buffer_align_ctx(ctx, lttng_alignof(ppid));
	chan->ops->event_write(ctx, &ppid, sizeof(ppid));
}

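/*
 * get_value callback: return the parent TGID as a signed 64-bit value,
 * used when the context value is read directly instead of being
 * serialized into the ring buffer.
 */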
static
void ppid_get_value(void *priv,
		struct lttng_kernel_probe_ctx *lttng_probe_ctx,
		struct lttng_ctx_value *value)
{
	pid_t ppid;

	/*
	 * TODO: when we eventually add RCU subsystem instrumentation,
	 * taking the rcu read lock here will trigger RCU tracing
	 * recursively. We should modify the kernel synchronization so
	 * it synchronizes both for RCU and RCU sched, and rely on
	 * rcu_read_lock_sched_notrace.
	 */
	rcu_read_lock();
	ppid = task_tgid_nr(current->real_parent);
	rcu_read_unlock();
	value->u.s64 = ppid;
}

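/*
 * Static description of the "ppid" context field: a base-10 integer of
 * type pid_t, wired to the size/record/get_value callbacks above.
 */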
static const struct lttng_kernel_ctx_field *ctx_field = lttng_kernel_static_ctx_field(
	lttng_kernel_static_event_field("ppid",
		lttng_kernel_static_type_integer_from_type(pid_t, __BYTE_ORDER, 10),
		false, false, false),
	ppid_get_size,
	ppid_record,
	ppid_get_value,
	NULL, NULL);

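/*
 * Append the "ppid" context field to *ctx, refusing duplicates.
 */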
int lttng_add_ppid_to_ctx(struct lttng_kernel_ctx **ctx)
{
	int ret;

	if (lttng_kernel_find_context(*ctx, ctx_field->event_field->name))
		return -EEXIST;
	ret = lttng_kernel_context_append(ctx, ctx_field);
	wrapper_vmalloc_sync_mappings();
	return ret;
}
EXPORT_SYMBOL_GPL(lttng_add_ppid_to_ctx);
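
Usage sketch (not part of the file): a caller owning a struct lttng_kernel_ctx
pointer could append this context field roughly as follows. The variable name
chan_ctx and the surrounding channel/ABI plumbing are assumptions, and the
error handling is illustrative only.

	static struct lttng_kernel_ctx *chan_ctx;	/* hypothetical context list of a channel */
	int ret;

	ret = lttng_add_ppid_to_ctx(&chan_ctx);
	if (ret == -EEXIST)
		pr_debug("ppid context already present\n");	/* duplicates are rejected */
	else if (ret)
		pr_err("adding ppid context failed: %d\n", ret);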