/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * lttng-context-vppid.c
 *
 * LTTng vPPID context.
 *
 * Copyright (C) 2009-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <lttng/events.h>
#include <lttng/events-internal.h>
#include <ringbuffer/frontend_types.h>
#include <wrapper/vmalloc.h>
#include <lttng/tracer.h>

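/*
 * Compute the space needed to record the vppid field: padding to the
 * natural alignment of pid_t, followed by the pid_t payload itself.
 */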
static
size_t vppid_get_size(void *priv, struct lttng_kernel_probe_ctx *probe_ctx, size_t offset)
{
	size_t size = 0;

	size += lib_ring_buffer_align(offset, lttng_alignof(pid_t));
	size += sizeof(pid_t);
	return size;
}

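/*
 * Record the parent's virtual TGID (vPPID) into the event payload.
 */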
static
void vppid_record(void *priv, struct lttng_kernel_probe_ctx *probe_ctx,
		struct lttng_kernel_ring_buffer_ctx *ctx,
		struct lttng_kernel_channel_buffer *chan)
{
	struct task_struct *parent;
	pid_t vppid;

	/*
	 * current nsproxy can be NULL when scheduled out of exit. pid_vnr uses
	 * the current thread nsproxy to perform the lookup.
	 */

	/*
	 * TODO: when we eventually add RCU subsystem instrumentation,
	 * taking the rcu read lock here will trigger RCU tracing
	 * recursively. We should modify the kernel synchronization so
	 * it synchronizes both for RCU and RCU sched, and rely on
	 * rcu_read_lock_sched_notrace.
	 */

	rcu_read_lock();
	parent = rcu_dereference(current->real_parent);
	if (!current->nsproxy)
		vppid = 0;
	else
		vppid = task_tgid_vnr(parent);
	rcu_read_unlock();
	chan->ops->event_write(ctx, &vppid, sizeof(vppid), lttng_alignof(vppid));
}

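/*
 * Return the parent's virtual TGID as a signed 64-bit value, e.g. for
 * use by the filter interpreter.
 */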
static
void vppid_get_value(void *priv,
		struct lttng_kernel_probe_ctx *lttng_probe_ctx,
		struct lttng_ctx_value *value)
{
	struct task_struct *parent;
	pid_t vppid;

	/*
	 * current nsproxy can be NULL when scheduled out of exit. pid_vnr uses
	 * the current thread nsproxy to perform the lookup.
	 */

	/*
	 * TODO: when we eventually add RCU subsystem instrumentation,
	 * taking the rcu read lock here will trigger RCU tracing
	 * recursively. We should modify the kernel synchronization so
	 * it synchronizes both for RCU and RCU sched, and rely on
	 * rcu_read_lock_sched_notrace.
	 */

	rcu_read_lock();
	parent = rcu_dereference(current->real_parent);
	if (!current->nsproxy)
		vppid = 0;
	else
		vppid = task_tgid_vnr(parent);
	rcu_read_unlock();
	value->u.s64 = vppid;
}

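/*
 * Static description of the "vppid" context field: a base-10 integer of
 * pid_t size, recorded and read back with the callbacks above.
 */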
static const struct lttng_kernel_ctx_field *ctx_field = lttng_kernel_static_ctx_field(
	lttng_kernel_static_event_field("vppid",
		lttng_kernel_static_type_integer_from_type(pid_t, __BYTE_ORDER, 10),
		false, false, false),
	vppid_get_size,
	vppid_record,
	vppid_get_value,
	NULL, NULL);

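/*
 * Append the vppid context field to the given context, or return
 * -EEXIST if it is already present.
 */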
int lttng_add_vppid_to_ctx(struct lttng_kernel_ctx **ctx)
{
	int ret;

	if (lttng_kernel_find_context(*ctx, ctx_field->event_field->name))
		return -EEXIST;
	ret = lttng_kernel_context_append(ctx, ctx_field);
	wrapper_vmalloc_sync_mappings();
	return ret;
}
EXPORT_SYMBOL_GPL(lttng_add_vppid_to_ctx);