lttng-modules.git: instrumentation/events/lttng-module/sched.h
/* SPDX-License-Identifier: GPL-2.0-only */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_SCHED_H

#include <probes/lttng-tracepoint-event.h>
#include <linux/sched.h>
#include <linux/pid_namespace.h>
#include <linux/binfmts.h>
#include <linux/sched/rt.h>

#define LTTNG_MAX_PID_NS_LEVEL 32

#ifndef _TRACE_SCHED_DEF_
#define _TRACE_SCHED_DEF_

static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * task_state_index() uses fls() and returns a value in the 0-8 range.
	 * Decrement it by 1 (except for TASK_RUNNING, i.e. 0) before using it
	 * for the left shift operation to get the correct task->state mapping.
	 */
	state = task_state_index(p);

	return state ? (1 << (state - 1)) : state;
}

#endif /* _TRACE_SCHED_DEF_ */
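
/*
 * Worked example of the mapping above (added here for illustration only;
 * it assumes the usual mainline state values TASK_INTERRUPTIBLE == 0x1
 * and TASK_UNINTERRUPTIBLE == 0x2):
 *
 *   p->state              task_state_index(p)   value emitted
 *   --------------------  --------------------  ---------------
 *   TASK_RUNNING (0x0)    0                     0x0
 *   TASK_INTERRUPTIBLE    1                     1 << 0 == 0x1
 *   TASK_UNINTERRUPTIBLE  2                     1 << 1 == 0x2
 *
 * A preempted task skips the mapping entirely and is reported with the
 * out-of-band TASK_REPORT_MAX sentinel instead of a state bit.
 */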

/*
 * Enumeration of the task state bitmask.
 * Only the individual bit flags are enumerated here, not compositions of
 * states.
 */
LTTNG_TRACEPOINT_ENUM(task_state,
	TP_ENUM_VALUES(
		ctf_enum_value("TASK_RUNNING", TASK_RUNNING)
		ctf_enum_value("TASK_INTERRUPTIBLE", TASK_INTERRUPTIBLE)
		ctf_enum_value("TASK_UNINTERRUPTIBLE", TASK_UNINTERRUPTIBLE)
		ctf_enum_value("TASK_STOPPED", __TASK_STOPPED)
		ctf_enum_value("TASK_TRACED", __TASK_TRACED)
		ctf_enum_value("EXIT_DEAD", EXIT_DEAD)
		ctf_enum_value("EXIT_ZOMBIE", EXIT_ZOMBIE)
		ctf_enum_value("TASK_PARKED", TASK_PARKED)
		ctf_enum_value("TASK_DEAD", TASK_DEAD)
		ctf_enum_value("TASK_WAKEKILL", TASK_WAKEKILL)
		ctf_enum_value("TASK_WAKING", TASK_WAKING)
		ctf_enum_value("TASK_NOLOAD", TASK_NOLOAD)
		ctf_enum_value("TASK_NEW", TASK_NEW)
		ctf_enum_value("TASK_STATE_MAX", TASK_STATE_MAX)
	)
)
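
/*
 * Note, added for illustration: composite states such as TASK_KILLABLE
 * (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) or TASK_IDLE
 * (TASK_UNINTERRUPTIBLE | TASK_NOLOAD) are intentionally not listed above;
 * a trace consumer can reconstruct them, if needed, by combining the
 * individual bit flags.
 */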

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_FIELDS(
		ctf_array_text(char, comm, t->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, t->pid)
	)
)

/*
 * Tracepoint for the return value of the kthread stopping:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_FIELDS(
		ctf_integer(int, ret, ret)
	)
)

/*
 * Tracepoint for waking up a task:
 */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, target_cpu, task_cpu(p))
	)
)
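
/*
 * Example of the prio normalization used in the fields above (added for
 * illustration; assumes the usual MAX_RT_PRIO == 100 and a nice-0 kernel
 * prio of 120): a default CFS task is recorded as 120 - 100 == 20, a
 * nice -20 task as 100 - 100 == 0, and a SCHED_FIFO task of rt_priority 50
 * (kernel prio 99 - 50 == 49) as 49 - 100 == -51.
 */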

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_waking,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
LTTNG_TRACEPOINT_EVENT(sched_switch,

	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),

	TP_FIELDS(
		ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, prev_tid, prev->pid)
		ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
		ctf_enum(task_state, long, prev_state, __trace_sched_switch_state(preempt, prev))
		ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, next_tid, next->pid)
		ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
	)
)

/*
 * Tracepoint for a task being migrated:
 */
LTTNG_TRACEPOINT_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, orig_cpu, task_cpu(p))
		ctf_integer(int, dest_cpu, dest_cpu)
	)
)

LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
	)
)

/*
 * Tracepoint for freeing a task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_free,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))


/*
 * Tracepoint for a task exiting:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_exit,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for waiting on task to unschedule:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for a waiting task:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_FIELDS(
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, pid_nr(pid))
		ctf_integer(int, prio, current->prio - MAX_RT_PRIO)
	)
)

/*
 * Tracepoint for do_fork.
 * Saving both TID and PID information, especially for the child, allows
 * trace analyzers to distinguish between creation of a new process and
 * creation of a new thread. Newly created processes will have child_tid
 * == child_pid, while creation of a thread results in child_tid !=
 * child_pid (see the example following this event definition).
 */
LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_locvar(
		pid_t vtids[LTTNG_MAX_PID_NS_LEVEL];
		unsigned int ns_level;
	),

	TP_code_pre(
		if (child) {
			struct pid *child_pid;
			unsigned int i;

			child_pid = task_pid(child);
			tp_locvar->ns_level =
				min_t(unsigned int, child_pid->level + 1,
					LTTNG_MAX_PID_NS_LEVEL);
			for (i = 0; i < tp_locvar->ns_level; i++)
				tp_locvar->vtids[i] = child_pid->numbers[i].nr;
		}
	),

	TP_FIELDS(
		ctf_array_text(char, parent_comm, parent->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, parent_tid, parent->pid)
		ctf_integer(pid_t, parent_pid, parent->tgid)
		ctf_integer(unsigned int, parent_ns_inum,
			({
				unsigned int parent_ns_inum = 0;

				if (parent) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(parent);
					if (pid_ns)
						parent_ns_inum =
							pid_ns->ns.inum;
				}
				parent_ns_inum;
			}))
		ctf_array_text(char, child_comm, child->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, child_tid, child->pid)
		ctf_sequence(pid_t, vtids, tp_locvar->vtids, u8, tp_locvar->ns_level)
		ctf_integer(pid_t, child_pid, child->tgid)
		ctf_integer(unsigned int, child_ns_inum,
			({
				unsigned int child_ns_inum = 0;

				if (child) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(child);
					if (pid_ns)
						child_ns_inum =
							pid_ns->ns.inum;
				}
				child_ns_inum;
			}))
	),

	TP_code_post()
)
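
/*
 * Example of interpreting this event (added for illustration, with
 * hypothetical values): a fork() records child_tid == child_pid (e.g. both
 * 4242), whereas a pthread_create() records child_tid != child_pid (e.g.
 * child_tid 4243, child_pid 4242). For a child running in one nested PID
 * namespace, ns_level is 2 and vtids holds the child's PID in the initial
 * namespace followed by its PID in the nested namespace.
 */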

/*
 * Tracepoint for exec:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_FIELDS(
		ctf_string(filename, bprm->filename)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(pid_t, old_tid, old_pid)
	)
)

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, delay, delay)
	)
)


/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_wait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_sleep,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_iowait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting blocked time (time the task spends in
 * uninterruptible sleep).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_blocked,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
LTTNG_TRACEPOINT_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, runtime, runtime)
		ctf_integer(u64, vruntime, vruntime)
	)
)

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		ctf_integer(int, newprio, pi_task ? pi_task->prio - MAX_RT_PRIO : tsk->prio - MAX_RT_PRIO)
	)
)
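
/*
 * Example (added for illustration; assumes the same priority scale as
 * above, with MAX_RT_PRIO == 100): if a SCHED_FIFO waiter of rt_priority 50
 * (kernel prio 49, i.e. -51 once normalized) blocks on an rt-mutex held by
 * a nice-0 CFS task, the boost is recorded as oldprio == 20 and
 * newprio == -51. When there is no boosting donor, pi_task is NULL and
 * newprio simply falls back to the task's own priority.
 */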

#endif /* LTTNG_TRACE_SCHED_H */

/* This part must be outside protection */
#include <probes/define_trace.h>