Fix: do not use diagnostic pragma when GCC version is lower than 4.6.0
[lttng-modules.git] / instrumentation / events / lttng-module / sched.h
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM sched
4
5 #if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define LTTNG_TRACE_SCHED_H
7
8 #include <probes/lttng-tracepoint-event.h>
9 #include <linux/sched.h>
10 #include <linux/pid_namespace.h>
11 #include <linux/binfmts.h>
12 #include <linux/version.h>
13 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
14 #include <linux/sched/rt.h>
15 #endif
16
17 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
18 #define lttng_proc_inum ns.inum
19 #else
20 #define lttng_proc_inum proc_inum
21 #endif
22
23 #define LTTNG_MAX_PID_NS_LEVEL 32
24
#ifndef _TRACE_SCHED_DEF_
#define _TRACE_SCHED_DEF_

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))

/*
 * Kernel >= 4.4: the scheduler passes "preempt" explicitly to the
 * sched_switch tracepoint, so preemption no longer has to be inferred
 * from preempt_count().
 */
static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
	/*
	 * Preemption ignores task state, therefore preempted tasks are always RUNNING
	 * (we will not have dequeued if state != RUNNING).
	 */
	return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))

/*
 * Kernels [3.19, 4.4): detect preemption from the current task's
 * preempt_count(); only valid because the tracepoint fires with
 * p == current (asserted under CONFIG_SCHED_DEBUG).
 */
static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (preempt_count() & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif /* CONFIG_PREEMPT */

	return state;
}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))

/*
 * Kernels [3.13, 3.19): read the preempt count through the
 * task_preempt_count() accessor.
 */
static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_preempt_count(p) & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))

/*
 * Kernels [3.2, 3.13): preempt_count lives directly in thread_info.
 */
static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}

#else

/*
 * Kernels < 3.2: no TASK_STATE_MAX bit available to flag preemption,
 * so a preempted task is simply reported as TASK_RUNNING.
 */
static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING;
#endif

	return state;
}

#endif

#endif /* _TRACE_SCHED_DEF_ */
116
/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_FIELDS(
		/* Name and kernel tid of the kthread being stopped. */
		ctf_array_text(char, comm, t->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, t->pid)
	)
)
131
/*
 * Tracepoint for the return value of the kthread stopping:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_FIELDS(
		/* Value returned by the stopped kthread's threadfn. */
		ctf_integer(int, ret, ret)
	)
)
145
/*
 * Tracepoint for waking up a task:
 *
 * Kernel >= 4.3 (and the listed RT kernel ranges, which backported the
 * change) removed the "success" argument from the wakeup tracepoints,
 * hence the two prototypes of this event class.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) || \
	LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		/* Normalize prio so 0 is the highest non-RT priority. */
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, target_cpu, task_cpu(p))
	)
)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p, int success),

	TP_ARGS(p, success),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		/* Normalize prio so 0 is the highest non-RT priority. */
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, success, success)
		ctf_integer(int, target_cpu, task_cpu(p))
	)
)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
186
/*
 * Instances of sched_wakeup_template; the same kernel-version condition
 * as the class selects between the with-success and without-success
 * prototypes.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) || \
	LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_waking,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

#else

LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
233
/*
 * Tracepoint for task switches, performed by the scheduler:
 */
LTTNG_TRACEPOINT_EVENT(sched_switch,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
	/* Kernel >= 4.4 passes the preemption status explicitly. */
	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),
#else
	TP_PROTO(struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(prev, next),
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0)) */

	TP_FIELDS(
		ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, prev_tid, prev->pid)
		ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
		/*
		 * prev_state encodes the outgoing task's state; preempted
		 * tasks are flagged via TASK_STATE_MAX (see
		 * __trace_sched_switch_state above).
		 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
		ctf_integer(long, prev_state, __trace_sched_switch_state(preempt, prev))
#else
		ctf_integer(long, prev_state, __trace_sched_switch_state(prev))
#endif
		ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, next_tid, next->pid)
		ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
	)
)
266
/*
 * Tracepoint for a task being migrated:
 */
LTTNG_TRACEPOINT_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		/* CPU the task is currently on, before the migration. */
		ctf_integer(int, orig_cpu, task_cpu(p))
		ctf_integer(int, dest_cpu, dest_cpu)
	)
)
284
/*
 * Common payload (comm/tid/prio) shared by the process lifecycle
 * tracepoints below (free, exit, wait_task).
 */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
	)
)
297
/*
 * Tracepoint for freeing a task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_free,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))


/*
 * Tracepoint for a task exiting:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_exit,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for waiting on task to unschedule:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))
319
/*
 * Tracepoint for a waiting task:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_FIELDS(
		/* comm/prio describe the waiting (current) task ... */
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
		/* ... while tid is the pid being waited upon (0 = any child). */
		ctf_integer(pid_t, tid, pid_nr(pid))
		ctf_integer(int, prio, current->prio - MAX_RT_PRIO)
	)
)
335
/*
 * Tracepoint for do_fork.
 * Saving both TID and PID information, especially for the child, allows
 * trace analyzers to distinguish between creation of a new process and
 * creation of a new thread. Newly created processes will have child_tid
 * == child_pid, while creation of a thread yields to child_tid !=
 * child_pid.
 */
LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	/*
	 * Per-event scratch space: the child's tid as seen from each
	 * nested pid namespace, and how many namespace levels are
	 * actually populated (clamped to LTTNG_MAX_PID_NS_LEVEL).
	 */
	TP_locvar(
		pid_t vtids[LTTNG_MAX_PID_NS_LEVEL];
		unsigned int ns_level;
	),

	TP_code_pre(
		if (child) {
			struct pid *child_pid;
			unsigned int i;

			child_pid = task_pid(child);
			/* child_pid->level is 0-based, hence the + 1. */
			tp_locvar->ns_level =
				min_t(unsigned int, child_pid->level + 1,
					LTTNG_MAX_PID_NS_LEVEL);
			for (i = 0; i < tp_locvar->ns_level; i++)
				tp_locvar->vtids[i] = child_pid->numbers[i].nr;
		}
	),

	TP_FIELDS(
		ctf_array_text(char, parent_comm, parent->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, parent_tid, parent->pid)
		ctf_integer(pid_t, parent_pid, parent->tgid)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
		/* Inode number of the parent's active pid namespace (0 if none). */
		ctf_integer(unsigned int, parent_ns_inum,
			({
				unsigned int parent_ns_inum = 0;

				if (parent) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(parent);
					if (pid_ns)
						parent_ns_inum =
							pid_ns->lttng_proc_inum;
				}
				parent_ns_inum;
			}))
#endif
		ctf_array_text(char, child_comm, child->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, child_tid, child->pid)
		/* Child tid in each nested pid namespace, outermost first. */
		ctf_sequence(pid_t, vtids, tp_locvar->vtids, u8, tp_locvar->ns_level)
		ctf_integer(pid_t, child_pid, child->tgid)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
		/* Inode number of the child's active pid namespace (0 if none). */
		ctf_integer(unsigned int, child_ns_inum,
			({
				unsigned int child_ns_inum = 0;

				if (child) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(child);
					if (pid_ns)
						child_ns_inum =
							pid_ns->lttng_proc_inum;
				}
				child_ns_inum;
			}))
#endif
	),

	TP_code_post()
)
413
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
/*
 * Tracepoint for exec (upstream tracepoint introduced in kernel 3.4):
 */
LTTNG_TRACEPOINT_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_FIELDS(
		/* Path of the binary being executed. */
		ctf_string(filename, bprm->filename)
		ctf_integer(pid_t, tid, p->pid)
		/* Tid the task had before the exec. */
		ctf_integer(pid_t, old_tid, old_pid)
	)
)
#endif
432
/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		/* Accounted delay, in nanoseconds. */
		ctf_integer(u64, delay, delay)
	)
)
449
450
/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_wait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_sleep,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_iowait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
/*
 * Tracepoint for accounting blocked time (time the task is in uninterruptible).
 * Upstream tracepoint introduced in kernel 3.3.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_blocked,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))
#endif
483
/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
LTTNG_TRACEPOINT_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		/* CPU time consumed since the last accounting, and the
		 * task's CFS virtual runtime. */
		ctf_integer(u64, runtime, runtime)
		ctf_integer(u64, vruntime, vruntime)
	)
)
501
/*
 * Kernel >= 4.12 (and the listed RT backport ranges) changed the
 * sched_pi_setprio tracepoint to receive the donor task instead of the
 * new priority, hence the two variants below.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0) || \
	LTTNG_RT_KERNEL_RANGE(4,9,27,18, 4,10,0,0) || \
	LTTNG_RT_KERNEL_RANGE(4,11,5,1, 4,12,0,0))
/*
 * Tracepoint for showing priority inheritance modifying a tasks
 * priority.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		/* No donor task means the boost is being removed: report
		 * the task's own priority as the new priority. */
		ctf_integer(int, newprio, pi_task ? pi_task->prio - MAX_RT_PRIO : tsk->prio - MAX_RT_PRIO)
	)
)
#else
/*
 * Tracepoint for showing priority inheritance modifying a tasks
 * priority.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		ctf_integer(int, newprio, newprio - MAX_RT_PRIO)
	)
)
#endif
541
542 #endif /* LTTNG_TRACE_SCHED_H */
543
544 /* This part must be outside protection */
545 #include <probes/define_trace.h>
This page took 0.040187 seconds and 4 git commands to generate.