2 #define TRACE_SYSTEM sched
4 #if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define LTTNG_TRACE_SCHED_H
7 #include "../../../probes/lttng-tracepoint-event.h"
8 #include <linux/sched.h>
9 #include <linux/binfmts.h>
10 #include <linux/version.h>
11 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
12 #include <linux/sched/rt.h>
15 #ifndef _TRACE_SCHED_DEF_
16 #define _TRACE_SCHED_DEF_
18 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
20 static inline long __trace_sched_switch_state(bool preempt
, struct task_struct
*p
)
22 #ifdef CONFIG_SCHED_DEBUG
24 #endif /* CONFIG_SCHED_DEBUG */
26 * Preemption ignores task state, therefore preempted tasks are always RUNNING
27 * (we will not have dequeued if state != RUNNING).
29 return preempt
? TASK_RUNNING
| TASK_STATE_MAX
: p
->state
;
32 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
34 static inline long __trace_sched_switch_state(struct task_struct
*p
)
36 long state
= p
->state
;
39 #ifdef CONFIG_SCHED_DEBUG
41 #endif /* CONFIG_SCHED_DEBUG */
43 * For all intents and purposes a preempted task is a running task.
45 if (preempt_count() & PREEMPT_ACTIVE
)
46 state
= TASK_RUNNING
| TASK_STATE_MAX
;
47 #endif /* CONFIG_PREEMPT */
52 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
54 static inline long __trace_sched_switch_state(struct task_struct
*p
)
56 long state
= p
->state
;
60 * For all intents and purposes a preempted task is a running task.
62 if (task_preempt_count(p
) & PREEMPT_ACTIVE
)
63 state
= TASK_RUNNING
| TASK_STATE_MAX
;
69 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
71 static inline long __trace_sched_switch_state(struct task_struct
*p
)
73 long state
= p
->state
;
77 * For all intents and purposes a preempted task is a running task.
79 if (task_thread_info(p
)->preempt_count
& PREEMPT_ACTIVE
)
80 state
= TASK_RUNNING
| TASK_STATE_MAX
;
86 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
88 static inline long __trace_sched_switch_state(struct task_struct
*p
)
90 long state
= p
->state
;
94 * For all intents and purposes a preempted task is a running task.
96 if (task_thread_info(p
)->preempt_count
& PREEMPT_ACTIVE
)
105 #endif /* _TRACE_SCHED_DEF_ */
108 * Tracepoint for calling kthread_stop, performed to end a kthread:
110 LTTNG_TRACEPOINT_EVENT(sched_kthread_stop
,
112 TP_PROTO(struct task_struct
*t
),
117 __array_text( char, comm
, TASK_COMM_LEN
)
118 __field( pid_t
, tid
)
122 tp_memcpy(comm
, t
->comm
, TASK_COMM_LEN
)
123 tp_assign(tid
, t
->pid
)
126 TP_printk("comm=%s tid=%d", __entry
->comm
, __entry
->tid
)
130 * Tracepoint for the return value of the kthread stopping:
132 LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret
,
146 TP_printk("ret=%d", __entry
->ret
)
150 * Tracepoint for waking up a task:
152 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
153 LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template
,
155 TP_PROTO(struct task_struct
*p
),
160 __array_text( char, comm
, TASK_COMM_LEN
)
161 __field( pid_t
, tid
)
163 __field( int, target_cpu
)
167 tp_memcpy(comm
, p
->comm
, TASK_COMM_LEN
)
168 tp_assign(tid
, p
->pid
)
169 tp_assign(prio
, p
->prio
)
170 tp_assign(target_cpu
, task_cpu(p
))
176 TP_printk("comm=%s tid=%d prio=%d target_cpu=%03d",
177 __entry
->comm
, __entry
->tid
, __entry
->prio
,
180 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
181 LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template
,
183 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
184 TP_PROTO(struct task_struct
*p
, int success
),
188 TP_PROTO(struct rq
*rq
, struct task_struct
*p
, int success
),
190 TP_ARGS(rq
, p
, success
),
194 __array_text( char, comm
, TASK_COMM_LEN
)
195 __field( pid_t
, tid
)
197 __field( int, success
)
198 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
199 __field( int, target_cpu
)
204 tp_memcpy(comm
, p
->comm
, TASK_COMM_LEN
)
205 tp_assign(tid
, p
->pid
)
206 tp_assign(prio
, p
->prio
)
207 tp_assign(success
, success
)
208 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
209 tp_assign(target_cpu
, task_cpu(p
))
211 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
218 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
219 TP_printk("comm=%s tid=%d prio=%d success=%d target_cpu=%03d",
220 __entry
->comm
, __entry
->tid
, __entry
->prio
,
221 __entry
->success
, __entry
->target_cpu
)
223 TP_printk("comm=%s tid=%d prio=%d success=%d",
224 __entry
->comm
, __entry
->tid
, __entry
->prio
,
228 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
230 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
233 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
234 * called from the waking context.
236 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_waking
,
237 TP_PROTO(struct task_struct
*p
),
241 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNNG.
242 * It it not always called from the waking context.
244 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup
,
245 TP_PROTO(struct task_struct
*p
),
249 * Tracepoint for waking up a new task:
251 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup_new
,
252 TP_PROTO(struct task_struct
*p
),
255 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
257 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup
,
258 TP_PROTO(struct task_struct
*p
, int success
),
262 * Tracepoint for waking up a new task:
264 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup_new
,
265 TP_PROTO(struct task_struct
*p
, int success
),
268 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
270 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup
,
271 TP_PROTO(struct rq
*rq
, struct task_struct
*p
, int success
),
272 TP_ARGS(rq
, p
, success
))
275 * Tracepoint for waking up a new task:
277 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup_new
,
278 TP_PROTO(struct rq
*rq
, struct task_struct
*p
, int success
),
279 TP_ARGS(rq
, p
, success
))
281 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
284 * Tracepoint for task switches, performed by the scheduler:
286 LTTNG_TRACEPOINT_EVENT(sched_switch
,
288 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
289 TP_PROTO(bool preempt
,
290 struct task_struct
*prev
,
291 struct task_struct
*next
),
293 TP_ARGS(preempt
, prev
, next
),
294 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
295 TP_PROTO(struct task_struct
*prev
,
296 struct task_struct
*next
),
299 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
300 TP_PROTO(struct rq
*rq
, struct task_struct
*prev
,
301 struct task_struct
*next
),
303 TP_ARGS(rq
, prev
, next
),
304 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
307 __array_text( char, prev_comm
, TASK_COMM_LEN
)
308 __field( pid_t
, prev_tid
)
309 __field( int, prev_prio
)
310 __field( long, prev_state
)
311 __array_text( char, next_comm
, TASK_COMM_LEN
)
312 __field( pid_t
, next_tid
)
313 __field( int, next_prio
)
317 tp_memcpy(next_comm
, next
->comm
, TASK_COMM_LEN
)
318 tp_assign(prev_tid
, prev
->pid
)
319 tp_assign(prev_prio
, prev
->prio
- MAX_RT_PRIO
)
320 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
321 tp_assign(prev_state
, __trace_sched_switch_state(preempt
, prev
))
322 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
323 tp_assign(prev_state
, __trace_sched_switch_state(prev
))
325 tp_assign(prev_state
, prev
->state
)
327 tp_memcpy(prev_comm
, prev
->comm
, TASK_COMM_LEN
)
328 tp_assign(next_tid
, next
->pid
)
329 tp_assign(next_prio
, next
->prio
- MAX_RT_PRIO
)
332 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
333 TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_tid=%d next_prio=%d",
334 __entry
->prev_comm
, __entry
->prev_tid
, __entry
->prev_prio
,
335 __entry
->prev_state
& (TASK_STATE_MAX
-1) ?
336 __print_flags(__entry
->prev_state
& (TASK_STATE_MAX
-1), "|",
337 { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
338 { 16, "Z" }, { 32, "X" }, { 64, "x" },
340 __entry
->prev_state
& TASK_STATE_MAX
? "+" : "",
341 __entry
->next_comm
, __entry
->next_tid
, __entry
->next_prio
)
343 TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_tid=%d next_prio=%d",
344 __entry
->prev_comm
, __entry
->prev_tid
, __entry
->prev_prio
,
345 __entry
->prev_state
?
346 __print_flags(__entry
->prev_state
, "|",
347 { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
348 { 16, "Z" }, { 32, "X" }, { 64, "x" },
350 __entry
->next_comm
, __entry
->next_tid
, __entry
->next_prio
)
355 * Tracepoint for a task being migrated:
357 LTTNG_TRACEPOINT_EVENT(sched_migrate_task
,
359 TP_PROTO(struct task_struct
*p
, int dest_cpu
),
361 TP_ARGS(p
, dest_cpu
),
364 __array_text( char, comm
, TASK_COMM_LEN
)
365 __field( pid_t
, tid
)
367 __field( int, orig_cpu
)
368 __field( int, dest_cpu
)
372 tp_memcpy(comm
, p
->comm
, TASK_COMM_LEN
)
373 tp_assign(tid
, p
->pid
)
374 tp_assign(prio
, p
->prio
- MAX_RT_PRIO
)
375 tp_assign(orig_cpu
, task_cpu(p
))
376 tp_assign(dest_cpu
, dest_cpu
)
379 TP_printk("comm=%s tid=%d prio=%d orig_cpu=%d dest_cpu=%d",
380 __entry
->comm
, __entry
->tid
, __entry
->prio
,
381 __entry
->orig_cpu
, __entry
->dest_cpu
)
384 LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template
,
386 TP_PROTO(struct task_struct
*p
),
391 __array_text( char, comm
, TASK_COMM_LEN
)
392 __field( pid_t
, tid
)
397 tp_memcpy(comm
, p
->comm
, TASK_COMM_LEN
)
398 tp_assign(tid
, p
->pid
)
399 tp_assign(prio
, p
->prio
- MAX_RT_PRIO
)
402 TP_printk("comm=%s tid=%d prio=%d",
403 __entry
->comm
, __entry
->tid
, __entry
->prio
)
407 * Tracepoint for freeing a task:
409 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_process_free
,
410 TP_PROTO(struct task_struct
*p
),
415 * Tracepoint for a task exiting:
417 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_process_exit
,
418 TP_PROTO(struct task_struct
*p
),
422 * Tracepoint for waiting on task to unschedule:
424 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
425 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_wait_task
,
426 TP_PROTO(struct task_struct
*p
),
428 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
429 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_wait_task
,
430 TP_PROTO(struct rq
*rq
, struct task_struct
*p
),
432 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
435 * Tracepoint for a waiting task:
437 LTTNG_TRACEPOINT_EVENT(sched_process_wait
,
439 TP_PROTO(struct pid
*pid
),
444 __array_text( char, comm
, TASK_COMM_LEN
)
445 __field( pid_t
, tid
)
450 tp_memcpy(comm
, current
->comm
, TASK_COMM_LEN
)
451 tp_assign(tid
, pid_nr(pid
))
452 tp_assign(prio
, current
->prio
- MAX_RT_PRIO
)
455 TP_printk("comm=%s tid=%d prio=%d",
456 __entry
->comm
, __entry
->tid
, __entry
->prio
)
460 * Tracepoint for do_fork.
461 * Saving both TID and PID information, especially for the child, allows
462 * trace analyzers to distinguish between creation of a new process and
463 * creation of a new thread. Newly created processes will have child_tid
464 * == child_pid, while creation of a thread yields to child_tid !=
467 LTTNG_TRACEPOINT_EVENT(sched_process_fork
,
469 TP_PROTO(struct task_struct
*parent
, struct task_struct
*child
),
471 TP_ARGS(parent
, child
),
474 __array_text( char, parent_comm
, TASK_COMM_LEN
)
475 __field( pid_t
, parent_tid
)
476 __field( pid_t
, parent_pid
)
477 __array_text( char, child_comm
, TASK_COMM_LEN
)
478 __field( pid_t
, child_tid
)
479 __field( pid_t
, child_pid
)
483 tp_memcpy(parent_comm
, parent
->comm
, TASK_COMM_LEN
)
484 tp_assign(parent_tid
, parent
->pid
)
485 tp_assign(parent_pid
, parent
->tgid
)
486 tp_memcpy(child_comm
, child
->comm
, TASK_COMM_LEN
)
487 tp_assign(child_tid
, child
->pid
)
488 tp_assign(child_pid
, child
->tgid
)
491 TP_printk("comm=%s tid=%d child_comm=%s child_tid=%d",
492 __entry
->parent_comm
, __entry
->parent_tid
,
493 __entry
->child_comm
, __entry
->child_tid
)
496 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
498 * Tracepoint for sending a signal:
500 LTTNG_TRACEPOINT_EVENT(sched_signal_send
,
502 TP_PROTO(int sig
, struct task_struct
*p
),
508 __array( char, comm
, TASK_COMM_LEN
)
509 __field( pid_t
, pid
)
513 tp_memcpy(comm
, p
->comm
, TASK_COMM_LEN
)
514 tp_assign(pid
, p
->pid
)
518 TP_printk("sig=%d comm=%s pid=%d",
519 __entry
->sig
, __entry
->comm
, __entry
->pid
)
523 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
525 * Tracepoint for exec:
527 LTTNG_TRACEPOINT_EVENT(sched_process_exec
,
529 TP_PROTO(struct task_struct
*p
, pid_t old_pid
,
530 struct linux_binprm
*bprm
),
532 TP_ARGS(p
, old_pid
, bprm
),
535 __string( filename
, bprm
->filename
)
536 __field( pid_t
, tid
)
537 __field( pid_t
, old_tid
)
541 tp_strcpy(filename
, bprm
->filename
)
542 tp_assign(tid
, p
->pid
)
543 tp_assign(old_tid
, old_pid
)
546 TP_printk("filename=%s tid=%d old_tid=%d", __get_str(filename
),
547 __entry
->tid
, __entry
->old_tid
)
551 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
553 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
554 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
556 LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template
,
558 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
563 __array_text( char, comm
, TASK_COMM_LEN
)
564 __field( pid_t
, tid
)
565 __field( u64
, delay
)
569 tp_memcpy(comm
, tsk
->comm
, TASK_COMM_LEN
)
570 tp_assign(tid
, tsk
->pid
)
571 tp_assign(delay
, delay
)
577 TP_printk("comm=%s tid=%d delay=%Lu [ns]",
578 __entry
->comm
, __entry
->tid
,
579 (unsigned long long)__entry
->delay
)
584 * Tracepoint for accounting wait time (time the task is runnable
585 * but not actually running due to scheduler contention).
587 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_wait
,
588 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
592 * Tracepoint for accounting sleep time (time the task is not runnable,
593 * including iowait, see below).
595 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_sleep
,
596 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
600 * Tracepoint for accounting iowait time (time the task is not runnable
601 * due to waiting on IO to complete).
603 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_iowait
,
604 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
607 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
609 * Tracepoint for accounting blocked time (time the task is in uninterruptible).
611 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_blocked
,
612 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
617 * Tracepoint for accounting runtime (time the task is executing
620 LTTNG_TRACEPOINT_EVENT(sched_stat_runtime
,
622 TP_PROTO(struct task_struct
*tsk
, u64 runtime
, u64 vruntime
),
624 TP_ARGS(tsk
, runtime
, vruntime
),
627 __array_text( char, comm
, TASK_COMM_LEN
)
628 __field( pid_t
, tid
)
629 __field( u64
, runtime
)
630 __field( u64
, vruntime
)
634 tp_memcpy(comm
, tsk
->comm
, TASK_COMM_LEN
)
635 tp_assign(tid
, tsk
->pid
)
636 tp_assign(runtime
, runtime
)
637 tp_assign(vruntime
, vruntime
)
640 __perf_count(runtime
)
641 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
646 TP_printk("comm=%s tid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
647 __entry
->comm
, __entry
->tid
,
648 (unsigned long long)__entry
->runtime
,
649 (unsigned long long)__entry
->vruntime
)
653 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
655 * Tracepoint for showing priority inheritance modifying a tasks
658 LTTNG_TRACEPOINT_EVENT(sched_pi_setprio
,
660 TP_PROTO(struct task_struct
*tsk
, int newprio
),
662 TP_ARGS(tsk
, newprio
),
665 __array_text( char, comm
, TASK_COMM_LEN
)
666 __field( pid_t
, tid
)
667 __field( int, oldprio
)
668 __field( int, newprio
)
672 tp_memcpy(comm
, tsk
->comm
, TASK_COMM_LEN
)
673 tp_assign(tid
, tsk
->pid
)
674 tp_assign(oldprio
, tsk
->prio
- MAX_RT_PRIO
)
675 tp_assign(newprio
, newprio
- MAX_RT_PRIO
)
678 TP_printk("comm=%s tid=%d oldprio=%d newprio=%d",
679 __entry
->comm
, __entry
->tid
,
680 __entry
->oldprio
, __entry
->newprio
)
684 #endif /* LTTNG_TRACE_SCHED_H */
686 /* This part must be outside protection */
687 #include "../../../probes/define_trace.h"