2 #define TRACE_SYSTEM sched
4 #if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define LTTNG_TRACE_SCHED_H
7 #include "../../../probes/lttng-tracepoint-event.h"
8 #include <linux/sched.h>
9 #include <linux/binfmts.h>
10 #include <linux/version.h>
11 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
12 #include <linux/sched/rt.h>
15 #ifndef _TRACE_SCHED_DEF_
16 #define _TRACE_SCHED_DEF_
18 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
20 static inline long __trace_sched_switch_state(struct task_struct
*p
)
22 long state
= p
->state
;
25 #ifdef CONFIG_SCHED_DEBUG
27 #endif /* CONFIG_SCHED_DEBUG */
29 * For all intents and purposes a preempted task is a running task.
31 if (preempt_count() & PREEMPT_ACTIVE
)
32 state
= TASK_RUNNING
| TASK_STATE_MAX
;
33 #endif /* CONFIG_PREEMPT */
38 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
40 static inline long __trace_sched_switch_state(struct task_struct
*p
)
42 long state
= p
->state
;
46 * For all intents and purposes a preempted task is a running task.
48 if (task_preempt_count(p
) & PREEMPT_ACTIVE
)
49 state
= TASK_RUNNING
| TASK_STATE_MAX
;
55 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
57 static inline long __trace_sched_switch_state(struct task_struct
*p
)
59 long state
= p
->state
;
63 * For all intents and purposes a preempted task is a running task.
65 if (task_thread_info(p
)->preempt_count
& PREEMPT_ACTIVE
)
66 state
= TASK_RUNNING
| TASK_STATE_MAX
;
72 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
74 static inline long __trace_sched_switch_state(struct task_struct
*p
)
76 long state
= p
->state
;
80 * For all intents and purposes a preempted task is a running task.
82 if (task_thread_info(p
)->preempt_count
& PREEMPT_ACTIVE
)
91 #endif /* _TRACE_SCHED_DEF_ */
94 * Tracepoint for calling kthread_stop, performed to end a kthread:
96 LTTNG_TRACEPOINT_EVENT(sched_kthread_stop
,
98 TP_PROTO(struct task_struct
*t
),
103 __array_text( char, comm
, TASK_COMM_LEN
)
104 __field( pid_t
, tid
)
108 tp_memcpy(comm
, t
->comm
, TASK_COMM_LEN
)
109 tp_assign(tid
, t
->pid
)
112 TP_printk("comm=%s tid=%d", __entry
->comm
, __entry
->tid
)
116 * Tracepoint for the return value of the kthread stopping:
118 LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret
,
132 TP_printk("ret=%d", __entry
->ret
)
136 * Tracepoint for waking up a task:
138 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
139 LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template
,
141 TP_PROTO(struct task_struct
*p
),
146 __array_text( char, comm
, TASK_COMM_LEN
)
147 __field( pid_t
, tid
)
149 __field( int, target_cpu
)
153 tp_memcpy(comm
, p
->comm
, TASK_COMM_LEN
)
154 tp_assign(tid
, p
->pid
)
155 tp_assign(prio
, p
->prio
)
156 tp_assign(target_cpu
, task_cpu(p
))
162 TP_printk("comm=%s tid=%d prio=%d target_cpu=%03d",
163 __entry
->comm
, __entry
->tid
, __entry
->prio
,
166 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
167 LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template
,
169 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
170 TP_PROTO(struct task_struct
*p
, int success
),
174 TP_PROTO(struct rq
*rq
, struct task_struct
*p
, int success
),
176 TP_ARGS(rq
, p
, success
),
180 __array_text( char, comm
, TASK_COMM_LEN
)
181 __field( pid_t
, tid
)
183 __field( int, success
)
184 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
185 __field( int, target_cpu
)
190 tp_memcpy(comm
, p
->comm
, TASK_COMM_LEN
)
191 tp_assign(tid
, p
->pid
)
192 tp_assign(prio
, p
->prio
)
193 tp_assign(success
, success
)
194 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
195 tp_assign(target_cpu
, task_cpu(p
))
197 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
204 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
205 TP_printk("comm=%s tid=%d prio=%d success=%d target_cpu=%03d",
206 __entry
->comm
, __entry
->tid
, __entry
->prio
,
207 __entry
->success
, __entry
->target_cpu
)
209 TP_printk("comm=%s tid=%d prio=%d success=%d",
210 __entry
->comm
, __entry
->tid
, __entry
->prio
,
214 #endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
216 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
219 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
220 * called from the waking context.
222 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_waking
,
223 TP_PROTO(struct task_struct
*p
),
227 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNNG.
228 * It it not always called from the waking context.
230 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup
,
231 TP_PROTO(struct task_struct
*p
),
235 * Tracepoint for waking up a new task:
237 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup_new
,
238 TP_PROTO(struct task_struct
*p
),
241 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
243 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup
,
244 TP_PROTO(struct task_struct
*p
, int success
),
248 * Tracepoint for waking up a new task:
250 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup_new
,
251 TP_PROTO(struct task_struct
*p
, int success
),
254 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
256 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup
,
257 TP_PROTO(struct rq
*rq
, struct task_struct
*p
, int success
),
258 TP_ARGS(rq
, p
, success
))
261 * Tracepoint for waking up a new task:
263 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template
, sched_wakeup_new
,
264 TP_PROTO(struct rq
*rq
, struct task_struct
*p
, int success
),
265 TP_ARGS(rq
, p
, success
))
267 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
270 * Tracepoint for task switches, performed by the scheduler:
272 LTTNG_TRACEPOINT_EVENT(sched_switch
,
274 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
275 TP_PROTO(struct task_struct
*prev
,
276 struct task_struct
*next
),
279 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
280 TP_PROTO(struct rq
*rq
, struct task_struct
*prev
,
281 struct task_struct
*next
),
283 TP_ARGS(rq
, prev
, next
),
284 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
287 __array_text( char, prev_comm
, TASK_COMM_LEN
)
288 __field( pid_t
, prev_tid
)
289 __field( int, prev_prio
)
290 __field( long, prev_state
)
291 __array_text( char, next_comm
, TASK_COMM_LEN
)
292 __field( pid_t
, next_tid
)
293 __field( int, next_prio
)
297 tp_memcpy(next_comm
, next
->comm
, TASK_COMM_LEN
)
298 tp_assign(prev_tid
, prev
->pid
)
299 tp_assign(prev_prio
, prev
->prio
- MAX_RT_PRIO
)
300 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
301 tp_assign(prev_state
, __trace_sched_switch_state(prev
))
303 tp_assign(prev_state
, prev
->state
)
305 tp_memcpy(prev_comm
, prev
->comm
, TASK_COMM_LEN
)
306 tp_assign(next_tid
, next
->pid
)
307 tp_assign(next_prio
, next
->prio
- MAX_RT_PRIO
)
310 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
311 TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_tid=%d next_prio=%d",
312 __entry
->prev_comm
, __entry
->prev_tid
, __entry
->prev_prio
,
313 __entry
->prev_state
& (TASK_STATE_MAX
-1) ?
314 __print_flags(__entry
->prev_state
& (TASK_STATE_MAX
-1), "|",
315 { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
316 { 16, "Z" }, { 32, "X" }, { 64, "x" },
318 __entry
->prev_state
& TASK_STATE_MAX
? "+" : "",
319 __entry
->next_comm
, __entry
->next_tid
, __entry
->next_prio
)
321 TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_tid=%d next_prio=%d",
322 __entry
->prev_comm
, __entry
->prev_tid
, __entry
->prev_prio
,
323 __entry
->prev_state
?
324 __print_flags(__entry
->prev_state
, "|",
325 { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
326 { 16, "Z" }, { 32, "X" }, { 64, "x" },
328 __entry
->next_comm
, __entry
->next_tid
, __entry
->next_prio
)
333 * Tracepoint for a task being migrated:
335 LTTNG_TRACEPOINT_EVENT(sched_migrate_task
,
337 TP_PROTO(struct task_struct
*p
, int dest_cpu
),
339 TP_ARGS(p
, dest_cpu
),
342 __array_text( char, comm
, TASK_COMM_LEN
)
343 __field( pid_t
, tid
)
345 __field( int, orig_cpu
)
346 __field( int, dest_cpu
)
350 tp_memcpy(comm
, p
->comm
, TASK_COMM_LEN
)
351 tp_assign(tid
, p
->pid
)
352 tp_assign(prio
, p
->prio
- MAX_RT_PRIO
)
353 tp_assign(orig_cpu
, task_cpu(p
))
354 tp_assign(dest_cpu
, dest_cpu
)
357 TP_printk("comm=%s tid=%d prio=%d orig_cpu=%d dest_cpu=%d",
358 __entry
->comm
, __entry
->tid
, __entry
->prio
,
359 __entry
->orig_cpu
, __entry
->dest_cpu
)
362 LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template
,
364 TP_PROTO(struct task_struct
*p
),
369 __array_text( char, comm
, TASK_COMM_LEN
)
370 __field( pid_t
, tid
)
375 tp_memcpy(comm
, p
->comm
, TASK_COMM_LEN
)
376 tp_assign(tid
, p
->pid
)
377 tp_assign(prio
, p
->prio
- MAX_RT_PRIO
)
380 TP_printk("comm=%s tid=%d prio=%d",
381 __entry
->comm
, __entry
->tid
, __entry
->prio
)
385 * Tracepoint for freeing a task:
387 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_process_free
,
388 TP_PROTO(struct task_struct
*p
),
393 * Tracepoint for a task exiting:
395 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_process_exit
,
396 TP_PROTO(struct task_struct
*p
),
400 * Tracepoint for waiting on task to unschedule:
402 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
403 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_wait_task
,
404 TP_PROTO(struct task_struct
*p
),
406 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
407 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template
, sched_wait_task
,
408 TP_PROTO(struct rq
*rq
, struct task_struct
*p
),
410 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
413 * Tracepoint for a waiting task:
415 LTTNG_TRACEPOINT_EVENT(sched_process_wait
,
417 TP_PROTO(struct pid
*pid
),
422 __array_text( char, comm
, TASK_COMM_LEN
)
423 __field( pid_t
, tid
)
428 tp_memcpy(comm
, current
->comm
, TASK_COMM_LEN
)
429 tp_assign(tid
, pid_nr(pid
))
430 tp_assign(prio
, current
->prio
- MAX_RT_PRIO
)
433 TP_printk("comm=%s tid=%d prio=%d",
434 __entry
->comm
, __entry
->tid
, __entry
->prio
)
438 * Tracepoint for do_fork.
439 * Saving both TID and PID information, especially for the child, allows
440 * trace analyzers to distinguish between creation of a new process and
441 * creation of a new thread. Newly created processes will have child_tid
442 * == child_pid, while creation of a thread yields to child_tid !=
445 LTTNG_TRACEPOINT_EVENT(sched_process_fork
,
447 TP_PROTO(struct task_struct
*parent
, struct task_struct
*child
),
449 TP_ARGS(parent
, child
),
452 __array_text( char, parent_comm
, TASK_COMM_LEN
)
453 __field( pid_t
, parent_tid
)
454 __field( pid_t
, parent_pid
)
455 __array_text( char, child_comm
, TASK_COMM_LEN
)
456 __field( pid_t
, child_tid
)
457 __field( pid_t
, child_pid
)
461 tp_memcpy(parent_comm
, parent
->comm
, TASK_COMM_LEN
)
462 tp_assign(parent_tid
, parent
->pid
)
463 tp_assign(parent_pid
, parent
->tgid
)
464 tp_memcpy(child_comm
, child
->comm
, TASK_COMM_LEN
)
465 tp_assign(child_tid
, child
->pid
)
466 tp_assign(child_pid
, child
->tgid
)
469 TP_printk("comm=%s tid=%d child_comm=%s child_tid=%d",
470 __entry
->parent_comm
, __entry
->parent_tid
,
471 __entry
->child_comm
, __entry
->child_tid
)
474 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
476 * Tracepoint for sending a signal:
478 LTTNG_TRACEPOINT_EVENT(sched_signal_send
,
480 TP_PROTO(int sig
, struct task_struct
*p
),
486 __array( char, comm
, TASK_COMM_LEN
)
487 __field( pid_t
, pid
)
491 tp_memcpy(comm
, p
->comm
, TASK_COMM_LEN
)
492 tp_assign(pid
, p
->pid
)
496 TP_printk("sig=%d comm=%s pid=%d",
497 __entry
->sig
, __entry
->comm
, __entry
->pid
)
501 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
503 * Tracepoint for exec:
505 LTTNG_TRACEPOINT_EVENT(sched_process_exec
,
507 TP_PROTO(struct task_struct
*p
, pid_t old_pid
,
508 struct linux_binprm
*bprm
),
510 TP_ARGS(p
, old_pid
, bprm
),
513 __string( filename
, bprm
->filename
)
514 __field( pid_t
, tid
)
515 __field( pid_t
, old_tid
)
519 tp_strcpy(filename
, bprm
->filename
)
520 tp_assign(tid
, p
->pid
)
521 tp_assign(old_tid
, old_pid
)
524 TP_printk("filename=%s tid=%d old_tid=%d", __get_str(filename
),
525 __entry
->tid
, __entry
->old_tid
)
529 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
531 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
532 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
534 LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template
,
536 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
541 __array_text( char, comm
, TASK_COMM_LEN
)
542 __field( pid_t
, tid
)
543 __field( u64
, delay
)
547 tp_memcpy(comm
, tsk
->comm
, TASK_COMM_LEN
)
548 tp_assign(tid
, tsk
->pid
)
549 tp_assign(delay
, delay
)
555 TP_printk("comm=%s tid=%d delay=%Lu [ns]",
556 __entry
->comm
, __entry
->tid
,
557 (unsigned long long)__entry
->delay
)
562 * Tracepoint for accounting wait time (time the task is runnable
563 * but not actually running due to scheduler contention).
565 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_wait
,
566 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
570 * Tracepoint for accounting sleep time (time the task is not runnable,
571 * including iowait, see below).
573 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_sleep
,
574 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
578 * Tracepoint for accounting iowait time (time the task is not runnable
579 * due to waiting on IO to complete).
581 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_iowait
,
582 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
585 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
587 * Tracepoint for accounting blocked time (time the task is in uninterruptible).
589 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template
, sched_stat_blocked
,
590 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
595 * Tracepoint for accounting runtime (time the task is executing
598 LTTNG_TRACEPOINT_EVENT(sched_stat_runtime
,
600 TP_PROTO(struct task_struct
*tsk
, u64 runtime
, u64 vruntime
),
602 TP_ARGS(tsk
, runtime
, vruntime
),
605 __array_text( char, comm
, TASK_COMM_LEN
)
606 __field( pid_t
, tid
)
607 __field( u64
, runtime
)
608 __field( u64
, vruntime
)
612 tp_memcpy(comm
, tsk
->comm
, TASK_COMM_LEN
)
613 tp_assign(tid
, tsk
->pid
)
614 tp_assign(runtime
, runtime
)
615 tp_assign(vruntime
, vruntime
)
618 __perf_count(runtime
)
619 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
624 TP_printk("comm=%s tid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
625 __entry
->comm
, __entry
->tid
,
626 (unsigned long long)__entry
->runtime
,
627 (unsigned long long)__entry
->vruntime
)
631 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
633 * Tracepoint for showing priority inheritance modifying a tasks
636 LTTNG_TRACEPOINT_EVENT(sched_pi_setprio
,
638 TP_PROTO(struct task_struct
*tsk
, int newprio
),
640 TP_ARGS(tsk
, newprio
),
643 __array_text( char, comm
, TASK_COMM_LEN
)
644 __field( pid_t
, tid
)
645 __field( int, oldprio
)
646 __field( int, newprio
)
650 tp_memcpy(comm
, tsk
->comm
, TASK_COMM_LEN
)
651 tp_assign(tid
, tsk
->pid
)
652 tp_assign(oldprio
, tsk
->prio
- MAX_RT_PRIO
)
653 tp_assign(newprio
, newprio
- MAX_RT_PRIO
)
656 TP_printk("comm=%s tid=%d oldprio=%d newprio=%d",
657 __entry
->comm
, __entry
->tid
,
658 __entry
->oldprio
, __entry
->newprio
)
662 #endif /* LTTNG_TRACE_SCHED_H */
664 /* This part must be outside protection */
665 #include "../../../probes/define_trace.h"