/* Extraction artifact: source snapshot taken at git commit 23e4955ac7b4bfb7a9c86b7e9f2eb5087708655d. */
2 #define TRACE_SYSTEM sched
4 #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
7 #include <linux/sched.h>
8 #include <linux/tracepoint.h>
10 #ifndef _TRACE_SCHED_DEF_
11 #define _TRACE_SCHED_DEF_
/*
 * __trace_sched_switch_state - derive the task state value recorded in the
 * sched_switch event's prev_state field.
 * Reads p->state; the visible PREEMPT_ACTIVE test folds a preempted task
 * into the "running" state for tracing purposes.
 * NOTE(review): this extraction is incomplete -- the function braces, the
 * CONFIG_PREEMPT conditional wrapper and the return statement are missing
 * from this view; confirm against the upstream instrumentation header.
 */
13 static inline long __trace_sched_switch_state(struct task_struct
*p
)
15 long state
= p
->state
;
19 * For all intents and purposes a preempted task is a running task.
21 if (task_thread_info(p
)->preempt_count
& PREEMPT_ACTIVE
)
28 #endif /* _TRACE_SCHED_DEF_ */
/*
 * sched_kthread_stop - tracepoint fired when kthread_stop() is called to
 * end a kthread. Records the target task's comm (copied, TASK_COMM_LEN
 * bytes) and its pid as "tid".
 * NOTE(review): extraction is incomplete -- TP_ARGS, TP_STRUCT__entry,
 * the tid __field declaration and TP_fast_assign wrappers are missing
 * from this view.
 */
31 * Tracepoint for calling kthread_stop, performed to end a kthread:
33 TRACE_EVENT(sched_kthread_stop
,
35 TP_PROTO(struct task_struct
*t
),
40 __array_text( char, comm
, TASK_COMM_LEN
)
45 tp_memcpy(comm
, t
->comm
, TASK_COMM_LEN
)
46 tp_assign(tid
, t
->pid
)
49 TP_printk("comm=%s tid=%d", __entry
->comm
, __entry
->tid
)
/*
 * sched_kthread_stop_ret - tracepoint for the return value of a kthread
 * stop. Only the TP_printk of the "ret" field is visible here; the
 * prototype, args and field declarations are missing from this extraction.
 */
53 * Tracepoint for the return value of the kthread stopping:
55 TRACE_EVENT(sched_kthread_stop_ret
,
69 TP_printk("ret=%d", __entry
->ret
)
/*
 * sched_wakeup_template - event class shared by sched_wakeup and
 * sched_wakeup_new. Records comm, tid, prio, the wakeup success flag and
 * the CPU the task is queued on (task_cpu(p)).
 * Kernel-version split: >= 2.6.35 takes (p, success); older kernels also
 * take the runqueue pointer as the first argument.
 * NOTE(review): unlike the sibling events in this file, prio is assigned
 * raw here (no "- MAX_RT_PRIO" normalization) -- verify whether that is
 * intentional or an inconsistency in the original source.
 * NOTE(review): extraction is incomplete -- the #else for the pre-2.6.35
 * TP_ARGS, TP_STRUCT__entry/TP_fast_assign wrappers and the tid/prio
 * field declarations are missing from this view.
 */
73 * Tracepoint for waking up a task:
75 DECLARE_EVENT_CLASS(sched_wakeup_template
,
77 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
78 TP_PROTO(struct task_struct
*p
, int success
),
82 TP_PROTO(struct rq
*rq
, struct task_struct
*p
, int success
),
84 TP_ARGS(rq
, p
, success
),
88 __array_text( char, comm
, TASK_COMM_LEN
)
91 __field( int, success
)
92 __field( int, target_cpu
)
96 tp_memcpy(comm
, p
->comm
, TASK_COMM_LEN
)
97 tp_assign(tid
, p
->pid
)
98 tp_assign(prio
, p
->prio
)
99 tp_assign(success
, success
)
100 tp_assign(target_cpu
, task_cpu(p
))
103 TP_printk("comm=%s tid=%d prio=%d success=%d target_cpu=%03d",
104 __entry
->comm
, __entry
->tid
, __entry
->prio
,
105 __entry
->success
, __entry
->target_cpu
)
/*
 * sched_wakeup / sched_wakeup_new - instances of sched_wakeup_template
 * for kernels >= 2.6.35, where the prototype is (task, success) without
 * the runqueue argument.
 * NOTE(review): the TP_ARGS lines and closing parens of these
 * DEFINE_EVENTs are missing from this extraction.
 */
108 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
110 DEFINE_EVENT(sched_wakeup_template
, sched_wakeup
,
111 TP_PROTO(struct task_struct
*p
, int success
),
115 * Tracepoint for waking up a new task:
117 DEFINE_EVENT(sched_wakeup_template
, sched_wakeup_new
,
118 TP_PROTO(struct task_struct
*p
, int success
),
/*
 * sched_wakeup / sched_wakeup_new - pre-2.6.35 instances of
 * sched_wakeup_template; the prototype additionally takes the runqueue
 * pointer (rq, p, success), matching the template's #else branch above.
 */
121 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
123 DEFINE_EVENT(sched_wakeup_template
, sched_wakeup
,
124 TP_PROTO(struct rq
*rq
, struct task_struct
*p
, int success
),
125 TP_ARGS(rq
, p
, success
))
128 * Tracepoint for waking up a new task:
130 DEFINE_EVENT(sched_wakeup_template
, sched_wakeup_new
,
131 TP_PROTO(struct rq
*rq
, struct task_struct
*p
, int success
),
132 TP_ARGS(rq
, p
, success
))
134 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
/*
 * sched_switch - tracepoint for a context switch. Records, for both the
 * outgoing (prev) and incoming (next) tasks: comm (copied), pid as tid,
 * and prio normalized by subtracting MAX_RT_PRIO. prev_state comes from
 * __trace_sched_switch_state(prev) and is pretty-printed via
 * __print_flags with the S/D/T/t/Z/X/x state letters.
 * Kernel-version split: >= 2.6.35 takes (prev, next); older kernels also
 * take the runqueue pointer.
 * NOTE(review): extraction is incomplete -- the >= 2.6.35 TP_ARGS, the
 * TP_STRUCT__entry/TP_fast_assign wrappers and parts of the ternary in
 * TP_printk (the "R" fallback arm) are missing from this view.
 */
137 * Tracepoint for task switches, performed by the scheduler:
139 TRACE_EVENT(sched_switch
,
141 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
142 TP_PROTO(struct task_struct
*prev
,
143 struct task_struct
*next
),
146 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
147 TP_PROTO(struct rq
*rq
, struct task_struct
*prev
,
148 struct task_struct
*next
),
150 TP_ARGS(rq
, prev
, next
),
151 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
154 __array_text( char, prev_comm
, TASK_COMM_LEN
)
155 __field( pid_t
, prev_tid
)
156 __field( int, prev_prio
)
157 __field( long, prev_state
)
158 __array_text( char, next_comm
, TASK_COMM_LEN
)
159 __field( pid_t
, next_tid
)
160 __field( int, next_prio
)
164 tp_memcpy(next_comm
, next
->comm
, TASK_COMM_LEN
)
165 tp_assign(prev_tid
, prev
->pid
)
166 tp_assign(prev_prio
, prev
->prio
- MAX_RT_PRIO
)
167 tp_assign(prev_state
, __trace_sched_switch_state(prev
))
168 tp_memcpy(prev_comm
, prev
->comm
, TASK_COMM_LEN
)
169 tp_assign(next_tid
, next
->pid
)
170 tp_assign(next_prio
, next
->prio
- MAX_RT_PRIO
)
173 TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_tid=%d next_prio=%d",
174 __entry
->prev_comm
, __entry
->prev_tid
, __entry
->prev_prio
,
175 __entry
->prev_state
?
176 __print_flags(__entry
->prev_state
, "|",
177 { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
178 { 16, "Z" }, { 32, "X" }, { 64, "x" },
180 __entry
->next_comm
, __entry
->next_tid
, __entry
->next_prio
)
/*
 * sched_migrate_task - tracepoint for a task migrating between CPUs.
 * Records comm, tid, normalized prio (p->prio - MAX_RT_PRIO), the CPU the
 * task is currently on (task_cpu(p)) as orig_cpu, and dest_cpu.
 * NOTE(review): extraction is incomplete -- the prio __field declaration
 * and the TP_STRUCT__entry/TP_fast_assign wrappers are missing from this
 * view.
 */
184 * Tracepoint for a task being migrated:
186 TRACE_EVENT(sched_migrate_task
,
188 TP_PROTO(struct task_struct
*p
, int dest_cpu
),
190 TP_ARGS(p
, dest_cpu
),
193 __array_text( char, comm
, TASK_COMM_LEN
)
194 __field( pid_t
, tid
)
196 __field( int, orig_cpu
)
197 __field( int, dest_cpu
)
201 tp_memcpy(comm
, p
->comm
, TASK_COMM_LEN
)
202 tp_assign(tid
, p
->pid
)
203 tp_assign(prio
, p
->prio
- MAX_RT_PRIO
)
204 tp_assign(orig_cpu
, task_cpu(p
))
205 tp_assign(dest_cpu
, dest_cpu
)
208 TP_printk("comm=%s tid=%d prio=%d orig_cpu=%d dest_cpu=%d",
209 __entry
->comm
, __entry
->tid
, __entry
->prio
,
210 __entry
->orig_cpu
, __entry
->dest_cpu
)
/*
 * sched_process_template - event class shared by the process-lifecycle
 * events below (free / exit / wait_task). Records comm, tid and
 * normalized prio (p->prio - MAX_RT_PRIO) of the given task.
 * NOTE(review): extraction is incomplete -- TP_ARGS, the prio __field
 * declaration and the TP_STRUCT__entry/TP_fast_assign wrappers are
 * missing from this view.
 */
213 DECLARE_EVENT_CLASS(sched_process_template
,
215 TP_PROTO(struct task_struct
*p
),
220 __array_text( char, comm
, TASK_COMM_LEN
)
221 __field( pid_t
, tid
)
226 tp_memcpy(comm
, p
->comm
, TASK_COMM_LEN
)
227 tp_assign(tid
, p
->pid
)
228 tp_assign(prio
, p
->prio
- MAX_RT_PRIO
)
231 TP_printk("comm=%s tid=%d prio=%d",
232 __entry
->comm
, __entry
->tid
, __entry
->prio
)
/*
 * Instances of sched_process_template:
 *  - sched_process_free: task struct being freed.
 *  - sched_process_exit: task exiting.
 *  - sched_wait_task: waiting for a task to unschedule; pre-2.6.35
 *    kernels additionally pass the runqueue pointer.
 * NOTE(review): the TP_ARGS lines and closing parens of these
 * DEFINE_EVENTs are missing from this extraction.
 */
236 * Tracepoint for freeing a task:
238 DEFINE_EVENT(sched_process_template
, sched_process_free
,
239 TP_PROTO(struct task_struct
*p
),
244 * Tracepoint for a task exiting:
246 DEFINE_EVENT(sched_process_template
, sched_process_exit
,
247 TP_PROTO(struct task_struct
*p
),
251 * Tracepoint for waiting on task to unschedule:
253 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
254 DEFINE_EVENT(sched_process_template
, sched_wait_task
,
255 TP_PROTO(struct task_struct
*p
),
257 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
258 DEFINE_EVENT(sched_process_template
, sched_wait_task
,
259 TP_PROTO(struct rq
*rq
, struct task_struct
*p
),
261 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
/*
 * sched_process_wait - tracepoint for a task waiting on a pid. Note the
 * asymmetry: comm and prio come from "current" (the waiter), while tid is
 * the numeric value of the waited-on pid (pid_nr(pid)).
 * NOTE(review): extraction is incomplete -- TP_ARGS, the prio __field
 * declaration and the TP_STRUCT__entry/TP_fast_assign wrappers are
 * missing from this view.
 */
264 * Tracepoint for a waiting task:
266 TRACE_EVENT(sched_process_wait
,
268 TP_PROTO(struct pid
*pid
),
273 __array_text( char, comm
, TASK_COMM_LEN
)
274 __field( pid_t
, tid
)
279 tp_memcpy(comm
, current
->comm
, TASK_COMM_LEN
)
280 tp_assign(tid
, pid_nr(pid
))
281 tp_assign(prio
, current
->prio
- MAX_RT_PRIO
)
284 TP_printk("comm=%s tid=%d prio=%d",
285 __entry
->comm
, __entry
->tid
, __entry
->prio
)
/*
 * sched_process_fork - tracepoint fired from do_fork. Records comm and
 * pid (as *_tid) for both the parent and the newly created child.
 * NOTE(review): the TP_STRUCT__entry/TP_fast_assign wrappers are missing
 * from this extraction.
 */
289 * Tracepoint for do_fork:
291 TRACE_EVENT(sched_process_fork
,
293 TP_PROTO(struct task_struct
*parent
, struct task_struct
*child
),
295 TP_ARGS(parent
, child
),
298 __array_text( char, parent_comm
, TASK_COMM_LEN
)
299 __field( pid_t
, parent_tid
)
300 __array_text( char, child_comm
, TASK_COMM_LEN
)
301 __field( pid_t
, child_tid
)
305 tp_memcpy(parent_comm
, parent
->comm
, TASK_COMM_LEN
)
306 tp_assign(parent_tid
, parent
->pid
)
307 tp_memcpy(child_comm
, child
->comm
, TASK_COMM_LEN
)
308 tp_assign(child_tid
, child
->pid
)
311 TP_printk("comm=%s tid=%d child_comm=%s child_tid=%d",
312 __entry
->parent_comm
, __entry
->parent_tid
,
313 __entry
->child_comm
, __entry
->child_tid
)
/*
 * sched_process_exec - tracepoint fired on exec. Records the binary's
 * filename (variable-length string copied from bprm->filename), the
 * task's pid after exec, and old_pid (the pid before exec, supplied by
 * the caller).
 * NOTE(review): the TP_STRUCT__entry/TP_fast_assign wrappers are missing
 * from this extraction.
 */
317 * Tracepoint for exec:
319 TRACE_EVENT(sched_process_exec
,
321 TP_PROTO(struct task_struct
*p
, pid_t old_pid
,
322 struct linux_binprm
*bprm
),
324 TP_ARGS(p
, old_pid
, bprm
),
327 __string( filename
, bprm
->filename
)
328 __field( pid_t
, pid
)
329 __field( pid_t
, old_pid
)
333 tp_strcpy(filename
, bprm
->filename
)
334 tp_assign(pid
, p
->pid
)
335 tp_assign(old_pid
, old_pid
)
338 TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename
),
339 __entry
->pid
, __entry
->old_pid
)
/*
 * sched_stat_template - event class shared by the sched_stat_* delay
 * events below. Records the task's comm, tid, and a u64 delay printed in
 * nanoseconds ("delay=%Lu [ns]").
 * NOTE(review): extraction is incomplete -- TP_ARGS and the
 * TP_STRUCT__entry/TP_fast_assign wrappers are missing from this view.
 */
343 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
344 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
346 DECLARE_EVENT_CLASS(sched_stat_template
,
348 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
353 __array_text( char, comm
, TASK_COMM_LEN
)
354 __field( pid_t
, tid
)
355 __field( u64
, delay
)
359 tp_memcpy(comm
, tsk
->comm
, TASK_COMM_LEN
)
360 tp_assign(tid
, tsk
->pid
)
361 tp_assign(delay
, delay
)
367 TP_printk("comm=%s tid=%d delay=%Lu [ns]",
368 __entry
->comm
, __entry
->tid
,
369 (unsigned long long)__entry
->delay
)
/*
 * Instances of sched_stat_template, each with prototype (tsk, delay):
 *  - sched_stat_wait: time runnable but not running (scheduler
 *    contention).
 *  - sched_stat_sleep: time not runnable, including iowait.
 *  - sched_stat_iowait: time not runnable due to waiting on IO.
 * NOTE(review): the TP_ARGS lines and closing parens of these
 * DEFINE_EVENTs are missing from this extraction.
 */
374 * Tracepoint for accounting wait time (time the task is runnable
375 * but not actually running due to scheduler contention).
377 DEFINE_EVENT(sched_stat_template
, sched_stat_wait
,
378 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
382 * Tracepoint for accounting sleep time (time the task is not runnable,
383 * including iowait, see below).
385 DEFINE_EVENT(sched_stat_template
, sched_stat_sleep
,
386 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
390 * Tracepoint for accounting iowait time (time the task is not runnable
391 * due to waiting on IO to complete).
393 DEFINE_EVENT(sched_stat_template
, sched_stat_iowait
,
394 TP_PROTO(struct task_struct
*tsk
, u64 delay
),
/*
 * sched_stat_runtime - tracepoint for accounting execution time. Records
 * comm, tid, and the u64 runtime and vruntime values, both printed in
 * nanoseconds; __perf_count(runtime) feeds the runtime value to the perf
 * counting machinery.
 * NOTE(review): extraction is incomplete -- the TP_STRUCT__entry and
 * TP_fast_assign wrappers are missing from this view.
 */
398 * Tracepoint for accounting runtime (time the task is executing
401 TRACE_EVENT(sched_stat_runtime
,
403 TP_PROTO(struct task_struct
*tsk
, u64 runtime
, u64 vruntime
),
405 TP_ARGS(tsk
, runtime
, vruntime
),
408 __array_text( char, comm
, TASK_COMM_LEN
)
409 __field( pid_t
, tid
)
410 __field( u64
, runtime
)
411 __field( u64
, vruntime
)
415 tp_memcpy(comm
, tsk
->comm
, TASK_COMM_LEN
)
416 tp_assign(tid
, tsk
->pid
)
417 tp_assign(runtime
, runtime
)
418 tp_assign(vruntime
, vruntime
)
421 __perf_count(runtime
)
424 TP_printk("comm=%s tid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
425 __entry
->comm
, __entry
->tid
,
426 (unsigned long long)__entry
->runtime
,
427 (unsigned long long)__entry
->vruntime
)
/*
 * sched_pi_setprio - tracepoint fired when priority inheritance changes a
 * task's priority. Records comm, tid, the task's current prio as oldprio
 * and the incoming newprio, both normalized by subtracting MAX_RT_PRIO.
 * NOTE(review): the TP_STRUCT__entry/TP_fast_assign wrappers are missing
 * from this extraction.
 */
431 * Tracepoint for showing priority inheritance modifying a tasks
434 TRACE_EVENT(sched_pi_setprio
,
436 TP_PROTO(struct task_struct
*tsk
, int newprio
),
438 TP_ARGS(tsk
, newprio
),
441 __array_text( char, comm
, TASK_COMM_LEN
)
442 __field( pid_t
, tid
)
443 __field( int, oldprio
)
444 __field( int, newprio
)
448 tp_memcpy(comm
, tsk
->comm
, TASK_COMM_LEN
)
449 tp_assign(tid
, tsk
->pid
)
450 tp_assign(oldprio
, tsk
->prio
- MAX_RT_PRIO
)
451 tp_assign(newprio
, newprio
- MAX_RT_PRIO
)
454 TP_printk("comm=%s tid=%d oldprio=%d newprio=%d",
455 __entry
->comm
, __entry
->tid
,
456 __entry
->oldprio
, __entry
->newprio
)
459 #endif /* _TRACE_SCHED_H */
461 /* This part must be outside protection */
462 #include "../../../probes/define_trace.h"
/* Extraction artifact (gitweb footer): "This page took 0.042705 seconds and 4 git commands to generate." */