#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>

#ifndef _TRACE_SCHED_DEF_
#define _TRACE_SCHED_DEF_

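/*
 * Helper used by the sched_switch event below: report the state of the
 * task being switched out. As noted in the body, a task that was merely
 * preempted while on the CPU is recorded as TASK_RUNNING rather than as
 * blocked.
 */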
static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING;
#endif

	return state;
}

#endif /* _TRACE_SCHED_DEF_ */

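/*
 * Annotation (not part of the original instrumentation): the events below
 * use the LTTng-modules macro layer. __array_text() and __field() declare
 * the event payload, and tp_memcpy()/tp_assign() fill it in, playing the
 * role of the __entry->field assignments found in the equivalent mainline
 * TRACE_EVENT() definitions. The TP_printk() format strings appear to be
 * kept for reference only, since LTTng records binary events rather than
 * formatted text.
 */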
/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
	),

	TP_fast_assign(
		tp_memcpy(comm, t->comm, TASK_COMM_LEN)
		tp_assign(pid, t->pid)
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
)

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field( int, ret )
	),

	TP_fast_assign(
		tp_assign(ret, ret)
	),

	TP_printk("ret=%d", __entry->ret)
)

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p, int success),

	TP_ARGS(p, success),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
		__field( int, success )
		__field( int, target_cpu )
	),

	TP_fast_assign(
		tp_memcpy(comm, p->comm, TASK_COMM_LEN)
		tp_assign(pid, p->pid)
		tp_assign(prio, p->prio)
		tp_assign(success, success)
		tp_assign(target_cpu, task_cpu(p))
	),

	TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->success, __entry->target_cpu)
)

DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p, int success),
	     TP_ARGS(p, success))

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p, int success),
	     TP_ARGS(p, success))

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
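/*
 * Annotation (not part of the original file): prev_state is printed as a
 * set of single-letter flags. Assuming the task state bit values of kernels
 * contemporary with this header, they map roughly to:
 *   1 "S" TASK_INTERRUPTIBLE, 2 "D" TASK_UNINTERRUPTIBLE, 4 "T" stopped,
 *   8 "t" traced, 16 "Z" EXIT_ZOMBIE, 32 "X" EXIT_DEAD, 64 "x" TASK_DEAD,
 *   128 "W" TASK_WAKEKILL; a zero state prints as "R" (running).
 */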
TRACE_EVENT(sched_switch,

	TP_PROTO(struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(prev, next),

	TP_STRUCT__entry(
		__array_text( char, prev_comm, TASK_COMM_LEN )
		__field( pid_t, prev_pid )
		__field( int, prev_prio )
		__field( long, prev_state )
		__array_text( char, next_comm, TASK_COMM_LEN )
		__field( pid_t, next_pid )
		__field( int, next_prio )
	),

	TP_fast_assign(
		tp_memcpy(next_comm, next->comm, TASK_COMM_LEN)
		tp_assign(prev_pid, prev->pid)
		tp_assign(prev_prio, prev->prio)
		tp_assign(prev_state, __trace_sched_switch_state(prev))
		tp_memcpy(prev_comm, prev->comm, TASK_COMM_LEN)
		tp_assign(next_pid, next->pid)
		tp_assign(next_prio, next->prio)
	),

	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
		__entry->prev_state ?
		  __print_flags(__entry->prev_state, "|",
				{ 1, "S" }, { 2, "D" }, { 4, "T" }, { 8, "t" },
				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
				{ 128, "W" }) : "R",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
)

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
		__field( int, orig_cpu )
		__field( int, dest_cpu )
	),

	TP_fast_assign(
		tp_memcpy(comm, p->comm, TASK_COMM_LEN)
		tp_assign(pid, p->pid)
		tp_assign(prio, p->prio)
		tp_assign(orig_cpu, task_cpu(p))
		tp_assign(dest_cpu, dest_cpu)
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
)

DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
	),

	TP_fast_assign(
		tp_memcpy(comm, p->comm, TASK_COMM_LEN)
		tp_assign(pid, p->pid)
		tp_assign(prio, p->prio)
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
)

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p))


/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p))

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p))

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, prio )
	),

	TP_fast_assign(
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
		tp_assign(pid, pid_nr(pid))
		tp_assign(prio, current->prio)
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
)

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array_text( char, parent_comm, TASK_COMM_LEN )
		__field( pid_t, parent_pid )
		__array_text( char, child_comm, TASK_COMM_LEN )
		__field( pid_t, child_pid )
	),

	TP_fast_assign(
		tp_memcpy(parent_comm, parent->comm, TASK_COMM_LEN)
		tp_assign(parent_pid, parent->pid)
		tp_memcpy(child_comm, child->comm, TASK_COMM_LEN)
		tp_assign(child_pid, child->pid)
	),

	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		__entry->parent_comm, __entry->parent_pid,
		__entry->child_comm, __entry->child_pid)
)

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( u64, delay )
	),

	TP_fast_assign(
		tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
		tp_assign(pid, tsk->pid)
		tp_assign(delay, delay)
	)
	TP_perf_assign(
		__perf_count(delay)
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->delay)
)
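
/*
 * Annotation (not part of the original file): the TP_perf_assign() and
 * __perf_count() constructs used above, and again in sched_stat_runtime
 * below, come from the upstream TRACE_EVENT() infrastructure, where they
 * make perf account the delay/runtime value instead of counting one unit
 * per event; they are carried over here from the mainline definitions.
 */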

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
TRACE_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( u64, runtime )
		__field( u64, vruntime )
	),

	TP_fast_assign(
		tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
		tp_assign(pid, tsk->pid)
		tp_assign(runtime, runtime)
		tp_assign(vruntime, vruntime)
	)
	TP_perf_assign(
		__perf_count(runtime)
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
			__entry->comm, __entry->pid,
			(unsigned long long)__entry->runtime,
			(unsigned long long)__entry->vruntime)
)

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
		__field( int, oldprio )
		__field( int, newprio )
	),

	TP_fast_assign(
		tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
		tp_assign(pid, tsk->pid)
		tp_assign(oldprio, tsk->prio)
		tp_assign(newprio, newprio)
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
		  __entry->comm, __entry->pid,
		  __entry->oldprio, __entry->newprio)
)

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include "define_trace.h"