Support for Linux kernels 2.6.32 through 2.6.37
lttng-modules.git: instrumentation/events/lttng-module/sched.h
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>

#ifndef _TRACE_SCHED_DEF_
#define _TRACE_SCHED_DEF_

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING;
#endif

	return state;
}

#endif /* _TRACE_SCHED_DEF_ */

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, tid )
	),

	TP_fast_assign(
		tp_memcpy(comm, t->comm, TASK_COMM_LEN)
		tp_assign(tid, t->pid)
	),

	TP_printk("comm=%s tid=%d", __entry->comm, __entry->tid)
)

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field( int, ret )
	),

	TP_fast_assign(
		tp_assign(ret, ret)
	),

	TP_printk("ret=%d", __entry->ret)
)

/*
 * Tracepoint for waking up a task:
 */
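/*
 * Kernels older than 2.6.35 passed the runqueue to the scheduler
 * tracepoints; 2.6.35 dropped the struct rq * argument, hence the
 * version checks on the prototypes below.
 */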
DECLARE_EVENT_CLASS(sched_wakeup_template,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	TP_PROTO(struct task_struct *p, int success),

	TP_ARGS(p, success),
#else
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),

	TP_ARGS(rq, p, success),
#endif

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, tid )
		__field( int, prio )
		__field( int, success )
		__field( int, target_cpu )
	),

	TP_fast_assign(
		tp_memcpy(comm, p->comm, TASK_COMM_LEN)
		tp_assign(tid, p->pid)
		tp_assign(prio, p->prio - MAX_RT_PRIO)
		tp_assign(success, success)
		tp_assign(target_cpu, task_cpu(p))
	),

	TP_printk("comm=%s tid=%d prio=%d success=%d target_cpu=%03d",
		  __entry->comm, __entry->tid, __entry->prio,
		  __entry->success, __entry->target_cpu)
)

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))

DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p, int success),
	     TP_ARGS(p, success))

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p, int success),
	     TP_ARGS(p, success))

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	     TP_ARGS(rq, p, success))

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	     TP_ARGS(rq, p, success))

#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	TP_PROTO(struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(prev, next),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
	TP_PROTO(struct rq *rq, struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(rq, prev, next),
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

	TP_STRUCT__entry(
		__array_text( char, prev_comm, TASK_COMM_LEN )
		__field( pid_t, prev_tid )
		__field( int, prev_prio )
		__field( long, prev_state )
		__array_text( char, next_comm, TASK_COMM_LEN )
		__field( pid_t, next_tid )
		__field( int, next_prio )
	),

	TP_fast_assign(
		tp_memcpy(next_comm, next->comm, TASK_COMM_LEN)
		tp_assign(prev_tid, prev->pid)
		tp_assign(prev_prio, prev->prio - MAX_RT_PRIO)
		tp_assign(prev_state, __trace_sched_switch_state(prev))
		tp_memcpy(prev_comm, prev->comm, TASK_COMM_LEN)
		tp_assign(next_tid, next->pid)
		tp_assign(next_prio, next->prio - MAX_RT_PRIO)
	),

	TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_tid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_tid, __entry->prev_prio,
		__entry->prev_state ?
		  __print_flags(__entry->prev_state, "|",
				{ 1, "S" }, { 2, "D" }, { 4, "T" }, { 8, "t" },
				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
				{ 128, "W" }) : "R",
		__entry->next_comm, __entry->next_tid, __entry->next_prio)
)
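
/*
 * prev_state decodes to the usual ps(1)-style letters: S (interruptible
 * sleep), D (uninterruptible sleep), T (stopped), t (traced), Z (zombie),
 * X (dead) and x (TASK_DEAD); on these kernels the 128 bit is
 * TASK_WAKEKILL, shown as W. "R" is printed when prev_state is 0
 * (TASK_RUNNING). Priorities are rebased by MAX_RT_PRIO, so a nice-0
 * task reports prio=20.
 */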

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, tid )
		__field( int, prio )
		__field( int, orig_cpu )
		__field( int, dest_cpu )
	),

	TP_fast_assign(
		tp_memcpy(comm, p->comm, TASK_COMM_LEN)
		tp_assign(tid, p->pid)
		tp_assign(prio, p->prio - MAX_RT_PRIO)
		tp_assign(orig_cpu, task_cpu(p))
		tp_assign(dest_cpu, dest_cpu)
	),

	TP_printk("comm=%s tid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->tid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
)
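
/*
 * Note that orig_cpu is not passed in by the caller: it is read with
 * task_cpu(p) when the probe fires, so it reflects the CPU the task
 * was on at that instant.
 */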

DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, tid )
		__field( int, prio )
	),

	TP_fast_assign(
		tp_memcpy(comm, p->comm, TASK_COMM_LEN)
		tp_assign(tid, p->pid)
		tp_assign(prio, p->prio - MAX_RT_PRIO)
	),

	TP_printk("comm=%s tid=%d prio=%d",
		  __entry->comm, __entry->tid, __entry->prio)
)

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p))

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p))

/*
 * Tracepoint for waiting on task to unschedule:
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
DEFINE_EVENT(sched_process_template, sched_wait_task,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p))
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	     TP_PROTO(struct rq *rq, struct task_struct *p),
	     TP_ARGS(rq, p))
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, tid )
		__field( int, prio )
	),

	TP_fast_assign(
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
		tp_assign(tid, pid_nr(pid))
		tp_assign(prio, current->prio - MAX_RT_PRIO)
	),

	TP_printk("comm=%s tid=%d prio=%d",
		  __entry->comm, __entry->tid, __entry->prio)
)

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array_text( char, parent_comm, TASK_COMM_LEN )
		__field( pid_t, parent_tid )
		__array_text( char, child_comm, TASK_COMM_LEN )
		__field( pid_t, child_tid )
	),

	TP_fast_assign(
		tp_memcpy(parent_comm, parent->comm, TASK_COMM_LEN)
		tp_assign(parent_tid, parent->pid)
		tp_memcpy(child_comm, child->comm, TASK_COMM_LEN)
		tp_assign(child_tid, child->pid)
	),

	TP_printk("comm=%s tid=%d child_comm=%s child_tid=%d",
		  __entry->parent_comm, __entry->parent_tid,
		  __entry->child_comm, __entry->child_tid)
)
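
/*
 * At fork time the child inherits the parent's comm, so child_comm
 * normally matches parent_comm until the child calls exec.
 */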

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, tid )
		__field( u64, delay )
	),

	TP_fast_assign(
		tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
		tp_assign(tid, tsk->pid)
		tp_assign(delay, delay)
	)
	TP_perf_assign(
		__perf_count(delay)
	),

	TP_printk("comm=%s tid=%d delay=%Lu [ns]",
		  __entry->comm, __entry->tid,
		  (unsigned long long)__entry->delay)
)
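
/*
 * TP_perf_assign carries the value perf treats as the event count for
 * this tracepoint (here, the delay). Consumers that do not build the
 * perf probes typically expand it to nothing, which is why it sits
 * outside the TP_fast_assign block with no separating comma.
 */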

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
TRACE_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, tid )
		__field( u64, runtime )
		__field( u64, vruntime )
	),

	TP_fast_assign(
		tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
		tp_assign(tid, tsk->pid)
		tp_assign(runtime, runtime)
		tp_assign(vruntime, vruntime)
	)
	TP_perf_assign(
		__perf_count(runtime)
	),

	TP_printk("comm=%s tid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
		  __entry->comm, __entry->tid,
		  (unsigned long long)__entry->runtime,
		  (unsigned long long)__entry->vruntime)
)

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, tid )
		__field( int, oldprio )
		__field( int, newprio )
	),

	TP_fast_assign(
		tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
		tp_assign(tid, tsk->pid)
		tp_assign(oldprio, tsk->prio - MAX_RT_PRIO)
		tp_assign(newprio, newprio - MAX_RT_PRIO)
	),

	TP_printk("comm=%s tid=%d oldprio=%d newprio=%d",
		  __entry->comm, __entry->tid,
		  __entry->oldprio, __entry->newprio)
)

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include "../../../probes/define_trace.h"
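
/*
 * For reference, one typical way to record these events with the LTTng
 * command-line tools (a sketch; the session name is an example, and it
 * assumes lttng-tools and babeltrace are installed):
 *
 *   lttng create sched-demo
 *   lttng enable-event --kernel sched_switch,sched_wakeup,sched_process_fork
 *   lttng start
 *   ... run the workload ...
 *   lttng stop
 *   lttng destroy
 *   babeltrace ~/lttng-traces/sched-demo-*
 */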