Rename LTTng instrumentation macros
lttng-modules.git: instrumentation/events/lttng-module/sched.h
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_SCHED_H

#include "../../../probes/lttng-tracepoint-event.h"
#include <linux/sched.h>
#include <linux/binfmts.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
#include <linux/sched/rt.h>
#endif
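/*
 * The version-gated include above is presumably needed because, from 3.9
 * onward, rt-priority definitions such as MAX_RT_PRIO (used by the priority
 * fields below) come from <linux/sched/rt.h> rather than <linux/sched.h>.
 */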

#ifndef _TRACE_SCHED_DEF_
#define _TRACE_SCHED_DEF_

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_preempt_count(p) & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING;
#endif

	return state;
}

#endif

#endif /* _TRACE_SCHED_DEF_ */
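/*
 * The helpers above differ only in how the preempt count is read on each
 * kernel range. On >= 3.2 kernels they additionally set the TASK_STATE_MAX
 * bit for preempted tasks, which the sched_switch TP_printk() below renders
 * as "R+" (running, but preempted).
 */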

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, tid )
	),

	TP_fast_assign(
		tp_memcpy(comm, t->comm, TASK_COMM_LEN)
		tp_assign(tid, t->pid)
	),

	TP_printk("comm=%s tid=%d", __entry->comm, __entry->tid)
)

/*
 * Tracepoint for the return value of the kthread stopping:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field( int, ret )
	),

	TP_fast_assign(
		tp_assign(ret, ret)
	),

	TP_printk("ret=%d", __entry->ret)
)

/*
 * Tracepoint for waking up a task:
 */
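/*
 * Note that, in the template below, the assign block is split on >= 3.6
 * kernels: TP_fast_assign() is closed early and a TP_perf_assign() block
 * carrying __perf_task(p) is opened in its place, presumably to stay
 * source-compatible with the upstream sched_wakeup TRACE_EVENT on those
 * kernels.
 */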
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	TP_PROTO(struct task_struct *p, int success),

	TP_ARGS(p, success),
#else
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),

	TP_ARGS(rq, p, success),
#endif

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, tid )
		__field( int, prio )
		__field( int, success )
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
		__field( int, target_cpu )
#endif
	),

	TP_fast_assign(
		tp_memcpy(comm, p->comm, TASK_COMM_LEN)
		tp_assign(tid, p->pid)
		tp_assign(prio, p->prio)
		tp_assign(success, success)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
		tp_assign(target_cpu, task_cpu(p))
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
	)
	TP_perf_assign(
		__perf_task(p)
#endif
	),

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
	TP_printk("comm=%s tid=%d prio=%d success=%d target_cpu=%03d",
		__entry->comm, __entry->tid, __entry->prio,
		__entry->success, __entry->target_cpu)
#else
	TP_printk("comm=%s tid=%d prio=%d success=%d",
		__entry->comm, __entry->tid, __entry->prio,
		__entry->success)
#endif
)

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))

LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	TP_ARGS(rq, p, success))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	TP_ARGS(rq, p, success))

#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
LTTNG_TRACEPOINT_EVENT(sched_switch,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	TP_PROTO(struct task_struct *prev,
		struct task_struct *next),

	TP_ARGS(prev, next),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
	TP_PROTO(struct rq *rq, struct task_struct *prev,
		struct task_struct *next),

	TP_ARGS(rq, prev, next),
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

	TP_STRUCT__entry(
		__array_text( char, prev_comm, TASK_COMM_LEN )
		__field( pid_t, prev_tid )
		__field( int, prev_prio )
		__field( long, prev_state )
		__array_text( char, next_comm, TASK_COMM_LEN )
		__field( pid_t, next_tid )
		__field( int, next_prio )
	),

	TP_fast_assign(
		tp_memcpy(next_comm, next->comm, TASK_COMM_LEN)
		tp_assign(prev_tid, prev->pid)
		tp_assign(prev_prio, prev->prio - MAX_RT_PRIO)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
		tp_assign(prev_state, __trace_sched_switch_state(prev))
#else
		tp_assign(prev_state, prev->state)
#endif
		tp_memcpy(prev_comm, prev->comm, TASK_COMM_LEN)
		tp_assign(next_tid, next->pid)
		tp_assign(next_prio, next->prio - MAX_RT_PRIO)
	),

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
	TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_tid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_tid, __entry->prev_prio,
		__entry->prev_state & (TASK_STATE_MAX-1) ?
		__print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
			{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
			{ 16, "Z" }, { 32, "X" }, { 64, "x" },
			{ 128, "W" }) : "R",
		__entry->prev_state & TASK_STATE_MAX ? "+" : "",
		__entry->next_comm, __entry->next_tid, __entry->next_prio)
#else
	TP_printk("prev_comm=%s prev_tid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_tid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_tid, __entry->prev_prio,
		__entry->prev_state ?
		__print_flags(__entry->prev_state, "|",
			{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
			{ 16, "Z" }, { 32, "X" }, { 64, "x" },
			{ 128, "W" }) : "R",
		__entry->next_comm, __entry->next_tid, __entry->next_prio)
#endif
)
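/*
 * The prev_prio/next_prio values above, and the prio fields of several
 * events below, are recorded relative to MAX_RT_PRIO: normal
 * (SCHED_NORMAL/BATCH/IDLE) tasks therefore fall in the 0..39 range, while
 * real-time tasks show up as negative values.
 */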

/*
 * Tracepoint for a task being migrated:
 */
LTTNG_TRACEPOINT_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, tid )
		__field( int, prio )
		__field( int, orig_cpu )
		__field( int, dest_cpu )
	),

	TP_fast_assign(
		tp_memcpy(comm, p->comm, TASK_COMM_LEN)
		tp_assign(tid, p->pid)
		tp_assign(prio, p->prio - MAX_RT_PRIO)
		tp_assign(orig_cpu, task_cpu(p))
		tp_assign(dest_cpu, dest_cpu)
	),

	TP_printk("comm=%s tid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		__entry->comm, __entry->tid, __entry->prio,
		__entry->orig_cpu, __entry->dest_cpu)
)

LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, tid )
		__field( int, prio )
	),

	TP_fast_assign(
		tp_memcpy(comm, p->comm, TASK_COMM_LEN)
		tp_assign(tid, p->pid)
		tp_assign(prio, p->prio - MAX_RT_PRIO)
	),

	TP_printk("comm=%s tid=%d prio=%d",
		__entry->comm, __entry->tid, __entry->prio)
)

/*
 * Tracepoint for freeing a task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_free,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))


/*
 * Tracepoint for a task exiting:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_exit,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for waiting on task to unschedule:
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
	TP_PROTO(struct rq *rq, struct task_struct *p),
	TP_ARGS(rq, p))
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

/*
 * Tracepoint for a waiting task:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, tid )
		__field( int, prio )
	),

	TP_fast_assign(
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
		tp_assign(tid, pid_nr(pid))
		tp_assign(prio, current->prio - MAX_RT_PRIO)
	),

	TP_printk("comm=%s tid=%d prio=%d",
		__entry->comm, __entry->tid, __entry->prio)
)

/*
 * Tracepoint for do_fork.
 * Saving both TID and PID information, especially for the child, allows
 * trace analyzers to distinguish between creation of a new process and
 * creation of a new thread. Newly created processes will have child_tid
 * == child_pid, while creation of a thread yields child_tid !=
 * child_pid.
 */
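/*
 * For example, a plain fork() creates a new process, so the event records
 * child_tid == child_pid; pthread_create() creates a thread in the caller's
 * thread group, so child_tid differs from child_pid (which stays equal to
 * parent_pid).
 */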
LTTNG_TRACEPOINT_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array_text( char, parent_comm, TASK_COMM_LEN )
		__field( pid_t, parent_tid )
		__field( pid_t, parent_pid )
		__array_text( char, child_comm, TASK_COMM_LEN )
		__field( pid_t, child_tid )
		__field( pid_t, child_pid )
	),

	TP_fast_assign(
		tp_memcpy(parent_comm, parent->comm, TASK_COMM_LEN)
		tp_assign(parent_tid, parent->pid)
		tp_assign(parent_pid, parent->tgid)
		tp_memcpy(child_comm, child->comm, TASK_COMM_LEN)
		tp_assign(child_tid, child->pid)
		tp_assign(child_pid, child->tgid)
	),

	TP_printk("comm=%s tid=%d child_comm=%s child_tid=%d",
		__entry->parent_comm, __entry->parent_tid,
		__entry->child_comm, __entry->child_tid)
)

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
/*
 * Tracepoint for sending a signal:
 */
LTTNG_TRACEPOINT_EVENT(sched_signal_send,

	TP_PROTO(int sig, struct task_struct *p),

	TP_ARGS(sig, p),

	TP_STRUCT__entry(
		__field( int, sig )
		__array( char, comm, TASK_COMM_LEN )
		__field( pid_t, pid )
	),

	TP_fast_assign(
		tp_memcpy(comm, p->comm, TASK_COMM_LEN)
		tp_assign(pid, p->pid)
		tp_assign(sig, sig)
	),

	TP_printk("sig=%d comm=%s pid=%d",
		__entry->sig, __entry->comm, __entry->pid)
)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
/*
 * Tracepoint for exec:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_STRUCT__entry(
		__string( filename, bprm->filename )
		__field( pid_t, tid )
		__field( pid_t, old_tid )
	),

	TP_fast_assign(
		tp_strcpy(filename, bprm->filename)
		tp_assign(tid, p->pid)
		tp_assign(old_tid, old_pid)
	),

	TP_printk("filename=%s tid=%d old_tid=%d", __get_str(filename),
		__entry->tid, __entry->old_tid)
)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, tid )
		__field( u64, delay )
	),

	TP_fast_assign(
		tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
		tp_assign(tid, tsk->pid)
		tp_assign(delay, delay)
	)
	TP_perf_assign(
		__perf_count(delay)
	),

	TP_printk("comm=%s tid=%d delay=%Lu [ns]",
		__entry->comm, __entry->tid,
		(unsigned long long)__entry->delay)
)


/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_wait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_sleep,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_iowait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
/*
 * Tracepoint for accounting blocked time (time the task is in uninterruptible).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_blocked,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))
#endif

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
LTTNG_TRACEPOINT_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, tid )
		__field( u64, runtime )
		__field( u64, vruntime )
	),

	TP_fast_assign(
		tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
		tp_assign(tid, tsk->pid)
		tp_assign(runtime, runtime)
		tp_assign(vruntime, vruntime)
	)
	TP_perf_assign(
		__perf_count(runtime)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0))
		__perf_task(tsk)
#endif
	),

	TP_printk("comm=%s tid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
		__entry->comm, __entry->tid,
		(unsigned long long)__entry->runtime,
		(unsigned long long)__entry->vruntime)
)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_STRUCT__entry(
		__array_text( char, comm, TASK_COMM_LEN )
		__field( pid_t, tid )
		__field( int, oldprio )
		__field( int, newprio )
	),

	TP_fast_assign(
		tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
		tp_assign(tid, tsk->pid)
		tp_assign(oldprio, tsk->prio - MAX_RT_PRIO)
		tp_assign(newprio, newprio - MAX_RT_PRIO)
	),

	TP_printk("comm=%s tid=%d oldprio=%d newprio=%d",
		__entry->comm, __entry->tid,
		__entry->oldprio, __entry->newprio)
)
#endif

#endif /* LTTNG_TRACE_SCHED_H */

/* This part must be outside protection */
#include "../../../probes/define_trace.h"
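/*
 * Usage sketch (assumes a standard LTTng 2.x installation; not part of the
 * header itself): once lttng-modules is loaded, the events defined above can
 * be recorded from user space with the lttng(1) tool, e.g.:
 *
 *   lttng create
 *   lttng enable-event --kernel sched_switch,sched_wakeup,sched_process_fork
 *   lttng start
 *   ... run the workload ...
 *   lttng stop
 *   lttng view
 */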