#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_SCHED_H

#include <probes/lttng-tracepoint-event.h>
#include <linux/sched.h>
#include <linux/pid_namespace.h>
#include <linux/binfmts.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
#include <linux/sched/rt.h>
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
#define lttng_proc_inum ns.inum
#else
#define lttng_proc_inum proc_inum
#endif
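
/*
 * Linux 3.19 moved the pid namespace inode number from the
 * pid_namespace-specific proc_inum field into the generic ns_common
 * structure (ns.inum); the lttng_proc_inum macro above papers over that
 * rename so the fork instrumentation below compiles on both sides.
 */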

#define LTTNG_MAX_PID_NS_LEVEL 32
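
/*
 * LTTNG_MAX_PID_NS_LEVEL bounds the number of nested pid namespace levels
 * recorded per sched_process_fork event; deeper nestings are truncated
 * (see the min_t() clamp in the fork probe below).
 */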

#ifndef _TRACE_SCHED_DEF_
#define _TRACE_SCHED_DEF_

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))

static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
}
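
/*
 * On 4.4+ kernels the caller passes the preemption decision in, and a
 * preempted task is flagged by OR-ing TASK_STATE_MAX onto TASK_RUNNING:
 * TASK_STATE_MAX sits above every real state bit, so trace consumers can
 * tell "switched out while runnable because it was preempted" apart from
 * an ordinary TASK_RUNNING switch.
 */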

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (preempt_count() & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif /* CONFIG_PREEMPT */

	return state;
}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_preempt_count(p) & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING;
#endif

	return state;
}

#endif
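
/*
 * All of the variants above encode the same invariant: a task switched out
 * by preemption is reported as runnable. Kernels >= 3.2 additionally OR in
 * TASK_STATE_MAX so analyzers can tell preemption apart from a plain
 * TASK_RUNNING switch; the 2.6.35 fallback reports bare TASK_RUNNING.
 */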

#endif /* _TRACE_SCHED_DEF_ */

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_FIELDS(
		ctf_array_text(char, comm, t->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, t->pid)
	)
)
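
/*
 * For illustration (hypothetical values), a trace viewer such as
 * babeltrace would typically render the event above along the lines of:
 *
 *   sched_kthread_stop: { comm = "kworker/0:1", tid = 42 }
 *
 * with one output field per ctf_*() entry in TP_FIELDS; the exact
 * formatting depends on the viewer.
 */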

/*
 * Tracepoint for the return value of the kthread stopping:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_FIELDS(
		ctf_integer(int, ret, ret)
	)
)

/*
 * Tracepoint for waking up a task:
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) || \
	LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0))
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, target_cpu, task_cpu(p))
	)
)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	TP_PROTO(struct task_struct *p, int success),

	TP_ARGS(p, success),
#else
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),

	TP_ARGS(rq, p, success),
#endif

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, success, success)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
		ctf_integer(int, target_cpu, task_cpu(p))
#endif
	)
)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
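
/*
 * Kernels 4.3+ (and the matching -rt ranges) dropped the effectively
 * always-true `success` argument from the wakeup tracepoints, which is why
 * the two class definitions above differ in both prototype and recorded
 * fields.
 */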

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) || \
	LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0))

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_waking,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))

LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	TP_ARGS(rq, p, success))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	TP_ARGS(rq, p, success))

#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
LTTNG_TRACEPOINT_EVENT(sched_switch,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	TP_PROTO(struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(prev, next),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
	TP_PROTO(struct rq *rq, struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(rq, prev, next),
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

	TP_FIELDS(
		ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, prev_tid, prev->pid)
		ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
		ctf_integer(long, prev_state, __trace_sched_switch_state(preempt, prev))
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
		ctf_integer(long, prev_state, __trace_sched_switch_state(prev))
#else
		ctf_integer(long, prev_state, prev->state)
#endif
		ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, next_tid, next->pid)
		ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
	)
)
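
/*
 * prev_state records the kernel's task state bits as sampled at switch
 * time: 0 is TASK_RUNNING, 1 TASK_INTERRUPTIBLE, 2 TASK_UNINTERRUPTIBLE,
 * and so on. On >= 3.2 kernels a preempted task shows up as
 * TASK_RUNNING | TASK_STATE_MAX (see __trace_sched_switch_state() above).
 */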

/*
 * Tracepoint for a task being migrated:
 */
LTTNG_TRACEPOINT_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, orig_cpu, task_cpu(p))
		ctf_integer(int, dest_cpu, dest_cpu)
	)
)
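
/*
 * orig_cpu is sampled with task_cpu(p) when the probe fires; upstream
 * invokes this tracepoint before the task is actually moved, so orig_cpu
 * reports the source CPU and the (orig_cpu, dest_cpu) pair describes the
 * migration.
 */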

LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
	)
)

/*
 * Tracepoint for freeing a task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_free,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for a task exiting:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_exit,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for waiting on task to unschedule:
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
	TP_PROTO(struct rq *rq, struct task_struct *p),
	TP_ARGS(rq, p))
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

/*
 * Tracepoint for a waiting task:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_FIELDS(
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, pid_nr(pid))
		ctf_integer(int, prio, current->prio - MAX_RT_PRIO)
	)
)

/*
 * Tracepoint for do_fork.
 * Saving both TID and PID information, especially for the child, allows
 * trace analyzers to distinguish between the creation of a new process and
 * the creation of a new thread. Newly created processes have child_tid ==
 * child_pid, while creating a thread yields child_tid != child_pid.
 */
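/*
 * For example (hypothetical values): a fork() from tid 1200 records
 * child_tid == child_pid == 1234, whereas a pthread_create() in the same
 * process records child_tid == 1235 with child_pid still 1234.
 */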
LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_locvar(
		pid_t vtids[LTTNG_MAX_PID_NS_LEVEL];
		unsigned int ns_level;
	),

	TP_code_pre(
		if (child) {
			struct pid *child_pid;
			unsigned int i;

			child_pid = task_pid(child);
			tp_locvar->ns_level =
				min_t(unsigned int, child_pid->level + 1,
					LTTNG_MAX_PID_NS_LEVEL);
			for (i = 0; i < tp_locvar->ns_level; i++)
				tp_locvar->vtids[i] = child_pid->numbers[i].nr;
		}
	),

	TP_FIELDS(
		ctf_array_text(char, parent_comm, parent->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, parent_tid, parent->pid)
		ctf_integer(pid_t, parent_pid, parent->tgid)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
		ctf_integer(unsigned int, parent_ns_inum,
			({
				unsigned int parent_ns_inum = 0;

				if (parent) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(parent);
					if (pid_ns)
						parent_ns_inum =
							pid_ns->lttng_proc_inum;
				}
				parent_ns_inum;
			}))
#endif
		ctf_array_text(char, child_comm, child->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, child_tid, child->pid)
		ctf_sequence(pid_t, vtids, tp_locvar->vtids, u8, tp_locvar->ns_level)
		ctf_integer(pid_t, child_pid, child->tgid)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
		ctf_integer(unsigned int, child_ns_inum,
			({
				unsigned int child_ns_inum = 0;

				if (child) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(child);
					if (pid_ns)
						child_ns_inum =
							pid_ns->lttng_proc_inum;
				}
				child_ns_inum;
			}))
#endif
	),

	TP_code_post()
)
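
/*
 * The vtids sequence above holds the child's tid once per pid namespace
 * level, starting at the initial namespace (index 0) and ending at the
 * child's own namespace, clamped to LTTNG_MAX_PID_NS_LEVEL entries.
 */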

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
/*
 * Tracepoint for sending a signal:
 */
LTTNG_TRACEPOINT_EVENT(sched_signal_send,

	TP_PROTO(int sig, struct task_struct *p),

	TP_ARGS(sig, p),

	TP_FIELDS(
		ctf_integer(int, sig, sig)
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
	)
)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
/*
 * Tracepoint for exec:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_FIELDS(
		ctf_string(filename, bprm->filename)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(pid_t, old_tid, old_pid)
	)
)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
/*
 * XXX: the sched_stat tracepoints below only apply to SCHED_OTHER/BATCH/IDLE;
 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, delay, delay)
	)
)

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_wait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_sleep,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_iowait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
/*
 * Tracepoint for accounting blocked time (time the task spends in
 * uninterruptible sleep).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_blocked,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))
#endif

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
LTTNG_TRACEPOINT_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, runtime, runtime)
		ctf_integer(u64, vruntime, vruntime)
	)
)
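
/*
 * runtime and vruntime are passed in by the scheduler in nanoseconds
 * (vruntime being CFS's weighted virtual runtime), as are the delay
 * values of the sched_stat_* events above.
 */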
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0) || \
	LTTNG_RT_KERNEL_RANGE(4,9,27,18, 4,10,0,0) || \
	LTTNG_RT_KERNEL_RANGE(4,11,5,1, 4,12,0,0))
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		ctf_integer(int, newprio, pi_task ? pi_task->prio - MAX_RT_PRIO : tsk->prio - MAX_RT_PRIO)
	)
)
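
/*
 * A NULL pi_task means the priority boost is being torn down, in which
 * case newprio simply re-reports the task's own priority (see the ternary
 * above).
 */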
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		ctf_integer(int, newprio, newprio - MAX_RT_PRIO)
	)
)
#endif

#endif /* LTTNG_TRACE_SCHED_H */

/* This part must be outside protection */
#include <probes/define_trace.h>