/*
 * lttng-modules: include/instrumentation/events/sched.h
 * fix: sched/tracing: Don't re-read p->state when emitting sched_switch event (v5.18)
 */
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 #undef TRACE_SYSTEM
3 #define TRACE_SYSTEM sched
4
5 #if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
6 #define LTTNG_TRACE_SCHED_H
7
8 #include <lttng/tracepoint-event.h>
9 #include <linux/sched.h>
10 #include <linux/pid_namespace.h>
11 #include <linux/binfmts.h>
12 #include <lttng/kernel-version.h>
13 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,9,0))
14 #include <linux/sched/rt.h>
15 #endif
16 #include <wrapper/namespace.h>
17
18 #define LTTNG_MAX_PID_NS_LEVEL 32
19
20 #ifndef _TRACE_SCHED_DEF_
21 #define _TRACE_SCHED_DEF_
22
23 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,18,0))
24
25 static inline long __trace_sched_switch_state(bool preempt,
26 unsigned int prev_state,
27 struct task_struct *p)
28 {
29 unsigned int state;
30
31 #ifdef CONFIG_SCHED_DEBUG
32 BUG_ON(p != current);
33 #endif /* CONFIG_SCHED_DEBUG */
34
35 /*
36 * Preemption ignores task state, therefore preempted tasks are always
37 * RUNNING (we will not have dequeued if state != RUNNING).
38 */
39 if (preempt)
40 return TASK_REPORT_MAX;
41
42 /*
43 * task_state_index() uses fls() and returns a value from 0-8 range.
44 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
45 * it for left shift operation to get the correct task->state
46 * mapping.
47 */
48 state = __task_state_index(prev_state, p->exit_state);
49
50 return state ? (1 << (state - 1)) : state;
51 }
52
53 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,15,0))
54
55 static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
56 {
57 unsigned int state;
58
59 #ifdef CONFIG_SCHED_DEBUG
60 BUG_ON(p != current);
61 #endif /* CONFIG_SCHED_DEBUG */
62
63 /*
64 * Preemption ignores task state, therefore preempted tasks are always
65 * RUNNING (we will not have dequeued if state != RUNNING).
66 */
67 if (preempt)
68 return TASK_REPORT_MAX;
69
70 /*
71 * task_state_index() uses fls() and returns a value from 0-8 range.
72 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
73 * it for left shift operation to get the correct task->state
74 * mapping.
75 */
76 state = task_state_index(p);
77
78 return state ? (1 << (state - 1)) : state;
79 }
80
81 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,14,0))
82
83 static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
84 {
85 unsigned int state;
86
87 #ifdef CONFIG_SCHED_DEBUG
88 BUG_ON(p != current);
89 #endif /* CONFIG_SCHED_DEBUG */
90
91 /*
92 * Preemption ignores task state, therefore preempted tasks are always
93 * RUNNING (we will not have dequeued if state != RUNNING).
94 */
95 if (preempt)
96 return TASK_REPORT_MAX;
97
98 /*
99 * __get_task_state() uses fls() and returns a value from 0-8 range.
100 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
101 * it for left shift operation to get the correct task->state
102 * mapping.
103 */
104 state = __get_task_state(p);
105
106 return state ? (1 << (state - 1)) : state;
107 }
108
109 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,4,0))
110
111 static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
112 {
113 #ifdef CONFIG_SCHED_DEBUG
114 BUG_ON(p != current);
115 #endif /* CONFIG_SCHED_DEBUG */
116 /*
117 * Preemption ignores task state, therefore preempted tasks are always RUNNING
118 * (we will not have dequeued if state != RUNNING).
119 */
120 return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
121 }
122
123 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,19,0))
124
125 static inline long __trace_sched_switch_state(struct task_struct *p)
126 {
127 long state = p->state;
128
129 #ifdef CONFIG_PREEMPT
130 #ifdef CONFIG_SCHED_DEBUG
131 BUG_ON(p != current);
132 #endif /* CONFIG_SCHED_DEBUG */
133 /*
134 * For all intents and purposes a preempted task is a running task.
135 */
136 if (preempt_count() & PREEMPT_ACTIVE)
137 state = TASK_RUNNING | TASK_STATE_MAX;
138 #endif /* CONFIG_PREEMPT */
139
140 return state;
141 }
142
143 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,13,0))
144
145 static inline long __trace_sched_switch_state(struct task_struct *p)
146 {
147 long state = p->state;
148
149 #ifdef CONFIG_PREEMPT
150 /*
151 * For all intents and purposes a preempted task is a running task.
152 */
153 if (task_preempt_count(p) & PREEMPT_ACTIVE)
154 state = TASK_RUNNING | TASK_STATE_MAX;
155 #endif
156
157 return state;
158 }
159
160 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,2,0))
161
162 static inline long __trace_sched_switch_state(struct task_struct *p)
163 {
164 long state = p->state;
165
166 #ifdef CONFIG_PREEMPT
167 /*
168 * For all intents and purposes a preempted task is a running task.
169 */
170 if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
171 state = TASK_RUNNING | TASK_STATE_MAX;
172 #endif
173
174 return state;
175 }
176
177 #else
178
179 static inline long __trace_sched_switch_state(struct task_struct *p)
180 {
181 long state = p->state;
182
183 #ifdef CONFIG_PREEMPT
184 /*
185 * For all intents and purposes a preempted task is a running task.
186 */
187 if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
188 state = TASK_RUNNING;
189 #endif
190
191 return state;
192 }
193
194 #endif
195
196 #endif /* _TRACE_SCHED_DEF_ */
197
198 #ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
199 /*
200 * Enumeration of the task state bitmask.
201 * Only bit flags are enumerated here, not composition of states.
202 */
203 LTTNG_TRACEPOINT_ENUM(task_state,
204 TP_ENUM_VALUES(
205 ctf_enum_value("TASK_RUNNING", TASK_RUNNING)
206 ctf_enum_value("TASK_INTERRUPTIBLE", TASK_INTERRUPTIBLE)
207 ctf_enum_value("TASK_UNINTERRUPTIBLE", TASK_UNINTERRUPTIBLE)
208 ctf_enum_value("TASK_STOPPED", __TASK_STOPPED)
209 ctf_enum_value("TASK_TRACED", __TASK_TRACED)
210 ctf_enum_value("EXIT_DEAD", EXIT_DEAD)
211 ctf_enum_value("EXIT_ZOMBIE", EXIT_ZOMBIE)
212
213 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,9,0))
214 ctf_enum_value("TASK_PARKED", TASK_PARKED)
215 #endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,9,0)) */
216
217 ctf_enum_value("TASK_DEAD", TASK_DEAD)
218 ctf_enum_value("TASK_WAKEKILL", TASK_WAKEKILL)
219 ctf_enum_value("TASK_WAKING", TASK_WAKING)
220
221 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,2,0))
222 ctf_enum_value("TASK_NOLOAD", TASK_NOLOAD)
223 #endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,2,0)) */
224
225 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,8,0))
226 ctf_enum_value("TASK_NEW", TASK_NEW)
227 #endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,8,0)) */
228
229 ctf_enum_value("TASK_STATE_MAX", TASK_STATE_MAX)
230 )
231 )
232 #endif /* CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM */
233
234 /*
235 * Tracepoint for calling kthread_stop, performed to end a kthread:
236 */
237 LTTNG_TRACEPOINT_EVENT(sched_kthread_stop,
238
239 TP_PROTO(struct task_struct *t),
240
241 TP_ARGS(t),
242
243 TP_FIELDS(
244 ctf_array_text(char, comm, t->comm, TASK_COMM_LEN)
245 ctf_integer(pid_t, tid, t->pid)
246 )
247 )
248
249 /*
250 * Tracepoint for the return value of the kthread stopping:
251 */
252 LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret,
253
254 TP_PROTO(int ret),
255
256 TP_ARGS(ret),
257
258 TP_FIELDS(
259 ctf_integer(int, ret, ret)
260 )
261 )
262
263 /*
264 * Tracepoint for waking up a task:
265 */
266 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0) || \
267 LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
268 LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
269 LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
270 LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
271 LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
272 LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
273 LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))
274 LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,
275
276 TP_PROTO(struct task_struct *p),
277
278 TP_ARGS(p),
279
280 TP_FIELDS(
281 ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
282 ctf_integer(pid_t, tid, p->pid)
283 ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
284 ctf_integer(int, target_cpu, task_cpu(p))
285 )
286 )
287 #else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0)) */
288 LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,
289
290 TP_PROTO(struct task_struct *p, int success),
291
292 TP_ARGS(p, success),
293
294 TP_FIELDS(
295 ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
296 ctf_integer(pid_t, tid, p->pid)
297 ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
298 ctf_integer(int, success, success)
299 ctf_integer(int, target_cpu, task_cpu(p))
300 )
301 )
302 #endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0)) */
303
304 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0) || \
305 LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
306 LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
307 LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
308 LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
309 LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
310 LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
311 LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))
312
313 /*
314 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
315 * called from the waking context.
316 */
317 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_waking,
318 TP_PROTO(struct task_struct *p),
319 TP_ARGS(p))
320
321 /*
322 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNNG.
323 * It it not always called from the waking context.
324 */
325 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
326 TP_PROTO(struct task_struct *p),
327 TP_ARGS(p))
328
329 /*
330 * Tracepoint for waking up a new task:
331 */
332 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
333 TP_PROTO(struct task_struct *p),
334 TP_ARGS(p))
335
336 #else
337
338 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
339 TP_PROTO(struct task_struct *p, int success),
340 TP_ARGS(p, success))
341
342 /*
343 * Tracepoint for waking up a new task:
344 */
345 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
346 TP_PROTO(struct task_struct *p, int success),
347 TP_ARGS(p, success))
348
349 #endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0)) */
350
351 /*
352 * Tracepoint for task switches, performed by the scheduler:
353 */
354
355 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,18,0))
356 LTTNG_TRACEPOINT_EVENT(sched_switch,
357
358 TP_PROTO(bool preempt,
359 unsigned int prev_state,
360 struct task_struct *prev,
361 struct task_struct *next),
362
363 TP_ARGS(preempt, prev_state, prev, next),
364
365 TP_FIELDS(
366 ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN)
367 ctf_integer(pid_t, prev_tid, prev->pid)
368 ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
369 #ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
370 ctf_enum(task_state, long, prev_state, __trace_sched_switch_state(preempt, prev_state, prev))
371 #else
372 ctf_integer(long, prev_state, __trace_sched_switch_state(preempt, prev_state, prev))
373 #endif
374 ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
375 ctf_integer(pid_t, next_tid, next->pid)
376 ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
377 )
378 )
379
380 #elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,4,0))
381
382 LTTNG_TRACEPOINT_EVENT(sched_switch,
383
384 TP_PROTO(bool preempt,
385 struct task_struct *prev,
386 struct task_struct *next),
387
388 TP_ARGS(preempt, prev, next),
389
390 TP_FIELDS(
391 ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN)
392 ctf_integer(pid_t, prev_tid, prev->pid)
393 ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
394 #ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
395 ctf_enum(task_state, long, prev_state, __trace_sched_switch_state(preempt, prev))
396 #else
397 ctf_integer(long, prev_state, __trace_sched_switch_state(preempt, prev))
398 #endif
399 ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
400 ctf_integer(pid_t, next_tid, next->pid)
401 ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
402 )
403 )
404
405 #else
406
407 LTTNG_TRACEPOINT_EVENT(sched_switch,
408
409 TP_PROTO(struct task_struct *prev,
410 struct task_struct *next),
411
412 TP_ARGS(prev, next),
413
414 TP_FIELDS(
415 ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN)
416 ctf_integer(pid_t, prev_tid, prev->pid)
417 ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
418 #ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
419 ctf_enum(task_state, long, prev_state, __trace_sched_switch_state(prev))
420 #else
421 ctf_integer(long, prev_state, __trace_sched_switch_state(prev))
422 #endif
423 ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
424 ctf_integer(pid_t, next_tid, next->pid)
425 ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
426 )
427 )
428 #endif
429
430 /*
431 * Tracepoint for a task being migrated:
432 */
433 LTTNG_TRACEPOINT_EVENT(sched_migrate_task,
434
435 TP_PROTO(struct task_struct *p, int dest_cpu),
436
437 TP_ARGS(p, dest_cpu),
438
439 TP_FIELDS(
440 ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
441 ctf_integer(pid_t, tid, p->pid)
442 ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
443 ctf_integer(int, orig_cpu, task_cpu(p))
444 ctf_integer(int, dest_cpu, dest_cpu)
445 )
446 )
447
448 LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template,
449
450 TP_PROTO(struct task_struct *p),
451
452 TP_ARGS(p),
453
454 TP_FIELDS(
455 ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
456 ctf_integer(pid_t, tid, p->pid)
457 ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
458 )
459 )
460
461 /*
462 * Tracepoint for freeing a task:
463 */
464 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_free,
465 TP_PROTO(struct task_struct *p),
466 TP_ARGS(p))
467
468
469 /*
470 * Tracepoint for a task exiting:
471 */
472 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_exit,
473 TP_PROTO(struct task_struct *p),
474 TP_ARGS(p))
475
476 /*
477 * Tracepoint for waiting on task to unschedule:
478 */
479 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
480 TP_PROTO(struct task_struct *p),
481 TP_ARGS(p))
482
483 /*
484 * Tracepoint for a waiting task:
485 */
486 LTTNG_TRACEPOINT_EVENT(sched_process_wait,
487
488 TP_PROTO(struct pid *pid),
489
490 TP_ARGS(pid),
491
492 TP_FIELDS(
493 ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
494 ctf_integer(pid_t, tid, pid_nr(pid))
495 ctf_integer(int, prio, current->prio - MAX_RT_PRIO)
496 )
497 )
498
499 /*
500 * Tracepoint for do_fork.
501 * Saving both TID and PID information, especially for the child, allows
502 * trace analyzers to distinguish between creation of a new process and
503 * creation of a new thread. Newly created processes will have child_tid
504 * == child_pid, while creation of a thread yields to child_tid !=
505 * child_pid.
506 */
507 LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork,
508
509 TP_PROTO(struct task_struct *parent, struct task_struct *child),
510
511 TP_ARGS(parent, child),
512
513 TP_locvar(
514 pid_t vtids[LTTNG_MAX_PID_NS_LEVEL];
515 unsigned int ns_level;
516 ),
517
518 TP_code_pre(
519 if (child) {
520 struct pid *child_pid;
521 unsigned int i;
522
523 child_pid = task_pid(child);
524 tp_locvar->ns_level =
525 min_t(unsigned int, child_pid->level + 1,
526 LTTNG_MAX_PID_NS_LEVEL);
527 for (i = 0; i < tp_locvar->ns_level; i++)
528 tp_locvar->vtids[i] = child_pid->numbers[i].nr;
529 }
530 ),
531
532 TP_FIELDS(
533 ctf_array_text(char, parent_comm, parent->comm, TASK_COMM_LEN)
534 ctf_integer(pid_t, parent_tid, parent->pid)
535 ctf_integer(pid_t, parent_pid, parent->tgid)
536 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,8,0))
537 ctf_integer(unsigned int, parent_ns_inum,
538 ({
539 unsigned int parent_ns_inum = 0;
540
541 if (parent) {
542 struct pid_namespace *pid_ns;
543
544 pid_ns = task_active_pid_ns(parent);
545 if (pid_ns)
546 parent_ns_inum =
547 pid_ns->lttng_ns_inum;
548 }
549 parent_ns_inum;
550 }))
551 #endif
552 ctf_array_text(char, child_comm, child->comm, TASK_COMM_LEN)
553 ctf_integer(pid_t, child_tid, child->pid)
554 ctf_sequence(pid_t, vtids, tp_locvar->vtids, u8, tp_locvar->ns_level)
555 ctf_integer(pid_t, child_pid, child->tgid)
556 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,8,0))
557 ctf_integer(unsigned int, child_ns_inum,
558 ({
559 unsigned int child_ns_inum = 0;
560
561 if (child) {
562 struct pid_namespace *pid_ns;
563
564 pid_ns = task_active_pid_ns(child);
565 if (pid_ns)
566 child_ns_inum =
567 pid_ns->lttng_ns_inum;
568 }
569 child_ns_inum;
570 }))
571 #endif
572 ),
573
574 TP_code_post()
575 )
576
577 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
578 /*
579 * Tracepoint for exec:
580 */
581 LTTNG_TRACEPOINT_EVENT(sched_process_exec,
582
583 TP_PROTO(struct task_struct *p, pid_t old_pid,
584 struct linux_binprm *bprm),
585
586 TP_ARGS(p, old_pid, bprm),
587
588 TP_FIELDS(
589 ctf_string(filename, bprm->filename)
590 ctf_integer(pid_t, tid, p->pid)
591 ctf_integer(pid_t, old_tid, old_pid)
592 )
593 )
594 #endif
595
596 /*
597 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
598 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
599 */
600 LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template,
601
602 TP_PROTO(struct task_struct *tsk, u64 delay),
603
604 TP_ARGS(tsk, delay),
605
606 TP_FIELDS(
607 ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
608 ctf_integer(pid_t, tid, tsk->pid)
609 ctf_integer(u64, delay, delay)
610 )
611 )
612
613
614 /*
615 * Tracepoint for accounting wait time (time the task is runnable
616 * but not actually running due to scheduler contention).
617 */
618 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_wait,
619 TP_PROTO(struct task_struct *tsk, u64 delay),
620 TP_ARGS(tsk, delay))
621
622 /*
623 * Tracepoint for accounting sleep time (time the task is not runnable,
624 * including iowait, see below).
625 */
626 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_sleep,
627 TP_PROTO(struct task_struct *tsk, u64 delay),
628 TP_ARGS(tsk, delay))
629
630 /*
631 * Tracepoint for accounting iowait time (time the task is not runnable
632 * due to waiting on IO to complete).
633 */
634 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_iowait,
635 TP_PROTO(struct task_struct *tsk, u64 delay),
636 TP_ARGS(tsk, delay))
637
638 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,3,0))
639 /*
640 * Tracepoint for accounting blocked time (time the task is in uninterruptible).
641 */
642 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_blocked,
643 TP_PROTO(struct task_struct *tsk, u64 delay),
644 TP_ARGS(tsk, delay))
645 #endif
646
647 /*
648 * Tracepoint for accounting runtime (time the task is executing
649 * on a CPU).
650 */
651 LTTNG_TRACEPOINT_EVENT(sched_stat_runtime,
652
653 TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
654
655 TP_ARGS(tsk, runtime, vruntime),
656
657 TP_FIELDS(
658 ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
659 ctf_integer(pid_t, tid, tsk->pid)
660 ctf_integer(u64, runtime, runtime)
661 ctf_integer(u64, vruntime, vruntime)
662 )
663 )
664
665 #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,12,0) || \
666 LTTNG_RT_KERNEL_RANGE(4,9,27,18, 4,10,0,0) || \
667 LTTNG_RT_KERNEL_RANGE(4,11,5,1, 4,12,0,0))
668 /*
669 * Tracepoint for showing priority inheritance modifying a tasks
670 * priority.
671 */
672 LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,
673
674 TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),
675
676 TP_ARGS(tsk, pi_task),
677
678 TP_FIELDS(
679 ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
680 ctf_integer(pid_t, tid, tsk->pid)
681 ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
682 ctf_integer(int, newprio, pi_task ? pi_task->prio - MAX_RT_PRIO : tsk->prio - MAX_RT_PRIO)
683 )
684 )
685 #else
686 /*
687 * Tracepoint for showing priority inheritance modifying a tasks
688 * priority.
689 */
690 LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,
691
692 TP_PROTO(struct task_struct *tsk, int newprio),
693
694 TP_ARGS(tsk, newprio),
695
696 TP_FIELDS(
697 ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
698 ctf_integer(pid_t, tid, tsk->pid)
699 ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
700 ctf_integer(int, newprio, newprio - MAX_RT_PRIO)
701 )
702 )
703 #endif
704
705 #endif /* LTTNG_TRACE_SCHED_H */
706
707 /* This part must be outside protection */
708 #include <lttng/define_trace.h>