Fix: update sched prev_state instrumentation for upstream kernel
instrumentation/events/lttng-module/sched.h
/* SPDX-License-Identifier: GPL-2.0 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_SCHED_H

#include <probes/lttng-tracepoint-event.h>
#include <linux/sched.h>
#include <linux/pid_namespace.h>
#include <linux/binfmts.h>
#include <linux/version.h>
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
#include <linux/sched/rt.h>
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
#define lttng_proc_inum ns.inum
#else
#define lttng_proc_inum proc_inum
#endif

#define LTTNG_MAX_PID_NS_LEVEL 32

#ifndef _TRACE_SCHED_DEF_
#define _TRACE_SCHED_DEF_

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0))

static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;
	/*
	 * task_state_index() uses fls() and returns a value in the 0-8 range.
	 * Decrement it by 1 (except for TASK_RUNNING, i.e. 0) before using it
	 * as a left-shift count, which recovers the matching task->state bit.
	 */
	state = task_state_index(p);

	return state ? (1 << (state - 1)) : state;
}
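
/*
 * Worked example of the mapping above (illustrative; assumes the v4.15
 * task state bit values TASK_RUNNING == 0x0, TASK_INTERRUPTIBLE == 0x1,
 * TASK_UNINTERRUPTIBLE == 0x2):
 *
 *	task_state_index(p) == 0 (TASK_RUNNING)		-> reported 0
 *	task_state_index(p) == 1 (TASK_INTERRUPTIBLE)	-> 1 << 0 == 0x1
 *	task_state_index(p) == 2 (TASK_UNINTERRUPTIBLE)	-> 1 << 1 == 0x2
 *
 * A preempted task is reported as TASK_REPORT_MAX instead of its raw state.
 */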

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))

static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;
	/*
	 * __get_task_state() uses fls() and returns a value in the 0-8 range.
	 * Decrement it by 1 (except for TASK_RUNNING, i.e. 0) before using it
	 * as a left-shift count, which recovers the matching task->state bit.
	 */
	state = __get_task_state(p);

	return state ? (1 << (state - 1)) : state;
}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))

static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
	/*
	 * Preemption ignores task state, therefore preempted tasks are always RUNNING
	 * (we will not have dequeued if state != RUNNING).
	 */
	return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
}
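
/*
 * Note for trace consumers (informative): on pre-4.15 kernels back to 3.2,
 * the TASK_STATE_MAX bit in the recorded prev_state flags a preempted,
 * still-runnable task, so (prev_state & TASK_STATE_MAX) distinguishes
 * preemption from a genuine blocking state.
 */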

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (preempt_count() & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif /* CONFIG_PREEMPT */

	return state;
}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_preempt_count(p) & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING;
#endif

	return state;
}

#endif

#endif /* _TRACE_SCHED_DEF_ */

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_FIELDS(
		ctf_array_text(char, comm, t->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, t->pid)
	)
)

/*
 * Tracepoint for the return value of the kthread stopping:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_FIELDS(
		ctf_integer(int, ret, ret)
	)
)

/*
 * Tracepoint for waking up a task:
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) || \
	LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, target_cpu, task_cpu(p))
	)
)
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	TP_PROTO(struct task_struct *p, int success),

	TP_ARGS(p, success),
#else
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),

	TP_ARGS(rq, p, success),
#endif

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, success, success)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
		ctf_integer(int, target_cpu, task_cpu(p))
#endif
	)
)
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
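
/*
 * Note (informative): kernels >= 4.3 (and the listed -rt backport ranges)
 * removed the 'success' argument from the wakeup tracepoints, hence the
 * two template variants above.
 */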

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0) || \
	LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_waking,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))
/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))
/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))

LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	TP_ARGS(rq, p, success))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct rq *rq, struct task_struct *p, int success),
	TP_ARGS(rq, p, success))

#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
/*
 * Tracepoint for task switches, performed by the scheduler:
 */
LTTNG_TRACEPOINT_EVENT(sched_switch,

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	TP_PROTO(struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(prev, next),
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
	TP_PROTO(struct rq *rq, struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(rq, prev, next),
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

	TP_FIELDS(
		ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, prev_tid, prev->pid)
		ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
		ctf_integer(long, prev_state, __trace_sched_switch_state(preempt, prev))
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
		ctf_integer(long, prev_state, __trace_sched_switch_state(prev))
#else
		ctf_integer(long, prev_state, prev->state)
#endif
		ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, next_tid, next->pid)
		ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
	)
)
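
/*
 * Consumer-side sketch (illustrative, not part of this header): decoding
 * the recorded prev_state depends on the kernel that produced the trace,
 * given the helpers above.
 *
 *	if (kernel >= 4.15)
 *		preempted = (prev_state == TASK_REPORT_MAX);
 *	else if (kernel >= 3.2)
 *		preempted = !!(prev_state & TASK_STATE_MAX);
 *	else
 *		preemption is not distinguishable from TASK_RUNNING.
 */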

/*
 * Tracepoint for a task being migrated:
 */
LTTNG_TRACEPOINT_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, orig_cpu, task_cpu(p))
		ctf_integer(int, dest_cpu, dest_cpu)
	)
)
LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
	)
)

/*
 * Tracepoint for freeing a task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_free,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for a task exiting:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_exit,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for waiting on task to unschedule:
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
	TP_PROTO(struct rq *rq, struct task_struct *p),
	TP_ARGS(rq, p))
#endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */

/*
 * Tracepoint for a waiting task:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_FIELDS(
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, pid_nr(pid))
		ctf_integer(int, prio, current->prio - MAX_RT_PRIO)
	)
)

/*
 * Tracepoint for do_fork.
 * Saving both TID and PID information, especially for the child, allows
 * trace analyzers to distinguish between creation of a new process and
 * creation of a new thread. Newly created processes will have child_tid
 * == child_pid, while creation of a thread yields child_tid !=
 * child_pid.
 */
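/*
 * For example (illustrative): fork() records child_tid == child_pid, while
 * pthread_create() (clone() with CLONE_THREAD) records child_tid !=
 * child_pid.
 */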
LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_locvar(
		pid_t vtids[LTTNG_MAX_PID_NS_LEVEL];
		unsigned int ns_level;
	),

	TP_code_pre(
		if (child) {
			struct pid *child_pid;
			unsigned int i;

			child_pid = task_pid(child);
			tp_locvar->ns_level =
				min_t(unsigned int, child_pid->level + 1,
					LTTNG_MAX_PID_NS_LEVEL);
			for (i = 0; i < tp_locvar->ns_level; i++)
				tp_locvar->vtids[i] = child_pid->numbers[i].nr;
		}
	),

	TP_FIELDS(
		ctf_array_text(char, parent_comm, parent->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, parent_tid, parent->pid)
		ctf_integer(pid_t, parent_pid, parent->tgid)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
		ctf_integer(unsigned int, parent_ns_inum,
			({
				unsigned int parent_ns_inum = 0;

				if (parent) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(parent);
					if (pid_ns)
						parent_ns_inum =
							pid_ns->lttng_proc_inum;
				}
				parent_ns_inum;
			}))
#endif
		ctf_array_text(char, child_comm, child->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, child_tid, child->pid)
		ctf_sequence(pid_t, vtids, tp_locvar->vtids, u8, tp_locvar->ns_level)
		ctf_integer(pid_t, child_pid, child->tgid)
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0))
		ctf_integer(unsigned int, child_ns_inum,
			({
				unsigned int child_ns_inum = 0;

				if (child) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(child);
					if (pid_ns)
						child_ns_inum =
							pid_ns->lttng_proc_inum;
				}
				child_ns_inum;
			}))
#endif
	),

	TP_code_post()
)
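
/*
 * Example of the vtids sequence (illustrative): for a child created one
 * pid namespace below the initial one, ns_level == 2 and vtids[] holds
 * { tid in the initial namespace, tid inside the child's namespace },
 * i.e. child_pid->numbers[0].nr followed by child_pid->numbers[1].nr.
 */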

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33))
/*
 * Tracepoint for sending a signal:
 */
LTTNG_TRACEPOINT_EVENT(sched_signal_send,

	TP_PROTO(int sig, struct task_struct *p),

	TP_ARGS(sig, p),

	TP_FIELDS(
		ctf_integer(int, sig, sig)
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
	)
)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0))
/*
 * Tracepoint for exec:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_FIELDS(
		ctf_string(filename, bprm->filename)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(pid_t, old_tid, old_pid)
	)
)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
/*
 * XXX: the sched_stat tracepoints below only apply to SCHED_OTHER/BATCH/IDLE;
 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, delay, delay)
	)
)

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_wait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_sleep,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_iowait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
/*
 * Tracepoint for accounting blocked time (time the task is in
 * uninterruptible sleep).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_blocked,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))
#endif

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
LTTNG_TRACEPOINT_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, runtime, runtime)
		ctf_integer(u64, vruntime, vruntime)
	)
)
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0) || \
	LTTNG_RT_KERNEL_RANGE(4,9,27,18, 4,10,0,0) || \
	LTTNG_RT_KERNEL_RANGE(4,11,5,1, 4,12,0,0))
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		ctf_integer(int, newprio, pi_task ? pi_task->prio - MAX_RT_PRIO : tsk->prio - MAX_RT_PRIO)
	)
)
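
/*
 * Note (informative): pi_task is the top priority-inheritance donor; when
 * inheritance ends, the tracepoint fires with pi_task == NULL and the
 * recorded newprio falls back to the task's own current priority.
 */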
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		ctf_integer(int, newprio, newprio - MAX_RT_PRIO)
	)
)
#endif

#endif /* LTTNG_TRACE_SCHED_H */

/* This part must be outside protection */
#include <probes/define_trace.h>