Disable sched_switch bitwise enum in default build
lttng-modules.git: include/instrumentation/events/sched.h
/* SPDX-License-Identifier: GPL-2.0-only */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define LTTNG_TRACE_SCHED_H

#include <lttng/tracepoint-event.h>
#include <linux/sched.h>
#include <linux/pid_namespace.h>
#include <linux/binfmts.h>
#include <lttng/kernel-version.h>
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,9,0))
#include <linux/sched/rt.h>
#endif
#include <wrapper/namespace.h>

#define LTTNG_MAX_PID_NS_LEVEL 32

#ifndef _TRACE_SCHED_DEF_
#define _TRACE_SCHED_DEF_

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,15,0))

static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * task_state_index() uses fls() and returns a value from 0-8 range.
	 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
	 * it for left shift operation to get the correct task->state
	 * mapping.
	 */
	state = task_state_index(p);

	return state ? (1 << (state - 1)) : state;
}
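
/*
 * Worked example of the mapping above (an illustration, not part of
 * the recorded ABI): task_state_index() == 0 is returned as-is
 * (TASK_RUNNING), index 1 becomes 1 << 0 (TASK_INTERRUPTIBLE) and
 * index 2 becomes 1 << 1 (TASK_UNINTERRUPTIBLE), i.e. the lowest
 * reported state bit is reconstructed from the index. A preempted
 * task is reported as TASK_REPORT_MAX so analyzers can tell
 * preemption apart from a voluntary switch.
 */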

#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,14,0))

static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
	unsigned int state;

#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */

	/*
	 * Preemption ignores task state, therefore preempted tasks are always
	 * RUNNING (we will not have dequeued if state != RUNNING).
	 */
	if (preempt)
		return TASK_REPORT_MAX;

	/*
	 * __get_task_state() uses fls() and returns a value from 0-8 range.
	 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
	 * it for left shift operation to get the correct task->state
	 * mapping.
	 */
	state = __get_task_state(p);

	return state ? (1 << (state - 1)) : state;
}

#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,4,0))

static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
{
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
	/*
	 * Preemption ignores task state, therefore preempted tasks are always RUNNING
	 * (we will not have dequeued if state != RUNNING).
	 */
	return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
}

#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,19,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
#ifdef CONFIG_SCHED_DEBUG
	BUG_ON(p != current);
#endif /* CONFIG_SCHED_DEBUG */
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (preempt_count() & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif /* CONFIG_PREEMPT */

	return state;
}

#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,13,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_preempt_count(p) & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}

#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,2,0))

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING | TASK_STATE_MAX;
#endif

	return state;
}

#else

static inline long __trace_sched_switch_state(struct task_struct *p)
{
	long state = p->state;

#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		state = TASK_RUNNING;
#endif

	return state;
}

#endif

#endif /* _TRACE_SCHED_DEF_ */
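
/*
 * Summary of the version ladder above, as reflected in the
 * conditionals: 4.15+ kernels expose task_state_index(), 4.14 used
 * __get_task_state(), 4.4+ receive the preempt flag directly from the
 * tracepoint, and older kernels infer preemption from PREEMPT_ACTIVE
 * in the preempt count. On pre-4.14 kernels a preempted task is
 * flagged by OR-ing TASK_STATE_MAX into the reported state.
 */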

#ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
/*
 * Enumeration of the task state bitmask.
 * Only bit flags are enumerated here, not composition of states.
 */
LTTNG_TRACEPOINT_ENUM(task_state,
	TP_ENUM_VALUES(
		ctf_enum_value("TASK_RUNNING", TASK_RUNNING)
		ctf_enum_value("TASK_INTERRUPTIBLE", TASK_INTERRUPTIBLE)
		ctf_enum_value("TASK_UNINTERRUPTIBLE", TASK_UNINTERRUPTIBLE)
		ctf_enum_value("TASK_STOPPED", __TASK_STOPPED)
		ctf_enum_value("TASK_TRACED", __TASK_TRACED)
		ctf_enum_value("EXIT_DEAD", EXIT_DEAD)
		ctf_enum_value("EXIT_ZOMBIE", EXIT_ZOMBIE)

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,9,0))
		ctf_enum_value("TASK_PARKED", TASK_PARKED)
#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,9,0)) */

		ctf_enum_value("TASK_DEAD", TASK_DEAD)
		ctf_enum_value("TASK_WAKEKILL", TASK_WAKEKILL)
		ctf_enum_value("TASK_WAKING", TASK_WAKING)

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,2,0))
		ctf_enum_value("TASK_NOLOAD", TASK_NOLOAD)
#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,2,0)) */

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,8,0))
		ctf_enum_value("TASK_NEW", TASK_NEW)
#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,8,0)) */

		ctf_enum_value("TASK_STATE_MAX", TASK_STATE_MAX)
	)
)
#endif /* CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM */
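
/*
 * Decoding example (a sketch; exact numeric values depend on the
 * kernel's state layout): with the bitwise enum enabled, a prev_state
 * equal to TASK_WAKEKILL | TASK_UNINTERRUPTIBLE (i.e. TASK_KILLABLE)
 * is meant to be decoded by the trace reader as the two flag labels
 * "TASK_WAKEKILL" and "TASK_UNINTERRUPTIBLE" rather than an opaque
 * integer. Per the commit title, this enum is opt-in through
 * CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM and disabled in the default
 * build.
 */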

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_FIELDS(
		ctf_array_text(char, comm, t->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, t->pid)
	)
)

/*
 * Tracepoint for the return value of the kthread stopping:
 */
LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_FIELDS(
		ctf_integer(int, ret, ret)
	)
)

/*
 * Tracepoint for waking up a task:
 */
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0) || \
	LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, target_cpu, task_cpu(p))
	)
)
#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0)) */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p, int success),

	TP_ARGS(p, success),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, success, success)
		ctf_integer(int, target_cpu, task_cpu(p))
	)
)
#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0)) */
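
/*
 * Note on the prio fields in this file (illustrative arithmetic):
 * kernel priorities 0..MAX_RT_PRIO-1 are real-time, and MAX_RT_PRIO
 * is 100, so recording p->prio - MAX_RT_PRIO yields the priority
 * relative to the first non-real-time level. A task at the default
 * nice level (kernel prio 120) is recorded as 120 - 100 = 20, while
 * real-time tasks show up as negative values.
 */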

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0) || \
	LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \
	LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0))

/*
 * Tracepoint called when waking a task; this tracepoint is guaranteed to be
 * called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_waking,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
 * It is not always called from the waking context.
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

#else

LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

/*
 * Tracepoint for waking up a new task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
	TP_PROTO(struct task_struct *p, int success),
	TP_ARGS(p, success))

#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0)) */

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
LTTNG_TRACEPOINT_EVENT(sched_switch,

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,4,0))
	TP_PROTO(bool preempt,
		 struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(preempt, prev, next),
#else
	TP_PROTO(struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(prev, next),
#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,4,0)) */

	TP_FIELDS(
		ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, prev_tid, prev->pid)
		ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO)
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,4,0))
#ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
		ctf_enum(task_state, long, prev_state, __trace_sched_switch_state(preempt, prev))
#else
		ctf_integer(long, prev_state, __trace_sched_switch_state(preempt, prev))
#endif
#else
#ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM
		ctf_enum(task_state, long, prev_state, __trace_sched_switch_state(prev))
#else
		ctf_integer(long, prev_state, __trace_sched_switch_state(prev))
#endif
#endif
		ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, next_tid, next->pid)
		ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO)
	)
)
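
/*
 * Typical use from user space (illustrative transcript; these are
 * standard lttng-tools commands, nothing defined in this header):
 *
 *   lttng create mysession
 *   lttng enable-event --kernel sched_switch
 *   lttng start
 *   ... run the workload ...
 *   lttng stop
 *   lttng view
 */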

/*
 * Tracepoint for a task being migrated:
 */
LTTNG_TRACEPOINT_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
		ctf_integer(int, orig_cpu, task_cpu(p))
		ctf_integer(int, dest_cpu, dest_cpu)
	)
)

LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_FIELDS(
		ctf_array_text(char, comm, p->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(int, prio, p->prio - MAX_RT_PRIO)
	)
)

/*
 * Tracepoint for freeing a task:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_free,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))


/*
 * Tracepoint for a task exiting:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_exit,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for waiting on task to unschedule:
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
	TP_PROTO(struct task_struct *p),
	TP_ARGS(p))

/*
 * Tracepoint for a waiting task:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_FIELDS(
		ctf_array_text(char, comm, current->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, pid_nr(pid))
		ctf_integer(int, prio, current->prio - MAX_RT_PRIO)
	)
)

/*
 * Tracepoint for do_fork.
 * Saving both TID and PID information, especially for the child, allows
 * trace analyzers to distinguish between creation of a new process and
 * creation of a new thread. A newly created process has child_tid ==
 * child_pid, while a newly created thread has child_tid != child_pid.
 */
LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_locvar(
		pid_t vtids[LTTNG_MAX_PID_NS_LEVEL];
		unsigned int ns_level;
	),

	TP_code_pre(
		if (child) {
			struct pid *child_pid;
			unsigned int i;

			child_pid = task_pid(child);
			tp_locvar->ns_level =
				min_t(unsigned int, child_pid->level + 1,
					LTTNG_MAX_PID_NS_LEVEL);
			for (i = 0; i < tp_locvar->ns_level; i++)
				tp_locvar->vtids[i] = child_pid->numbers[i].nr;
		}
	),

	TP_FIELDS(
		ctf_array_text(char, parent_comm, parent->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, parent_tid, parent->pid)
		ctf_integer(pid_t, parent_pid, parent->tgid)
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,8,0))
		ctf_integer(unsigned int, parent_ns_inum,
			({
				unsigned int parent_ns_inum = 0;

				if (parent) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(parent);
					if (pid_ns)
						parent_ns_inum =
							pid_ns->lttng_ns_inum;
				}
				parent_ns_inum;
			}))
#endif
		ctf_array_text(char, child_comm, child->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, child_tid, child->pid)
		ctf_sequence(pid_t, vtids, tp_locvar->vtids, u8, tp_locvar->ns_level)
		ctf_integer(pid_t, child_pid, child->tgid)
#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,8,0))
		ctf_integer(unsigned int, child_ns_inum,
			({
				unsigned int child_ns_inum = 0;

				if (child) {
					struct pid_namespace *pid_ns;

					pid_ns = task_active_pid_ns(child);
					if (pid_ns)
						child_ns_inum =
							pid_ns->lttng_ns_inum;
				}
				child_ns_inum;
			}))
#endif
	),

	TP_code_post()
)
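
/*
 * The vtids sequence above records the child's TID at every PID
 * namespace level, from the initial namespace (index 0) down to the
 * namespace the child lives in. For example, a child running in one
 * nested PID namespace yields ns_level == 2, with vtids[0] its TID in
 * the root namespace and vtids[1] its TID inside the container.
 */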

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0))
/*
 * Tracepoint for exec:
 */
LTTNG_TRACEPOINT_EVENT(sched_process_exec,

	TP_PROTO(struct task_struct *p, pid_t old_pid,
		 struct linux_binprm *bprm),

	TP_ARGS(p, old_pid, bprm),

	TP_FIELDS(
		ctf_string(filename, bprm->filename)
		ctf_integer(pid_t, tid, p->pid)
		ctf_integer(pid_t, old_tid, old_pid)
	)
)
#endif

/*
 * XXX the sched_stat tracepoints below only apply to SCHED_OTHER/BATCH/IDLE;
 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, delay, delay)
	)
)


/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_wait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_sleep,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_iowait,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,3,0))
/*
 * Tracepoint for accounting blocked time (time the task is in
 * uninterruptible sleep).
 */
LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_blocked,
	TP_PROTO(struct task_struct *tsk, u64 delay),
	TP_ARGS(tsk, delay))
#endif

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
LTTNG_TRACEPOINT_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(u64, runtime, runtime)
		ctf_integer(u64, vruntime, vruntime)
	)
)
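
/*
 * Interpretation note (general CFS background, not specific to this
 * header): runtime is the wall-clock time, in nanoseconds, the task
 * just executed, while vruntime is CFS's weighted virtual runtime,
 * which advances more slowly for higher-weight (lower nice) tasks and
 * is what the scheduler compares when picking the next task.
 */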

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,12,0) || \
	LTTNG_RT_KERNEL_RANGE(4,9,27,18, 4,10,0,0) || \
	LTTNG_RT_KERNEL_RANGE(4,11,5,1, 4,12,0,0))
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),

	TP_ARGS(tsk, pi_task),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		ctf_integer(int, newprio, pi_task ? pi_task->prio - MAX_RT_PRIO : tsk->prio - MAX_RT_PRIO)
	)
)
#else
/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_FIELDS(
		ctf_array_text(char, comm, tsk->comm, TASK_COMM_LEN)
		ctf_integer(pid_t, tid, tsk->pid)
		ctf_integer(int, oldprio, tsk->prio - MAX_RT_PRIO)
		ctf_integer(int, newprio, newprio - MAX_RT_PRIO)
	)
)
#endif

#endif /* LTTNG_TRACE_SCHED_H */

/* This part must be outside protection */
#include <lttng/define_trace.h>