X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=include%2Finstrumentation%2Fevents%2Fsched.h;h=f6f258ad5de2e91185b4baf1ddc55b1f31fc007a;hb=9f2d2694cf0655682a1f0c29a7f3b868680524a5;hp=91953a6f1ca993990cdd7034efb6aee388357b88;hpb=e54b3828b8ddfd5a9b9b5545b5fb1d96ba6551d4;p=lttng-modules.git diff --git a/include/instrumentation/events/sched.h b/include/instrumentation/events/sched.h index 91953a6f..f6f258ad 100644 --- a/include/instrumentation/events/sched.h +++ b/include/instrumentation/events/sched.h @@ -10,19 +10,19 @@ #include #include #include -#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,9,0)) #include -#endif -#include #define LTTNG_MAX_PID_NS_LEVEL 32 #ifndef _TRACE_SCHED_DEF_ #define _TRACE_SCHED_DEF_ -#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,15,0)) +#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,18,0) \ + || LTTNG_RHEL_KERNEL_RANGE(5,14,0,162,0,0, 5,15,0,0,0,0)) -static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p) +static inline long __trace_sched_switch_state(bool preempt, + unsigned int prev_state, + struct task_struct *p) { unsigned int state; @@ -43,12 +43,12 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct * * it for left shift operation to get the correct task->state * mapping. */ - state = task_state_index(p); + state = __task_state_index(prev_state, p->exit_state); return state ? (1 << (state - 1)) : state; } -#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,14,0)) +#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,15,0)) static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p) { @@ -66,101 +66,57 @@ static inline long __trace_sched_switch_state(bool preempt, struct task_struct * return TASK_REPORT_MAX; /* - * __get_task_state() uses fls() and returns a value from 0-8 range. + * task_state_index() uses fls() and returns a value from 0-8 range. * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using * it for left shift operation to get the correct task->state * mapping. */ - state = __get_task_state(p); + state = task_state_index(p); return state ? (1 << (state - 1)) : state; } -#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,4,0)) +#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,14,0)) static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p) { -#ifdef CONFIG_SCHED_DEBUG - BUG_ON(p != current); -#endif /* CONFIG_SCHED_DEBUG */ - /* - * Preemption ignores task state, therefore preempted tasks are always RUNNING - * (we will not have dequeued if state != RUNNING). - */ - return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state; -} - -#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,19,0)) - -static inline long __trace_sched_switch_state(struct task_struct *p) -{ - long state = p->state; + unsigned int state; -#ifdef CONFIG_PREEMPT #ifdef CONFIG_SCHED_DEBUG - BUG_ON(p != current); + BUG_ON(p != current); #endif /* CONFIG_SCHED_DEBUG */ - /* - * For all intents and purposes a preempted task is a running task. - */ - if (preempt_count() & PREEMPT_ACTIVE) - state = TASK_RUNNING | TASK_STATE_MAX; -#endif /* CONFIG_PREEMPT */ - - return state; -} - -#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,13,0)) - -static inline long __trace_sched_switch_state(struct task_struct *p) -{ - long state = p->state; -#ifdef CONFIG_PREEMPT - /* - * For all intents and purposes a preempted task is a running task. 
- */ - if (task_preempt_count(p) & PREEMPT_ACTIVE) - state = TASK_RUNNING | TASK_STATE_MAX; -#endif - - return state; -} - -#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,2,0)) - -static inline long __trace_sched_switch_state(struct task_struct *p) -{ - long state = p->state; + /* + * Preemption ignores task state, therefore preempted tasks are always + * RUNNING (we will not have dequeued if state != RUNNING). + */ + if (preempt) + return TASK_REPORT_MAX; -#ifdef CONFIG_PREEMPT - /* - * For all intents and purposes a preempted task is a running task. - */ - if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE) - state = TASK_RUNNING | TASK_STATE_MAX; -#endif + /* + * __get_task_state() uses fls() and returns a value from 0-8 range. + * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using + * it for left shift operation to get the correct task->state + * mapping. + */ + state = __get_task_state(p); - return state; + return state ? (1 << (state - 1)) : state; } #else -static inline long __trace_sched_switch_state(struct task_struct *p) +static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p) { - long state = p->state; - -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_SCHED_DEBUG + BUG_ON(p != current); +#endif /* CONFIG_SCHED_DEBUG */ /* - * For all intents and purposes a preempted task is a running task. + * Preemption ignores task state, therefore preempted tasks are always RUNNING + * (we will not have dequeued if state != RUNNING). */ - if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE) - state = TASK_RUNNING; -#endif - - return state; + return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state; } - #endif #endif /* _TRACE_SCHED_DEF_ */ @@ -179,18 +135,11 @@ LTTNG_TRACEPOINT_ENUM(task_state, ctf_enum_value("TASK_TRACED", __TASK_TRACED) ctf_enum_value("EXIT_DEAD", EXIT_DEAD) ctf_enum_value("EXIT_ZOMBIE", EXIT_ZOMBIE) - -#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,9,0)) ctf_enum_value("TASK_PARKED", TASK_PARKED) -#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,9,0)) */ - ctf_enum_value("TASK_DEAD", TASK_DEAD) ctf_enum_value("TASK_WAKEKILL", TASK_WAKEKILL) ctf_enum_value("TASK_WAKING", TASK_WAKING) - -#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,2,0)) ctf_enum_value("TASK_NOLOAD", TASK_NOLOAD) -#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,2,0)) */ #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,8,0)) ctf_enum_value("TASK_NEW", TASK_NEW) @@ -233,14 +182,6 @@ LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret, /* * Tracepoint for waking up a task: */ -#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0) || \ - LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \ - LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \ - LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \ - LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \ - LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \ - LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \ - LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0)) LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template, TP_PROTO(struct task_struct *p), @@ -254,31 +195,6 @@ LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template, ctf_integer(int, target_cpu, task_cpu(p)) ) ) -#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0)) */ -LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template, - - TP_PROTO(struct task_struct *p, int success), - - TP_ARGS(p, success), - - TP_FIELDS( - ctf_array_text(char, comm, p->comm, TASK_COMM_LEN) - ctf_integer(pid_t, tid, p->pid) - ctf_integer(int, 
prio, p->prio - MAX_RT_PRIO) - ctf_integer(int, success, success) - ctf_integer(int, target_cpu, task_cpu(p)) - ) -) -#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0)) */ - -#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0) || \ - LTTNG_RT_KERNEL_RANGE(4,1,10,11, 4,2,0,0) || \ - LTTNG_RT_KERNEL_RANGE(3,18,27,26, 3,19,0,0) || \ - LTTNG_RT_KERNEL_RANGE(3,14,61,63, 3,15,0,0) || \ - LTTNG_RT_KERNEL_RANGE(3,12,54,73, 3,13,0,0) || \ - LTTNG_RT_KERNEL_RANGE(3,10,97,106, 3,11,0,0) || \ - LTTNG_RT_KERNEL_RANGE(3,4,110,139, 3,5,0,0) || \ - LTTNG_RT_KERNEL_RANGE(3,2,77,111, 3,3,0,0)) /* * Tracepoint called when waking a task; this tracepoint is guaranteed to be @@ -303,61 +219,61 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new, TP_PROTO(struct task_struct *p), TP_ARGS(p)) -#else - -LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup, - TP_PROTO(struct task_struct *p, int success), - TP_ARGS(p, success)) - /* - * Tracepoint for waking up a new task: + * Tracepoint for task switches, performed by the scheduler: */ -LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new, - TP_PROTO(struct task_struct *p, int success), - TP_ARGS(p, success)) -#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,3,0)) */ +#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,18,0) \ + || LTTNG_RHEL_KERNEL_RANGE(5,14,0,162,0,0, 5,15,0,0,0,0)) +LTTNG_TRACEPOINT_EVENT(sched_switch, + + TP_PROTO(bool preempt, + struct task_struct *prev, + struct task_struct *next, + unsigned int prev_state), + + TP_ARGS(preempt, prev, next, prev_state), + + TP_FIELDS( + ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN) + ctf_integer(pid_t, prev_tid, prev->pid) + ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO) +#ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM + ctf_enum(task_state, long, prev_state, __trace_sched_switch_state(preempt, prev_state, prev)) +#else + ctf_integer(long, prev_state, __trace_sched_switch_state(preempt, prev_state, prev)) +#endif + ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN) + ctf_integer(pid_t, next_tid, next->pid) + ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO) + ) +) + +#else -/* - * Tracepoint for task switches, performed by the scheduler: - */ LTTNG_TRACEPOINT_EVENT(sched_switch, -#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,4,0)) TP_PROTO(bool preempt, struct task_struct *prev, struct task_struct *next), TP_ARGS(preempt, prev, next), -#else - TP_PROTO(struct task_struct *prev, - struct task_struct *next), - - TP_ARGS(prev, next), -#endif /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,4,0)) */ TP_FIELDS( ctf_array_text(char, prev_comm, prev->comm, TASK_COMM_LEN) ctf_integer(pid_t, prev_tid, prev->pid) ctf_integer(int, prev_prio, prev->prio - MAX_RT_PRIO) -#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,4,0)) #ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM ctf_enum(task_state, long, prev_state, __trace_sched_switch_state(preempt, prev)) #else ctf_integer(long, prev_state, __trace_sched_switch_state(preempt, prev)) -#endif -#else -#ifdef CONFIG_LTTNG_EXPERIMENTAL_BITWISE_ENUM - ctf_enum(task_state, long, prev_state, __trace_sched_switch_state(prev)) -#else - ctf_integer(long, prev_state, __trace_sched_switch_state(prev)) -#endif #endif ctf_array_text(char, next_comm, next->comm, TASK_COMM_LEN) ctf_integer(pid_t, next_tid, next->pid) ctf_integer(int, next_prio, next->prio - MAX_RT_PRIO) ) ) +#endif /* * Tracepoint for a task being migrated: @@ 
-465,7 +381,6 @@ LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork, ctf_array_text(char, parent_comm, parent->comm, TASK_COMM_LEN) ctf_integer(pid_t, parent_tid, parent->pid) ctf_integer(pid_t, parent_pid, parent->tgid) -#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,8,0)) ctf_integer(unsigned int, parent_ns_inum, ({ unsigned int parent_ns_inum = 0; @@ -476,16 +391,14 @@ LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork, pid_ns = task_active_pid_ns(parent); if (pid_ns) parent_ns_inum = - pid_ns->lttng_ns_inum; + pid_ns->ns.inum; } parent_ns_inum; })) -#endif ctf_array_text(char, child_comm, child->comm, TASK_COMM_LEN) ctf_integer(pid_t, child_tid, child->pid) ctf_sequence(pid_t, vtids, tp_locvar->vtids, u8, tp_locvar->ns_level) ctf_integer(pid_t, child_pid, child->tgid) -#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,8,0)) ctf_integer(unsigned int, child_ns_inum, ({ unsigned int child_ns_inum = 0; @@ -496,17 +409,15 @@ LTTNG_TRACEPOINT_EVENT_CODE(sched_process_fork, pid_ns = task_active_pid_ns(child); if (pid_ns) child_ns_inum = - pid_ns->lttng_ns_inum; + pid_ns->ns.inum; } child_ns_inum; })) -#endif ), TP_code_post() ) -#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,4,0)) /* * Tracepoint for exec: */ @@ -523,7 +434,6 @@ LTTNG_TRACEPOINT_EVENT(sched_process_exec, ctf_integer(pid_t, old_tid, old_pid) ) ) -#endif /* * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE @@ -567,14 +477,12 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_iowait, TP_PROTO(struct task_struct *tsk, u64 delay), TP_ARGS(tsk, delay)) -#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,3,0)) /* * Tracepoint for accounting blocked time (time the task is in uninterruptible). */ LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_blocked, TP_PROTO(struct task_struct *tsk, u64 delay), TP_ARGS(tsk, delay)) -#endif /* * Tracepoint for accounting runtime (time the task is executing
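
For reference, a minimal standalone sketch in plain userspace C (illustration only, not part of the patch; the helper name index_to_state_bit is made up here) of the mapping __trace_sched_switch_state() applies above: task_state_index()/__task_state_index() return an fls()-based index in the 0-8 range, and the tracepoint re-expands that index into the single-bit prev_state value recorded in the trace with state ? (1 << (state - 1)) : state.

#include <stdio.h>

/* 0 stays TASK_RUNNING (0); any other index becomes bit (index - 1). */
static long index_to_state_bit(unsigned int state_index)
{
	return state_index ? (1L << (state_index - 1)) : 0L;
}

int main(void)
{
	unsigned int idx;

	/* Walk the whole 0-8 index range the comment above describes. */
	for (idx = 0; idx <= 8; idx++)
		printf("index %u -> prev_state 0x%lx\n", idx,
		       index_to_state_bit(idx));
	return 0;
}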