From 27e6eda75df97483d07d61ba731bbc428e2161f9 Mon Sep 17 00:00:00 2001
From: Gabriel-Andrew Pollo-Guilbert
Date: Mon, 16 Sep 2019 13:57:38 -0400
Subject: [PATCH] Fix: update sched prev_state instrumentation for upstream kernel

Introduced in upstream Linux kernel 4.14.

  commit efb40f588b4370ffaeffafbd50f6ff213d954254
  Author: Peter Zijlstra
  Date:   Fri Sep 22 18:19:53 2017 +0200

      sched/tracing: Fix trace_sched_switch task-state printing

Introduced in upstream Linux kernel 4.15. Backported in 13f12749af15
(4.14.64).

  commit 3f5fe9fef5b2da06b6319fab8123056da5217c3f
  Author: Thomas Gleixner
  Date:   Wed Nov 22 13:05:48 2017 +0100

      sched/debug: Fix task state recording/printout

Introduced in upstream Linux kernel 4.20. Backported in e1e5fa73e466
(4.14.102) and fd8152818f11 (4.19.9).

  commit 3054426dc68e5d63aa6a6e9b91ac4ec78e3f3805
  Author: Pavankumar Kondeti
  Date:   Tue Oct 30 12:24:33 2018 +0530

      sched, trace: Fix prev_state output in sched_switch tracepoint

Signed-off-by: Gabriel-Andrew Pollo-Guilbert
Signed-off-by: Michael Jeanson
Signed-off-by: Mathieu Desnoyers
---
 instrumentation/events/lttng-module/sched.h | 58 ++++++++++++++++++++-
 1 file changed, 57 insertions(+), 1 deletion(-)

diff --git a/instrumentation/events/lttng-module/sched.h b/instrumentation/events/lttng-module/sched.h
index 77d77b2a..5b4313a9 100644
--- a/instrumentation/events/lttng-module/sched.h
+++ b/instrumentation/events/lttng-module/sched.h
@@ -25,7 +25,63 @@
 #ifndef _TRACE_SCHED_DEF_
 #define _TRACE_SCHED_DEF_
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0))
+
+static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
+{
+	unsigned int state;
+
+#ifdef CONFIG_SCHED_DEBUG
+	BUG_ON(p != current);
+#endif /* CONFIG_SCHED_DEBUG */
+
+	/*
+	 * Preemption ignores task state, therefore preempted tasks are always
+	 * RUNNING (we will not have dequeued if state != RUNNING).
+	 */
+	if (preempt)
+		return TASK_REPORT_MAX;
+
+	/*
+	 * task_state_index() uses fls() and returns a value from 0-8 range.
+	 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
+	 * it for left shift operation to get the correct task->state
+	 * mapping.
+	 */
+	state = task_state_index(p);
+
+	return state ? (1 << (state - 1)) : state;
+}
+
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
+
+static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
+{
+	unsigned int state;
+
+#ifdef CONFIG_SCHED_DEBUG
+	BUG_ON(p != current);
+#endif /* CONFIG_SCHED_DEBUG */
+
+	/*
+	 * Preemption ignores task state, therefore preempted tasks are always
+	 * RUNNING (we will not have dequeued if state != RUNNING).
+	 */
+	if (preempt)
+		return TASK_REPORT_MAX;
+
+	/*
+	 * __get_task_state() uses fls() and returns a value from 0-8 range.
+	 * Decrement it by 1 (except TASK_RUNNING state i.e 0) before using
+	 * it for left shift operation to get the correct task->state
+	 * mapping.
+	 */
+	state = __get_task_state(p);
+
+	return state ? (1 << (state - 1)) : state;
+}
+
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
 
 static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
 {
-- 
2.34.1
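
For illustration only, not part of the patch above: a minimal userspace
sketch of the round trip the new __trace_sched_switch_state() variants
perform. task_state_index() / __get_task_state() report fls() of the
task's single-bit report state (0 for TASK_RUNNING), and the tracepoint
converts that 0-8 index back to the original bit with
`state ? (1 << (state - 1)) : state`. The fls_sketch() helper and the
sample state values below are assumptions made for this standalone
example; in the kernel the real fls() and TASK_* definitions apply.

/*
 * Illustration only -- not part of the patch above.
 * Round trip: single-bit state -> fls() index (0-8) -> recorded bit.
 */
#include <stdio.h>

/* Stand-in for the kernel's fls(): 1-based index of the highest set bit, 0 if none. */
static unsigned int fls_sketch(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
	/* 0x0000 stands for TASK_RUNNING; the others stand in for single-bit report states. */
	unsigned int states[] = { 0x0000, 0x0001, 0x0002, 0x0004, 0x0008,
				  0x0010, 0x0020, 0x0040, 0x0080 };
	unsigned int i;

	for (i = 0; i < sizeof(states) / sizeof(states[0]); i++) {
		unsigned int idx = fls_sketch(states[i]);     /* what task_state_index() would report */
		long recorded = idx ? (1 << (idx - 1)) : idx; /* what the tracepoint records */

		printf("state 0x%04x -> index %u -> recorded 0x%04lx\n",
		       states[i], idx, recorded);
	}
	return 0;
}

Compiled and run, each line shows the original single-bit state being
recovered from its index; the preempted case bypasses the index entirely
and is encoded separately as TASK_REPORT_MAX, as in the patch.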