Fix: update sched instrumentation for kernel 4.4.0
diff --git a/instrumentation/events/lttng-module/sched.h b/instrumentation/events/lttng-module/sched.h
index e42acd23f23cd8761c8daa7a800cfab72a62ccfe..347edb75e309bb01544d19b7455a4bb2b562f03f 100644
@@ -1,18 +1,72 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM sched
 
-#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_SCHED_H
+#if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
+#define LTTNG_TRACE_SCHED_H
 
+#include "../../../probes/lttng-tracepoint-event.h"
 #include <linux/sched.h>
-#include <linux/tracepoint.h>
 #include <linux/binfmts.h>
 #include <linux/version.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
+#include <linux/sched/rt.h>
+#endif
 
 #ifndef _TRACE_SCHED_DEF_
 #define _TRACE_SCHED_DEF_
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
+
+static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
+{
+#ifdef CONFIG_SCHED_DEBUG
+       BUG_ON(p != current);
+#endif /* CONFIG_SCHED_DEBUG */
+       /*
+        * Preemption ignores task state, therefore preempted tasks are always RUNNING
+        * (we will not have dequeued if state != RUNNING).
+        */
+       return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
+}
+
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
+
+static inline long __trace_sched_switch_state(struct task_struct *p)
+{
+       long state = p->state;
+
+#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_SCHED_DEBUG
+       BUG_ON(p != current);
+#endif /* CONFIG_SCHED_DEBUG */
+       /*
+        * For all intents and purposes a preempted task is a running task.
+        */
+       if (preempt_count() & PREEMPT_ACTIVE)
+               state = TASK_RUNNING | TASK_STATE_MAX;
+#endif /* CONFIG_PREEMPT */
+
+       return state;
+}
+
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
+
+static inline long __trace_sched_switch_state(struct task_struct *p)
+{
+       long state = p->state;
+
+#ifdef CONFIG_PREEMPT
+       /*
+        * For all intents and purposes a preempted task is a running task.
+        */
+       if (task_preempt_count(p) & PREEMPT_ACTIVE)
+               state = TASK_RUNNING | TASK_STATE_MAX;
+#endif
+
+       return state;
+}
+
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
 
 static inline long __trace_sched_switch_state(struct task_struct *p)
 {
@@ -23,11 +77,24 @@ static inline long __trace_sched_switch_state(struct task_struct *p)
         * For all intents and purposes a preempted task is a running task.
         */
        if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
                state = TASK_RUNNING | TASK_STATE_MAX;
-#else
-               state = TASK_RUNNING;
 #endif
+
+       return state;
+}
+
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+
+static inline long __trace_sched_switch_state(struct task_struct *p)
+{
+       long state = p->state;
+
+#ifdef CONFIG_PREEMPT
+       /*
+        * For all intents and purposes a preempted task is a running task.
+        */
+       if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
+               state = TASK_RUNNING;
 #endif
 
        return state;
@@ -40,7 +107,7 @@ static inline long __trace_sched_switch_state(struct task_struct *p)
 /*
  * Tracepoint for calling kthread_stop, performed to end a kthread:
  */
-TRACE_EVENT(sched_kthread_stop,
+LTTNG_TRACEPOINT_EVENT(sched_kthread_stop,
 
        TP_PROTO(struct task_struct *t),
 
@@ -62,7 +129,7 @@ TRACE_EVENT(sched_kthread_stop,
 /*
  * Tracepoint for the return value of the kthread stopping:
  */
-TRACE_EVENT(sched_kthread_stop_ret,
+LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret,
 
        TP_PROTO(int ret),
 
@@ -82,7 +149,36 @@ TRACE_EVENT(sched_kthread_stop_ret,
 /*
  * Tracepoint for waking up a task:
  */
-DECLARE_EVENT_CLASS(sched_wakeup_template,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
+LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,
+
+       TP_PROTO(struct task_struct *p),
+
+       TP_ARGS(p),
+
+       TP_STRUCT__entry(
+               __array_text(   char,   comm,   TASK_COMM_LEN   )
+               __field(        pid_t,  tid                     )
+               __field(        int,    prio                    )
+               __field(        int,    target_cpu              )
+       ),
+
+       TP_fast_assign(
+               tp_memcpy(comm, p->comm, TASK_COMM_LEN)
+               tp_assign(tid, p->pid)
+               tp_assign(prio, p->prio)
+               tp_assign(target_cpu, task_cpu(p))
+       )
+       TP_perf_assign(
+               __perf_task(p)
+       ),
+
+       TP_printk("comm=%s tid=%d prio=%d target_cpu=%03d",
+                 __entry->comm, __entry->tid, __entry->prio,
+                 __entry->target_cpu)
+)
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
+LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,
 
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
        TP_PROTO(struct task_struct *p, int success),
@@ -129,30 +225,56 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
                  __entry->success)
 #endif
 )
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
 
-DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
+/*
+ * Tracepoint called when waking a task; this tracepoint is guaranteed to be
+ * called from the waking context.
+ */
+LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_waking,
+            TP_PROTO(struct task_struct *p),
+            TP_ARGS(p))
+
+/*
+ * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
+ * It is not always called from the waking context.
+ */
+LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
+            TP_PROTO(struct task_struct *p),
+            TP_ARGS(p))
+
+/*
+ * Tracepoint for waking up a new task:
+ */
+LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
+            TP_PROTO(struct task_struct *p),
+            TP_ARGS(p))
+
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+
+LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
             TP_PROTO(struct task_struct *p, int success),
             TP_ARGS(p, success))
 
 /*
  * Tracepoint for waking up a new task:
  */
-DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
+LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
             TP_PROTO(struct task_struct *p, int success),
             TP_ARGS(p, success))
 
 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
 
-DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
+LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
             TP_PROTO(struct rq *rq, struct task_struct *p, int success),
             TP_ARGS(rq, p, success))
 
 /*
  * Tracepoint for waking up a new task:
  */
-DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
+LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
             TP_PROTO(struct rq *rq, struct task_struct *p, int success),
             TP_ARGS(rq, p, success))
 
@@ -161,9 +283,15 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
 /*
  * Tracepoint for task switches, performed by the scheduler:
  */
-TRACE_EVENT(sched_switch,
+LTTNG_TRACEPOINT_EVENT(sched_switch,
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
+       TP_PROTO(bool preempt,
+                struct task_struct *prev,
+                struct task_struct *next),
+
+       TP_ARGS(preempt, prev, next),
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
        TP_PROTO(struct task_struct *prev,
                 struct task_struct *next),
 
@@ -189,7 +317,9 @@ TRACE_EVENT(sched_switch,
                tp_memcpy(next_comm, next->comm, TASK_COMM_LEN)
                tp_assign(prev_tid, prev->pid)
                tp_assign(prev_prio, prev->prio - MAX_RT_PRIO)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
+               tp_assign(prev_state, __trace_sched_switch_state(preempt, prev))
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
                tp_assign(prev_state, __trace_sched_switch_state(prev))
 #else
                tp_assign(prev_state, prev->state)
@@ -224,7 +354,7 @@ TRACE_EVENT(sched_switch,
 /*
  * Tracepoint for a task being migrated:
  */
-TRACE_EVENT(sched_migrate_task,
+LTTNG_TRACEPOINT_EVENT(sched_migrate_task,
 
        TP_PROTO(struct task_struct *p, int dest_cpu),
 
@@ -251,7 +381,7 @@ TRACE_EVENT(sched_migrate_task,
                  __entry->orig_cpu, __entry->dest_cpu)
 )
 
-DECLARE_EVENT_CLASS(sched_process_template,
+LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template,
 
        TP_PROTO(struct task_struct *p),
 
@@ -276,7 +406,7 @@ DECLARE_EVENT_CLASS(sched_process_template,
 /*
  * Tracepoint for freeing a task:
  */
-DEFINE_EVENT(sched_process_template, sched_process_free,
+LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_free,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p))
             
@@ -284,7 +414,7 @@ DEFINE_EVENT(sched_process_template, sched_process_free,
 /*
  * Tracepoint for a task exiting:
  */
-DEFINE_EVENT(sched_process_template, sched_process_exit,
+LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_exit,
             TP_PROTO(struct task_struct *p),
             TP_ARGS(p))
 
@@ -292,11 +422,11 @@ DEFINE_EVENT(sched_process_template, sched_process_exit,
  * Tracepoint for waiting on task to unschedule:
  */
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
-DEFINE_EVENT(sched_process_template, sched_wait_task,
+LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
        TP_PROTO(struct task_struct *p),
        TP_ARGS(p))
 #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
-DEFINE_EVENT(sched_process_template, sched_wait_task,
+LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task,
        TP_PROTO(struct rq *rq, struct task_struct *p),
        TP_ARGS(rq, p))
 #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */
@@ -304,7 +434,7 @@ DEFINE_EVENT(sched_process_template, sched_wait_task,
 /*
  * Tracepoint for a waiting task:
  */
-TRACE_EVENT(sched_process_wait,
+LTTNG_TRACEPOINT_EVENT(sched_process_wait,
 
        TP_PROTO(struct pid *pid),
 
@@ -334,7 +464,7 @@ TRACE_EVENT(sched_process_wait,
  * == child_pid, while creation of a thread yields child_tid !=
  * child_pid.
  */
-TRACE_EVENT(sched_process_fork,
+LTTNG_TRACEPOINT_EVENT(sched_process_fork,
 
        TP_PROTO(struct task_struct *parent, struct task_struct *child),
 
@@ -367,7 +497,7 @@ TRACE_EVENT(sched_process_fork,
 /*
  * Tracepoint for sending a signal:
  */
-TRACE_EVENT(sched_signal_send,
+LTTNG_TRACEPOINT_EVENT(sched_signal_send,
 
        TP_PROTO(int sig, struct task_struct *p),
 
@@ -394,7 +524,7 @@ TRACE_EVENT(sched_signal_send,
 /*
  * Tracepoint for exec:
  */
-TRACE_EVENT(sched_process_exec,
+LTTNG_TRACEPOINT_EVENT(sched_process_exec,
 
        TP_PROTO(struct task_struct *p, pid_t old_pid,
                 struct linux_binprm *bprm),
@@ -423,7 +553,7 @@ TRACE_EVENT(sched_process_exec,
  * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
  *     adding sched_stat support to SCHED_FIFO/RR would be welcome.
  */
-DECLARE_EVENT_CLASS(sched_stat_template,
+LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template,
 
        TP_PROTO(struct task_struct *tsk, u64 delay),
 
@@ -454,7 +584,7 @@ DECLARE_EVENT_CLASS(sched_stat_template,
  * Tracepoint for accounting wait time (time the task is runnable
  * but not actually running due to scheduler contention).
  */
-DEFINE_EVENT(sched_stat_template, sched_stat_wait,
+LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_wait,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay))
 
@@ -462,7 +592,7 @@ DEFINE_EVENT(sched_stat_template, sched_stat_wait,
  * Tracepoint for accounting sleep time (time the task is not runnable,
  * including iowait, see below).
  */
-DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
+LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_sleep,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay))
 
@@ -470,7 +600,7 @@ DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
  * Tracepoint for accounting iowait time (time the task is not runnable
  * due to waiting on IO to complete).
  */
-DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
+LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_iowait,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay))
 
@@ -478,7 +608,7 @@ DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
 /*
  * Tracepoint for accounting blocked time (time the task is in uninterruptible).
  */
-DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
+LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_blocked,
             TP_PROTO(struct task_struct *tsk, u64 delay),
             TP_ARGS(tsk, delay))
 #endif
@@ -487,7 +617,7 @@ DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
  * Tracepoint for accounting runtime (time the task is executing
  * on a CPU).
  */
-TRACE_EVENT(sched_stat_runtime,
+LTTNG_TRACEPOINT_EVENT(sched_stat_runtime,
 
        TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
 
@@ -525,7 +655,7 @@ TRACE_EVENT(sched_stat_runtime,
  * Tracepoint for showing priority inheritance modifying a tasks
  * priority.
  */
-TRACE_EVENT(sched_pi_setprio,
+LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,
 
        TP_PROTO(struct task_struct *tsk, int newprio),
 
@@ -551,7 +681,7 @@ TRACE_EVENT(sched_pi_setprio,
 )
 #endif
 
-#endif /* _TRACE_SCHED_H */
+#endif /* LTTNG_TRACE_SCHED_H */
 
 /* This part must be outside protection */
 #include "../../../probes/define_trace.h"