Fix: update sched instrumentation for kernel 4.4.0
diff --git a/instrumentation/events/lttng-module/sched.h b/instrumentation/events/lttng-module/sched.h
index 1bea326135a428e0db10c42388a9683d58edd745..347edb75e309bb01544d19b7455a4bb2b562f03f 100644
--- a/instrumentation/events/lttng-module/sched.h
+++ b/instrumentation/events/lttng-module/sched.h
 #ifndef _TRACE_SCHED_DEF_
 #define _TRACE_SCHED_DEF_
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
+
+static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
+{
+#ifdef CONFIG_SCHED_DEBUG
+       BUG_ON(p != current);
+#endif /* CONFIG_SCHED_DEBUG */
+       /*
+        * Preemption ignores task state, therefore preempted tasks are always RUNNING
+        * (we will not have dequeued if state != RUNNING).
+        */
+       return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
+}
+
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
 
 static inline long __trace_sched_switch_state(struct task_struct *p)
 {
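A note on the hunk above, not part of the patch: for 4.4 kernels the helper marks a preempted task by OR-ing TASK_STATE_MAX into TASK_RUNNING, so a consumer of the recorded prev_state field can still tell preemption apart from a voluntary switch. Below is a minimal user-space decode sketch under that assumption; the EXAMPLE_* constants and the function name are placeholders, since the numeric value of TASK_STATE_MAX differs between kernel versions.

/* Sketch only: decode a prev_state value recorded by the 4.4+ code path.
 * The constants are placeholders, not taken from this patch. */
#include <stdio.h>

#define EXAMPLE_TASK_RUNNING	0x0000L
#define EXAMPLE_TASK_STATE_MAX	0x0800L	/* assumed; check the target kernel */

static void print_prev_state(long prev_state)
{
	if (prev_state & EXAMPLE_TASK_STATE_MAX)
		printf("preempted while RUNNING\n");
	else if (prev_state == EXAMPLE_TASK_RUNNING)
		printf("voluntary switch while RUNNING\n");
	else
		printf("blocked, raw state flags 0x%lx\n", prev_state);
}

int main(void)
{
	print_prev_state(EXAMPLE_TASK_RUNNING | EXAMPLE_TASK_STATE_MAX);
	print_prev_state(0x0001L);	/* e.g. TASK_INTERRUPTIBLE */
	return 0;
}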
@@ -135,6 +149,35 @@ LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret,
 /*
  * Tracepoint for waking up a task:
  */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
+LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,
+
+       TP_PROTO(struct task_struct *p),
+
+       TP_ARGS(p),
+
+       TP_STRUCT__entry(
+               __array_text(   char,   comm,   TASK_COMM_LEN   )
+               __field(        pid_t,  tid                     )
+               __field(        int,    prio                    )
+               __field(        int,    target_cpu              )
+       ),
+
+       TP_fast_assign(
+               tp_memcpy(comm, p->comm, TASK_COMM_LEN)
+               tp_assign(tid, p->pid)
+               tp_assign(prio, p->prio)
+               tp_assign(target_cpu, task_cpu(p))
+       )
+       TP_perf_assign(
+               __perf_task(p)
+       ),
+
+       TP_printk("comm=%s tid=%d prio=%d target_cpu=%03d",
+                 __entry->comm, __entry->tid, __entry->prio,
+                 __entry->target_cpu)
+)
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
 LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,
 
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
@@ -182,8 +225,34 @@ LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,
                  __entry->success)
 #endif
 )
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
+
+/*
+ * Tracepoint called when waking a task; this tracepoint is guaranteed to be
+ * called from the waking context.
+ */
+LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_waking,
+            TP_PROTO(struct task_struct *p),
+            TP_ARGS(p))
+
+/*
+ * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
+ * It is not always called from the waking context.
+ */
+LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
+            TP_PROTO(struct task_struct *p),
+            TP_ARGS(p))
+
+/*
+ * Tracepoint for waking up a new task:
+ */
+LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
+            TP_PROTO(struct task_struct *p),
+            TP_ARGS(p))
+
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
 
 LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup,
             TP_PROTO(struct task_struct *p, int success),
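Context for the instances above, not part of the patch: since 4.3 the upstream wakeup tracepoints drop the `success` argument and a separate sched_waking event fires from the waking context. As a hedged illustration of that prototype change, here is a sketch of how an out-of-tree module could attach its own probe to sched_wakeup on either side of 4.3. The probe and module names are hypothetical, and registering directly like this only links when the tracepoint symbol is exported to modules on the target kernel, which is one reason LTTng resolves tracepoints through its own glue instead.

/* Sketch only, not taken from lttng-modules: version-conditional probe
 * signatures for the upstream sched_wakeup tracepoint (>= 2.6.35 assumed). */
#include <linux/module.h>
#include <linux/version.h>
#include <trace/events/sched.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
/* 4.3+: the "success" argument is gone; every call is an effective wakeup. */
static void demo_wakeup_probe(void *data, struct task_struct *p)
{
	pr_debug("wakeup of pid %d\n", p->pid);
}
#else
/* 2.6.35..4.2: wakeups still carry a success flag. */
static void demo_wakeup_probe(void *data, struct task_struct *p, int success)
{
	pr_debug("wakeup of pid %d success=%d\n", p->pid, success);
}
#endif

static int __init demo_init(void)
{
	return register_trace_sched_wakeup(demo_wakeup_probe, NULL);
}

static void __exit demo_exit(void)
{
	unregister_trace_sched_wakeup(demo_wakeup_probe, NULL);
	tracepoint_synchronize_unregister();
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");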
@@ -216,7 +285,13 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new,
  */
 LTTNG_TRACEPOINT_EVENT(sched_switch,
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
+       TP_PROTO(bool preempt,
+                struct task_struct *prev,
+                struct task_struct *next),
+
+       TP_ARGS(preempt, prev, next),
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
        TP_PROTO(struct task_struct *prev,
                 struct task_struct *next),
 
@@ -242,7 +317,9 @@ LTTNG_TRACEPOINT_EVENT(sched_switch,
                tp_memcpy(next_comm, next->comm, TASK_COMM_LEN)
                tp_assign(prev_tid, prev->pid)
                tp_assign(prev_prio, prev->prio - MAX_RT_PRIO)
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
+               tp_assign(prev_state, __trace_sched_switch_state(preempt, prev))
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
                tp_assign(prev_state, __trace_sched_switch_state(prev))
 #else
                tp_assign(prev_state, prev->state)
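One more companion sketch, not from the patch: 4.4 also changed the sched_switch prototype itself to pass `preempt` explicitly, which is why the hunks above thread a bool through to __trace_sched_switch_state(). The fragment below only shows the probe signature a module would need on either side of that change; the function name is hypothetical, and registration would follow the same pattern as the wakeup sketch earlier.

/* Sketch only: matching probe signatures for the upstream sched_switch
 * tracepoint before and after the 4.4 prototype change (>= 2.6.35 assumed
 * for the older branch). */
#include <linux/version.h>
#include <linux/sched.h>
#include <trace/events/sched.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
static void demo_switch_probe(void *data, bool preempt,
		struct task_struct *prev, struct task_struct *next)
{
	/* preempt is handed over by the scheduler; prev->state alone no
	 * longer distinguishes preemption of a RUNNING task. */
}
#else
static void demo_switch_probe(void *data,
		struct task_struct *prev, struct task_struct *next)
{
	/* pre-4.4: preemption had to be inferred from prev->state. */
}
#endif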