X-Git-Url: https://git.lttng.org/?a=blobdiff_plain;f=instrumentation%2Fevents%2Flttng-module%2Fsched.h;h=347edb75e309bb01544d19b7455a4bb2b562f03f;hb=b11672c7f30b9ffbad47a5c2178155f8caf0be0e;hp=9e490cfe411a2ac03f6268e278939c9d0109e665;hpb=33673ee7b705adb6b24350c30966d4937d41ed95;p=lttng-modules.git
diff --git a/instrumentation/events/lttng-module/sched.h b/instrumentation/events/lttng-module/sched.h
index 9e490cfe..347edb75 100644
--- a/instrumentation/events/lttng-module/sched.h
+++ b/instrumentation/events/lttng-module/sched.h
@@ -1,11 +1,11 @@
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM sched
 
-#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
-#define _TRACE_SCHED_H
+#if !defined(LTTNG_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
+#define LTTNG_TRACE_SCHED_H
 
+#include "../../../probes/lttng-tracepoint-event.h"
 #include <linux/sched.h>
-#include <linux/tracepoint.h>
 #include <linux/binfmts.h>
 #include <linux/version.h>
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
@@ -15,7 +15,41 @@
 #ifndef _TRACE_SCHED_DEF_
 #define _TRACE_SCHED_DEF_
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
+
+static inline long __trace_sched_switch_state(bool preempt, struct task_struct *p)
+{
+#ifdef CONFIG_SCHED_DEBUG
+	BUG_ON(p != current);
+#endif /* CONFIG_SCHED_DEBUG */
+	/*
+	 * Preemption ignores task state, therefore preempted tasks are always RUNNING
+	 * (we will not have dequeued if state != RUNNING).
+	 */
+	return preempt ? TASK_RUNNING | TASK_STATE_MAX : p->state;
+}
+
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,19,0))
+
+static inline long __trace_sched_switch_state(struct task_struct *p)
+{
+	long state = p->state;
+
+#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_SCHED_DEBUG
+	BUG_ON(p != current);
+#endif /* CONFIG_SCHED_DEBUG */
+	/*
+	 * For all intents and purposes a preempted task is a running task.
+	 */
+	if (preempt_count() & PREEMPT_ACTIVE)
+		state = TASK_RUNNING | TASK_STATE_MAX;
+#endif /* CONFIG_PREEMPT */
+
+	return state;
+}
+
+#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0))
 
 static inline long __trace_sched_switch_state(struct task_struct *p)
 {
@@ -73,7 +107,7 @@ static inline long __trace_sched_switch_state(struct task_struct *p)
 /*
  * Tracepoint for calling kthread_stop, performed to end a kthread:
  */
-TRACE_EVENT(sched_kthread_stop,
+LTTNG_TRACEPOINT_EVENT(sched_kthread_stop,
 
 	TP_PROTO(struct task_struct *t),
 
@@ -95,7 +129,7 @@ TRACE_EVENT(sched_kthread_stop,
 /*
  * Tracepoint for the return value of the kthread stopping:
  */
-TRACE_EVENT(sched_kthread_stop_ret,
+LTTNG_TRACEPOINT_EVENT(sched_kthread_stop_ret,
 
 	TP_PROTO(int ret),
 
@@ -115,7 +149,36 @@ TRACE_EVENT(sched_kthread_stop_ret,
 /*
  * Tracepoint for waking up a task:
  */
-DECLARE_EVENT_CLASS(sched_wakeup_template,
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
+LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,
+
+	TP_PROTO(struct task_struct *p),
+
+	TP_ARGS(p),
+
+	TP_STRUCT__entry(
+		__array_text(	char,	comm,	TASK_COMM_LEN	)
+		__field(	pid_t,	tid	)
+		__field(	int,	prio	)
+		__field(	int,	target_cpu	)
+	),
+
+	TP_fast_assign(
+		tp_memcpy(comm, p->comm, TASK_COMM_LEN)
+		tp_assign(tid, p->pid)
+		tp_assign(prio, p->prio)
+		tp_assign(target_cpu, task_cpu(p))
+	)
+	TP_perf_assign(
+		__perf_task(p)
+	),
+
+	TP_printk("comm=%s tid=%d prio=%d target_cpu=%03d",
+		  __entry->comm, __entry->tid, __entry->prio,
+		  __entry->target_cpu)
+)
+#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
+LTTNG_TRACEPOINT_EVENT_CLASS(sched_wakeup_template,
 
 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
 	TP_PROTO(struct task_struct *p, int success),
 
@@ -162,30 +225,56 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
 		  __entry->success)
 #endif
 )
+#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0)) */
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
+
+/*
+ * Tracepoint called when waking a task; this tracepoint is guaranteed to be
+ * called from the waking context.
+ */
+LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_waking,
+	TP_PROTO(struct task_struct *p),
+	TP_ARGS(p))
+
+/*
+ * Tracepoint called when the task is actually woken; p->state == TASK_RUNNING.
+ * It is not always called from the waking context.
+ */ +LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup, + TP_PROTO(struct task_struct *p), + TP_ARGS(p)) + +/* + * Tracepoint for waking up a new task: + */ +LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new, + TP_PROTO(struct task_struct *p), + TP_ARGS(p)) -DEFINE_EVENT(sched_wakeup_template, sched_wakeup, +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) + +LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup, TP_PROTO(struct task_struct *p, int success), TP_ARGS(p, success)) /* * Tracepoint for waking up a new task: */ -DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, +LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new, TP_PROTO(struct task_struct *p, int success), TP_ARGS(p, success)) #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */ -DEFINE_EVENT(sched_wakeup_template, sched_wakeup, +LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup, TP_PROTO(struct rq *rq, struct task_struct *p, int success), TP_ARGS(rq, p, success)) /* * Tracepoint for waking up a new task: */ -DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, +LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_wakeup_template, sched_wakeup_new, TP_PROTO(struct rq *rq, struct task_struct *p, int success), TP_ARGS(rq, p, success)) @@ -194,9 +283,15 @@ DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new, /* * Tracepoint for task switches, performed by the scheduler: */ -TRACE_EVENT(sched_switch, +LTTNG_TRACEPOINT_EVENT(sched_switch, -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0)) + TP_PROTO(bool preempt, + struct task_struct *prev, + struct task_struct *next), + + TP_ARGS(preempt, prev, next), +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) TP_PROTO(struct task_struct *prev, struct task_struct *next), @@ -222,7 +317,9 @@ TRACE_EVENT(sched_switch, tp_memcpy(next_comm, next->comm, TASK_COMM_LEN) tp_assign(prev_tid, prev->pid) tp_assign(prev_prio, prev->prio - MAX_RT_PRIO) -#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) +#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0)) + tp_assign(prev_state, __trace_sched_switch_state(preempt, prev)) +#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) tp_assign(prev_state, __trace_sched_switch_state(prev)) #else tp_assign(prev_state, prev->state) @@ -257,7 +354,7 @@ TRACE_EVENT(sched_switch, /* * Tracepoint for a task being migrated: */ -TRACE_EVENT(sched_migrate_task, +LTTNG_TRACEPOINT_EVENT(sched_migrate_task, TP_PROTO(struct task_struct *p, int dest_cpu), @@ -284,7 +381,7 @@ TRACE_EVENT(sched_migrate_task, __entry->orig_cpu, __entry->dest_cpu) ) -DECLARE_EVENT_CLASS(sched_process_template, +LTTNG_TRACEPOINT_EVENT_CLASS(sched_process_template, TP_PROTO(struct task_struct *p), @@ -309,7 +406,7 @@ DECLARE_EVENT_CLASS(sched_process_template, /* * Tracepoint for freeing a task: */ -DEFINE_EVENT(sched_process_template, sched_process_free, +LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_free, TP_PROTO(struct task_struct *p), TP_ARGS(p)) @@ -317,7 +414,7 @@ DEFINE_EVENT(sched_process_template, sched_process_free, /* * Tracepoint for a task exiting: */ -DEFINE_EVENT(sched_process_template, sched_process_exit, +LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_process_exit, TP_PROTO(struct task_struct *p), TP_ARGS(p)) @@ -325,11 +422,11 @@ DEFINE_EVENT(sched_process_template, sched_process_exit, * Tracepoint for waiting on task to unschedule: */ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) 
-DEFINE_EVENT(sched_process_template, sched_wait_task, +LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task, TP_PROTO(struct task_struct *p), TP_ARGS(p)) #else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */ -DEFINE_EVENT(sched_process_template, sched_wait_task, +LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_process_template, sched_wait_task, TP_PROTO(struct rq *rq, struct task_struct *p), TP_ARGS(rq, p)) #endif /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) */ @@ -337,7 +434,7 @@ DEFINE_EVENT(sched_process_template, sched_wait_task, /* * Tracepoint for a waiting task: */ -TRACE_EVENT(sched_process_wait, +LTTNG_TRACEPOINT_EVENT(sched_process_wait, TP_PROTO(struct pid *pid), @@ -367,7 +464,7 @@ TRACE_EVENT(sched_process_wait, * == child_pid, while creation of a thread yields to child_tid != * child_pid. */ -TRACE_EVENT(sched_process_fork, +LTTNG_TRACEPOINT_EVENT(sched_process_fork, TP_PROTO(struct task_struct *parent, struct task_struct *child), @@ -400,7 +497,7 @@ TRACE_EVENT(sched_process_fork, /* * Tracepoint for sending a signal: */ -TRACE_EVENT(sched_signal_send, +LTTNG_TRACEPOINT_EVENT(sched_signal_send, TP_PROTO(int sig, struct task_struct *p), @@ -427,7 +524,7 @@ TRACE_EVENT(sched_signal_send, /* * Tracepoint for exec: */ -TRACE_EVENT(sched_process_exec, +LTTNG_TRACEPOINT_EVENT(sched_process_exec, TP_PROTO(struct task_struct *p, pid_t old_pid, struct linux_binprm *bprm), @@ -456,7 +553,7 @@ TRACE_EVENT(sched_process_exec, * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE * adding sched_stat support to SCHED_FIFO/RR would be welcome. */ -DECLARE_EVENT_CLASS(sched_stat_template, +LTTNG_TRACEPOINT_EVENT_CLASS(sched_stat_template, TP_PROTO(struct task_struct *tsk, u64 delay), @@ -487,7 +584,7 @@ DECLARE_EVENT_CLASS(sched_stat_template, * Tracepoint for accounting wait time (time the task is runnable * but not actually running due to scheduler contention). */ -DEFINE_EVENT(sched_stat_template, sched_stat_wait, +LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_wait, TP_PROTO(struct task_struct *tsk, u64 delay), TP_ARGS(tsk, delay)) @@ -495,7 +592,7 @@ DEFINE_EVENT(sched_stat_template, sched_stat_wait, * Tracepoint for accounting sleep time (time the task is not runnable, * including iowait, see below). */ -DEFINE_EVENT(sched_stat_template, sched_stat_sleep, +LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_sleep, TP_PROTO(struct task_struct *tsk, u64 delay), TP_ARGS(tsk, delay)) @@ -503,7 +600,7 @@ DEFINE_EVENT(sched_stat_template, sched_stat_sleep, * Tracepoint for accounting iowait time (time the task is not runnable * due to waiting on IO to complete). */ -DEFINE_EVENT(sched_stat_template, sched_stat_iowait, +LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_iowait, TP_PROTO(struct task_struct *tsk, u64 delay), TP_ARGS(tsk, delay)) @@ -511,7 +608,7 @@ DEFINE_EVENT(sched_stat_template, sched_stat_iowait, /* * Tracepoint for accounting blocked time (time the task is in uninterruptible). */ -DEFINE_EVENT(sched_stat_template, sched_stat_blocked, +LTTNG_TRACEPOINT_EVENT_INSTANCE(sched_stat_template, sched_stat_blocked, TP_PROTO(struct task_struct *tsk, u64 delay), TP_ARGS(tsk, delay)) #endif @@ -520,7 +617,7 @@ DEFINE_EVENT(sched_stat_template, sched_stat_blocked, * Tracepoint for accounting runtime (time the task is executing * on a CPU). 
 */
-TRACE_EVENT(sched_stat_runtime,
+LTTNG_TRACEPOINT_EVENT(sched_stat_runtime,
 
 	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
 
@@ -558,7 +655,7 @@
  * Tracepoint for showing priority inheritance modifying a task's
  * priority.
  */
-TRACE_EVENT(sched_pi_setprio,
+LTTNG_TRACEPOINT_EVENT(sched_pi_setprio,
 
 	TP_PROTO(struct task_struct *tsk, int newprio),
 
@@ -584,7 +681,7 @@
 )
 #endif
 
-#endif /* _TRACE_SCHED_H */
+#endif /* LTTNG_TRACE_SCHED_H */
 
 /* This part must be outside protection */
 #include "../../../probes/define_trace.h"
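
Note on the prev_state encoding introduced above: on 4.4+ kernels the patched __trace_sched_switch_state() folds preemption into the recorded prev_state by setting the TASK_STATE_MAX bit, so a trace consumer can tell a preempted (still runnable) task apart from one that actually blocked. The stand-alone C sketch below shows one way a consumer might decode that field. It is an illustration only: decode_prev_state() is a hypothetical helper, and the state constants are the 4.4-era values; TASK_STATE_MAX moves between kernel versions, so a real consumer should take these values from the traced kernel's headers.

/*
 * Consumer-side sketch (not part of the patch): decode the prev_state
 * value written by the sched_switch event. Constants below are the
 * 4.4-era values and are assumptions, not taken from this patch.
 */
#include <stdio.h>

#define TASK_RUNNING		0x0000
#define TASK_INTERRUPTIBLE	0x0001
#define TASK_UNINTERRUPTIBLE	0x0002
#define TASK_STATE_MAX		0x0800	/* 4.4-era value (assumption) */

static void decode_prev_state(long prev_state)
{
	/* The TASK_STATE_MAX bit marks a task preempted while RUNNING. */
	if (prev_state & TASK_STATE_MAX) {
		printf("0x%lx: preempted (still runnable)\n", prev_state);
		return;
	}
	if (prev_state == TASK_RUNNING)
		printf("0x%lx: runnable, yielded the CPU\n", prev_state);
	else if (prev_state & TASK_INTERRUPTIBLE)
		printf("0x%lx: interruptible sleep\n", prev_state);
	else if (prev_state & TASK_UNINTERRUPTIBLE)
		printf("0x%lx: uninterruptible sleep\n", prev_state);
	else
		printf("0x%lx: other state\n", prev_state);
}

int main(void)
{
	decode_prev_state(TASK_RUNNING | TASK_STATE_MAX);	/* preemption path */
	decode_prev_state(TASK_INTERRUPTIBLE);			/* blocking path */
	return 0;
}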