Fix bitfield.h include directory
[lttng-modules.git] / instrumentation / events / lttng-patch / sched.h
1 #undef TRACE_SYSTEM
2 #define TRACE_SYSTEM sched
3
4 #if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
5 #define _TRACE_SCHED_H
6
7 #include <linux/sched.h>
8 #include <linux/tracepoint.h>
9
10 #ifndef _TRACE_SCHED_DEF_
11 #define _TRACE_SCHED_DEF_
12
#ifdef CREATE_TRACE_POINTS
/*
 * Effective scheduler state of @p as reported by the sched_switch event.
 */
static inline long __trace_sched_switch_state(struct task_struct *p)
{
#ifdef CONFIG_PREEMPT
	/*
	 * For all intents and purposes a preempted task is a running task.
	 */
	if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE)
		return TASK_RUNNING;
#endif
	return p->state;
}
#endif
29
30 #endif /* _TRACE_SCHED_DEF_ */
31
/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,

	TP_PROTO(struct task_struct *t),

	TP_ARGS(t),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid	)
	),

	TP_fast_assign(
		/* comm/pid of the kthread being stopped (not of the caller) */
		tp_memcpy(comm, t->comm, TASK_COMM_LEN)
		tp_assign(pid, t->pid)
	),

	TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
)
53
/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,

	TP_PROTO(int ret),

	TP_ARGS(ret),

	TP_STRUCT__entry(
		__field(	int,	ret	)
	),

	TP_fast_assign(
		/* record the return code passed in by the caller */
		tp_assign(ret, ret)
	),

	TP_printk("ret=%d", __entry->ret)
)
73
/*
 * Tracepoint event class for waking up a task: shared field layout and
 * assignments for the sched_wakeup and sched_wakeup_new events below.
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,

	TP_PROTO(struct task_struct *p, int success),

	TP_ARGS(p, success),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid	)
		__field(	int,	prio	)
		__field(	int,	success	)
		__field(	int,	target_cpu	)
	),

	TP_fast_assign(
		tp_memcpy(comm, p->comm, TASK_COMM_LEN)
		tp_assign(pid, p->pid)
		tp_assign(prio, p->prio)
		tp_assign(success, success)
		/* CPU the task is on at the time the wakeup is traced */
		tp_assign(target_cpu, task_cpu(p))
	),

	TP_printk("comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->success, __entry->target_cpu)
)
103
/*
 * Tracepoint for waking up a task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
	     TP_PROTO(struct task_struct *p, int success),
	     TP_ARGS(p, success))

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
	     TP_PROTO(struct task_struct *p, int success),
	     TP_ARGS(p, success))
114
/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,

	TP_PROTO(struct task_struct *prev,
		 struct task_struct *next),

	TP_ARGS(prev, next),

	TP_STRUCT__entry(
		__array(	char,	prev_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	prev_pid	)
		__field(	int,	prev_prio	)
		__field(	long,	prev_state	)
		__array(	char,	next_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	next_pid	)
		__field(	int,	next_prio	)
	),

	TP_fast_assign(
		tp_memcpy(next_comm, next->comm, TASK_COMM_LEN)
		tp_assign(prev_pid, prev->pid)
		tp_assign(prev_prio, prev->prio)
		/*
		 * __trace_sched_switch_state() (top of this file) folds a
		 * PREEMPT_ACTIVE preemption into TASK_RUNNING.
		 */
		tp_assign(prev_state, __trace_sched_switch_state(prev))
		tp_memcpy(prev_comm, prev->comm, TASK_COMM_LEN)
		tp_assign(next_pid, next->pid)
		tp_assign(next_prio, next->prio)
	),

	/*
	 * prev_state bits decode to the usual one-letter state flags
	 * (S/D/T/t/Z/X/x/W); a zero state prints as "R" (running).
	 */
	TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d",
		__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
		__entry->prev_state ?
		  __print_flags(__entry->prev_state, "|",
				{ 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
				{ 16, "Z" }, { 32, "X" }, { 64, "x" },
				{ 128, "W" }) : "R",
		__entry->next_comm, __entry->next_pid, __entry->next_prio)
)
154
/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,

	TP_PROTO(struct task_struct *p, int dest_cpu),

	TP_ARGS(p, dest_cpu),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid	)
		__field(	int,	prio	)
		__field(	int,	orig_cpu	)
		__field(	int,	dest_cpu	)
	),

	TP_fast_assign(
		tp_memcpy(comm, p->comm, TASK_COMM_LEN)
		tp_assign(pid, p->pid)
		tp_assign(prio, p->prio)
		/* orig_cpu: where the task currently is; dest_cpu: target */
		tp_assign(orig_cpu, task_cpu(p))
		tp_assign(dest_cpu, dest_cpu)
	),

	TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
		  __entry->comm, __entry->pid, __entry->prio,
		  __entry->orig_cpu, __entry->dest_cpu)
)
184
/*
 * Event class capturing comm/pid/prio of a task; shared by the
 * sched_process_free, sched_process_exit and sched_wait_task events below.
 */
DECLARE_EVENT_CLASS(sched_process_template,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid	)
		__field(	int,	prio	)
	),

	TP_fast_assign(
		tp_memcpy(comm, p->comm, TASK_COMM_LEN)
		tp_assign(pid, p->pid)
		tp_assign(prio, p->prio)
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
)
206
/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p))


/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p))

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
	     TP_PROTO(struct task_struct *p),
	     TP_ARGS(p))
228
/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,

	TP_PROTO(struct pid *pid),

	TP_ARGS(pid),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid	)
		__field(	int,	prio	)
	),

	TP_fast_assign(
		/* comm and prio are sampled from the waiter (current)... */
		tp_memcpy(comm, current->comm, TASK_COMM_LEN)
		/* ...while the pid field is the pid being waited for */
		tp_assign(pid, pid_nr(pid))
		tp_assign(prio, current->prio)
	),

	TP_printk("comm=%s pid=%d prio=%d",
		  __entry->comm, __entry->pid, __entry->prio)
)
253
/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,

	TP_PROTO(struct task_struct *parent, struct task_struct *child),

	TP_ARGS(parent, child),

	TP_STRUCT__entry(
		__array(	char,	parent_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	parent_pid	)
		__array(	char,	child_comm,	TASK_COMM_LEN	)
		__field(	pid_t,	child_pid	)
	),

	TP_fast_assign(
		tp_memcpy(parent_comm, parent->comm, TASK_COMM_LEN)
		tp_assign(parent_pid, parent->pid)
		tp_memcpy(child_comm, child->comm, TASK_COMM_LEN)
		tp_assign(child_pid, child->pid)
	),

	/* note: format prints the parent as plain comm=/pid= */
	TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
		__entry->parent_comm, __entry->parent_pid,
		__entry->child_comm, __entry->child_pid)
)
281
/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE
 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
 *
 * Event class recording a task plus a delay (printed in ns); shared by
 * the sched_stat_wait/sleep/iowait events below.
 */
DECLARE_EVENT_CLASS(sched_stat_template,

	TP_PROTO(struct task_struct *tsk, u64 delay),

	TP_ARGS(tsk, delay),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid	)
		__field(	u64,	delay	)
	),

	TP_fast_assign(
		tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
		tp_assign(pid, tsk->pid)
		tp_assign(delay, delay)
	)
	/* __perf_count: use the delay as the perf event count value */
	TP_perf_assign(
		__perf_count(delay)
	),

	TP_printk("comm=%s pid=%d delay=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->delay)
)
311
312
/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay))

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
	     TP_PROTO(struct task_struct *tsk, u64 delay),
	     TP_ARGS(tsk, delay))
336
/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
TRACE_EVENT(sched_stat_runtime,

	TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),

	TP_ARGS(tsk, runtime, vruntime),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid	)
		__field(	u64,	runtime	)
		__field(	u64,	vruntime	)
	),

	TP_fast_assign(
		tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
		tp_assign(pid, tsk->pid)
		tp_assign(runtime, runtime)
		tp_assign(vruntime, vruntime)
	)
	/* __perf_count: use the runtime as the perf event count value */
	TP_perf_assign(
		__perf_count(runtime)
	),

	TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
		  __entry->comm, __entry->pid,
		  (unsigned long long)__entry->runtime,
		  (unsigned long long)__entry->vruntime)
)
369
/*
 * Tracepoint for showing priority inheritance modifying a tasks
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,

	TP_PROTO(struct task_struct *tsk, int newprio),

	TP_ARGS(tsk, newprio),

	TP_STRUCT__entry(
		__array(	char,	comm,	TASK_COMM_LEN	)
		__field(	pid_t,	pid	)
		__field(	int,	oldprio	)
		__field(	int,	newprio	)
	),

	TP_fast_assign(
		tp_memcpy(comm, tsk->comm, TASK_COMM_LEN)
		tp_assign(pid, tsk->pid)
		/*
		 * oldprio is sampled from tsk->prio — presumably before the
		 * boosted priority takes effect; verify against the caller.
		 */
		tp_assign(oldprio, tsk->prio)
		tp_assign(newprio, newprio)
	),

	TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
		  __entry->comm, __entry->pid,
		  __entry->oldprio, __entry->newprio)
)
398
399 #endif /* _TRACE_SCHED_H */
400
401 /* This part must be outside protection */
402 #include <trace/define_trace.h>
This page took 0.038647 seconds and 4 git commands to generate.