lttng-modules v0.19-stable: setup_trace_write: Fix recursive locking
[lttng-modules.git] / probes / kernel-trace.c
1 /*
2 * ltt/probes/kernel-trace.c
3 *
4 * kernel tracepoint probes.
5 *
6 * (C) Copyright 2009 - Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
7 * Dual LGPL v2.1/GPL v2 license.
8 */
9
10 #include <linux/module.h>
11 #include <linux/irq.h>
12 #include <trace/events/signal.h>
13 #include <trace/irq.h>
14 #include <trace/sched.h>
15 #include <trace/timer.h>
16 #include <trace/kernel.h>
17 #include <trace/fault.h>
18 #include <trace/events/sched.h>
19
20 #include "../ltt-tracer.h"
21 #include "../ltt-type-serializer.h"
22
23 /*
24 * This should probably be added to s390.
25 */
#ifdef CONFIG_S390
/*
 * s390 does not provide get_irq_regs(); approximate it with the current
 * task's saved registers (see the "should probably be added to s390"
 * note above -- this belongs in arch code eventually).
 */
static struct pt_regs *get_irq_regs(void)
{
	return task_pt_regs(current);
}
#endif
32
33 /*
34 * FIXME :
35 * currently, the specialized tracepoint probes cannot call into other marker
36 * probes, such as ftrace enable/disable. Given we want them to be as fast as
37 * possible, it might not be so bad to lose this flexibility. But that means
38 * such probes would have to connect to tracepoints on their own.
39 */
40
41 /* kernel_irq_entry specialized tracepoint probe */
42
43 void probe_irq_entry(void *_data, unsigned int id, struct pt_regs *regs,
44 struct irqaction *action);
45
46 DEFINE_MARKER_TP(kernel, irq_entry, irq_entry, probe_irq_entry,
47 "ip %lu handler %p irq_id #2u%u kernel_mode #1u%u");
48
49 notrace void probe_irq_entry(void *_data, unsigned int id, struct pt_regs *regs,
50 struct irqaction *action)
51 {
52 struct marker *marker;
53 struct serialize_long_long_short_char data;
54
55 if (unlikely(!regs))
56 regs = get_irq_regs();
57 if (likely(regs)) {
58 data.f1 = instruction_pointer(regs);
59 data.f4 = !user_mode(regs);
60 } else {
61 data.f1 = 0UL;
62 data.f4 = 1;
63 }
64 data.f2 = (unsigned long) (action ? action->handler : NULL);
65 data.f3 = id;
66
67 marker = &GET_MARKER(kernel, irq_entry);
68 ltt_specialized_trace(marker, marker->single.probe_private,
69 &data, serialize_sizeof(data), sizeof(long));
70 }
71
72 void probe_irq_next_handler(void *_data, unsigned int id, struct irqaction *action,
73 irqreturn_t prev_ret);
74
75 DEFINE_MARKER_TP(kernel, irq_next_handler, irq_next_handler,
76 probe_irq_next_handler,
77 "handler %p prev_ret #1u%u");
78
79 notrace void probe_irq_next_handler(void *_data, unsigned int id, struct irqaction *action,
80 irqreturn_t prev_ret)
81 {
82 struct marker *marker;
83 struct serialize_long_char data;
84
85 data.f1 = (unsigned long) (action ? action->handler : NULL);
86 data.f2 = prev_ret;
87
88 marker = &GET_MARKER(kernel, irq_next_handler);
89 ltt_specialized_trace(marker, marker->single.probe_private,
90 &data, serialize_sizeof(data), sizeof(long));
91 }
92
93 /* kernel_irq_exit specialized tracepoint probe */
94
95 void probe_irq_exit(void *_data, irqreturn_t retval);
96
97 DEFINE_MARKER_TP(kernel, irq_exit, irq_exit, probe_irq_exit,
98 "handled #1u%u");
99
100 notrace void probe_irq_exit(void *_data, irqreturn_t retval)
101 {
102 struct marker *marker;
103 unsigned char data;
104
105 data = IRQ_RETVAL(retval);
106
107 marker = &GET_MARKER(kernel, irq_exit);
108 ltt_specialized_trace(marker, marker->single.probe_private,
109 &data, sizeof(data), sizeof(data));
110 }
111
112 /* kernel_softirq_entry specialized tracepoint probe */
113
114 void probe_softirq_entry(void *_data, unsigned int vec_nr);
115
116 DEFINE_MARKER_TP(kernel, softirq_entry, softirq_entry,
117 probe_softirq_entry, "softirq_id #1u%lu");
118
119 notrace void probe_softirq_entry(void *_data, unsigned int vec_nr)
120 {
121 struct marker *marker;
122 unsigned char data;
123
124 data = vec_nr;
125
126 marker = &GET_MARKER(kernel, softirq_entry);
127 ltt_specialized_trace(marker, marker->single.probe_private,
128 &data, sizeof(data), sizeof(data));
129 }
130
131 /* kernel_softirq_exit specialized tracepoint probe */
132
133 void probe_softirq_exit(void *_data, unsigned int vec_nr);
134
135 DEFINE_MARKER_TP(kernel, softirq_exit, softirq_exit,
136 probe_softirq_exit, "softirq_id #1u%lu");
137
138 notrace void probe_softirq_exit(void *_data, unsigned int vec_nr)
139 {
140 struct marker *marker;
141 unsigned char data;
142
143 data = vec_nr;
144
145 marker = &GET_MARKER(kernel, softirq_exit);
146 ltt_specialized_trace(marker, marker->single.probe_private,
147 &data, sizeof(data), sizeof(data));
148 }
149
150 /* kernel_softirq_raise specialized tracepoint probe */
151
152 void probe_softirq_raise(void *_data, unsigned int nr);
153
154 DEFINE_MARKER_TP(kernel, softirq_raise, softirq_raise,
155 probe_softirq_raise, "softirq_id #1u%u");
156
157 notrace void probe_softirq_raise(void *_data, unsigned int nr)
158 {
159 struct marker *marker;
160 unsigned char data;
161
162 data = nr;
163
164 marker = &GET_MARKER(kernel, softirq_raise);
165 ltt_specialized_trace(marker, marker->single.probe_private,
166 &data, sizeof(data), sizeof(data));
167 }
168
/* Standard probes */

/*
 * Low-priority tasklet entry: logs the callback pointer and its data
 * cookie just before the tasklet runs.
 */
void probe_irq_tasklet_low_entry(void *_data, struct tasklet_struct *t)
{
	trace_mark_tp(kernel, tasklet_low_entry, irq_tasklet_low_entry,
		probe_irq_tasklet_low_entry, "func %p data %lu",
		t->func, t->data);
}
176
/*
 * Low-priority tasklet exit: logs the same callback/data pair so the
 * viewer can bracket the tasklet's execution.
 */
void probe_irq_tasklet_low_exit(void *_data, struct tasklet_struct *t)
{
	trace_mark_tp(kernel, tasklet_low_exit, irq_tasklet_low_exit,
		probe_irq_tasklet_low_exit, "func %p data %lu",
		t->func, t->data);
}
183
/*
 * High-priority tasklet entry: callback pointer and data cookie.
 */
void probe_irq_tasklet_high_entry(void *_data, struct tasklet_struct *t)
{
	trace_mark_tp(kernel, tasklet_high_entry, irq_tasklet_high_entry,
		probe_irq_tasklet_high_entry, "func %p data %lu",
		t->func, t->data);
}
190
/*
 * High-priority tasklet exit: callback pointer and data cookie.
 */
void probe_irq_tasklet_high_exit(void *_data, struct tasklet_struct *t)
{
	trace_mark_tp(kernel, tasklet_high_exit, irq_tasklet_high_exit,
		probe_irq_tasklet_high_exit, "func %p data %lu",
		t->func, t->data);
}
197
/*
 * Logs the pid of the kernel thread being asked to stop.
 */
void probe_sched_kthread_stop(void *_data, struct task_struct *t)
{
	trace_mark_tp(kernel, kthread_stop, sched_kthread_stop,
		probe_sched_kthread_stop, "pid %d", t->pid);
}
203
/*
 * Logs the return value handed back by the stopped kernel thread.
 */
void probe_sched_kthread_stop_ret(void *_data, int ret)
{
	trace_mark_tp(kernel, kthread_stop_ret, sched_kthread_stop_ret,
		probe_sched_kthread_stop_ret, "ret %d", ret);
}
209
/*
 * Logs the pid and scheduler state of the task being waited upon.
 */
void probe_sched_wait_task(void *_data, struct task_struct *p)
{
	trace_mark_tp(kernel, sched_wait_task, sched_wait_task,
		probe_sched_wait_task, "pid %d state #2d%ld",
		p->pid, p->state);
}
216
217 /* kernel_sched_try_wakeup specialized tracepoint probe */
218
219 void probe_sched_wakeup(void *_data, struct task_struct *p, int success);
220
221 DEFINE_MARKER_TP(kernel, sched_try_wakeup, sched_wakeup,
222 probe_sched_wakeup, "pid %d cpu_id %u state #2d%ld");
223
224 notrace void probe_sched_wakeup(void *_data, struct task_struct *p, int success)
225 {
226 struct marker *marker;
227 struct serialize_int_int_short data;
228
229 data.f1 = p->pid;
230 data.f2 = task_cpu(p);
231 data.f3 = p->state;
232
233 marker = &GET_MARKER(kernel, sched_try_wakeup);
234 ltt_specialized_trace(marker, marker->single.probe_private,
235 &data, serialize_sizeof(data), sizeof(int));
236 }
237
/*
 * Logs the first wakeup of a freshly forked task: pid, state and CPU.
 */
void probe_sched_wakeup_new(void *_data, struct task_struct *p, int success)
{
	trace_mark_tp(kernel, sched_wakeup_new_task, sched_wakeup_new,
		probe_sched_wakeup_new, "pid %d state #2d%ld cpu_id %u",
		p->pid, p->state, task_cpu(p));
}
244
245 /* kernel_sched_schedule specialized tracepoint probe */
246
247 void probe_sched_switch(void *_data, struct task_struct *prev,
248 struct task_struct *next);
249
250 DEFINE_MARKER_TP(kernel, sched_schedule, sched_switch, probe_sched_switch,
251 "prev_pid %d next_pid %d prev_state #2d%ld");
252
253 notrace void probe_sched_switch(void *_data, struct task_struct *prev,
254 struct task_struct *next)
255 {
256 struct marker *marker;
257 struct serialize_int_int_short data;
258
259 data.f1 = prev->pid;
260 data.f2 = next->pid;
261 data.f3 = prev->state;
262
263 marker = &GET_MARKER(kernel, sched_schedule);
264 ltt_specialized_trace(marker, marker->single.probe_private,
265 &data, serialize_sizeof(data), sizeof(int));
266 }
267
/*
 * Logs a task migration: pid, state, and the destination CPU.
 */
void probe_sched_migrate_task(void *_data, struct task_struct *p, int dest_cpu)
{
	trace_mark_tp(kernel, sched_migrate_task, sched_migrate_task,
		probe_sched_migrate_task, "pid %d state #2d%ld dest_cpu %d",
		p->pid, p->state, dest_cpu);
}
274
/*
 * Logs signal generation: receiving pid and signal number.  The siginfo
 * payload is not serialized.
 */
void probe_sched_signal_send(void *_data, int sig, struct siginfo *info, struct task_struct *t)
{
	trace_mark_tp(kernel, send_signal, signal_generate,
		probe_sched_signal_send, "pid %d signal %d", t->pid, sig);
}
280
/*
 * Logs the pid of a task whose task_struct is being freed.
 */
void probe_sched_process_free(void *_data, struct task_struct *p)
{
	trace_mark_tp(kernel, process_free, sched_process_free,
		probe_sched_process_free, "pid %d", p->pid);
}
286
/*
 * Logs the pid of a task entering exit.
 */
void probe_sched_process_exit(void *_data, struct task_struct *p)
{
	trace_mark_tp(kernel, process_exit, sched_process_exit,
		probe_sched_process_exit, "pid %d", p->pid);
}
292
/*
 * Logs the numeric pid being waited on (resolved via pid_nr).
 */
void probe_sched_process_wait(void *_data, struct pid *pid)
{
	trace_mark_tp(kernel, process_wait, sched_process_wait,
		probe_sched_process_wait, "pid %d", pid_nr(pid));
}
298
/*
 * Logs a fork: parent pid, child pid and child thread-group id.
 */
void probe_sched_process_fork(void *_data, struct task_struct *parent,
		struct task_struct *child)
{
	trace_mark_tp(kernel, process_fork, sched_process_fork,
		probe_sched_process_fork,
		"parent_pid %d child_pid %d child_tgid %d",
		parent->pid, child->pid, child->tgid);
}
307
/*
 * Logs creation of a kernel thread: entry function and new pid.
 */
void probe_sched_kthread_create(void *_data, void *fn, int pid)
{
	trace_mark_tp(kernel, kthread_create, sched_kthread_create,
		probe_sched_kthread_create,
		"fn %p pid %d", fn, pid);
}
314
/*
 * Logs an itimer expiry against the owning process group leader's pid.
 */
void probe_timer_itimer_expired(void *_data, struct signal_struct *sig)
{
	trace_mark_tp(kernel, timer_itimer_expired, timer_itimer_expired,
		probe_timer_itimer_expired, "pid %d",
		pid_nr(sig->leader_pid));
}
321
/*
 * Logs setitimer parameters: which timer, plus the interval and value
 * timevals split into seconds/microseconds.
 */
void probe_timer_itimer_set(void *_data, int which, struct itimerval *value)
{
	trace_mark_tp(kernel, timer_itimer_set,
		timer_itimer_set, probe_timer_itimer_set,
		"which %d interval_sec %ld interval_usec %ld "
		"value_sec %ld value_usec %ld",
		which,
		value->it_interval.tv_sec,
		value->it_interval.tv_usec,
		value->it_value.tv_sec,
		value->it_value.tv_usec);
}
334
335 /* kernel_timer_set specialized tracepoint probe */
336
337 void probe_timer_set(void *_data, struct timer_list *timer);
338
339 DEFINE_MARKER_TP(kernel, timer_set, timer_set, probe_timer_set,
340 "expires %lu function %p data %lu");
341
342 notrace void probe_timer_set(void *_data, struct timer_list *timer)
343 {
344 struct marker *marker;
345 struct serialize_long_long_long data;
346
347 data.f1 = timer->expires;
348 data.f2 = (unsigned long)timer->function;
349 data.f3 = timer->data;
350
351 marker = &GET_MARKER(kernel, timer_set);
352 ltt_specialized_trace(marker, marker->single.probe_private,
353 &data, serialize_sizeof(data), sizeof(long));
354 }
355
/*
 * Logs a wall-clock update: the 64-bit jiffies counter, the new xtime,
 * and the wall-to-monotonic offset (seconds/nanoseconds each).
 */
void probe_timer_update_time(void *_data, struct timespec *_xtime,
		struct timespec *_wall_to_monotonic)
{
	trace_mark_tp(kernel, timer_update_time, timer_update_time,
		probe_timer_update_time,
		"jiffies #8u%llu xtime_sec %ld xtime_nsec %ld "
		"walltomonotonic_sec %ld walltomonotonic_nsec %ld",
		(unsigned long long)jiffies_64, _xtime->tv_sec, _xtime->tv_nsec,
		_wall_to_monotonic->tv_sec, _wall_to_monotonic->tv_nsec);
}
366
/*
 * Logs the pid of a task woken by a timer timeout.
 */
void probe_timer_timeout(void *_data, struct task_struct *p)
{
	trace_mark_tp(kernel, timer_timeout, timer_timeout,
		probe_timer_timeout, "pid %d", p->pid);
}
372
/*
 * Logs the caller address of a printk invocation.
 */
void probe_kernel_printk(void *_data, unsigned long retaddr)
{
	trace_mark_tp(kernel, printk, kernel_printk,
		probe_kernel_printk, "ip 0x%lX", retaddr);
}
378
379 void probe_kernel_vprintk(void *_data, unsigned long retaddr, char *buf, int len)
380 {
381 if (len > 0) {
382 unsigned int loglevel;
383 int mark_len;
384 char *mark_buf;
385 char saved_char;
386
387 if (buf[0] == '<' && buf[1] >= '0' &&
388 buf[1] <= '7' && buf[2] == '>') {
389 loglevel = buf[1] - '0';
390 mark_buf = &buf[3];
391 mark_len = len - 3;
392 } else {
393 loglevel = default_message_loglevel;
394 mark_buf = buf;
395 mark_len = len;
396 }
397 if (mark_buf[mark_len - 1] == '\n')
398 mark_len--;
399 saved_char = mark_buf[mark_len];
400 mark_buf[mark_len] = '\0';
401 trace_mark_tp(kernel, vprintk, kernel_vprintk,
402 probe_kernel_vprintk,
403 "loglevel #1u%u string %s ip 0x%lX",
404 loglevel, mark_buf, retaddr);
405 mark_buf[mark_len] = saved_char;
406 }
407 }
408
409 #ifdef CONFIG_MODULES
/*
 * Logs the name of a module being unloaded.
 */
void probe_kernel_module_free(void *_data, struct module *mod)
{
	trace_mark_tp(kernel, module_free, kernel_module_free,
		probe_kernel_module_free, "name %s", mod->name);
}
415
/*
 * Logs the name of a module being loaded.
 */
void probe_kernel_module_load(void *_data, struct module *mod)
{
	trace_mark_tp(kernel, module_load, kernel_module_load,
		probe_kernel_module_load, "name %s", mod->name);
}
421 #endif
422
/*
 * Logs a panic message, formatted into a bounded stack buffer (the
 * message is truncated to 63 characters plus NUL by vsnprintf).
 */
void probe_kernel_panic(void *_data, const char *fmt, va_list args)
{
	char info[64];
	vsnprintf(info, sizeof(info), fmt, args);
	trace_mark_tp(kernel, panic, kernel_panic, probe_kernel_panic,
		"info %s", info);
}
430
/*
 * Logs the kimage pointer of a planned (non-crash) kexec.
 */
void probe_kernel_kernel_kexec(void *_data, struct kimage *image)
{
	trace_mark_tp(kernel, kernel_kexec, kernel_kernel_kexec,
		probe_kernel_kernel_kexec, "image %p", image);
}
436
/*
 * Logs a crash kexec: kimage pointer and, when regs are available, the
 * instruction pointer at the time of the crash (NULL otherwise).
 */
void probe_kernel_crash_kexec(void *_data, struct kimage *image, struct pt_regs *regs)
{
	trace_mark_tp(kernel, crash_kexec, kernel_crash_kexec,
		probe_kernel_crash_kexec, "image %p ip %p", image,
		regs ? (void *)instruction_pointer(regs) : NULL);
}
443
444 /* kernel_page_fault_entry specialized tracepoint probe */
445
446 void probe_kernel_page_fault_entry(void *_data, struct pt_regs *regs, int trapnr,
447 struct mm_struct *mm, struct vm_area_struct *vma,
448 unsigned long address, int write_access);
449
450 DEFINE_MARKER_TP(kernel, page_fault_entry, page_fault_entry,
451 probe_kernel_page_fault_entry,
452 "ip #p%lu address #p%lu trap_id #2u%u write_access #1u%u");
453
454 notrace void probe_kernel_page_fault_entry(void *_data, struct pt_regs *regs, int trapnr,
455 struct mm_struct *mm, struct vm_area_struct *vma,
456 unsigned long address, int write_access)
457 {
458 struct marker *marker;
459 struct serialize_long_long_short_char data;
460
461 if (likely(regs))
462 data.f1 = instruction_pointer(regs);
463 else
464 data.f1 = 0UL;
465 data.f2 = address;
466 data.f3 = (unsigned short)trapnr;
467 data.f4 = (unsigned char)!!write_access;
468
469 marker = &GET_MARKER(kernel, page_fault_entry);
470 ltt_specialized_trace(marker, marker->single.probe_private,
471 &data, serialize_sizeof(data), sizeof(long));
472 }
473
474 /* kernel_page_fault_exit specialized tracepoint probe */
475
476 void probe_kernel_page_fault_exit(void *_data, int res);
477
478 DEFINE_MARKER_TP(kernel, page_fault_exit, page_fault_exit,
479 probe_kernel_page_fault_exit,
480 "res %d");
481
482 notrace void probe_kernel_page_fault_exit(void *_data, int res)
483 {
484 struct marker *marker;
485
486 marker = &GET_MARKER(kernel, page_fault_exit);
487 ltt_specialized_trace(marker, marker->single.probe_private,
488 &res, sizeof(res), sizeof(res));
489 }
490
491 /* kernel_page_fault_nosem_entry specialized tracepoint probe */
492
493 void probe_kernel_page_fault_nosem_entry(void *_data, struct pt_regs *regs,
494 int trapnr, unsigned long address);
495
496 DEFINE_MARKER_TP(kernel, page_fault_nosem_entry, page_fault_nosem_entry,
497 probe_kernel_page_fault_nosem_entry,
498 "ip #p%lu address #p%lu trap_id #2u%u");
499
500 notrace void probe_kernel_page_fault_nosem_entry(void *_data, struct pt_regs *regs,
501 int trapnr, unsigned long address)
502 {
503 struct marker *marker;
504 struct serialize_long_long_short data;
505
506 if (likely(regs))
507 data.f1 = instruction_pointer(regs);
508 else
509 data.f1 = 0UL;
510 data.f2 = address;
511 data.f3 = (unsigned short)trapnr;
512
513 marker = &GET_MARKER(kernel, page_fault_nosem_entry);
514 ltt_specialized_trace(marker, marker->single.probe_private,
515 &data, serialize_sizeof(data), sizeof(long));
516 }
517
518 /* kernel_page_fault_nosem_exit specialized tracepoint probe */
519
520 void probe_kernel_page_fault_nosem_exit(void *_data, int res);
521
522 DEFINE_MARKER_TP(kernel, page_fault_nosem_exit, page_fault_nosem_exit,
523 probe_kernel_page_fault_nosem_exit,
524 MARK_NOARGS);
525
526 notrace void probe_kernel_page_fault_nosem_exit(void *_data, int res)
527 {
528 struct marker *marker;
529
530 marker = &GET_MARKER(kernel, page_fault_nosem_exit);
531 ltt_specialized_trace(marker, marker->single.probe_private,
532 NULL, 0, 0);
533 }
534
535 /* kernel_page_fault_get_user_entry specialized tracepoint probe */
536
537 void probe_kernel_page_fault_get_user_entry(void *_data, struct mm_struct *mm,
538 struct vm_area_struct *vma, unsigned long address, int write_access);
539
540 DEFINE_MARKER_TP(kernel, page_fault_get_user_entry, page_fault_get_user_entry,
541 probe_kernel_page_fault_get_user_entry,
542 "address #p%lu write_access #1u%u");
543
544 notrace void probe_kernel_page_fault_get_user_entry(void *_data, struct mm_struct *mm,
545 struct vm_area_struct *vma, unsigned long address, int write_access)
546 {
547 struct marker *marker;
548 struct serialize_long_char data;
549
550 data.f1 = address;
551 data.f2 = (unsigned char)!!write_access;
552
553 marker = &GET_MARKER(kernel, page_fault_get_user_entry);
554 ltt_specialized_trace(marker, marker->single.probe_private,
555 &data, serialize_sizeof(data), sizeof(long));
556 }
557
558 /* kernel_page_fault_get_user_exit specialized tracepoint probe */
559
560 void probe_kernel_page_fault_get_user_exit(void *_data, int res);
561
562 DEFINE_MARKER_TP(kernel, page_fault_get_user_exit, page_fault_get_user_exit,
563 probe_kernel_page_fault_get_user_exit,
564 "res %d");
565
566 notrace void probe_kernel_page_fault_get_user_exit(void *_data, int res)
567 {
568 struct marker *marker;
569
570 marker = &GET_MARKER(kernel, page_fault_get_user_exit);
571 ltt_specialized_trace(marker, marker->single.probe_private,
572 &res, sizeof(res), sizeof(res));
573 }
574
/* Module metadata. */
MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("kernel Tracepoint Probes");
This page took 0.040496 seconds and 4 git commands to generate.