probes/lttng-ftrace.c (lttng-modules.git)
/*
 * (C) Copyright 2009-2011 -
 *	Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng function tracer integration module.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

/*
 * The ftrace function tracer does not seem to provide synchronization between
 * probe teardown and callback execution. Therefore, we keep this module
 * permanently loaded (it cannot be unloaded).
 *
 * TODO: Move to register_ftrace_function() (which is exported for
 * modules) for Linux >= 3.0. It is faster (it enables only the selected
 * functions), and that API will stay in place.
 */
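
/*
 * For reference, the register_ftrace_function() path mentioned in the TODO
 * would look roughly like the sketch below (3.0-era API; the callback
 * signature and per-ops filtering details should be double-checked against
 * the target kernel; lttng_ftrace_cb and lttng_fops are placeholder names):
 *
 *	static void notrace lttng_ftrace_cb(unsigned long ip,
 *			unsigned long parent_ip)
 *	{
 *		...
 *	}
 *
 *	static struct ftrace_ops lttng_fops = {
 *		.func = lttng_ftrace_cb,
 *	};
 *
 *	register_ftrace_function(&lttng_fops);
 *	...
 *	unregister_ftrace_function(&lttng_fops);
 */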

#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include "../lttng-events.h"
#include "../wrapper/ringbuffer/frontend_types.h"
#include "../wrapper/ftrace.h"
#include "../wrapper/vmalloc.h"
#include "../lttng-tracer.h"

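/*
 * Probe callback invoked by ftrace on each hit of the filtered symbol.
 * It drops the event if the session, channel or event is disabled, then
 * reserves space in the ring buffer, writes the { ip, parent_ip } payload
 * and commits the record.
 */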
static
void lttng_ftrace_handler(unsigned long ip, unsigned long parent_ip, void **data)
{
        struct lttng_event *event = *data;
        struct lttng_channel *chan = event->chan;
        struct lib_ring_buffer_ctx ctx;
        struct {
                unsigned long ip;
                unsigned long parent_ip;
        } payload;
        int ret;

        if (unlikely(!ACCESS_ONCE(chan->session->active)))
                return;
        if (unlikely(!ACCESS_ONCE(chan->enabled)))
                return;
        if (unlikely(!ACCESS_ONCE(event->enabled)))
                return;

        lib_ring_buffer_ctx_init(&ctx, chan->chan, event,
                                 sizeof(payload), lttng_alignof(payload), -1);
        ret = chan->ops->event_reserve(&ctx, event->id);
        if (ret < 0)
                return;
        payload.ip = ip;
        payload.parent_ip = parent_ip;
        lib_ring_buffer_align_ctx(&ctx, lttng_alignof(payload));
        chan->ops->event_write(&ctx, &payload, sizeof(payload));
        chan->ops->event_commit(&ctx);
}

/*
 * Create event description
 */
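/*
 * The description consists of two unsigned long integer fields, "ip" and
 * "parent_ip", both displayed in base 16. The allocations made here are
 * released by lttng_ftrace_destroy_private() (or by the register error
 * path below).
 */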
static
int lttng_create_ftrace_event(const char *name, struct lttng_event *event)
{
        struct lttng_event_field *fields;
        struct lttng_event_desc *desc;
        int ret;

        desc = kzalloc(sizeof(*event->desc), GFP_KERNEL);
        if (!desc)
                return -ENOMEM;
        desc->name = kstrdup(name, GFP_KERNEL);
        if (!desc->name) {
                ret = -ENOMEM;
                goto error_str;
        }
        desc->nr_fields = 2;
        desc->fields = fields =
                kzalloc(2 * sizeof(struct lttng_event_field), GFP_KERNEL);
        if (!desc->fields) {
                ret = -ENOMEM;
                goto error_fields;
        }
        fields[0].name = "ip";
        fields[0].type.atype = atype_integer;
        fields[0].type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
        fields[0].type.u.basic.integer.alignment = lttng_alignof(unsigned long) * CHAR_BIT;
        fields[0].type.u.basic.integer.signedness = is_signed_type(unsigned long);
        fields[0].type.u.basic.integer.reverse_byte_order = 0;
        fields[0].type.u.basic.integer.base = 16;
        fields[0].type.u.basic.integer.encoding = lttng_encode_none;

        fields[1].name = "parent_ip";
        fields[1].type.atype = atype_integer;
        fields[1].type.u.basic.integer.size = sizeof(unsigned long) * CHAR_BIT;
        fields[1].type.u.basic.integer.alignment = lttng_alignof(unsigned long) * CHAR_BIT;
        fields[1].type.u.basic.integer.signedness = is_signed_type(unsigned long);
        fields[1].type.u.basic.integer.reverse_byte_order = 0;
        fields[1].type.u.basic.integer.base = 16;
        fields[1].type.u.basic.integer.encoding = lttng_encode_none;

        desc->owner = THIS_MODULE;
        event->desc = desc;

        return 0;

error_fields:
        kfree(desc->name);
error_str:
        kfree(desc);
        return ret;
}

static
struct ftrace_probe_ops lttng_ftrace_ops = {
        .func = lttng_ftrace_handler,
};

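/*
 * Typical usage, sketched with placeholder arguments ("myprobe" and
 * "schedule" are illustrative; the event pointer is expected to come from
 * the session's event creation path):
 *
 *	ret = lttng_ftrace_register("myprobe", "schedule", event);
 *	...
 *	lttng_ftrace_unregister(event);
 *	lttng_ftrace_destroy_private(event);
 */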
int lttng_ftrace_register(const char *name,
                          const char *symbol_name,
                          struct lttng_event *event)
{
        int ret;

        ret = lttng_create_ftrace_event(name, event);
        if (ret)
                goto error;

        event->u.ftrace.symbol_name = kstrdup(symbol_name, GFP_KERNEL);
        if (!event->u.ftrace.symbol_name) {
                ret = -ENOMEM;
                goto name_error;
        }

        /* Ensure the memory we just allocated doesn't trigger page faults. */
        wrapper_vmalloc_sync_all();

        ret = wrapper_register_ftrace_function_probe(event->u.ftrace.symbol_name,
                        &lttng_ftrace_ops, event);
        if (ret < 0)
                goto register_error;
        return 0;

register_error:
        kfree(event->u.ftrace.symbol_name);
name_error:
        kfree(event->desc->fields);
        kfree(event->desc->name);
        kfree(event->desc);
error:
        return ret;
}
EXPORT_SYMBOL_GPL(lttng_ftrace_register);

void lttng_ftrace_unregister(struct lttng_event *event)
{
        wrapper_unregister_ftrace_function_probe(event->u.ftrace.symbol_name,
                        &lttng_ftrace_ops, event);
}
EXPORT_SYMBOL_GPL(lttng_ftrace_unregister);

void lttng_ftrace_destroy_private(struct lttng_event *event)
{
        kfree(event->u.ftrace.symbol_name);
        kfree(event->desc->fields);
        kfree(event->desc->name);
        kfree(event->desc);
}
EXPORT_SYMBOL_GPL(lttng_ftrace_destroy_private);

int lttng_ftrace_init(void)
{
        wrapper_vmalloc_sync_all();
        return 0;
}
module_init(lttng_ftrace_init)

/*
 * Ftrace takes care of waiting for a grace period (RCU sched) at probe
 * unregistration, and disables preemption around probe call.
 */
void lttng_ftrace_exit(void)
{
}
module_exit(lttng_ftrace_exit)

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit Ftrace Support");