/*
 * (C) Copyright 2009 -
 *     Mathieu Desnoyers (mathieu.desnoyers@polymtl.ca)
 *
 * LTTng kprobes integration module.
 *
 * Dual LGPL v2.1/GPL v2 license.
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/marker.h>
#include <linux/mutex.h>
#include <linux/jhash.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>

#include "ltt-type-serializer.h"
#include "ltt-tracer.h"

#define LTT_KPROBES_DIR "kprobes"
#define LTT_KPROBES_ENABLE "enable"
#define LTT_KPROBES_DISABLE "disable"
#define LTT_KPROBES_LIST "list"

/* Active LTTng kprobes hash table */
static DEFINE_MUTEX(ltt_kprobes_mutex);

#define LTT_KPROBE_HASH_BITS 6
#define LTT_KPROBE_TABLE_SIZE (1 << LTT_KPROBE_HASH_BITS)
static struct hlist_head ltt_kprobe_table[LTT_KPROBE_TABLE_SIZE];

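/*
 * Each registered probe is wrapped in a kprobe_entry. The entry is keyed by
 * the exact string written to the debugfs "enable" file, stored in the
 * flexible array member key[] and hashed with jhash into ltt_kprobe_table.
 */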
struct kprobe_entry {
        struct hlist_node hlist;
        struct kprobe kp;
        char key[0];
};

static struct dentry *ltt_kprobes_dir,
        *ltt_kprobes_enable_dentry,
        *ltt_kprobes_disable_dentry,
        *ltt_kprobes_list_dentry;

static int module_exit;

static void trace_kprobe_table_entry(void *call_data, struct kprobe_entry *e)
{
        unsigned long addr;
        char *namebuf = (char *)__get_free_page(GFP_KERNEL);

        if (!namebuf)
                return;
        if (e->kp.addr) {
                sprint_symbol(namebuf, (unsigned long)e->kp.addr);
                addr = (unsigned long)e->kp.addr;
        } else {
                strncpy(namebuf, e->kp.symbol_name, PAGE_SIZE - 1);
                /* TODO : add offset */
                addr = kallsyms_lookup_name(namebuf);
        }
        if (addr)
                __trace_mark(0, kprobe_state, kprobe_table, call_data,
                             "ip 0x%lX symbol %s", addr, namebuf);
        free_page((unsigned long)namebuf);
}

DEFINE_MARKER(kernel, kprobe, "ip %lX");

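/*
 * Kprobe pre-handler: runs in the breakpoint context of the probed
 * instruction and records only the probe address through the pre-defined
 * "kernel kprobe" marker via ltt_specialized_trace(). Returning 0 lets the
 * probed instruction be single-stepped and executed normally.
 */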
static int ltt_kprobe_handler_pre(struct kprobe *p, struct pt_regs *regs)
{
        struct marker *marker;
        unsigned long data;

        data = (unsigned long)p->addr;
        marker = &GET_MARKER(kernel, kprobe);
        ltt_specialized_trace(marker, marker->single.probe_private,
                              &data, sizeof(data), sizeof(data));
        return 0;
}

static int ltt_register_kprobe(const char *key)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe_entry *e = NULL;
        char *symbol_name = NULL;
        unsigned long addr;
        unsigned int offset = 0;
        u32 hash;
        size_t key_len = strlen(key) + 1;
        int ret;

        if (key_len == 1)
                return -ENOENT; /* only \0 */

        if (sscanf(key, "%li", &addr) != 1)
                addr = 0;

        if (!addr) {
                const char *symbol_end = NULL;
                unsigned int symbol_len; /* includes final \0 */

                symbol_end = strchr(key, ' ');
                if (symbol_end)
                        symbol_len = symbol_end - key + 1;
                else
                        symbol_len = key_len;
                symbol_name = kmalloc(symbol_len, GFP_KERNEL);
                if (!symbol_name) {
                        ret = -ENOMEM;
                        goto error;
                }
                memcpy(symbol_name, key, symbol_len - 1);
                symbol_name[symbol_len - 1] = '\0';
                if (symbol_end) {
                        symbol_end++; /* start of offset */
                        if (sscanf(symbol_end, "%i", &offset) != 1)
                                offset = 0;
                }
        }

        hash = jhash(key, key_len - 1, 0);
        head = &ltt_kprobe_table[hash & ((1 << LTT_KPROBE_HASH_BITS) - 1)];
        hlist_for_each_entry(e, node, head, hlist) {
                if (!strcmp(key, e->key)) {
                        printk(KERN_NOTICE "Kprobe %s busy\n", key);
                        ret = -EBUSY;
                        goto error;
                }
        }
        /*
         * Using kzalloc here to allocate a variable-length element. Could
         * cause some memory fragmentation if overused.
         */
        e = kzalloc(sizeof(struct kprobe_entry) + key_len, GFP_KERNEL);
        if (!e) {
                ret = -ENOMEM;
                goto error;
        }
        memcpy(e->key, key, key_len);
        hlist_add_head(&e->hlist, head);
        e->kp.pre_handler = ltt_kprobe_handler_pre;
        e->kp.symbol_name = symbol_name;
        e->kp.offset = offset;
        e->kp.addr = (void *)addr;
        ret = register_kprobe(&e->kp);
        if (ret < 0)
                goto error_list_del;
        trace_kprobe_table_entry(NULL, e);
        return 0;

error_list_del:
        hlist_del(&e->hlist);
error:
        kfree(symbol_name);
        kfree(e);
        return ret;
}

static int ltt_unregister_kprobe(const char *key)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe_entry *e;
        int found = 0;
        size_t key_len = strlen(key) + 1;
        u32 hash;

        hash = jhash(key, key_len - 1, 0);
        head = &ltt_kprobe_table[hash & ((1 << LTT_KPROBE_HASH_BITS) - 1)];
        hlist_for_each_entry(e, node, head, hlist) {
                if (!strcmp(key, e->key)) {
                        found = 1;
                        break;
                }
        }
        if (!found)
                return -ENOENT;
        hlist_del(&e->hlist);
        unregister_kprobe(&e->kp);
        kfree(e->kp.symbol_name);
        kfree(e);
        return 0;
}

static void ltt_unregister_all_kprobes(void)
{
        struct kprobe_entry *e;
        struct hlist_head *head;
        struct hlist_node *node, *tmp;
        unsigned int i;

        for (i = 0; i < LTT_KPROBE_TABLE_SIZE; i++) {
                head = &ltt_kprobe_table[i];
                hlist_for_each_entry_safe(e, node, tmp, head, hlist) {
                        hlist_del(&e->hlist);
                        unregister_kprobe(&e->kp);
                        kfree(e->kp.symbol_name);
                        kfree(e);
                }
        }
}

/*
 * Allows specifying a probe point as either:
 * - symbol
 * - symbol offset
 * - address
 */
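/*
 * Example usage (a sketch; assumes debugfs is mounted at /sys/kernel/debug
 * and that get_ltt_root() exposes the LTT root there as "ltt"; symbol and
 * address values below are purely illustrative):
 *   echo "do_fork" > /sys/kernel/debug/ltt/kprobes/enable
 *   echo "do_fork 0x10" > /sys/kernel/debug/ltt/kprobes/enable
 *   echo "0xffffffff8105a000" > /sys/kernel/debug/ltt/kprobes/enable
 * The same string must later be written to the "disable" file to remove the
 * probe, since it is used verbatim as the hash table key.
 */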
static ssize_t enable_op_write(struct file *file,
        const char __user *user_buf, size_t count, loff_t *ppos)
{
        int err, buf_size;
        char *end;
        char *buf = (char *)__get_free_page(GFP_KERNEL);

        if (!buf)
                return -ENOMEM;

        mutex_lock(&ltt_kprobes_mutex);
        if (module_exit) {
                err = -EPERM;
                goto error;
        }

        buf_size = min_t(size_t, count, PAGE_SIZE - 1);
        if (copy_from_user(buf, user_buf, buf_size)) {
                err = -EFAULT;
                goto error;
        }
        buf[buf_size] = '\0';
        end = strchr(buf, '\n');
        if (end)
                *end = '\0';
        err = ltt_register_kprobe(buf);
        if (err)
                goto error;

        mutex_unlock(&ltt_kprobes_mutex);
        free_page((unsigned long)buf);
        return count;
error:
        mutex_unlock(&ltt_kprobes_mutex);
        free_page((unsigned long)buf);
        return err;
}

static const struct file_operations ltt_kprobes_enable = {
        .write = enable_op_write,
};

static ssize_t disable_op_write(struct file *file,
        const char __user *user_buf, size_t count, loff_t *ppos)
{
        int err, buf_size;
        char *end;
        char *buf = (char *)__get_free_page(GFP_KERNEL);

        if (!buf)
                return -ENOMEM;

        mutex_lock(&ltt_kprobes_mutex);
        if (module_exit)
                goto end;

        buf_size = min_t(size_t, count, PAGE_SIZE - 1);
        if (copy_from_user(buf, user_buf, buf_size)) {
                err = -EFAULT;
                goto error;
        }
        buf[buf_size] = '\0';
        end = strchr(buf, '\n');
        if (end)
                *end = '\0';
        err = ltt_unregister_kprobe(buf);
        if (err)
                goto error;
end:
        mutex_unlock(&ltt_kprobes_mutex);
        free_page((unsigned long)buf);
        return count;
error:
        mutex_unlock(&ltt_kprobes_mutex);
        free_page((unsigned long)buf);
        return err;
}

static const struct file_operations ltt_kprobes_disable = {
        .write = disable_op_write,
};

/*
 * This seqfile read is not perfectly safe, as a kprobe could be removed from
 * the hash table between two reads. This will result in an incomplete output.
 */
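/*
 * Iterator state: m->private holds the last entry shown, NULL before the
 * first entry and (void *)-1UL once the table has been fully walked, so that
 * a later start with a non-zero position does not restart from the first
 * entry. The walk itself is serialized by ltt_kprobes_mutex, taken in
 * lk_start() and released in lk_stop().
 */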
static struct kprobe_entry *ltt_find_next_kprobe(struct kprobe_entry *prev)
{
        struct kprobe_entry *e;
        struct hlist_head *head;
        struct hlist_node *node;
        unsigned int i;
        int found = 0;

        if (prev == (void *)-1UL)
                return NULL;

        if (!prev)
                found = 1;

        for (i = 0; i < LTT_KPROBE_TABLE_SIZE; i++) {
                head = &ltt_kprobe_table[i];
                hlist_for_each_entry(e, node, head, hlist) {
                        if (found)
                                return e;
                        if (e == prev)
                                found = 1;
                }
        }
        return NULL;
}

static void *lk_next(struct seq_file *m, void *p, loff_t *pos)
{
        m->private = ltt_find_next_kprobe(m->private);
        if (!m->private) {
                m->private = (void *)-1UL;
                return NULL;
        }
        return m->private;
}

static void *lk_start(struct seq_file *m, loff_t *pos)
{
        mutex_lock(&ltt_kprobes_mutex);
        if (!*pos)
                m->private = NULL;
        m->private = ltt_find_next_kprobe(m->private);
        if (!m->private) {
                m->private = (void *)-1UL;
                return NULL;
        }
        return m->private;
}

static void lk_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&ltt_kprobes_mutex);
}

static int lk_show(struct seq_file *m, void *p)
{
        struct kprobe_entry *e = m->private;

        seq_printf(m, "%s\n", e->key);
        return 0;
}

static const struct seq_operations ltt_kprobes_list_op = {
        .start = lk_start,
        .next = lk_next,
        .stop = lk_stop,
        .show = lk_show,
};

static int ltt_kprobes_list_open(struct inode *inode, struct file *file)
{
        int ret;

        ret = seq_open(file, &ltt_kprobes_list_op);
        if (ret == 0)
                ((struct seq_file *)file->private_data)->private = NULL;
        return ret;
}

static int ltt_kprobes_list_release(struct inode *inode, struct file *file)
{
        struct seq_file *seq = file->private_data;

        seq->private = NULL;
        return seq_release(inode, file);
}

static const struct file_operations ltt_kprobes_list = {
        .open = ltt_kprobes_list_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = ltt_kprobes_list_release,
};
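/*
 * Reading the "list" file (e.g. "cat <debugfs>/ltt/kprobes/list"; the exact
 * path is an assumption, depending on where debugfs and the LTT root are
 * mounted) prints the key of every active probe, one per line, exactly as it
 * was written to "enable".
 */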

/*
 * kprobes table dump. Callback invoked by ltt-statedump. ltt-statedump must
 * take a reference to this module before calling this callback.
 */
void ltt_dump_kprobes_table(void *call_data)
{
        struct kprobe_entry *e;
        struct hlist_head *head;
        struct hlist_node *node;
        unsigned int i;

        for (i = 0; i < LTT_KPROBE_TABLE_SIZE; i++) {
                head = &ltt_kprobe_table[i];
                hlist_for_each_entry(e, node, head, hlist)
                        trace_kprobe_table_entry(call_data, e);
        }
}
EXPORT_SYMBOL_GPL(ltt_dump_kprobes_table);

static int __init ltt_kprobes_init(void)
{
        struct dentry *ltt_root_dentry;
        int ret = 0;

        printk(KERN_INFO "LTT : ltt-kprobes init\n");
        mutex_lock(&ltt_kprobes_mutex);

        ltt_root_dentry = get_ltt_root();
        if (!ltt_root_dentry) {
                ret = -ENOENT;
                goto err_no_root;
        }

        ltt_kprobes_dir = debugfs_create_dir(LTT_KPROBES_DIR, ltt_root_dentry);
        if (!ltt_kprobes_dir) {
                printk(KERN_ERR
                       "ltt_kprobes_init: failed to create dir %s\n",
                       LTT_KPROBES_DIR);
                ret = -ENOMEM;
                goto err_no_dir;
        }

        ltt_kprobes_enable_dentry = debugfs_create_file(LTT_KPROBES_ENABLE,
                                                        S_IWUSR,
                                                        ltt_kprobes_dir, NULL,
                                                        &ltt_kprobes_enable);
        if (IS_ERR(ltt_kprobes_enable_dentry) || !ltt_kprobes_enable_dentry) {
                printk(KERN_ERR
                       "ltt_kprobes_init: failed to create file %s\n",
                       LTT_KPROBES_ENABLE);
                ret = -ENOMEM;
                goto err_no_enable;
        }

        ltt_kprobes_disable_dentry = debugfs_create_file(LTT_KPROBES_DISABLE,
                                                         S_IWUSR,
                                                         ltt_kprobes_dir, NULL,
                                                         &ltt_kprobes_disable);
        if (IS_ERR(ltt_kprobes_disable_dentry) || !ltt_kprobes_disable_dentry) {
                printk(KERN_ERR
                       "ltt_kprobes_init: failed to create file %s\n",
                       LTT_KPROBES_DISABLE);
                ret = -ENOMEM;
                goto err_no_disable;
        }

        /* The list file is read-only (seq_file), hence S_IRUSR. */
        ltt_kprobes_list_dentry = debugfs_create_file(LTT_KPROBES_LIST,
                                                      S_IRUSR, ltt_kprobes_dir,
                                                      NULL, &ltt_kprobes_list);
        if (IS_ERR(ltt_kprobes_list_dentry) || !ltt_kprobes_list_dentry) {
                printk(KERN_ERR
                       "ltt_kprobes_init: failed to create file %s\n",
                       LTT_KPROBES_LIST);
                ret = -ENOMEM;
                goto err_no_list;
        }
        ltt_statedump_register_kprobes_dump(ltt_dump_kprobes_table);

        mutex_unlock(&ltt_kprobes_mutex);
        return ret;

err_no_list:
        debugfs_remove(ltt_kprobes_disable_dentry);
err_no_disable:
        debugfs_remove(ltt_kprobes_enable_dentry);
err_no_enable:
        debugfs_remove(ltt_kprobes_dir);
err_no_dir:
err_no_root:
        mutex_unlock(&ltt_kprobes_mutex);
        return ret;
}
module_init(ltt_kprobes_init);
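
/*
 * Teardown: module_exit is set under ltt_kprobes_mutex so that concurrent
 * writers to the debugfs control files cannot register new probes while the
 * files are being removed and the remaining probes unregistered.
 */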
static void __exit ltt_kprobes_exit(void)
{
        printk(KERN_INFO "LTT : ltt-kprobes exit\n");
        mutex_lock(&ltt_kprobes_mutex);
        module_exit = 1;
        ltt_statedump_unregister_kprobes_dump(ltt_dump_kprobes_table);
        debugfs_remove(ltt_kprobes_list_dentry);
        debugfs_remove(ltt_kprobes_disable_dentry);
        debugfs_remove(ltt_kprobes_enable_dentry);
        debugfs_remove(ltt_kprobes_dir);
        ltt_unregister_all_kprobes();
        mutex_unlock(&ltt_kprobes_mutex);
}
module_exit(ltt_kprobes_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit Kprobes Support");