Fix a zmalloc bug
[ust.git] / libust / tracepoint.c
/*
 * Copyright (C) 2008 Mathieu Desnoyers
 * Copyright (C) 2009 Pierre-Marc Fournier
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Ported to userspace by Pierre-Marc Fournier.
 */

#include <errno.h>
#include <ust/tracepoint.h>
#include <ust/core.h>
#include <ust/kcompat/kcompat.h>
#include "usterr.h"

#define _LGPL_SOURCE
#include <urcu-bp.h>

//extern struct tracepoint __start___tracepoints[] __attribute__((visibility("hidden")));
//extern struct tracepoint __stop___tracepoints[] __attribute__((visibility("hidden")));

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

/* libraries that contain tracepoints (struct tracepoint_lib) */
static LIST_HEAD(libs);

/*
 * tracepoints_mutex nests inside module_mutex. Tracepoints mutex protects the
 * builtin and module tracepoints and the hash table.
 */
static DEFINE_MUTEX(tracepoints_mutex);

/*
 * Tracepoint hash table, containing the active tracepoints.
 * Protected by tracepoints_mutex.
 */
#define TRACEPOINT_HASH_BITS 6
#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];

/*
 * Note about RCU:
 * It is used to delay the free of multiple probes array until a quiescent
 * state is reached.
 * Tracepoint entries modifications are protected by the tracepoints_mutex.
 */
struct tracepoint_entry {
	struct hlist_node hlist;
	struct probe *probes;
	int refcount;	/* Number of times armed. 0 if disarmed. */
	char name[0];
};

struct tp_probes {
	union {
//ust//		struct rcu_head rcu;
		struct list_head list;
	} u;
	struct probe probes[0];
};

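/*
 * Allocate room for 'count' probe slots plus the enclosing struct tp_probes
 * header, and return a pointer to the probes[] array itself.  release_probes()
 * walks back to the header with _ust_container_of() before freeing.
 */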
static inline void *allocate_probes(int count)
{
	struct tp_probes *p = zmalloc(count * sizeof(struct probe)
			+ sizeof(struct tp_probes));
	return p == NULL ? NULL : p->probes;
}

//ust// static void rcu_free_old_probes(struct rcu_head *head)
//ust// {
//ust//	kfree(container_of(head, struct tp_probes, u.rcu));
//ust// }

/*
 * Free an old probes array once no reader can still be using it: wait for a
 * grace period with synchronize_rcu(), then free the enclosing struct
 * tp_probes.
 */
static inline void release_probes(void *old)
{
	if (old) {
		struct tp_probes *tp_probes = _ust_container_of(old,
			struct tp_probes, probes[0]);
//ust//		call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes);
		synchronize_rcu();
		free(tp_probes);
	}
}

static void debug_print_probes(struct tracepoint_entry *entry)
{
	int i;

	if (!tracepoint_debug || !entry->probes)
		return;

	for (i = 0; entry->probes[i].func; i++)
		DBG("Probe %d : %p", i, entry->probes[i].func);
}

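/*
 * Build a new NULL-terminated probes array holding the existing probes plus
 * the new (probe, data) pair, publish it in the entry and return the old
 * array so the caller can release it after a grace period.  Returns
 * ERR_PTR(-EEXIST) if the pair is already registered, ERR_PTR(-ENOMEM) on
 * allocation failure.
 */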
static void *
tracepoint_entry_add_probe(struct tracepoint_entry *entry,
			   void *probe, void *data)
{
	int nr_probes = 0;
	struct probe *old, *new;

	WARN_ON(!probe);

	debug_print_probes(entry);
	old = entry->probes;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++)
			if (old[nr_probes].func == probe &&
			    old[nr_probes].data == data)
				return ERR_PTR(-EEXIST);
	}
	/* + 2 : one for new probe, one for NULL func */
	new = allocate_probes(nr_probes + 2);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old)
		memcpy(new, old, nr_probes * sizeof(struct probe));
	new[nr_probes].func = probe;
	new[nr_probes].data = data;
	new[nr_probes + 1].func = NULL;
	entry->refcount = nr_probes + 1;
	entry->probes = new;
	debug_print_probes(entry);
	return old;
}

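/*
 * Remove the (probe, data) pair from the entry, or every probe when 'probe'
 * is NULL, and publish the shrunken (possibly NULL) array.  Returns the old
 * array so the caller can release it after a grace period, ERR_PTR(-ENOENT)
 * if there was nothing to remove, or ERR_PTR(-ENOMEM) on allocation failure.
 */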
static void *
tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe,
			      void *data)
{
	int nr_probes = 0, nr_del = 0, i;
	struct probe *old, *new;

	old = entry->probes;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(entry);
	/* (N -> M), (N > 1, M >= 0) probes */
	for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
		if (!probe ||
		    (old[nr_probes].func == probe &&
		     old[nr_probes].data == data))
			nr_del++;
	}

	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		entry->probes = NULL;
		entry->refcount = 0;
		debug_print_probes(entry);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new == NULL)
			return ERR_PTR(-ENOMEM);
		for (i = 0; old[i].func; i++)
			if (probe &&
			    (old[i].func != probe || old[i].data != data))
				new[j++] = old[i];
		new[nr_probes - nr_del].func = NULL;
		entry->refcount = nr_probes - nr_del;
		entry->probes = new;
	}
	debug_print_probes(entry);
	return old;
}

/*
 * Get tracepoint if the tracepoint is present in the tracepoint hash table.
 * Must be called with tracepoints_mutex held.
 * Returns NULL if not present.
 */
static struct tracepoint_entry *get_tracepoint(const char *name)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tracepoint_entry *e;
	u32 hash = jhash(name, strlen(name), 0);

	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
	hlist_for_each_entry(e, node, head, hlist) {
		if (!strcmp(name, e->name))
			return e;
	}
	return NULL;
}

/*
 * Add the tracepoint to the tracepoint hash table. Must be called with
 * tracepoints_mutex held.
 */
static struct tracepoint_entry *add_tracepoint(const char *name)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tracepoint_entry *e;
	size_t name_len = strlen(name) + 1;
	u32 hash = jhash(name, name_len - 1, 0);

	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
	hlist_for_each_entry(e, node, head, hlist) {
		if (!strcmp(name, e->name)) {
			DBG("tracepoint %s busy", name);
			return ERR_PTR(-EEXIST);	/* Already there */
		}
	}
	/*
	 * Using zmalloc here to allocate a variable length element. Could
	 * cause some memory fragmentation if overused.
	 */
	e = zmalloc(sizeof(struct tracepoint_entry) + name_len);
	if (!e)
		return ERR_PTR(-ENOMEM);
	memcpy(&e->name[0], name, name_len);
	e->probes = NULL;
	e->refcount = 0;
	hlist_add_head(&e->hlist, head);
	return e;
}

/*
 * Remove the tracepoint from the tracepoint hash table. Must be called
 * with tracepoints_mutex held.
 */
static inline void remove_tracepoint(struct tracepoint_entry *e)
{
	hlist_del(&e->hlist);
	free(e);
}

/*
 * Sets the probe callback corresponding to one tracepoint.
 */
static void set_tracepoint(struct tracepoint_entry **entry,
			   struct tracepoint *elem, int active)
{
	WARN_ON(strcmp((*entry)->name, elem->name) != 0);

	/*
	 * rcu_assign_pointer has a smp_wmb() which makes sure that the new
	 * probe callbacks array is consistent before setting a pointer to it.
	 * This array is referenced by __DO_TRACE from
	 * include/linux/tracepoints.h. A matching smp_read_barrier_depends()
	 * is used.
	 */
	rcu_assign_pointer(elem->probes, (*entry)->probes);
	elem->state__imv = active;
}

/*
 * Disable a tracepoint and its probe callback.
 * Note: only waiting for an RCU period after setting elem->call to the empty
 * function ensures that the original callback is not used anymore. This is
 * ensured by preempt_disable around the call site.
 */
static void disable_tracepoint(struct tracepoint *elem)
{
	elem->state__imv = 0;
	rcu_assign_pointer(elem->probes, NULL);
}

/**
 * tracepoint_update_probe_range - Update a probe range
 * @begin: beginning of the range
 * @end: end of the range
 *
 * Updates the probe callback corresponding to a range of tracepoints.
 */
void tracepoint_update_probe_range(struct tracepoint *begin,
				   struct tracepoint *end)
{
	struct tracepoint *iter;
	struct tracepoint_entry *mark_entry;

	pthread_mutex_lock(&tracepoints_mutex);
	for (iter = begin; iter < end; iter++) {
		if (!iter->name) {
			disable_tracepoint(iter);
			continue;
		}
		mark_entry = get_tracepoint(iter->name);
		if (mark_entry) {
			set_tracepoint(&mark_entry, iter,
					!!mark_entry->refcount);
		} else {
			disable_tracepoint(iter);
		}
	}
	pthread_mutex_unlock(&tracepoints_mutex);
}

/* Update the probe callbacks of every tracepoint section registered by a library. */
static void lib_update_tracepoints(void)
{
	struct tracepoint_lib *lib;

//ust//	pthread_mutex_lock(&module_mutex);
	list_for_each_entry(lib, &libs, list)
		tracepoint_update_probe_range(lib->tracepoints_start,
			lib->tracepoints_start + lib->tracepoints_count);
//ust//	pthread_mutex_unlock(&module_mutex);
}

/*
 * Update probes, removing the faulty probes.
 */
static void tracepoint_update_probes(void)
{
	/* Core kernel tracepoints */
//ust//	tracepoint_update_probe_range(__start___tracepoints,
//ust//		__stop___tracepoints);
	/* tracepoints in modules. */
	lib_update_tracepoints();
	/* Update immediate values */
	core_imv_update();
//ust//	module_imv_update();
}

static struct probe *
tracepoint_add_probe(const char *name, void *probe, void *data)
{
	struct tracepoint_entry *entry;
	struct probe *old;

	entry = get_tracepoint(name);
	if (!entry) {
		entry = add_tracepoint(name);
		if (IS_ERR(entry))
			return (struct probe *)entry;
	}
	old = tracepoint_entry_add_probe(entry, probe, data);
	if (IS_ERR(old) && !entry->refcount)
		remove_tracepoint(entry);
	return old;
}

/**
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @name: tracepoint name
 * @probe: probe handler
 * @data: probe private data
 *
 * Returns 0 if ok, error value on error.
 * The probe address must at least be aligned on the architecture pointer size.
 */
int tracepoint_probe_register(const char *name, void *probe, void *data)
{
	void *old;

	pthread_mutex_lock(&tracepoints_mutex);
	old = tracepoint_add_probe(name, probe, data);
	pthread_mutex_unlock(&tracepoints_mutex);
	if (IS_ERR(old))
		return PTR_ERR(old);

	tracepoint_update_probes();		/* may update entry */
	release_probes(old);
	return 0;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_register);
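
/*
 * Illustrative usage sketch (not part of the original file): register a probe
 * on a hypothetical "my_event" tracepoint declared elsewhere with a single
 * int argument.  It is assumed here, as in the kernel tracepoint API, that
 * the registered data pointer is passed back as the probe's first argument;
 * the real signature must match the tracepoint's declaration.
 *
 *	static void my_probe(void *data, int value)
 *	{
 *		DBG("my_event fired with value %d", value);
 *	}
 *
 *	int ret = tracepoint_probe_register("my_event", my_probe, NULL);
 *	if (ret)
 *		DBG("probe registration failed: %d", ret);
 */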

static void *tracepoint_remove_probe(const char *name, void *probe, void *data)
{
	struct tracepoint_entry *entry;
	void *old;

	entry = get_tracepoint(name);
	if (!entry)
		return ERR_PTR(-ENOENT);
	old = tracepoint_entry_remove_probe(entry, probe, data);
	if (IS_ERR(old))
		return old;
	if (!entry->refcount)
		remove_tracepoint(entry);
	return old;
}

/**
 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
 * @name: tracepoint name
 * @probe: probe function pointer
 * @data: probe data pointer
 *
 * We do not need to call a synchronize_sched to make sure the probes have
 * finished running before doing a module unload, because the module unload
 * itself uses stop_machine(), which ensures that every preempt-disabled
 * section has finished.
 */
int tracepoint_probe_unregister(const char *name, void *probe, void *data)
{
	void *old;

	pthread_mutex_lock(&tracepoints_mutex);
	old = tracepoint_remove_probe(name, probe, data);
	pthread_mutex_unlock(&tracepoints_mutex);
	if (IS_ERR(old))
		return PTR_ERR(old);

	tracepoint_update_probes();		/* may update entry */
	release_probes(old);
	return 0;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);

static LIST_HEAD(old_probes);
static int need_update;

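/*
 * Queue an old probes array for deferred release and flag that the tracepoint
 * call sites need updating.  The *_noupdate registration variants below rely
 * on this: they only touch the hash table, and the call-site updates and the
 * frees happen in one batch when tracepoint_probe_update_all() is called.
 */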
static void tracepoint_add_old_probes(void *old)
{
	need_update = 1;
	if (old) {
		struct tp_probes *tp_probes = _ust_container_of(old,
			struct tp_probes, probes[0]);
		list_add(&tp_probes->u.list, &old_probes);
	}
}

/**
 * tracepoint_probe_register_noupdate - register a probe but not connect
 * @name: tracepoint name
 * @probe: probe handler
 * @data: probe private data
 *
 * The caller must call tracepoint_probe_update_all().
 */
int tracepoint_probe_register_noupdate(const char *name, void *probe,
				       void *data)
{
	void *old;

	pthread_mutex_lock(&tracepoints_mutex);
	old = tracepoint_add_probe(name, probe, data);
	if (IS_ERR(old)) {
		pthread_mutex_unlock(&tracepoints_mutex);
		return PTR_ERR(old);
	}
	tracepoint_add_old_probes(old);
	pthread_mutex_unlock(&tracepoints_mutex);
	return 0;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);

/**
 * tracepoint_probe_unregister_noupdate - remove a probe but not disconnect
 * @name: tracepoint name
 * @probe: probe function pointer
 * @data: probe private data
 *
 * The caller must call tracepoint_probe_update_all().
 */
int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
					 void *data)
{
	void *old;

	pthread_mutex_lock(&tracepoints_mutex);
	old = tracepoint_remove_probe(name, probe, data);
	if (IS_ERR(old)) {
		pthread_mutex_unlock(&tracepoints_mutex);
		return PTR_ERR(old);
	}
	tracepoint_add_old_probes(old);
	pthread_mutex_unlock(&tracepoints_mutex);
	return 0;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate);

/**
 * tracepoint_probe_update_all - update tracepoints
 */
void tracepoint_probe_update_all(void)
{
	LIST_HEAD(release_probes);
	struct tp_probes *pos, *next;

	pthread_mutex_lock(&tracepoints_mutex);
	if (!need_update) {
		pthread_mutex_unlock(&tracepoints_mutex);
		return;
	}
	if (!list_empty(&old_probes))
		list_replace_init(&old_probes, &release_probes);
	need_update = 0;
	pthread_mutex_unlock(&tracepoints_mutex);

	tracepoint_update_probes();
	list_for_each_entry_safe(pos, next, &release_probes, u.list) {
		list_del(&pos->u.list);
//ust//		call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
		synchronize_rcu();
		free(pos);
	}
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
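
/*
 * Illustrative usage sketch (not part of the original file): batch several
 * registrations, then update every call site once.  "evt_a", "evt_b" and
 * their probes are hypothetical names.
 *
 *	tracepoint_probe_register_noupdate("evt_a", probe_a, NULL);
 *	tracepoint_probe_register_noupdate("evt_b", probe_b, NULL);
 *	tracepoint_probe_update_all();
 */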

/*
 * Returns 0 if current not found.
 * Returns 1 if current found.
 */
int lib_get_iter_tracepoints(struct tracepoint_iter *iter)
{
	struct tracepoint_lib *iter_lib;
	int found = 0;

//ust//	pthread_mutex_lock(&module_mutex);
	list_for_each_entry(iter_lib, &libs, list) {
		if (iter_lib < iter->lib)
			continue;
		else if (iter_lib > iter->lib)
			iter->tracepoint = NULL;
		found = tracepoint_get_iter_range(&iter->tracepoint,
			iter_lib->tracepoints_start,
			iter_lib->tracepoints_start + iter_lib->tracepoints_count);
		if (found) {
			iter->lib = iter_lib;
			break;
		}
	}
//ust//	pthread_mutex_unlock(&module_mutex);
	return found;
}

/**
 * tracepoint_get_iter_range - Get a next tracepoint iterator given a range.
 * @tracepoint: current tracepoint (in), next tracepoint (out)
 * @begin: beginning of the range
 * @end: end of the range
 *
 * Returns whether a next tracepoint has been found (1) or not (0).
 * Will return the first tracepoint in the range if the input tracepoint is
 * NULL.
 */
int tracepoint_get_iter_range(struct tracepoint **tracepoint,
			      struct tracepoint *begin, struct tracepoint *end)
{
	if (!*tracepoint && begin != end) {
		*tracepoint = begin;
		return 1;
	}
	if (*tracepoint >= begin && *tracepoint < end)
		return 1;
	return 0;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_get_iter_range);

static void tracepoint_get_iter(struct tracepoint_iter *iter)
{
	int found = 0;

//ust//	/* Core kernel tracepoints */
//ust//	if (!iter->module) {
//ust//		found = tracepoint_get_iter_range(&iter->tracepoint,
//ust//			__start___tracepoints, __stop___tracepoints);
//ust//		if (found)
//ust//			goto end;
//ust//	}
	/* tracepoints in libs. */
	found = lib_get_iter_tracepoints(iter);
//ust// end:
	if (!found)
		tracepoint_iter_reset(iter);
}

void tracepoint_iter_start(struct tracepoint_iter *iter)
{
	tracepoint_get_iter(iter);
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_start);

void tracepoint_iter_next(struct tracepoint_iter *iter)
{
	iter->tracepoint++;
	/*
	 * iter->tracepoint may be invalid because we blindly incremented it.
	 * Make sure it is valid by checking it against the registered
	 * tracepoint ranges, moving on to the following library's
	 * tracepoints if necessary.
	 */
	tracepoint_get_iter(iter);
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_next);

void tracepoint_iter_stop(struct tracepoint_iter *iter)
{
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_stop);

void tracepoint_iter_reset(struct tracepoint_iter *iter)
{
//ust//	iter->module = NULL;
	iter->tracepoint = NULL;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
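
/*
 * Illustrative iteration sketch (not part of the original file): walk every
 * tracepoint currently known to the process.  It is assumed the caller
 * zero-initializes the iterator so that iter.lib starts out NULL.
 *
 *	struct tracepoint_iter iter = { 0 };
 *
 *	tracepoint_iter_start(&iter);
 *	while (iter.tracepoint) {
 *		DBG("tracepoint: %s", iter.tracepoint->name);
 *		tracepoint_iter_next(&iter);
 *	}
 *	tracepoint_iter_stop(&iter);
 */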

//ust// #ifdef CONFIG_MODULES

//ust// int tracepoint_module_notify(struct notifier_block *self,
//ust//		unsigned long val, void *data)
//ust// {
//ust//	struct module *mod = data;
//ust//
//ust//	switch (val) {
//ust//	case MODULE_STATE_COMING:
//ust//		tracepoint_update_probe_range(mod->tracepoints,
//ust//			mod->tracepoints + mod->num_tracepoints);
//ust//		break;
//ust//	case MODULE_STATE_GOING:
//ust//		tracepoint_update_probe_range(mod->tracepoints,
//ust//			mod->tracepoints + mod->num_tracepoints);
//ust//		break;
//ust//	}
//ust//	return 0;
//ust// }

//ust// struct notifier_block tracepoint_module_nb = {
//ust//	.notifier_call = tracepoint_module_notify,
//ust//	.priority = 0,
//ust// };

//ust// static int init_tracepoints(void)
//ust// {
//ust//	return register_module_notifier(&tracepoint_module_nb);
//ust// }
//ust// __initcall(init_tracepoints);

//ust// #endif /* CONFIG_MODULES */

static void (*new_tracepoint_cb)(struct tracepoint *) = NULL;

void tracepoint_set_new_tracepoint_cb(void (*cb)(struct tracepoint *))
{
	new_tracepoint_cb = cb;
}

/* Invoke the registered callback, if any, on each tracepoint in the range. */
static void new_tracepoints(struct tracepoint *start, struct tracepoint *end)
{
	if (new_tracepoint_cb) {
		struct tracepoint *t;

		for (t = start; t < end; t++) {
			new_tracepoint_cb(t);
		}
	}
}

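/*
 * Register a library's (or executable's) tracepoint section with the runtime:
 * record it in the libs list, notify the new-tracepoint callback, and connect
 * any probes already registered for these tracepoint names.
 */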
int tracepoint_register_lib(struct tracepoint *tracepoints_start, int tracepoints_count)
{
	struct tracepoint_lib *pl;

	pl = (struct tracepoint_lib *) zmalloc(sizeof(struct tracepoint_lib));
	if (!pl)
		return -ENOMEM;

	pl->tracepoints_start = tracepoints_start;
	pl->tracepoints_count = tracepoints_count;

	/* FIXME: maybe protect this with its own mutex? */
	pthread_mutex_lock(&tracepoints_mutex);
	list_add(&pl->list, &libs);
	pthread_mutex_unlock(&tracepoints_mutex);

	new_tracepoints(tracepoints_start, tracepoints_start + tracepoints_count);

	/* FIXME: update just the loaded lib */
	lib_update_tracepoints();

	DBG("just registered a tracepoints section from %p and having %d tracepoints", tracepoints_start, tracepoints_count);

	return 0;
}

int tracepoint_unregister_lib(struct tracepoint *tracepoints_start)
{
	struct tracepoint_lib *lib;

	pthread_mutex_lock(&tracepoints_mutex);

	list_for_each_entry(lib, &libs, list) {
		if (lib->tracepoints_start == tracepoints_start) {
			struct tracepoint_lib *lib2free = lib;

			list_del(&lib->list);
			free(lib2free);
			break;
		}
	}

	pthread_mutex_unlock(&tracepoints_mutex);

	return 0;
}