/* libust/tracepoint.c */
/*
 * Copyright (C) 2008 Mathieu Desnoyers
 * Copyright (C) 2009 Pierre-Marc Fournier
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Ported to userspace by Pierre-Marc Fournier.
 */

#include <errno.h>
#include <ust/tracepoint.h>
#include <ust/core.h>
#include <ust/kcompat/kcompat.h>
#include "usterr.h"

#define _LGPL_SOURCE
#include <urcu-bp.h>

//extern struct tracepoint __start___tracepoints[] __attribute__((visibility("hidden")));
//extern struct tracepoint __stop___tracepoints[] __attribute__((visibility("hidden")));

/* Set to 1 to enable tracepoint debug output */
static const int tracepoint_debug;

/* libraries that contain tracepoints (struct tracepoint_lib) */
static LIST_HEAD(libs);

/*
 * tracepoints_mutex nests inside module_mutex. Tracepoints mutex protects the
 * builtin and module tracepoints and the hash table.
 */
static DEFINE_MUTEX(tracepoints_mutex);

/*
 * Tracepoint hash table, containing the active tracepoints.
 * Protected by tracepoints_mutex.
 */
#define TRACEPOINT_HASH_BITS 6
#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];

/*
 * Note about RCU:
 * It is used to delay the freeing of old probe arrays until a quiescent
 * state is reached.
 * Tracepoint entries modifications are protected by the tracepoints_mutex.
 */
struct tracepoint_entry {
	struct hlist_node hlist;
	void **funcs;
	int refcount; /* Number of times armed. 0 if disarmed. */
	char name[0];
};

struct tp_probes {
	union {
//ust//		struct rcu_head rcu;
		struct list_head list;
	} u;
	void *probes[0];
};

static inline void *allocate_probes(int count)
{
	struct tp_probes *p = malloc(count * sizeof(void *)
			+ sizeof(struct tp_probes));
	return p == NULL ? NULL : p->probes;
}

//ust// static void rcu_free_old_probes(struct rcu_head *head)
//ust// {
//ust//	kfree(container_of(head, struct tp_probes, u.rcu));
//ust// }

static inline void release_probes(void *old)
{
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);
//ust//		call_rcu_sched(&tp_probes->u.rcu, rcu_free_old_probes);
		synchronize_rcu();
		free(tp_probes);
	}
}
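
/*
 * Note on the layout above: allocate_probes() returns a pointer to the
 * flexible "probes" array embedded in struct tp_probes, not to the
 * allocation itself. release_probes() therefore uses container_of() to
 * recover the enclosing struct tp_probes before freeing it, after waiting
 * for an RCU grace period so that no tracepoint call site can still be
 * dereferencing the old array.
 */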

static void debug_print_probes(struct tracepoint_entry *entry)
{
	int i;

	if (!tracepoint_debug || !entry->funcs)
		return;

	for (i = 0; entry->funcs[i]; i++)
		DBG("Probe %d : %p", i, entry->funcs[i]);
}

static void *
tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe)
{
	int nr_probes = 0;
	void **old, **new;

	WARN_ON(!probe);

	debug_print_probes(entry);
	old = entry->funcs;
	if (old) {
		/* (N -> N+1), (N != 0, 1) probes */
		for (nr_probes = 0; old[nr_probes]; nr_probes++)
			if (old[nr_probes] == probe)
				return ERR_PTR(-EEXIST);
	}
	/* + 2 : one for new probe, one for NULL func */
	new = allocate_probes(nr_probes + 2);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (old)
		memcpy(new, old, nr_probes * sizeof(void *));
	new[nr_probes] = probe;
	new[nr_probes + 1] = NULL;
	entry->refcount = nr_probes + 1;
	entry->funcs = new;
	debug_print_probes(entry);
	return old;
}

static void *
tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe)
{
	int nr_probes = 0, nr_del = 0, i;
	void **old, **new;

	old = entry->funcs;

	if (!old)
		return ERR_PTR(-ENOENT);

	debug_print_probes(entry);
	/* (N -> M), (N > 1, M >= 0) probes */
	for (nr_probes = 0; old[nr_probes]; nr_probes++) {
		if ((!probe || old[nr_probes] == probe))
			nr_del++;
	}

	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		entry->funcs = NULL;
		entry->refcount = 0;
		debug_print_probes(entry);
		return old;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 0) */
		/* + 1 for NULL */
		new = allocate_probes(nr_probes - nr_del + 1);
		if (new == NULL)
			return ERR_PTR(-ENOMEM);
		for (i = 0; old[i]; i++)
			if ((probe && old[i] != probe))
				new[j++] = old[i];
		new[nr_probes - nr_del] = NULL;
		entry->refcount = nr_probes - nr_del;
		entry->funcs = new;
	}
	debug_print_probes(entry);
	return old;
}
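
/*
 * Note: tracepoint_entry_add_probe() and tracepoint_entry_remove_probe()
 * never modify a probe array in place. A new NULL-terminated array (or NULL
 * when the last probe is removed) is published in entry->funcs, and the old
 * array is returned so that the caller can hand it to release_probes() (or
 * defer the free through tracepoint_add_old_probes()) once an RCU grace
 * period has elapsed.
 */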

/*
 * Get tracepoint if the tracepoint is present in the tracepoint hash table.
 * Must be called with tracepoints_mutex held.
 * Returns NULL if not present.
 */
static struct tracepoint_entry *get_tracepoint(const char *name)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tracepoint_entry *e;
	u32 hash = jhash(name, strlen(name), 0);

	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
	hlist_for_each_entry(e, node, head, hlist) {
		if (!strcmp(name, e->name))
			return e;
	}
	return NULL;
}

/*
 * Add the tracepoint to the tracepoint hash table. Must be called with
 * tracepoints_mutex held.
 */
static struct tracepoint_entry *add_tracepoint(const char *name)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct tracepoint_entry *e;
	size_t name_len = strlen(name) + 1;
	u32 hash = jhash(name, name_len - 1, 0);

	head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
	hlist_for_each_entry(e, node, head, hlist) {
		if (!strcmp(name, e->name)) {
			DBG("tracepoint %s busy", name);
			return ERR_PTR(-EEXIST);	/* Already there */
		}
	}
	/*
	 * Using malloc here to allocate a variable length element. Could
	 * cause some memory fragmentation if overused.
	 */
	e = malloc(sizeof(struct tracepoint_entry) + name_len);
	if (!e)
		return ERR_PTR(-ENOMEM);
	memcpy(&e->name[0], name, name_len);
	e->funcs = NULL;
	e->refcount = 0;
	hlist_add_head(&e->hlist, head);
	return e;
}

/*
 * Remove the tracepoint from the tracepoint hash table. Must be called with
 * tracepoints_mutex held.
 */
static inline void remove_tracepoint(struct tracepoint_entry *e)
{
	hlist_del(&e->hlist);
	free(e);
}

/*
 * Sets the probe callback corresponding to one tracepoint.
 */
static void set_tracepoint(struct tracepoint_entry **entry,
	struct tracepoint *elem, int active)
{
	WARN_ON(strcmp((*entry)->name, elem->name) != 0);

	/*
	 * rcu_assign_pointer has a smp_wmb() which makes sure that the new
	 * probe callbacks array is consistent before setting a pointer to it.
	 * This array is referenced by __DO_TRACE from
	 * include/linux/tracepoints.h. A matching smp_read_barrier_depends()
	 * is used.
	 */
	rcu_assign_pointer(elem->funcs, (*entry)->funcs);
	elem->state__imv = active;
}

/*
 * Disable a tracepoint and its probe callback.
 * Note: only waiting for an RCU grace period after setting elem->call to the
 * empty function ensures that the original callback is not used anymore.
 * This is ensured by preempt_disable around the call site.
 */
static void disable_tracepoint(struct tracepoint *elem)
{
	elem->state__imv = 0;
	rcu_assign_pointer(elem->funcs, NULL);
}

/**
 * tracepoint_update_probe_range - Update a probe range
 * @begin: beginning of the range
 * @end: end of the range
 *
 * Updates the probe callback corresponding to a range of tracepoints.
 */
void tracepoint_update_probe_range(struct tracepoint *begin,
	struct tracepoint *end)
{
	struct tracepoint *iter;
	struct tracepoint_entry *mark_entry;

	pthread_mutex_lock(&tracepoints_mutex);
	for (iter = begin; iter < end; iter++) {
		mark_entry = get_tracepoint(iter->name);
		if (mark_entry) {
			set_tracepoint(&mark_entry, iter,
					!!mark_entry->refcount);
		} else {
			disable_tracepoint(iter);
		}
	}
	pthread_mutex_unlock(&tracepoints_mutex);
}

static void lib_update_tracepoints(void)
{
	struct tracepoint_lib *lib;

//ust//	pthread_mutex_lock(&module_mutex);
	list_for_each_entry(lib, &libs, list)
		tracepoint_update_probe_range(lib->tracepoints_start,
				lib->tracepoints_start + lib->tracepoints_count);
//ust//	pthread_mutex_unlock(&module_mutex);
}

/*
 * Update probes, removing the faulty probes.
 */
static void tracepoint_update_probes(void)
{
	/* Core kernel tracepoints */
//ust//	tracepoint_update_probe_range(__start___tracepoints,
//ust//		__stop___tracepoints);
	/* tracepoints in modules. */
	lib_update_tracepoints();
	/* Update immediate values */
	core_imv_update();
//ust//	module_imv_update();
}

static void *tracepoint_add_probe(const char *name, void *probe)
{
	struct tracepoint_entry *entry;
	void *old;

	entry = get_tracepoint(name);
	if (!entry) {
		entry = add_tracepoint(name);
		if (IS_ERR(entry))
			return entry;
	}
	old = tracepoint_entry_add_probe(entry, probe);
	if (IS_ERR(old) && !entry->refcount)
		remove_tracepoint(entry);
	return old;
}

/**
 * tracepoint_probe_register - Connect a probe to a tracepoint
 * @name: tracepoint name
 * @probe: probe handler
 *
 * Returns 0 if ok, error value on error.
 * The probe address must at least be aligned on the architecture pointer size.
 */
int tracepoint_probe_register(const char *name, void *probe)
{
	void *old;

	pthread_mutex_lock(&tracepoints_mutex);
	old = tracepoint_add_probe(name, probe);
	pthread_mutex_unlock(&tracepoints_mutex);
	if (IS_ERR(old))
		return PTR_ERR(old);

	tracepoint_update_probes();		/* may update entry */
	release_probes(old);
	return 0;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_register);
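
/*
 * Usage sketch (illustrative only, not part of the original file): connect
 * a probe to a tracepoint by name and disconnect it later. The event name
 * and probe signature below are hypothetical; the real signature must match
 * the arguments declared by the corresponding tracepoint definition.
 *
 *	static void my_probe(int value)
 *	{
 *		// handle the event
 *	}
 *
 *	int ret = tracepoint_probe_register("my_event", my_probe);
 *	if (ret)
 *		return ret;	// e.g. -EEXIST if this probe is already registered
 *	// ... trace for a while ...
 *	tracepoint_probe_unregister("my_event", my_probe);
 */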

static void *tracepoint_remove_probe(const char *name, void *probe)
{
	struct tracepoint_entry *entry;
	void *old;

	entry = get_tracepoint(name);
	if (!entry)
		return ERR_PTR(-ENOENT);
	old = tracepoint_entry_remove_probe(entry, probe);
	if (IS_ERR(old))
		return old;
	if (!entry->refcount)
		remove_tracepoint(entry);
	return old;
}

/**
 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
 * @name: tracepoint name
 * @probe: probe function pointer
 *
 * We do not need to call a synchronize_sched to make sure the probes have
 * finished running before doing a module unload, because the module unload
 * itself uses stop_machine(), which ensures that every preempt disabled
 * section has finished.
 */
int tracepoint_probe_unregister(const char *name, void *probe)
{
	void *old;

	pthread_mutex_lock(&tracepoints_mutex);
	old = tracepoint_remove_probe(name, probe);
	pthread_mutex_unlock(&tracepoints_mutex);
	if (IS_ERR(old))
		return PTR_ERR(old);

	tracepoint_update_probes();		/* may update entry */
	release_probes(old);
	return 0;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);

static LIST_HEAD(old_probes);
static int need_update;

static void tracepoint_add_old_probes(void *old)
{
	need_update = 1;
	if (old) {
		struct tp_probes *tp_probes = container_of(old,
			struct tp_probes, probes[0]);
		list_add(&tp_probes->u.list, &old_probes);
	}
}

/**
 * tracepoint_probe_register_noupdate - register a probe but do not connect it
 * @name: tracepoint name
 * @probe: probe handler
 *
 * Caller must call tracepoint_probe_update_all().
 */
int tracepoint_probe_register_noupdate(const char *name, void *probe)
{
	void *old;

	pthread_mutex_lock(&tracepoints_mutex);
	old = tracepoint_add_probe(name, probe);
	if (IS_ERR(old)) {
		pthread_mutex_unlock(&tracepoints_mutex);
		return PTR_ERR(old);
	}
	tracepoint_add_old_probes(old);
	pthread_mutex_unlock(&tracepoints_mutex);
	return 0;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);

/**
 * tracepoint_probe_unregister_noupdate - remove a probe but do not disconnect it
 * @name: tracepoint name
 * @probe: probe function pointer
 *
 * Caller must call tracepoint_probe_update_all().
 */
int tracepoint_probe_unregister_noupdate(const char *name, void *probe)
{
	void *old;

	pthread_mutex_lock(&tracepoints_mutex);
	old = tracepoint_remove_probe(name, probe);
	if (IS_ERR(old)) {
		pthread_mutex_unlock(&tracepoints_mutex);
		return PTR_ERR(old);
	}
	tracepoint_add_old_probes(old);
	pthread_mutex_unlock(&tracepoints_mutex);
	return 0;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_unregister_noupdate);

/**
 * tracepoint_probe_update_all - update tracepoints
 */
void tracepoint_probe_update_all(void)
{
	LIST_HEAD(release_probes);
	struct tp_probes *pos, *next;

	pthread_mutex_lock(&tracepoints_mutex);
	if (!need_update) {
		pthread_mutex_unlock(&tracepoints_mutex);
		return;
	}
	if (!list_empty(&old_probes))
		list_replace_init(&old_probes, &release_probes);
	need_update = 0;
	pthread_mutex_unlock(&tracepoints_mutex);

	tracepoint_update_probes();
	list_for_each_entry_safe(pos, next, &release_probes, u.list) {
		list_del(&pos->u.list);
//ust//		call_rcu_sched(&pos->u.rcu, rcu_free_old_probes);
		synchronize_rcu();
		free(pos);
	}
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_probe_update_all);
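
/*
 * Usage sketch (illustrative only): the _noupdate variants let a caller
 * register or unregister several probes and then apply a single update
 * pass, instead of re-walking every tracepoint after each change. The
 * event and probe names below are hypothetical.
 *
 *	tracepoint_probe_register_noupdate("event_a", probe_a);
 *	tracepoint_probe_register_noupdate("event_b", probe_b);
 *	tracepoint_probe_unregister_noupdate("event_c", probe_c);
 *	tracepoint_probe_update_all();	// connect/disconnect and free old arrays
 */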

/*
 * Returns 0 if current not found.
 * Returns 1 if current found.
 */
int lib_get_iter_tracepoints(struct tracepoint_iter *iter)
{
	struct tracepoint_lib *iter_lib;
	int found = 0;

//ust//	pthread_mutex_lock(&module_mutex);
	list_for_each_entry(iter_lib, &libs, list) {
		if (iter_lib < iter->lib)
			continue;
		else if (iter_lib > iter->lib)
			iter->tracepoint = NULL;
		found = tracepoint_get_iter_range(&iter->tracepoint,
			iter_lib->tracepoints_start,
			iter_lib->tracepoints_start + iter_lib->tracepoints_count);
		if (found) {
			iter->lib = iter_lib;
			break;
		}
	}
//ust//	pthread_mutex_unlock(&module_mutex);
	return found;
}

/**
 * tracepoint_get_iter_range - Get the next tracepoint iterator given a range.
 * @tracepoint: current tracepoint (in), next tracepoint (out)
 * @begin: beginning of the range
 * @end: end of the range
 *
 * Returns whether a next tracepoint has been found (1) or not (0).
 * Will return the first tracepoint in the range if the input tracepoint is
 * NULL.
 */
int tracepoint_get_iter_range(struct tracepoint **tracepoint,
	struct tracepoint *begin, struct tracepoint *end)
{
	if (!*tracepoint && begin != end) {
		*tracepoint = begin;
		return 1;
	}
	if (*tracepoint >= begin && *tracepoint < end)
		return 1;
	return 0;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_get_iter_range);

static void tracepoint_get_iter(struct tracepoint_iter *iter)
{
	int found = 0;

//ust//	/* Core kernel tracepoints */
//ust//	if (!iter->module) {
//ust//		found = tracepoint_get_iter_range(&iter->tracepoint,
//ust//				__start___tracepoints, __stop___tracepoints);
//ust//		if (found)
//ust//			goto end;
//ust//	}
	/* tracepoints in libs. */
	found = lib_get_iter_tracepoints(iter);
//ust// end:
	if (!found)
		tracepoint_iter_reset(iter);
}

void tracepoint_iter_start(struct tracepoint_iter *iter)
{
	tracepoint_get_iter(iter);
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_start);

void tracepoint_iter_next(struct tracepoint_iter *iter)
{
	iter->tracepoint++;
	/*
	 * iter->tracepoint may be invalid because we blindly incremented it.
	 * Make sure it is valid by checking it against the tracepoint ranges,
	 * moving on to the tracepoints of the following libraries if necessary.
	 */
	tracepoint_get_iter(iter);
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_next);

void tracepoint_iter_stop(struct tracepoint_iter *iter)
{
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_stop);

void tracepoint_iter_reset(struct tracepoint_iter *iter)
{
//ust//	iter->module = NULL;
	iter->tracepoint = NULL;
}
//ust// EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
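
/*
 * Usage sketch (illustrative only): walk every tracepoint known to the
 * registered libraries with the iterator API. Zero-initializing the
 * iterator before the first tracepoint_iter_start() is an assumption made
 * here to start from the first library.
 *
 *	struct tracepoint_iter iter;
 *
 *	memset(&iter, 0, sizeof(iter));
 *	tracepoint_iter_start(&iter);
 *	while (iter.tracepoint) {
 *		DBG("tracepoint: %s", iter.tracepoint->name);
 *		tracepoint_iter_next(&iter);
 *	}
 *	tracepoint_iter_stop(&iter);
 */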

//ust// #ifdef CONFIG_MODULES

//ust// int tracepoint_module_notify(struct notifier_block *self,
//ust//			     unsigned long val, void *data)
//ust// {
//ust//	struct module *mod = data;
//ust//
//ust//	switch (val) {
//ust//	case MODULE_STATE_COMING:
//ust//		tracepoint_update_probe_range(mod->tracepoints,
//ust//			mod->tracepoints + mod->num_tracepoints);
//ust//		break;
//ust//	case MODULE_STATE_GOING:
//ust//		tracepoint_update_probe_range(mod->tracepoints,
//ust//			mod->tracepoints + mod->num_tracepoints);
//ust//		break;
//ust//	}
//ust//	return 0;
//ust// }

//ust// struct notifier_block tracepoint_module_nb = {
//ust//	.notifier_call = tracepoint_module_notify,
//ust//	.priority = 0,
//ust// };

//ust// static int init_tracepoints(void)
//ust// {
//ust//	return register_module_notifier(&tracepoint_module_nb);
//ust// }
//ust// __initcall(init_tracepoints);

//ust// #endif /* CONFIG_MODULES */

static void (*new_tracepoint_cb)(struct tracepoint *) = NULL;

void tracepoint_set_new_tracepoint_cb(void (*cb)(struct tracepoint *))
{
	new_tracepoint_cb = cb;
}

static void new_tracepoints(struct tracepoint *start, struct tracepoint *end)
{
	if (new_tracepoint_cb) {
		struct tracepoint *t;

		for (t = start; t < end; t++) {
			new_tracepoint_cb(t);
		}
	}
}

int tracepoint_register_lib(struct tracepoint *tracepoints_start, int tracepoints_count)
{
	struct tracepoint_lib *pl;

	pl = (struct tracepoint_lib *) malloc(sizeof(struct tracepoint_lib));
	if (!pl)
		return -ENOMEM;

	pl->tracepoints_start = tracepoints_start;
	pl->tracepoints_count = tracepoints_count;

	/* FIXME: maybe protect this with its own mutex? */
	pthread_mutex_lock(&tracepoints_mutex);
	list_add(&pl->list, &libs);
	pthread_mutex_unlock(&tracepoints_mutex);

	new_tracepoints(tracepoints_start, tracepoints_start + tracepoints_count);

	/* FIXME: update just the loaded lib */
	lib_update_tracepoints();

	DBG("just registered a tracepoints section from %p and having %d tracepoints", tracepoints_start, tracepoints_count);

	return 0;
}
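
/*
 * Note (illustrative assumption, not confirmed by this file alone):
 * tracepoint_register_lib() is normally not called by hand. The instrumented
 * executable or shared object is expected to call it, typically from code
 * generated by the tracepoint macros, passing the start address and element
 * count of its tracepoint section, e.g. something like:
 *
 *	tracepoint_register_lib(__start___tracepoints,
 *				__stop___tracepoints - __start___tracepoints);
 */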

int tracepoint_unregister_lib(struct tracepoint *tracepoints_start)
{
	struct tracepoint_lib *lib;

	pthread_mutex_lock(&tracepoints_mutex);

	list_for_each_entry(lib, &libs, list) {
		if (lib->tracepoints_start == tracepoints_start) {
			struct tracepoint_lib *lib2free = lib;

			list_del(&lib->list);
			free(lib2free);
			break;
		}
	}

	pthread_mutex_unlock(&tracepoints_mutex);

	return 0;
}