continue working on build system
[ust.git] / libust / marker.c
1 /*
2 * Copyright (C) 2007 Mathieu Desnoyers
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18 //ust// #include <linux/module.h>
19 //ust// #include <linux/mutex.h>
20 //ust// #include <linux/types.h>
21 //#include "jhash.h"
22 //#include "list.h"
23 //#include "rcupdate.h"
24 //ust// #include <linux/marker.h>
25 #include <errno.h>
26 //ust// #include <linux/slab.h>
27 //ust// #include <linux/immediate.h>
28 //ust// #include <linux/sched.h>
29 //ust// #include <linux/uaccess.h>
30 //ust// #include <linux/user_marker.h>
31 //ust// #include <linux/ltt-tracer.h>
32
33 #include "marker.h"
34 #include "kernelcompat.h"
35 #include "usterr.h"
36 #include "channels.h"
37 #include "tracercore.h"
38 #include "tracer.h"
39 #include "urcu.h"
40
/*
 * Bounds of the ELF "__markers" section, provided by the linker. All markers
 * compiled into this object lie in [__start___markers, __stop___markers).
 * Hidden visibility keeps each shared object's range private to itself.
 */
extern struct marker __start___markers[] __attribute__((visibility("hidden")));
extern struct marker __stop___markers[] __attribute__((visibility("hidden")));

/* Set to 1 to enable marker debug output (compile-time switch: it is const) */
static const int marker_debug;

/*
 * markers_mutex nests inside module_mutex. Markers mutex protects the builtin
 * and module markers and the hash table.
 */
static DEFINE_MUTEX(markers_mutex);
52
/* Take the global markers mutex; exported for callers outside this file. */
void lock_markers(void)
{
	mutex_lock(&markers_mutex);
}
57
/* Release the global markers mutex taken by lock_markers(). */
void unlock_markers(void)
{
	mutex_unlock(&markers_mutex);
}
62
/*
 * Marker hash table, containing the active markers.
 * Protected by module_mutex.
 */
#define MARKER_HASH_BITS 6
#define MARKER_TABLE_SIZE (1 << MARKER_HASH_BITS)	/* 64 buckets */
static struct hlist_head marker_table[MARKER_TABLE_SIZE];
70
/*
 * Note about RCU :
 * It is used to make sure every handler has finished using its private data
 * between two consecutive operation (add or remove) on a given marker. It is
 * also used to delay the free of multiple probes array until a quiescent state
 * is reached.
 * marker entries modifications are protected by the markers_mutex.
 */
struct marker_entry {
	struct hlist_node hlist;	/* chaining in a marker_table bucket */
	char *format;			/* points into channel[] or kstrdup'd */
	char *name;			/* points into channel[] storage below */
	/* Probe wrapper */
	void (*call)(const struct marker *mdata, void *call_private, ...);
	struct marker_probe_closure single;	/* used when one probe is set */
	struct marker_probe_closure *multi;	/* NULL-terminated array (>1 probe) */
	int refcount;	/* Number of times armed. 0 if disarmed. */
	struct rcu_head rcu;		/* deferred free of old closure array */
	void *oldptr;			/* closure array pending RCU free */
	int rcu_pending;		/* 1 while an RCU callback is outstanding */
	u16 channel_id;			/* numeric id from ltt_channels */
	u16 event_id;			/* numeric id within the channel */
	unsigned char ptype:1;		/* 0: single probe, 1: multi array */
	unsigned char format_allocated:1;	/* format kstrdup'd: kfree on removal */
	char channel[0];	/* Contains channel'\0'name'\0'format'\0' */
};
97
#ifdef CONFIG_MARKERS_USERSPACE
static void marker_update_processes(void);
#else
/* No userspace-marker process tracking configured: nothing to update. */
static void marker_update_processes(void)
{
}
#endif
105
/**
 * __mark_empty_function - Empty probe callback
 * @mdata: marker data
 * @probe_private: probe private data
 * @call_private: call site private data
 * @fmt: format string
 * @...: variable argument list
 *
 * Empty callback provided as a probe to the markers. By providing this to a
 * disabled marker, we make sure the execution flow is always valid even
 * though the function pointer change and the marker enabling are two distinct
 * operations that modifies the execution flow of preemptible code.
 */
notrace void __mark_empty_function(const struct marker *mdata,
	void *probe_private, void *call_private, const char *fmt, va_list *args)
{
	/* Intentionally empty: a disabled marker still has a callable probe. */
}
//ust// EXPORT_SYMBOL_GPL(__mark_empty_function);
124
/*
 * marker_probe_cb Callback that prepares the variable argument list for probes.
 * @mdata: pointer of type struct marker
 * @call_private: caller site private data
 * @...: Variable argument list.
 *
 * Since we do not use "typical" pointer based RCU in the 1 argument case, we
 * need to put a full smp_rmb() in this branch. This is why we do not use
 * rcu_dereference() for the pointer read.
 */
notrace void marker_probe_cb(const struct marker *mdata,
		void *call_private, ...)
{
	va_list args;
	char ptype;

	/*
	 * rcu_read_lock_sched does two things : disabling preemption to make
	 * sure the teardown of the callbacks can be done correctly when they
	 * are in modules and they insure RCU read coherency.
	 */
//ust//	rcu_read_lock_sched_notrace();
	ptype = mdata->ptype;
	if (likely(!ptype)) {
		marker_probe_func *func;
		/* Must read the ptype before ptr. They are not data dependant,
		 * so we put an explicit smp_rmb() here. */
		smp_rmb();
		func = mdata->single.func;
		/* Must read the ptr before private data. They are not data
		 * dependant, so we put an explicit smp_rmb() here. */
		smp_rmb();
		va_start(args, call_private);
		func(mdata, mdata->single.probe_private, call_private,
			mdata->format, &args);
		va_end(args);
	} else {
		struct marker_probe_closure *multi;
		int i;
		/*
		 * Read mdata->ptype before mdata->multi.
		 */
		smp_rmb();
		multi = mdata->multi;
		/*
		 * multi points to an array, therefore accessing the array
		 * depends on reading multi. However, even in this case,
		 * we must insure that the pointer is read _before_ the array
		 * data. Same as rcu_dereference, but we need a full smp_rmb()
		 * in the fast path, so put the explicit barrier here.
		 */
		smp_read_barrier_depends();
		for (i = 0; multi[i].func; i++) {
			/* Re-init the va_list per probe: each callee
			 * consumes the arguments it reads. */
			va_start(args, call_private);
			multi[i].func(mdata, multi[i].probe_private,
				call_private, mdata->format, &args);
			va_end(args);
		}
	}
//ust//	rcu_read_unlock_sched_notrace();
}
//ust// EXPORT_SYMBOL_GPL(marker_probe_cb);
187
/*
 * marker_probe_cb Callback that does not prepare the variable argument list.
 * @mdata: pointer of type struct marker
 * @call_private: caller site private data
 * @...: Variable argument list.
 *
 * Should be connected to markers "MARK_NOARGS". Probes attached to a
 * MARK_NOARGS marker must never read through the va_list pointer they
 * receive: it is deliberately left uninitialized here.
 */
static notrace void marker_probe_cb_noarg(const struct marker *mdata,
		void *call_private, ...)
{
	va_list args;	/* not initialized */
	char ptype;

//ust//	rcu_read_lock_sched_notrace();
	ptype = mdata->ptype;
	if (likely(!ptype)) {
		marker_probe_func *func;
		/* Must read the ptype before ptr. They are not data dependant,
		 * so we put an explicit smp_rmb() here. */
		smp_rmb();
		func = mdata->single.func;
		/* Must read the ptr before private data. They are not data
		 * dependant, so we put an explicit smp_rmb() here. */
		smp_rmb();
		func(mdata, mdata->single.probe_private, call_private,
			mdata->format, &args);
	} else {
		struct marker_probe_closure *multi;
		int i;
		/*
		 * Read mdata->ptype before mdata->multi.
		 */
		smp_rmb();
		multi = mdata->multi;
		/*
		 * multi points to an array, therefore accessing the array
		 * depends on reading multi. However, even in this case,
		 * we must insure that the pointer is read _before_ the array
		 * data. Same as rcu_dereference, but we need a full smp_rmb()
		 * in the fast path, so put the explicit barrier here.
		 */
		smp_read_barrier_depends();
		for (i = 0; multi[i].func; i++)
			multi[i].func(mdata, multi[i].probe_private,
				call_private, mdata->format, &args);
	}
//ust//	rcu_read_unlock_sched_notrace();
}
237
/*
 * RCU callback: free the previous probe closure array once no reader can
 * still reference it, then clear rcu_pending so a later update may proceed.
 */
static void free_old_closure(struct rcu_head *head)
{
	struct marker_entry *entry = container_of(head,
		struct marker_entry, rcu);
	kfree(entry->oldptr);
	/* Make sure we free the data before setting the pending flag to 0 */
	smp_wmb();
	entry->rcu_pending = 0;
}
247
248 static void debug_print_probes(struct marker_entry *entry)
249 {
250 int i;
251
252 if (!marker_debug)
253 return;
254
255 if (!entry->ptype) {
256 printk(KERN_DEBUG "Single probe : %p %p\n",
257 entry->single.func,
258 entry->single.probe_private);
259 } else {
260 for (i = 0; entry->multi[i].func; i++)
261 printk(KERN_DEBUG "Multi probe %d : %p %p\n", i,
262 entry->multi[i].func,
263 entry->multi[i].probe_private);
264 }
265 }
266
/*
 * Add (probe, probe_private) to the closure set of @entry.
 *
 * Handles the probe-count transitions:
 *   0 -> 1   : fill entry->single in place, stay in single mode (ptype == 0)
 *   1 -> 2   : build a multi array holding the old single plus the new probe
 *   N -> N+1 : build a multi array with one extra slot
 * Returns the previous multi array so the caller can free it after an RCU
 * grace period, NULL when nothing needs freeing, or ERR_PTR(-EBUSY) if the
 * exact pair is already registered / ERR_PTR(-ENOMEM) on allocation failure.
 * Caller must hold markers_mutex.
 */
static struct marker_probe_closure *
marker_entry_add_probe(struct marker_entry *entry,
		marker_probe_func *probe, void *probe_private)
{
	int nr_probes = 0;
	struct marker_probe_closure *old, *new;

	WARN_ON(!probe);

	debug_print_probes(entry);
	old = entry->multi;
	if (!entry->ptype) {
		if (entry->single.func == probe &&
				entry->single.probe_private == probe_private)
			return ERR_PTR(-EBUSY);
		if (entry->single.func == __mark_empty_function) {
			/* 0 -> 1 probes */
			entry->single.func = probe;
			entry->single.probe_private = probe_private;
			entry->refcount = 1;
			entry->ptype = 0;
			debug_print_probes(entry);
			return NULL;
		} else {
			/* 1 -> 2 probes */
			nr_probes = 1;
			old = NULL;
		}
	} else {
		/* (N -> N+1), (N != 0, 1) probes */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++)
			if (old[nr_probes].func == probe
					&& old[nr_probes].probe_private
						== probe_private)
				return ERR_PTR(-EBUSY);
	}
	/* + 2 : one for new probe, one for NULL func */
	new = kzalloc((nr_probes + 2) * sizeof(struct marker_probe_closure),
			GFP_KERNEL);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (!old)
		new[0] = entry->single;	/* promote the single probe */
	else
		memcpy(new, old,
			nr_probes * sizeof(struct marker_probe_closure));
	new[nr_probes].func = probe;
	new[nr_probes].probe_private = probe_private;
	entry->refcount = nr_probes + 1;
	entry->multi = new;
	entry->ptype = 1;
	debug_print_probes(entry);
	return old;
}
321
/*
 * Remove matching probes from @entry. A NULL @probe matches any function,
 * so only probe_private is compared in that case.
 *
 * Handles the probe-count transitions:
 *   1 -> 0 : reset entry->single to the empty function
 *   N -> 1 : copy the lone survivor into entry->single, leave multi mode
 *   N -> M : build a smaller multi array without the removed entries
 * Returns the previous multi array so the caller can free it after an RCU
 * grace period, NULL when nothing needs freeing, or ERR_PTR(-ENOMEM).
 * Caller must hold markers_mutex.
 */
static struct marker_probe_closure *
marker_entry_remove_probe(struct marker_entry *entry,
		marker_probe_func *probe, void *probe_private)
{
	int nr_probes = 0, nr_del = 0, i;
	struct marker_probe_closure *old, *new;

	old = entry->multi;

	debug_print_probes(entry);
	if (!entry->ptype) {
		/* 0 -> N is an error */
		WARN_ON(entry->single.func == __mark_empty_function);
		/* 1 -> 0 probes */
		WARN_ON(probe && entry->single.func != probe);
		WARN_ON(entry->single.probe_private != probe_private);
		entry->single.func = __mark_empty_function;
		entry->refcount = 0;
		entry->ptype = 0;
		debug_print_probes(entry);
		return NULL;
	} else {
		/* (N -> M), (N > 1, M >= 0) probes */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if ((!probe || old[nr_probes].func == probe)
					&& old[nr_probes].probe_private
						== probe_private)
				nr_del++;
		}
	}

	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		entry->single.func = __mark_empty_function;
		entry->refcount = 0;
		entry->ptype = 0;
	} else if (nr_probes - nr_del == 1) {
		/* N -> 1, (N > 1) */
		for (i = 0; old[i].func; i++)
			if ((probe && old[i].func != probe) ||
					old[i].probe_private != probe_private)
				entry->single = old[i];
		entry->refcount = 1;
		entry->ptype = 0;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 1) */
		/* + 1 for NULL */
		new = kzalloc((nr_probes - nr_del + 1)
			* sizeof(struct marker_probe_closure), GFP_KERNEL);
		if (new == NULL)
			return ERR_PTR(-ENOMEM);
		for (i = 0; old[i].func; i++)
			if ((probe && old[i].func != probe) ||
					old[i].probe_private != probe_private)
				new[j++] = old[i];
		entry->refcount = nr_probes - nr_del;
		entry->ptype = 1;
		entry->multi = new;
	}
	debug_print_probes(entry);
	return old;
}
385
386 /*
387 * Get marker if the marker is present in the marker hash table.
388 * Must be called with markers_mutex held.
389 * Returns NULL if not present.
390 */
391 static struct marker_entry *get_marker(const char *channel, const char *name)
392 {
393 struct hlist_head *head;
394 struct hlist_node *node;
395 struct marker_entry *e;
396 size_t channel_len = strlen(channel) + 1;
397 size_t name_len = strlen(name) + 1;
398 u32 hash;
399
400 hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
401 head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
402 hlist_for_each_entry(e, node, head, hlist) {
403 if (!strcmp(channel, e->channel) && !strcmp(name, e->name))
404 return e;
405 }
406 return NULL;
407 }
408
409 /*
410 * Add the marker to the marker hash table. Must be called with markers_mutex
411 * held.
412 */
413 static struct marker_entry *add_marker(const char *channel, const char *name,
414 const char *format)
415 {
416 struct hlist_head *head;
417 struct hlist_node *node;
418 struct marker_entry *e;
419 size_t channel_len = strlen(channel) + 1;
420 size_t name_len = strlen(name) + 1;
421 size_t format_len = 0;
422 u32 hash;
423
424 hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
425 if (format)
426 format_len = strlen(format) + 1;
427 head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
428 hlist_for_each_entry(e, node, head, hlist) {
429 if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
430 printk(KERN_NOTICE
431 "Marker %s.%s busy\n", channel, name);
432 return ERR_PTR(-EBUSY); /* Already there */
433 }
434 }
435 /*
436 * Using kmalloc here to allocate a variable length element. Could
437 * cause some memory fragmentation if overused.
438 */
439 e = kmalloc(sizeof(struct marker_entry)
440 + channel_len + name_len + format_len,
441 GFP_KERNEL);
442 if (!e)
443 return ERR_PTR(-ENOMEM);
444 memcpy(e->channel, channel, channel_len);
445 e->name = &e->channel[channel_len];
446 memcpy(e->name, name, name_len);
447 if (format) {
448 e->format = &e->name[channel_len + name_len];
449 memcpy(e->format, format, format_len);
450 if (strcmp(e->format, MARK_NOARGS) == 0)
451 e->call = marker_probe_cb_noarg;
452 else
453 e->call = marker_probe_cb;
454 trace_mark(metadata, core_marker_format,
455 "channel %s name %s format %s",
456 e->channel, e->name, e->format);
457 } else {
458 e->format = NULL;
459 e->call = marker_probe_cb;
460 }
461 e->single.func = __mark_empty_function;
462 e->single.probe_private = NULL;
463 e->multi = NULL;
464 e->ptype = 0;
465 e->format_allocated = 0;
466 e->refcount = 0;
467 e->rcu_pending = 0;
468 hlist_add_head(&e->hlist, head);
469 return e;
470 }
471
/*
 * Remove the marker from the marker hash table. Must be called with mutex_lock
 * held.
 * Returns 0 on success, -ENOENT if the marker is unknown, or -EBUSY if a
 * probe is still connected (single.func is not the empty function).
 */
static int remove_marker(const char *channel, const char *name)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct marker_entry *e;
	int found = 0;
	size_t channel_len = strlen(channel) + 1;
	size_t name_len = strlen(name) + 1;
	u32 hash;
	int ret;

	/* Same hash scheme as get_marker()/add_marker(): exclude the NULs. */
	hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
	head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
	hlist_for_each_entry(e, node, head, hlist) {
		if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
			found = 1;
			break;
		}
	}
	if (!found)
		return -ENOENT;
	if (e->single.func != __mark_empty_function)
		return -EBUSY;
	hlist_del(&e->hlist);
	/* format may have been kstrdup'd by marker_set_format(). */
	if (e->format_allocated)
		kfree(e->format);
	ret = ltt_channels_unregister(e->channel);
	WARN_ON(ret);
	/* Make sure the call_rcu has been executed */
//ust//	if (e->rcu_pending)
//ust//		rcu_barrier_sched();
	kfree(e);
	return 0;
}
510
511 /*
512 * Set the mark_entry format to the format found in the element.
513 */
514 static int marker_set_format(struct marker_entry *entry, const char *format)
515 {
516 entry->format = kstrdup(format, GFP_KERNEL);
517 if (!entry->format)
518 return -ENOMEM;
519 entry->format_allocated = 1;
520
521 trace_mark(metadata, core_marker_format,
522 "channel %s name %s format %s",
523 entry->channel, entry->name, entry->format);
524 return 0;
525 }
526
/*
 * Sets the probe callback corresponding to one marker.
 * @entry: hash-table entry holding the desired probe state
 * @elem: the in-image marker site to update
 * @active: non-zero to arm the site, zero to leave it disarmed
 *
 * Copies the entry's callback, ids and probe closure into the marker site,
 * using write barriers so concurrent callers of marker_probe_cb() always
 * observe a consistent (func, private, ptype) triple.
 * Returns 0 on success or a negative errno (e.g. -EPERM on format mismatch).
 */
static int set_marker(struct marker_entry *entry, struct marker *elem,
		int active)
{
	int ret = 0;
	WARN_ON(strcmp(entry->name, elem->name) != 0);

	if (entry->format) {
		if (strcmp(entry->format, elem->format) != 0) {
			printk(KERN_NOTICE
				"Format mismatch for probe %s "
				"(%s), marker (%s)\n",
				entry->name,
				entry->format,
				elem->format);
			return -EPERM;
		}
	} else {
		/* First time this marker is seen: adopt the site's format. */
		ret = marker_set_format(entry, elem->format);
		if (ret)
			return ret;
	}

	/*
	 * probe_cb setup (statically known) is done here. It is
	 * asynchronous with the rest of execution, therefore we only
	 * pass from a "safe" callback (with argument) to an "unsafe"
	 * callback (does not set arguments).
	 */
	elem->call = entry->call;
	elem->channel_id = entry->channel_id;
	elem->event_id = entry->event_id;
	/*
	 * Sanity check :
	 * We only update the single probe private data when the ptr is
	 * set to a _non_ single probe! (0 -> 1 and N -> 1, N != 1)
	 */
	WARN_ON(elem->single.func != __mark_empty_function
		&& elem->single.probe_private != entry->single.probe_private
		&& !elem->ptype);
	elem->single.probe_private = entry->single.probe_private;
	/*
	 * Make sure the private data is valid when we update the
	 * single probe ptr.
	 */
	smp_wmb();
	elem->single.func = entry->single.func;
	/*
	 * We also make sure that the new probe callbacks array is consistent
	 * before setting a pointer to it.
	 */
	rcu_assign_pointer(elem->multi, entry->multi);
	/*
	 * Update the function or multi probe array pointer before setting the
	 * ptype.
	 */
	smp_wmb();
	elem->ptype = entry->ptype;

//ust//	if (elem->tp_name && (active ^ _imv_read(elem->state))) {
//ust//		WARN_ON(!elem->tp_cb);
//ust//		/*
//ust//		 * It is ok to directly call the probe registration because type
//ust//		 * checking has been done in the __trace_mark_tp() macro.
//ust//		 */
//ust//
//ust//		if (active) {
//ust//			/*
//ust//			 * try_module_get should always succeed because we hold
//ust//			 * markers_mutex to get the tp_cb address.
//ust//			 */
//ust//			ret = try_module_get(__module_text_address(
//ust//				(unsigned long)elem->tp_cb));
//ust//			BUG_ON(!ret);
//ust//			ret = tracepoint_probe_register_noupdate(
//ust//				elem->tp_name,
//ust//				elem->tp_cb);
//ust//		} else {
//ust//			ret = tracepoint_probe_unregister_noupdate(
//ust//				elem->tp_name,
//ust//				elem->tp_cb);
//ust//			/*
//ust//			 * tracepoint_probe_update_all() must be called
//ust//			 * before the module containing tp_cb is unloaded.
//ust//			 */
//ust//			module_put(__module_text_address(
//ust//				(unsigned long)elem->tp_cb));
//ust//		}
//ust//	}
	elem->state__imv = active;

	return ret;
}
622
623 /*
624 * Disable a marker and its probe callback.
625 * Note: only waiting an RCU period after setting elem->call to the empty
626 * function insures that the original callback is not used anymore. This insured
627 * by rcu_read_lock_sched around the call site.
628 */
629 static void disable_marker(struct marker *elem)
630 {
631 int ret;
632
633 /* leave "call" as is. It is known statically. */
634 //ust// if (elem->tp_name && _imv_read(elem->state)) {
635 //ust// WARN_ON(!elem->tp_cb);
636 //ust// /*
637 //ust// * It is ok to directly call the probe registration because type
638 //ust// * checking has been done in the __trace_mark_tp() macro.
639 //ust// */
640 //ust// ret = tracepoint_probe_unregister_noupdate(elem->tp_name,
641 //ust// elem->tp_cb);
642 //ust// WARN_ON(ret);
643 //ust// /*
644 //ust// * tracepoint_probe_update_all() must be called
645 //ust// * before the module containing tp_cb is unloaded.
646 //ust// */
647 //ust// module_put(__module_text_address((unsigned long)elem->tp_cb));
648 //ust// }
649 elem->state__imv = 0;
650 elem->single.func = __mark_empty_function;
651 /* Update the function before setting the ptype */
652 smp_wmb();
653 elem->ptype = 0; /* single probe */
654 /*
655 * Leave the private data and channel_id/event_id there, because removal
656 * is racy and should be done only after an RCU period. These are never
657 * used until the next initialization anyway.
658 */
659 }
660
/**
 * marker_update_probe_range - Update a probe range
 * @begin: beginning of the range
 * @end: end of the range
 *
 * Updates the probe callback corresponding to a range of markers.
 * Sites with a registered entry are (re)armed according to the entry's
 * refcount; sites without one are disabled. Takes markers_mutex itself.
 */
void marker_update_probe_range(struct marker *begin,
	struct marker *end)
{
	struct marker *iter;
	struct marker_entry *mark_entry;

	mutex_lock(&markers_mutex);
	for (iter = begin; iter < end; iter++) {
		mark_entry = get_marker(iter->channel, iter->name);
		if (mark_entry) {
			set_marker(mark_entry, iter, !!mark_entry->refcount);
			/*
			 * ignore error, continue
			 */

			/* This is added for UST. We emit a core_marker_id event
			 * for markers that are already registered to a probe
			 * upon library load. Otherwise, no core_marker_id will
			 * be generated for these markers. Is this the right thing
			 * to do?
			 */
			trace_mark(metadata, core_marker_id,
				   "channel %s name %s event_id %hu "
				   "int #1u%zu long #1u%zu pointer #1u%zu "
				   "size_t #1u%zu alignment #1u%u",
				   iter->channel, iter->name, mark_entry->event_id,
				   sizeof(int), sizeof(long), sizeof(void *),
				   sizeof(size_t), ltt_get_alignment());
		} else {
			disable_marker(iter);
		}
	}
	mutex_unlock(&markers_mutex);
}
702
/*
 * Update probes, removing the faulty probes.
 *
 * Internal callback only changed before the first probe is connected to it.
 * Single probe private data can only be changed on 0 -> 1 and 2 -> 1
 * transitions. All other transitions will leave the old private data valid.
 * This makes the non-atomicity of the callback/private data updates valid.
 *
 * "special case" updates :
 * 0 -> 1 callback
 * 1 -> 0 callback
 * 1 -> 2 callbacks
 * 2 -> 1 callbacks
 * Other updates all behave the same, just like the 2 -> 3 or 3 -> 2 updates.
 * Site effect : marker_set_format may delete the marker entry (creating a
 * replacement).
 *
 * Must be called WITHOUT markers_mutex held: the per-library update path
 * acquires it itself (see marker_update_probe_range()).
 */
static void marker_update_probes(void)
{
	/* Core kernel markers */
//ust//	marker_update_probe_range(__start___markers, __stop___markers);
	/* Markers in modules. */
//ust//	module_update_markers();
	lib_update_markers();
//ust//	tracepoint_probe_update_all();
	/* Update immediate values */
	core_imv_update();
//ust//	module_imv_update(); /* FIXME: need to port for libs? */
	marker_update_processes();
}
733
/**
 * marker_probe_register - Connect a probe to a marker
 * @channel: marker channel
 * @name: marker name
 * @format: format string
 * @probe: probe handler
 * @probe_private: probe private data
 *
 * private data must be a valid allocated memory address, or NULL.
 * Returns 0 if ok, error value on error.
 * The probe address must at least be aligned on the architecture pointer size.
 */
int marker_probe_register(const char *channel, const char *name,
			  const char *format, marker_probe_func *probe,
			  void *probe_private)
{
	struct marker_entry *entry;
	int ret = 0, ret_err;
	struct marker_probe_closure *old;
	int first_probe = 0;

	mutex_lock(&markers_mutex);
	entry = get_marker(channel, name);
	if (!entry) {
		/* First probe for this marker: create the entry, register
		 * the channel and obtain channel/event ids. */
		first_probe = 1;
		entry = add_marker(channel, name, format);
		if (IS_ERR(entry))
			ret = PTR_ERR(entry);
		if (ret)
			goto end;
		ret = ltt_channels_register(channel);
		if (ret)
			goto error_remove_marker;
		ret = ltt_channels_get_index_from_name(channel);
		if (ret < 0)
			goto error_unregister_channel;
		entry->channel_id = ret;
		ret = ltt_channels_get_event_id(channel, name);
		if (ret < 0)
			goto error_unregister_channel;
		entry->event_id = ret;
		ret = 0;
		/* Announce the new marker id in the metadata channel. */
		trace_mark(metadata, core_marker_id,
			   "channel %s name %s event_id %hu "
			   "int #1u%zu long #1u%zu pointer #1u%zu "
			   "size_t #1u%zu alignment #1u%u",
			   channel, name, entry->event_id,
			   sizeof(int), sizeof(long), sizeof(void *),
			   sizeof(size_t), ltt_get_alignment());
	} else if (format) {
		if (!entry->format)
			ret = marker_set_format(entry, format);
		else if (strcmp(entry->format, format))
			ret = -EPERM;	/* conflicting format strings */
		if (ret)
			goto end;
	}

	/*
	 * If we detect that a call_rcu is pending for this marker,
	 * make sure it's executed now.
	 */
//ust//	if (entry->rcu_pending)
//ust//		rcu_barrier_sched();
	old = marker_entry_add_probe(entry, probe, probe_private);
	if (IS_ERR(old)) {
		ret = PTR_ERR(old);
		if (first_probe)
			goto error_unregister_channel;
		else
			goto end;
	}
	/* Drop the mutex: marker_update_probes() re-acquires it in
	 * marker_update_probe_range(). */
	mutex_unlock(&markers_mutex);

	marker_update_probes();

	/* Re-take the lock and re-look up: the entry may have been removed
	 * while the mutex was released. */
	mutex_lock(&markers_mutex);
	entry = get_marker(channel, name);
	if (!entry)
		goto end;
//ust//	if (entry->rcu_pending)
//ust//		rcu_barrier_sched();
	entry->oldptr = old;
	entry->rcu_pending = 1;
	/* write rcu_pending before calling the RCU callback */
	smp_wmb();
//ust//	call_rcu_sched(&entry->rcu, free_old_closure);
	synchronize_rcu(); free_old_closure(&entry->rcu);
	goto end;

error_unregister_channel:
	ret_err = ltt_channels_unregister(channel);
	WARN_ON(ret_err);
error_remove_marker:
	ret_err = remove_marker(channel, name);
	WARN_ON(ret_err);
end:
	mutex_unlock(&markers_mutex);
	return ret;
}
//ust// EXPORT_SYMBOL_GPL(marker_probe_register);
835
/**
 * marker_probe_unregister - Disconnect a probe from a marker
 * @channel: marker channel
 * @name: marker name
 * @probe: probe function pointer
 * @probe_private: probe private data
 *
 * Returns 0 on success, -ENOENT if the marker is unknown.
 * We do not need to call a synchronize_sched to make sure the probes have
 * finished running before doing a module unload, because the module unload
 * itself uses stop_machine(), which insures that every preempt disabled section
 * have finished.
 */
int marker_probe_unregister(const char *channel, const char *name,
	marker_probe_func *probe, void *probe_private)
{
	struct marker_entry *entry;
	struct marker_probe_closure *old;
	int ret = -ENOENT;

	mutex_lock(&markers_mutex);
	entry = get_marker(channel, name);
	if (!entry)
		goto end;
//ust//	if (entry->rcu_pending)
//ust//		rcu_barrier_sched();
	old = marker_entry_remove_probe(entry, probe, probe_private);
	/* Drop the mutex: marker_update_probes() re-acquires it in
	 * marker_update_probe_range(). */
	mutex_unlock(&markers_mutex);

	marker_update_probes();

	/* Re-take the lock and re-look up: the entry may have been removed
	 * while the mutex was released. */
	mutex_lock(&markers_mutex);
	entry = get_marker(channel, name);
	if (!entry)
		goto end;
//ust//	if (entry->rcu_pending)
//ust//		rcu_barrier_sched();
	entry->oldptr = old;
	entry->rcu_pending = 1;
	/* write rcu_pending before calling the RCU callback */
	smp_wmb();
//ust//	call_rcu_sched(&entry->rcu, free_old_closure);
	synchronize_rcu(); free_old_closure(&entry->rcu);
	remove_marker(channel, name);	/* Ignore busy error message */
	ret = 0;
end:
	mutex_unlock(&markers_mutex);
	return ret;
}
//ust// EXPORT_SYMBOL_GPL(marker_probe_unregister);
886
887 static struct marker_entry *
888 get_marker_from_private_data(marker_probe_func *probe, void *probe_private)
889 {
890 struct marker_entry *entry;
891 unsigned int i;
892 struct hlist_head *head;
893 struct hlist_node *node;
894
895 for (i = 0; i < MARKER_TABLE_SIZE; i++) {
896 head = &marker_table[i];
897 hlist_for_each_entry(entry, node, head, hlist) {
898 if (!entry->ptype) {
899 if (entry->single.func == probe
900 && entry->single.probe_private
901 == probe_private)
902 return entry;
903 } else {
904 struct marker_probe_closure *closure;
905 closure = entry->multi;
906 for (i = 0; closure[i].func; i++) {
907 if (closure[i].func == probe &&
908 closure[i].probe_private
909 == probe_private)
910 return entry;
911 }
912 }
913 }
914 }
915 return NULL;
916 }
917
/**
 * marker_probe_unregister_private_data - Disconnect a probe from a marker
 * @probe: probe function
 * @probe_private: probe private data
 *
 * Unregister a probe by providing the registered private data.
 * Only removes the first marker found in hash table.
 * Return 0 on success or error value.
 * We do not need to call a synchronize_sched to make sure the probes have
 * finished running before doing a module unload, because the module unload
 * itself uses stop_machine(), which insures that every preempt disabled section
 * have finished.
 */
int marker_probe_unregister_private_data(marker_probe_func *probe,
		void *probe_private)
{
	struct marker_entry *entry;
	int ret = 0;
	struct marker_probe_closure *old;
	const char *channel = NULL, *name = NULL;

	mutex_lock(&markers_mutex);
	entry = get_marker_from_private_data(probe, probe_private);
	if (!entry) {
		ret = -ENOENT;
		goto end;
	}
//ust//	if (entry->rcu_pending)
//ust//		rcu_barrier_sched();
	old = marker_entry_remove_probe(entry, NULL, probe_private);
	/*
	 * Copy the keys before releasing the mutex: the entry (and the
	 * strings it owns) may disappear while unlocked.
	 * NOTE(review): the kstrdup() results are not NULL-checked; on
	 * allocation failure get_marker(NULL, ...) would be called below —
	 * confirm whether this path should bail out instead.
	 */
	channel = kstrdup(entry->channel, GFP_KERNEL);
	name = kstrdup(entry->name, GFP_KERNEL);
	mutex_unlock(&markers_mutex);

	marker_update_probes();

	mutex_lock(&markers_mutex);
	entry = get_marker(channel, name);
	if (!entry)
		goto end;
//ust//	if (entry->rcu_pending)
//ust//		rcu_barrier_sched();
	entry->oldptr = old;
	entry->rcu_pending = 1;
	/* write rcu_pending before calling the RCU callback */
	smp_wmb();
//ust//	call_rcu_sched(&entry->rcu, free_old_closure);
	synchronize_rcu(); free_old_closure(&entry->rcu);
	/* Ignore busy error message */
	remove_marker(channel, name);
end:
	mutex_unlock(&markers_mutex);
	kfree(channel);
	kfree(name);
	return ret;
}
//ust// EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data);
975
/**
 * marker_get_private_data - Get a marker's probe private data
 * @channel: marker channel
 * @name: marker name
 * @probe: probe to match
 * @num: get the nth matching probe's private data
 *
 * Returns the nth private data pointer (starting from 0) matching, or an
 * ERR_PTR.
 * Returns the private data pointer, or an ERR_PTR.
 * The private data pointer should _only_ be dereferenced if the caller is the
 * owner of the data, or its content could vanish. This is mostly used to
 * confirm that a caller is the owner of a registered probe.
 */
void *marker_get_private_data(const char *channel, const char *name,
		marker_probe_func *probe, int num)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct marker_entry *e;
	size_t channel_len = strlen(channel) + 1;
	size_t name_len = strlen(name) + 1;
	int i;
	u32 hash;

	/* Same hash scheme as get_marker(): exclude the NUL terminators. */
	hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
	head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
	hlist_for_each_entry(e, node, head, hlist) {
		if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
			if (!e->ptype) {
				/* Single mode: only match index 0. */
				if (num == 0 && e->single.func == probe)
					return e->single.probe_private;
			} else {
				struct marker_probe_closure *closure;
				int match = 0;
				closure = e->multi;
				/* Return the num-th closure whose func matches. */
				for (i = 0; closure[i].func; i++) {
					if (closure[i].func != probe)
						continue;
					if (match++ == num)
						return closure[i].probe_private;
				}
			}
			break;
		}
	}
	return ERR_PTR(-ENOENT);
}
//ust// EXPORT_SYMBOL_GPL(marker_get_private_data);
1025
1026 /**
1027 * markers_compact_event_ids - Compact markers event IDs and reassign channels
1028 *
1029 * Called when no channel users are active by the channel infrastructure.
1030 * Called with lock_markers() and channel mutex held.
1031 */
1032 //ust// void markers_compact_event_ids(void)
1033 //ust// {
1034 //ust// struct marker_entry *entry;
1035 //ust// unsigned int i;
1036 //ust// struct hlist_head *head;
1037 //ust// struct hlist_node *node;
1038 //ust// int ret;
1039 //ust//
1040 //ust// for (i = 0; i < MARKER_TABLE_SIZE; i++) {
1041 //ust// head = &marker_table[i];
1042 //ust// hlist_for_each_entry(entry, node, head, hlist) {
1043 //ust// ret = ltt_channels_get_index_from_name(entry->channel);
1044 //ust// WARN_ON(ret < 0);
1045 //ust// entry->channel_id = ret;
1046 //ust// ret = _ltt_channels_get_event_id(entry->channel,
1047 //ust// entry->name);
1048 //ust// WARN_ON(ret < 0);
1049 //ust// entry->event_id = ret;
1050 //ust// }
1051 //ust// }
1052 //ust// }
1053
1054 //ust//#ifdef CONFIG_MODULES
1055
1056 /**
1057 * marker_get_iter_range - Get a next marker iterator given a range.
1058 * @marker: current markers (in), next marker (out)
1059 * @begin: beginning of the range
1060 * @end: end of the range
1061 *
1062 * Returns whether a next marker has been found (1) or not (0).
1063 * Will return the first marker in the range if the input marker is NULL.
1064 */
1065 int marker_get_iter_range(struct marker **marker, struct marker *begin,
1066 struct marker *end)
1067 {
1068 if (!*marker && begin != end) {
1069 *marker = begin;
1070 return 1;
1071 }
1072 if (*marker >= begin && *marker < end)
1073 return 1;
1074 return 0;
1075 }
1076 //ust// EXPORT_SYMBOL_GPL(marker_get_iter_range);
1077
1078 static void marker_get_iter(struct marker_iter *iter)
1079 {
1080 int found = 0;
1081
1082 /* Core kernel markers */
1083 if (!iter->lib) {
1084 /* ust FIXME: how come we cannot disable the following line? we shouldn't need core stuff */
1085 found = marker_get_iter_range(&iter->marker,
1086 __start___markers, __stop___markers);
1087 if (found)
1088 goto end;
1089 }
1090 /* Markers in modules. */
1091 found = lib_get_iter_markers(iter);
1092 end:
1093 if (!found)
1094 marker_iter_reset(iter);
1095 }
1096
/*
 * Begin an iteration over all known markers: positions @iter on the first
 * marker, or leaves iter->marker == NULL when there are none.
 * The iterator should be initialized with marker_iter_reset() first.
 */
void marker_iter_start(struct marker_iter *iter)
{
	marker_get_iter(iter);
}
//ust// EXPORT_SYMBOL_GPL(marker_iter_start);
1102
/*
 * Advance @iter to the next marker, crossing into the next library's
 * marker section when the current one is exhausted.
 */
void marker_iter_next(struct marker_iter *iter)
{
	iter->marker++;
	/*
	 * iter->marker may be invalid because we blindly incremented it.
	 * Make sure it is valid by marshalling on the markers, getting the
	 * markers from following modules if necessary.
	 */
	marker_get_iter(iter);
}
//ust// EXPORT_SYMBOL_GPL(marker_iter_next);
1114
/*
 * End an iteration started with marker_iter_start().  Intentionally a
 * no-op here: no cleanup is currently required, the function is kept for
 * API symmetry.
 */
void marker_iter_stop(struct marker_iter *iter)
{
}
//ust// EXPORT_SYMBOL_GPL(marker_iter_stop);
1119
/*
 * Reset @iter to its initial state: no current library, no current marker.
 * iter->marker == NULL also serves as the end-of-iteration sentinel used
 * by callers of marker_iter_start()/marker_iter_next().
 */
void marker_iter_reset(struct marker_iter *iter)
{
	iter->lib = NULL;
	iter->marker = NULL;
}
//ust// EXPORT_SYMBOL_GPL(marker_iter_reset);
1126
1127 #ifdef CONFIG_MARKERS_USERSPACE
/*
 * Remove and free every user_marker on @head whose userspace state pointer
 * matches @state.
 * must be called with current->user_markers_mutex held
 */
static void free_user_marker(char __user *state, struct hlist_head *head)
{
	struct user_marker *umark;
	struct hlist_node *pos, *n;

	/* _safe variant: nodes are unlinked and freed during the walk. */
	hlist_for_each_entry_safe(umark, pos, n, head, hlist) {
		if (umark->state == state) {
			hlist_del(&umark->hlist);
			kfree(umark);
		}
	}
}
1143
1144 //ust// asmlinkage long sys_marker(char __user *name, char __user *format,
1145 //ust// char __user *state, int reg)
1146 //ust// {
1147 //ust// struct user_marker *umark;
1148 //ust// long len;
1149 //ust// struct marker_entry *entry;
1150 //ust// int ret = 0;
1151 //ust//
1152 //ust// printk(KERN_DEBUG "Program %s %s marker [%p, %p]\n",
1153 //ust// current->comm, reg ? "registers" : "unregisters",
1154 //ust// name, state);
1155 //ust// if (reg) {
1156 //ust// umark = kmalloc(sizeof(struct user_marker), GFP_KERNEL);
1157 //ust// umark->name[MAX_USER_MARKER_NAME_LEN - 1] = '\0';
1158 //ust// umark->format[MAX_USER_MARKER_FORMAT_LEN - 1] = '\0';
1159 //ust// umark->state = state;
1160 //ust// len = strncpy_from_user(umark->name, name,
1161 //ust// MAX_USER_MARKER_NAME_LEN - 1);
1162 //ust// if (len < 0) {
1163 //ust// ret = -EFAULT;
1164 //ust// goto error;
1165 //ust// }
1166 //ust// len = strncpy_from_user(umark->format, format,
1167 //ust// MAX_USER_MARKER_FORMAT_LEN - 1);
1168 //ust// if (len < 0) {
1169 //ust// ret = -EFAULT;
1170 //ust// goto error;
1171 //ust// }
1172 //ust// printk(KERN_DEBUG "Marker name : %s, format : %s", umark->name,
1173 //ust// umark->format);
1174 //ust// mutex_lock(&markers_mutex);
1175 //ust// entry = get_marker("userspace", umark->name);
1176 //ust// if (entry) {
1177 //ust// if (entry->format &&
1178 //ust// strcmp(entry->format, umark->format) != 0) {
1179 //ust// printk(" error, wrong format in process %s",
1180 //ust// current->comm);
1181 //ust// ret = -EPERM;
1182 //ust// goto error_unlock;
1183 //ust// }
1184 //ust// printk(" %s", !!entry->refcount
1185 //ust// ? "enabled" : "disabled");
1186 //ust// if (put_user(!!entry->refcount, state)) {
1187 //ust// ret = -EFAULT;
1188 //ust// goto error_unlock;
1189 //ust// }
1190 //ust// printk("\n");
1191 //ust// } else {
1192 //ust// printk(" disabled\n");
1193 //ust// if (put_user(0, umark->state)) {
1194 //ust// printk(KERN_WARNING
1195 //ust// "Marker in %s caused a fault\n",
1196 //ust// current->comm);
1197 //ust// goto error_unlock;
1198 //ust// }
1199 //ust// }
1200 //ust// mutex_lock(&current->group_leader->user_markers_mutex);
1201 //ust// hlist_add_head(&umark->hlist,
1202 //ust// &current->group_leader->user_markers);
1203 //ust// current->group_leader->user_markers_sequence++;
1204 //ust// mutex_unlock(&current->group_leader->user_markers_mutex);
1205 //ust// mutex_unlock(&markers_mutex);
1206 //ust// } else {
1207 //ust// mutex_lock(&current->group_leader->user_markers_mutex);
1208 //ust// free_user_marker(state,
1209 //ust// &current->group_leader->user_markers);
1210 //ust// current->group_leader->user_markers_sequence++;
1211 //ust// mutex_unlock(&current->group_leader->user_markers_mutex);
1212 //ust// }
1213 //ust// goto end;
1214 //ust// error_unlock:
1215 //ust// mutex_unlock(&markers_mutex);
1216 //ust// error:
1217 //ust// kfree(umark);
1218 //ust// end:
1219 //ust// return ret;
1220 //ust// }
1221 //ust//
1222 //ust// /*
1223 //ust// * Types :
1224 //ust// * string : 0
1225 //ust// */
1226 //ust// asmlinkage long sys_trace(int type, uint16_t id,
1227 //ust// char __user *ubuf)
1228 //ust// {
1229 //ust// long ret = -EPERM;
1230 //ust// char *page;
1231 //ust// int len;
1232 //ust//
1233 //ust// switch (type) {
1234 //ust// case 0: /* String */
1235 //ust// ret = -ENOMEM;
1236 //ust// page = (char *)__get_free_page(GFP_TEMPORARY);
1237 //ust// if (!page)
1238 //ust// goto string_out;
1239 //ust// len = strncpy_from_user(page, ubuf, PAGE_SIZE);
1240 //ust// if (len < 0) {
1241 //ust// ret = -EFAULT;
1242 //ust// goto string_err;
1243 //ust// }
1244 //ust// trace_mark(userspace, string, "string %s", page);
1245 //ust// string_err:
1246 //ust// free_page((unsigned long) page);
1247 //ust// string_out:
1248 //ust// break;
1249 //ust// default:
1250 //ust// break;
1251 //ust// }
1252 //ust// return ret;
1253 //ust// }
1254
1255 //ust// static void marker_update_processes(void)
1256 //ust// {
1257 //ust// struct task_struct *g, *t;
1258 //ust//
1259 //ust// /*
1260 //ust// * markers_mutex is taken to protect the p->user_markers read.
1261 //ust// */
1262 //ust// mutex_lock(&markers_mutex);
1263 //ust// read_lock(&tasklist_lock);
1264 //ust// for_each_process(g) {
1265 //ust// WARN_ON(!thread_group_leader(g));
1266 //ust// if (hlist_empty(&g->user_markers))
1267 //ust// continue;
1268 //ust// if (strcmp(g->comm, "testprog") == 0)
1269 //ust// printk(KERN_DEBUG "set update pending for testprog\n");
1270 //ust// t = g;
1271 //ust// do {
1272 //ust// /* TODO : implement this thread flag in each arch. */
1273 //ust// set_tsk_thread_flag(t, TIF_MARKER_PENDING);
1274 //ust// } while ((t = next_thread(t)) != g);
1275 //ust// }
1276 //ust// read_unlock(&tasklist_lock);
1277 //ust// mutex_unlock(&markers_mutex);
1278 //ust// }
1279
/*
 * Update current process.
 * Note that we have to wait a whole scheduler period before we are sure that
 * every running userspace threads have their markers updated.
 * (synchronize_sched() can be used to insure this).
 *
 * Publishes the enabled/disabled state of every user marker registered by
 * the current thread group to the corresponding userspace state variables
 * (kernel-side code: uses current, put_user; compiled only under
 * CONFIG_MARKERS_USERSPACE).
 */
void marker_update_process(void)
{
	struct user_marker *umark;
	struct hlist_node *pos;
	struct marker_entry *entry;

	/* markers_mutex protects get_marker(); the per-group mutex protects
	 * the user_markers list. */
	mutex_lock(&markers_mutex);
	mutex_lock(&current->group_leader->user_markers_mutex);
	if (strcmp(current->comm, "testprog") == 0)
		printk(KERN_DEBUG "do update pending for testprog\n");
	hlist_for_each_entry(umark, pos,
			&current->group_leader->user_markers, hlist) {
		printk(KERN_DEBUG "Updating marker %s in %s\n",
			umark->name, current->comm);
		entry = get_marker("userspace", umark->name);
		if (entry) {
			/* A registered marker whose format disagrees with the
			 * process's declaration is an error: stop updating. */
			if (entry->format &&
				strcmp(entry->format, umark->format) != 0) {
				printk(KERN_WARNING
					" error, wrong format in process %s\n",
					current->comm);
				break;
			}
			/* State is "enabled iff refcount != 0". */
			if (put_user(!!entry->refcount, umark->state)) {
				printk(KERN_WARNING
					"Marker in %s caused a fault\n",
					current->comm);
				break;
			}
		} else {
			/* Marker not (or no longer) registered: disabled. */
			if (put_user(0, umark->state)) {
				printk(KERN_WARNING
					"Marker in %s caused a fault\n",
					current->comm);
				break;
			}
		}
	}
	clear_thread_flag(TIF_MARKER_PENDING);
	mutex_unlock(&current->group_leader->user_markers_mutex);
	mutex_unlock(&markers_mutex);
}
1328
/*
 * Called at process exit and upon do_execve().
 * We assume that when the leader exits, no more references can be done to the
 * leader structure by the other threads.
 *
 * Frees every user_marker registered by @p's thread group (only when @p is
 * the group leader; non-leader threads share the leader's list).
 */
void exit_user_markers(struct task_struct *p)
{
	struct user_marker *umark;
	struct hlist_node *pos, *n;

	if (thread_group_leader(p)) {
		mutex_lock(&markers_mutex);
		mutex_lock(&p->user_markers_mutex);
		/* _safe variant: nodes are freed during the walk; the head is
		 * re-initialized below instead of unlinking each node. */
		hlist_for_each_entry_safe(umark, pos, n, &p->user_markers,
				hlist)
			kfree(umark);
		INIT_HLIST_HEAD(&p->user_markers);
		p->user_markers_sequence++;
		mutex_unlock(&p->user_markers_mutex);
		mutex_unlock(&markers_mutex);
	}
}
1351
1352 int is_marker_enabled(const char *channel, const char *name)
1353 {
1354 struct marker_entry *entry;
1355
1356 mutex_lock(&markers_mutex);
1357 entry = get_marker(channel, name);
1358 mutex_unlock(&markers_mutex);
1359
1360 return entry && !!entry->refcount;
1361 }
1362 //ust// #endif
1363
1364 int marker_module_notify(struct notifier_block *self,
1365 unsigned long val, void *data)
1366 {
1367 struct module *mod = data;
1368
1369 switch (val) {
1370 case MODULE_STATE_COMING:
1371 marker_update_probe_range(mod->markers,
1372 mod->markers + mod->num_markers);
1373 break;
1374 case MODULE_STATE_GOING:
1375 marker_update_probe_range(mod->markers,
1376 mod->markers + mod->num_markers);
1377 break;
1378 }
1379 return 0;
1380 }
1381
/* Notifier block wiring marker_module_notify() into the module load/unload
 * notification chain (registration is commented out below in the UST port). */
struct notifier_block marker_module_nb = {
	.notifier_call = marker_module_notify,
	.priority = 0,
};
1386
1387 //ust// static int init_markers(void)
1388 //ust// {
1389 //ust// return register_module_notifier(&marker_module_nb);
1390 //ust// }
1391 //ust// __initcall(init_markers);
1392 /* TODO: call marker_module_nb() when a library is linked at runtime (dlopen)? */
1393
1394 #endif /* CONFIG_MODULES */
1395
/*
 * ltt_dump_marker_state - record metadata describing all enabled markers
 * @trace: trace to which the metadata events are written
 *
 * Walks every known marker and, for each one whose state is enabled, emits
 * a core_marker_id metadata event (channel, name, event id, plus the word
 * sizes and alignment needed to decode the trace) and, when the marker has
 * a format string, a core_marker_format event.
 */
void ltt_dump_marker_state(struct ltt_trace_struct *trace)
{
	struct marker_iter iter;
	struct ltt_probe_private_data call_data;
	const char *channel;

	call_data.trace = trace;
	/* NULL serializer: presumably selects the default metadata
	 * serializer -- TODO confirm against the probe implementation. */
	call_data.serializer = NULL;

	marker_iter_reset(&iter);
	marker_iter_start(&iter);
	for (; iter.marker != NULL; marker_iter_next(&iter)) {
		/* Only dump markers that are currently enabled. */
		if (!_imv_read(iter.marker->state))
			continue;
		channel = ltt_channels_get_name_from_index(
				iter.marker->channel_id);
		__trace_mark(0, metadata, core_marker_id,
				&call_data,
				"channel %s name %s event_id %hu "
				"int #1u%zu long #1u%zu pointer #1u%zu "
				"size_t #1u%zu alignment #1u%u",
				channel,
				iter.marker->name,
				iter.marker->event_id,
				sizeof(int), sizeof(long),
				sizeof(void *), sizeof(size_t),
				ltt_get_alignment());
		if (iter.marker->format)
			__trace_mark(0, metadata,
					core_marker_format,
					&call_data,
					"channel %s name %s format %s",
					channel,
					iter.marker->name,
					iter.marker->format);
	}
	marker_iter_stop(&iter);
}
1434 //ust// EXPORT_SYMBOL_GPL(ltt_dump_marker_state);
1435
1436
1437 static LIST_HEAD(libs);
1438
/*
 * lib_get_iter_markers - continue a marker iteration across registered libs
 * @iter: iterator holding the current library and marker position
 *
 * Returns 0 if current not found.
 * Returns 1 if current found.
 */
int lib_get_iter_markers(struct marker_iter *iter)
{
	struct lib *iter_lib;
	int found = 0;

	//ust// mutex_lock(&module_mutex);
	list_for_each_entry(iter_lib, &libs, list) {
		/* Skip libraries already fully visited.  NOTE(review): this
		 * orders the walk by comparing struct lib addresses, which
		 * assumes the list iteration order is stable between calls
		 * -- TODO confirm. */
		if (iter_lib < iter->lib)
			continue;
		else if (iter_lib > iter->lib)
			/* Entering a new library: restart at its first marker. */
			iter->marker = NULL;
		found = marker_get_iter_range(&iter->marker,
			iter_lib->markers_start,
			iter_lib->markers_start + iter_lib->markers_count);
		if (found) {
			iter->lib = iter_lib;
			break;
		}
	}
	//ust// mutex_unlock(&module_mutex);
	return found;
}
1465
1466 void lib_update_markers(void)
1467 {
1468 struct lib *lib;
1469
1470 //ust// mutex_lock(&module_mutex);
1471 list_for_each_entry(lib, &libs, list)
1472 marker_update_probe_range(lib->markers_start,
1473 lib->markers_start + lib->markers_count);
1474 //ust// mutex_unlock(&module_mutex);
1475 }
1476
/* Callback invoked for each marker discovered when a marker section is
 * registered; NULL when unset. */
static void (*new_marker_cb)(struct marker *) = NULL;

/*
 * Install the callback invoked for every marker found in subsequently
 * registered marker sections (see new_markers()).
 */
void marker_set_new_marker_cb(void (*cb)(struct marker *))
{
	new_marker_cb = cb;
}
1483
1484 static void new_markers(struct marker *start, struct marker *end)
1485 {
1486 if(new_marker_cb) {
1487 struct marker *m;
1488 for(m=start; m < end; m++) {
1489 new_marker_cb(m);
1490 }
1491 }
1492 }
1493
1494 int marker_register_lib(struct marker *markers_start, int markers_count)
1495 {
1496 struct lib *pl;
1497
1498 pl = (struct lib *) malloc(sizeof(struct lib));
1499
1500 pl->markers_start = markers_start;
1501 pl->markers_count = markers_count;
1502
1503 /* FIXME: maybe protect this with its own mutex? */
1504 lock_markers();
1505 list_add(&pl->list, &libs);
1506 unlock_markers();
1507
1508 new_markers(markers_start, markers_start + markers_count);
1509
1510 /* FIXME: update just the loaded lib */
1511 lib_update_markers();
1512
1513 DBG("just registered a markers section from %p and having %d markers", markers_start, markers_count);
1514
1515 return 0;
1516 }
1517
/*
 * marker_unregister_lib - remove a previously registered markers section
 *
 * Not implemented yet (see FIXME below); currently always returns 0.
 */
int marker_unregister_lib(struct marker *markers_start, int markers_count)
{
	/*FIXME: implement; but before implementing, marker_register_lib must
	have appropriate locking. */

	return 0;
}
1525
1526 static int initialized = 0;
1527
1528 void __attribute__((constructor)) init_markers(void)
1529 {
1530 if(!initialized) {
1531 marker_register_lib(__start___markers, (((long)__stop___markers)-((long)__start___markers))/sizeof(struct marker));
1532 printf("markers_start: %p, markers_stop: %p\n", __start___markers, __stop___markers);
1533 initialized = 1;
1534 }
1535 }
This page took 0.065282 seconds and 4 git commands to generate.