ust: fix several segfaults, now seems to trace without errors
[ust.git] / libmarkers / marker.c
CommitLineData
68c1021b
PMF
1/*
2 * Copyright (C) 2007 Mathieu Desnoyers
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
59b161cd
PMF
18//ust// #include <linux/module.h>
19//ust// #include <linux/mutex.h>
20//ust// #include <linux/types.h>
21#include "jhash.h"
22#include "list.h"
23#include "rcupdate.h"
24//ust// #include <linux/marker.h>
25#include <errno.h>
26//ust// #include <linux/slab.h>
27//ust// #include <linux/immediate.h>
28//ust// #include <linux/sched.h>
29//ust// #include <linux/uaccess.h>
30//ust// #include <linux/user_marker.h>
31//ust// #include <linux/ltt-tracer.h>
32
33#include "marker.h"
34#include "kernelcompat.h"
35#include "usterr.h"
36#include "channels.h"
37#include "tracercore.h"
68c1021b
PMF
38
39extern struct marker __start___markers[];
40extern struct marker __stop___markers[];
41
42/* Set to 1 to enable marker debug output */
43static const int marker_debug;
44
45/*
46 * markers_mutex nests inside module_mutex. Markers mutex protects the builtin
47 * and module markers and the hash table.
48 */
49static DEFINE_MUTEX(markers_mutex);
50
/*
 * lock_markers - take the global markers mutex.
 *
 * Serializes all modifications to the marker hash table, the per-entry
 * probe closures and the refcounts. Pairs with unlock_markers().
 */
void lock_markers(void)
{
	mutex_lock(&markers_mutex);
}
55
/*
 * unlock_markers - release the global markers mutex taken by lock_markers().
 */
void unlock_markers(void)
{
	mutex_unlock(&markers_mutex);
}
60
61/*
62 * Marker hash table, containing the active markers.
63 * Protected by module_mutex.
64 */
65#define MARKER_HASH_BITS 6
66#define MARKER_TABLE_SIZE (1 << MARKER_HASH_BITS)
67static struct hlist_head marker_table[MARKER_TABLE_SIZE];
68
69/*
70 * Note about RCU :
71 * It is used to make sure every handler has finished using its private data
72 * between two consecutive operation (add or remove) on a given marker. It is
73 * also used to delay the free of multiple probes array until a quiescent state
74 * is reached.
75 * marker entries modifications are protected by the markers_mutex.
76 */
77struct marker_entry {
78 struct hlist_node hlist;
79 char *format;
80 char *name;
81 /* Probe wrapper */
82 void (*call)(const struct marker *mdata, void *call_private, ...);
83 struct marker_probe_closure single;
84 struct marker_probe_closure *multi;
85 int refcount; /* Number of times armed. 0 if disarmed. */
86 struct rcu_head rcu;
87 void *oldptr;
88 int rcu_pending;
89 u16 channel_id;
90 u16 event_id;
91 unsigned char ptype:1;
92 unsigned char format_allocated:1;
93 char channel[0]; /* Contains channel'\0'name'\0'format'\0' */
94};
95
96#ifdef CONFIG_MARKERS_USERSPACE
97static void marker_update_processes(void);
98#else
99static void marker_update_processes(void)
100{
101}
102#endif
103
/**
 * __mark_empty_function - Empty probe callback
 * @mdata: marker data
 * @probe_private: probe private data
 * @call_private: call site private data
 * @fmt: format string
 * @...: variable argument list
 *
 * Empty callback provided as a probe to the markers. By providing this to a
 * disabled marker, we make sure the execution flow is always valid even
 * though the function pointer change and the marker enabling are two distinct
 * operations that modifies the execution flow of preemptible code.
 *
 * Intentionally does nothing: it is also used as the sentinel value meaning
 * "no probe connected" (see e.g. remove_marker() and set_marker()).
 */
notrace void __mark_empty_function(const struct marker *mdata,
	void *probe_private, void *call_private, const char *fmt, va_list *args)
{
}
59b161cd 121//ust// EXPORT_SYMBOL_GPL(__mark_empty_function);
68c1021b
PMF
122
/*
 * marker_probe_cb Callback that prepares the variable argument list for probes.
 * @mdata: pointer of type struct marker
 * @call_private: caller site private data
 * @...: Variable argument list.
 *
 * Called from the instrumented site when the marker is armed. Dispatches
 * either to the single connected probe (fast path, ptype == 0) or to the
 * NULL-terminated multi-probe closure array (ptype == 1).
 *
 * Since we do not use "typical" pointer based RCU in the 1 argument case, we
 * need to put a full smp_rmb() in this branch. This is why we do not use
 * rcu_dereference() for the pointer read.
 */
notrace void marker_probe_cb(const struct marker *mdata,
		void *call_private, ...)
{
	va_list args;
	char ptype;

	/*
	 * rcu_read_lock_sched does two things : disabling preemption to make
	 * sure the teardown of the callbacks can be done correctly when they
	 * are in modules and they insure RCU read coherency.
	 */
//ust//	rcu_read_lock_sched_notrace();
	ptype = mdata->ptype;
	if (likely(!ptype)) {
		marker_probe_func *func;
		/* Must read the ptype before ptr. They are not data dependant,
		 * so we put an explicit smp_rmb() here. */
		smp_rmb();
		func = mdata->single.func;
		/* Must read the ptr before private data. They are not data
		 * dependant, so we put an explicit smp_rmb() here. */
		smp_rmb();
		va_start(args, call_private);
		func(mdata, mdata->single.probe_private, call_private,
			mdata->format, &args);
		va_end(args);
	} else {
		struct marker_probe_closure *multi;
		int i;
		/*
		 * Read mdata->ptype before mdata->multi.
		 */
		smp_rmb();
		multi = mdata->multi;
		/*
		 * multi points to an array, therefore accessing the array
		 * depends on reading multi. However, even in this case,
		 * we must insure that the pointer is read _before_ the array
		 * data. Same as rcu_dereference, but we need a full smp_rmb()
		 * in the fast path, so put the explicit barrier here.
		 */
		smp_read_barrier_depends();
		/* Closure array is terminated by a NULL func pointer; a fresh
		 * va_list is (re)built for every probe invocation. */
		for (i = 0; multi[i].func; i++) {
			va_start(args, call_private);
			multi[i].func(mdata, multi[i].probe_private,
				call_private, mdata->format, &args);
			va_end(args);
		}
	}
//ust//	rcu_read_unlock_sched_notrace();
}
59b161cd 184//ust// EXPORT_SYMBOL_GPL(marker_probe_cb);
68c1021b
PMF
185
/*
 * marker_probe_cb Callback that does not prepare the variable argument list.
 * @mdata: pointer of type struct marker
 * @call_private: caller site private data
 * @...: Variable argument list.
 *
 * Should be connected to markers "MARK_NOARGS".
 *
 * The va_list handed to the probes is deliberately left uninitialized:
 * probes registered on a MARK_NOARGS marker must never dereference it.
 */
static notrace void marker_probe_cb_noarg(const struct marker *mdata,
		void *call_private, ...)
{
	va_list args;	/* not initialized */
	char ptype;

//ust//	rcu_read_lock_sched_notrace();
	ptype = mdata->ptype;
	if (likely(!ptype)) {
		marker_probe_func *func;
		/* Must read the ptype before ptr. They are not data dependant,
		 * so we put an explicit smp_rmb() here. */
		smp_rmb();
		func = mdata->single.func;
		/* Must read the ptr before private data. They are not data
		 * dependant, so we put an explicit smp_rmb() here. */
		smp_rmb();
		func(mdata, mdata->single.probe_private, call_private,
			mdata->format, &args);
	} else {
		struct marker_probe_closure *multi;
		int i;
		/*
		 * Read mdata->ptype before mdata->multi.
		 */
		smp_rmb();
		multi = mdata->multi;
		/*
		 * multi points to an array, therefore accessing the array
		 * depends on reading multi. However, even in this case,
		 * we must insure that the pointer is read _before_ the array
		 * data. Same as rcu_dereference, but we need a full smp_rmb()
		 * in the fast path, so put the explicit barrier here.
		 */
		smp_read_barrier_depends();
		for (i = 0; multi[i].func; i++)
			multi[i].func(mdata, multi[i].probe_private,
				call_private, mdata->format, &args);
	}
//ust//	rcu_read_unlock_sched_notrace();
}
235
236static void free_old_closure(struct rcu_head *head)
237{
238 struct marker_entry *entry = container_of(head,
239 struct marker_entry, rcu);
240 kfree(entry->oldptr);
241 /* Make sure we free the data before setting the pending flag to 0 */
242 smp_wmb();
243 entry->rcu_pending = 0;
244}
245
246static void debug_print_probes(struct marker_entry *entry)
247{
248 int i;
249
250 if (!marker_debug)
251 return;
252
253 if (!entry->ptype) {
254 printk(KERN_DEBUG "Single probe : %p %p\n",
255 entry->single.func,
256 entry->single.probe_private);
257 } else {
258 for (i = 0; entry->multi[i].func; i++)
259 printk(KERN_DEBUG "Multi probe %d : %p %p\n", i,
260 entry->multi[i].func,
261 entry->multi[i].probe_private);
262 }
263}
264
/*
 * marker_entry_add_probe - connect one more probe to a marker entry.
 * @entry: marker entry to modify (markers_mutex must be held).
 * @probe: probe callback to add.
 * @probe_private: private data passed back to @probe.
 *
 * Handles the 0 -> 1 (stays single), 1 -> 2 (single becomes array) and
 * N -> N+1 (array grows) transitions. Returns the old closure array that
 * must be freed after an RCU grace period, NULL when there is nothing to
 * free, or ERR_PTR(-EBUSY/-ENOMEM) on error.
 */
static struct marker_probe_closure *
marker_entry_add_probe(struct marker_entry *entry,
		marker_probe_func *probe, void *probe_private)
{
	int nr_probes = 0;
	struct marker_probe_closure *old, *new;

	WARN_ON(!probe);

	debug_print_probes(entry);
	old = entry->multi;
	if (!entry->ptype) {
		/* Refuse duplicate (probe, private) registrations. */
		if (entry->single.func == probe &&
				entry->single.probe_private == probe_private)
			return ERR_PTR(-EBUSY);
		if (entry->single.func == __mark_empty_function) {
			/* 0 -> 1 probes */
			entry->single.func = probe;
			entry->single.probe_private = probe_private;
			entry->refcount = 1;
			entry->ptype = 0;
			debug_print_probes(entry);
			return NULL;
		} else {
			/* 1 -> 2 probes */
			nr_probes = 1;
			old = NULL;
		}
	} else {
		/* (N -> N+1), (N != 0, 1) probes */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++)
			if (old[nr_probes].func == probe
					&& old[nr_probes].probe_private
					== probe_private)
				return ERR_PTR(-EBUSY);
	}
	/* + 2 : one for new probe, one for NULL func */
	new = kzalloc((nr_probes + 2) * sizeof(struct marker_probe_closure),
			GFP_KERNEL);
	if (new == NULL)
		return ERR_PTR(-ENOMEM);
	if (!old)
		/* 1 -> 2: the former single probe becomes element 0. */
		new[0] = entry->single;
	else
		memcpy(new, old,
			nr_probes * sizeof(struct marker_probe_closure));
	new[nr_probes].func = probe;
	new[nr_probes].probe_private = probe_private;
	entry->refcount = nr_probes + 1;
	entry->multi = new;
	entry->ptype = 1;
	debug_print_probes(entry);
	return old;
}
319
/*
 * marker_entry_remove_probe - disconnect probe(s) from a marker entry.
 * @entry: marker entry to modify (markers_mutex must be held).
 * @probe: probe to remove, or NULL to match on @probe_private only.
 * @probe_private: private data that must match for removal.
 *
 * Handles the 1 -> 0, N -> 0, N -> 1 and N -> M transitions. Returns the
 * old closure array to free after an RCU grace period, NULL when nothing
 * needs deferred freeing, or ERR_PTR(-ENOMEM).
 */
static struct marker_probe_closure *
marker_entry_remove_probe(struct marker_entry *entry,
		marker_probe_func *probe, void *probe_private)
{
	int nr_probes = 0, nr_del = 0, i;
	struct marker_probe_closure *old, *new;

	old = entry->multi;

	debug_print_probes(entry);
	if (!entry->ptype) {
		/* 0 -> N is an error */
		WARN_ON(entry->single.func == __mark_empty_function);
		/* 1 -> 0 probes */
		WARN_ON(probe && entry->single.func != probe);
		WARN_ON(entry->single.probe_private != probe_private);
		entry->single.func = __mark_empty_function;
		entry->refcount = 0;
		entry->ptype = 0;
		debug_print_probes(entry);
		return NULL;
	} else {
		/* (N -> M), (N > 1, M >= 0) probes */
		for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
			if ((!probe || old[nr_probes].func == probe)
					&& old[nr_probes].probe_private
					== probe_private)
				nr_del++;
		}
	}

	if (nr_probes - nr_del == 0) {
		/* N -> 0, (N > 1) */
		entry->single.func = __mark_empty_function;
		entry->refcount = 0;
		entry->ptype = 0;
	} else if (nr_probes - nr_del == 1) {
		/* N -> 1, (N > 1) : the sole survivor becomes the single probe */
		for (i = 0; old[i].func; i++)
			if ((probe && old[i].func != probe) ||
					old[i].probe_private != probe_private)
				entry->single = old[i];
		entry->refcount = 1;
		entry->ptype = 0;
	} else {
		int j = 0;
		/* N -> M, (N > 1, M > 1) */
		/* + 1 for NULL */
		new = kzalloc((nr_probes - nr_del + 1)
			* sizeof(struct marker_probe_closure), GFP_KERNEL);
		if (new == NULL)
			return ERR_PTR(-ENOMEM);
		/* Copy over every closure that does not match the removal key. */
		for (i = 0; old[i].func; i++)
			if ((probe && old[i].func != probe) ||
					old[i].probe_private != probe_private)
				new[j++] = old[i];
		entry->refcount = nr_probes - nr_del;
		entry->ptype = 1;
		entry->multi = new;
	}
	debug_print_probes(entry);
	return old;
}
383
384/*
385 * Get marker if the marker is present in the marker hash table.
386 * Must be called with markers_mutex held.
387 * Returns NULL if not present.
388 */
389static struct marker_entry *get_marker(const char *channel, const char *name)
390{
391 struct hlist_head *head;
392 struct hlist_node *node;
393 struct marker_entry *e;
394 size_t channel_len = strlen(channel) + 1;
395 size_t name_len = strlen(name) + 1;
396 u32 hash;
397
398 hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
399 head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
400 hlist_for_each_entry(e, node, head, hlist) {
401 if (!strcmp(channel, e->channel) && !strcmp(name, e->name))
402 return e;
403 }
404 return NULL;
405}
406
407/*
408 * Add the marker to the marker hash table. Must be called with markers_mutex
409 * held.
410 */
411static struct marker_entry *add_marker(const char *channel, const char *name,
412 const char *format)
413{
414 struct hlist_head *head;
415 struct hlist_node *node;
416 struct marker_entry *e;
417 size_t channel_len = strlen(channel) + 1;
418 size_t name_len = strlen(name) + 1;
419 size_t format_len = 0;
420 u32 hash;
421
422 hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
423 if (format)
424 format_len = strlen(format) + 1;
425 head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
426 hlist_for_each_entry(e, node, head, hlist) {
427 if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
428 printk(KERN_NOTICE
429 "Marker %s.%s busy\n", channel, name);
430 return ERR_PTR(-EBUSY); /* Already there */
431 }
432 }
433 /*
434 * Using kmalloc here to allocate a variable length element. Could
435 * cause some memory fragmentation if overused.
436 */
437 e = kmalloc(sizeof(struct marker_entry)
438 + channel_len + name_len + format_len,
439 GFP_KERNEL);
440 if (!e)
441 return ERR_PTR(-ENOMEM);
442 memcpy(e->channel, channel, channel_len);
443 e->name = &e->channel[channel_len];
444 memcpy(e->name, name, name_len);
445 if (format) {
446 e->format = &e->name[channel_len + name_len];
447 memcpy(e->format, format, format_len);
448 if (strcmp(e->format, MARK_NOARGS) == 0)
449 e->call = marker_probe_cb_noarg;
450 else
451 e->call = marker_probe_cb;
452 trace_mark(metadata, core_marker_format,
453 "channel %s name %s format %s",
454 e->channel, e->name, e->format);
455 } else {
456 e->format = NULL;
457 e->call = marker_probe_cb;
458 }
459 e->single.func = __mark_empty_function;
460 e->single.probe_private = NULL;
461 e->multi = NULL;
462 e->ptype = 0;
463 e->format_allocated = 0;
464 e->refcount = 0;
465 e->rcu_pending = 0;
466 hlist_add_head(&e->hlist, head);
467 return e;
468}
469
470/*
471 * Remove the marker from the marker hash table. Must be called with mutex_lock
472 * held.
473 */
474static int remove_marker(const char *channel, const char *name)
475{
476 struct hlist_head *head;
477 struct hlist_node *node;
478 struct marker_entry *e;
479 int found = 0;
480 size_t channel_len = strlen(channel) + 1;
481 size_t name_len = strlen(name) + 1;
482 u32 hash;
483 int ret;
484
485 hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
486 head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
487 hlist_for_each_entry(e, node, head, hlist) {
488 if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
489 found = 1;
490 break;
491 }
492 }
493 if (!found)
494 return -ENOENT;
495 if (e->single.func != __mark_empty_function)
496 return -EBUSY;
497 hlist_del(&e->hlist);
498 if (e->format_allocated)
499 kfree(e->format);
500 ret = ltt_channels_unregister(e->channel);
501 WARN_ON(ret);
502 /* Make sure the call_rcu has been executed */
503 if (e->rcu_pending)
504 rcu_barrier_sched();
505 kfree(e);
506 return 0;
507}
508
509/*
510 * Set the mark_entry format to the format found in the element.
511 */
512static int marker_set_format(struct marker_entry *entry, const char *format)
513{
514 entry->format = kstrdup(format, GFP_KERNEL);
515 if (!entry->format)
516 return -ENOMEM;
517 entry->format_allocated = 1;
518
519 trace_mark(metadata, core_marker_format,
520 "channel %s name %s format %s",
521 entry->channel, entry->name, entry->format);
522 return 0;
523}
524
/*
 * Sets the probe callback corresponding to one marker.
 * @entry: marker entry holding the authoritative probe state.
 * @elem: the instrumented-site marker to update.
 * @active: non-zero to arm the marker, zero to leave it disabled.
 *
 * Copies the dispatch callback, channel/event ids and probe closure from
 * the entry into the call-site marker, using write barriers so a concurrent
 * marker_probe_cb() never observes a ptype that disagrees with the probe
 * pointers it guards. Returns 0 or a negative errno.
 */
static int set_marker(struct marker_entry *entry, struct marker *elem,
		int active)
{
	int ret = 0;
	WARN_ON(strcmp(entry->name, elem->name) != 0);

	if (entry->format) {
		/* A registered format must match the one compiled in at the site. */
		if (strcmp(entry->format, elem->format) != 0) {
			printk(KERN_NOTICE
				"Format mismatch for probe %s "
				"(%s), marker (%s)\n",
				entry->name,
				entry->format,
				elem->format);
			return -EPERM;
		}
	} else {
		/* First sighting: adopt the call site's format string. */
		ret = marker_set_format(entry, elem->format);
		if (ret)
			return ret;
	}

	/*
	 * probe_cb setup (statically known) is done here. It is
	 * asynchronous with the rest of execution, therefore we only
	 * pass from a "safe" callback (with argument) to an "unsafe"
	 * callback (does not set arguments).
	 */
	elem->call = entry->call;
	elem->channel_id = entry->channel_id;
	elem->event_id = entry->event_id;
	/*
	 * Sanity check :
	 * We only update the single probe private data when the ptr is
	 * set to a _non_ single probe! (0 -> 1 and N -> 1, N != 1)
	 */
	WARN_ON(elem->single.func != __mark_empty_function
		&& elem->single.probe_private != entry->single.probe_private
		&& !elem->ptype);
	elem->single.probe_private = entry->single.probe_private;
	/*
	 * Make sure the private data is valid when we update the
	 * single probe ptr.
	 */
	smp_wmb();
	elem->single.func = entry->single.func;
	/*
	 * We also make sure that the new probe callbacks array is consistent
	 * before setting a pointer to it.
	 */
	rcu_assign_pointer(elem->multi, entry->multi);
	/*
	 * Update the function or multi probe array pointer before setting the
	 * ptype.
	 */
	smp_wmb();
	elem->ptype = entry->ptype;

//ust//	if (elem->tp_name && (active ^ _imv_read(elem->state))) {
//ust//		WARN_ON(!elem->tp_cb);
//ust//		/*
//ust//		 * It is ok to directly call the probe registration because type
//ust//		 * checking has been done in the __trace_mark_tp() macro.
//ust//		 */
//ust//
//ust//		if (active) {
//ust//			/*
//ust//			 * try_module_get should always succeed because we hold
//ust//			 * markers_mutex to get the tp_cb address.
//ust//			 */
//ust//			ret = try_module_get(__module_text_address(
//ust//				(unsigned long)elem->tp_cb));
//ust//			BUG_ON(!ret);
//ust//			ret = tracepoint_probe_register_noupdate(
//ust//				elem->tp_name,
//ust//				elem->tp_cb);
//ust//		} else {
//ust//			ret = tracepoint_probe_unregister_noupdate(
//ust//				elem->tp_name,
//ust//				elem->tp_cb);
//ust//			/*
//ust//			 * tracepoint_probe_update_all() must be called
//ust//			 * before the module containing tp_cb is unloaded.
//ust//			 */
//ust//			module_put(__module_text_address(
//ust//				(unsigned long)elem->tp_cb));
//ust//		}
//ust//	}
	/* Arm/disarm last: readers gate on the (immediate-value) state. */
	elem->state__imv = active;

	return ret;
}
620
621/*
622 * Disable a marker and its probe callback.
623 * Note: only waiting an RCU period after setting elem->call to the empty
624 * function insures that the original callback is not used anymore. This insured
625 * by rcu_read_lock_sched around the call site.
626 */
627static void disable_marker(struct marker *elem)
628{
629 int ret;
630
631 /* leave "call" as is. It is known statically. */
59b161cd
PMF
632//ust// if (elem->tp_name && _imv_read(elem->state)) {
633//ust// WARN_ON(!elem->tp_cb);
634//ust// /*
635//ust// * It is ok to directly call the probe registration because type
636//ust// * checking has been done in the __trace_mark_tp() macro.
637//ust// */
638//ust// ret = tracepoint_probe_unregister_noupdate(elem->tp_name,
639//ust// elem->tp_cb);
640//ust// WARN_ON(ret);
641//ust// /*
642//ust// * tracepoint_probe_update_all() must be called
643//ust// * before the module containing tp_cb is unloaded.
644//ust// */
645//ust// module_put(__module_text_address((unsigned long)elem->tp_cb));
646//ust// }
68c1021b
PMF
647 elem->state__imv = 0;
648 elem->single.func = __mark_empty_function;
649 /* Update the function before setting the ptype */
650 smp_wmb();
651 elem->ptype = 0; /* single probe */
652 /*
653 * Leave the private data and channel_id/event_id there, because removal
654 * is racy and should be done only after an RCU period. These are never
655 * used until the next initialization anyway.
656 */
657}
658
659/**
660 * marker_update_probe_range - Update a probe range
661 * @begin: beginning of the range
662 * @end: end of the range
663 *
664 * Updates the probe callback corresponding to a range of markers.
665 */
666void marker_update_probe_range(struct marker *begin,
667 struct marker *end)
668{
669 struct marker *iter;
670 struct marker_entry *mark_entry;
671
672 mutex_lock(&markers_mutex);
673 for (iter = begin; iter < end; iter++) {
674 mark_entry = get_marker(iter->channel, iter->name);
675 if (mark_entry) {
676 set_marker(mark_entry, iter, !!mark_entry->refcount);
677 /*
678 * ignore error, continue
679 */
680 } else {
681 disable_marker(iter);
682 }
683 }
684 mutex_unlock(&markers_mutex);
685}
686
/*
 * Update probes, removing the faulty probes.
 *
 * Internal callback only changed before the first probe is connected to it.
 * Single probe private data can only be changed on 0 -> 1 and 2 -> 1
 * transitions. All other transitions will leave the old private data valid.
 * This makes the non-atomicity of the callback/private data updates valid.
 *
 * "special case" updates :
 * 0 -> 1 callback
 * 1 -> 0 callback
 * 1 -> 2 callbacks
 * 2 -> 1 callbacks
 * Other updates all behave the same, just like the 2 -> 3 or 3 -> 2 updates.
 * Site effect : marker_set_format may delete the marker entry (creating a
 * replacement).
 */
static void marker_update_probes(void)
{
	/* Core kernel markers */
	marker_update_probe_range(__start___markers, __stop___markers);
	/* Markers in modules. */
//ust//	module_update_markers();
//ust//	tracepoint_probe_update_all();
	/* Update immediate values */
	core_imv_update();
//ust//	module_imv_update();
	/* Propagate the new marker state to traced processes. */
	marker_update_processes();
}
716
/**
 * marker_probe_register -  Connect a probe to a marker
 * @channel: marker channel
 * @name: marker name
 * @format: format string
 * @probe: probe handler
 * @probe_private: probe private data
 *
 * private data must be a valid allocated memory address, or NULL.
 * Returns 0 if ok, error value on error.
 * The probe address must at least be aligned on the architecture pointer size.
 *
 * On first registration for a (channel, name) pair this also creates the
 * marker entry and registers the channel; the error ladder at the bottom
 * unwinds those two steps in reverse order. The old probe closure, if any,
 * is handed to call_rcu_sched() for deferred freeing only after
 * marker_update_probes() has disconnected every call site from it.
 */
int marker_probe_register(const char *channel, const char *name,
			  const char *format, marker_probe_func *probe,
			  void *probe_private)
{
	struct marker_entry *entry;
	int ret = 0, ret_err;
	struct marker_probe_closure *old;
	int first_probe = 0;

	mutex_lock(&markers_mutex);
	entry = get_marker(channel, name);
	if (!entry) {
		first_probe = 1;
		entry = add_marker(channel, name, format);
		if (IS_ERR(entry))
			ret = PTR_ERR(entry);
		if (ret)
			goto end;
		ret = ltt_channels_register(channel);
		if (ret)
			goto error_remove_marker;
		ret = ltt_channels_get_index_from_name(channel);
		if (ret < 0)
			goto error_unregister_channel;
		entry->channel_id = ret;
		ret = ltt_channels_get_event_id(channel, name);
		if (ret < 0)
			goto error_unregister_channel;
		entry->event_id = ret;
		ret = 0;
		/* Describe the new event id and ABI sizes in the metadata channel. */
		trace_mark(metadata, core_marker_id,
			   "channel %s name %s event_id %hu "
			   "int #1u%zu long #1u%zu pointer #1u%zu "
			   "size_t #1u%zu alignment #1u%u",
			   channel, name, entry->event_id,
			   sizeof(int), sizeof(long), sizeof(void *),
			   sizeof(size_t), ltt_get_alignment());
	} else if (format) {
		if (!entry->format)
			ret = marker_set_format(entry, format);
		else if (strcmp(entry->format, format))
			ret = -EPERM;
		if (ret)
			goto end;
	}

	/*
	 * If we detect that a call_rcu is pending for this marker,
	 * make sure it's executed now.
	 */
	if (entry->rcu_pending)
		rcu_barrier_sched();
	old = marker_entry_add_probe(entry, probe, probe_private);
	if (IS_ERR(old)) {
		ret = PTR_ERR(old);
		if (first_probe)
			goto error_unregister_channel;
		else
			goto end;
	}
	mutex_unlock(&markers_mutex);

	/* Arm/refresh all call sites outside the mutex. */
	marker_update_probes();

	/* Re-lookup: the entry may have been removed while unlocked. */
	mutex_lock(&markers_mutex);
	entry = get_marker(channel, name);
	if (!entry)
		goto end;
	if (entry->rcu_pending)
		rcu_barrier_sched();
	entry->oldptr = old;
	entry->rcu_pending = 1;
	/* write rcu_pending before calling the RCU callback */
	smp_wmb();
	call_rcu_sched(&entry->rcu, free_old_closure);
	goto end;

error_unregister_channel:
	ret_err = ltt_channels_unregister(channel);
	WARN_ON(ret_err);
error_remove_marker:
	ret_err = remove_marker(channel, name);
	WARN_ON(ret_err);
end:
	mutex_unlock(&markers_mutex);
	return ret;
}
59b161cd 816//ust// EXPORT_SYMBOL_GPL(marker_probe_register);
68c1021b
PMF
817
/**
 * marker_probe_unregister -  Disconnect a probe from a marker
 * @channel: marker channel
 * @name: marker name
 * @probe: probe function pointer
 * @probe_private: probe private data
 *
 * Returns the private data given to marker_probe_register, or an ERR_PTR().
 * We do not need to call a synchronize_sched to make sure the probes have
 * finished running before doing a module unload, because the module unload
 * itself uses stop_machine(), which insures that every preempt disabled section
 * have finished.
 *
 * The displaced closure array is freed through call_rcu_sched() only after
 * marker_update_probes() has run, so no armed call site still uses it.
 */
int marker_probe_unregister(const char *channel, const char *name,
			    marker_probe_func *probe, void *probe_private)
{
	struct marker_entry *entry;
	struct marker_probe_closure *old;
	int ret = -ENOENT;

	mutex_lock(&markers_mutex);
	entry = get_marker(channel, name);
	if (!entry)
		goto end;
	/* Flush any previously deferred free before replacing the closure. */
	if (entry->rcu_pending)
		rcu_barrier_sched();
	old = marker_entry_remove_probe(entry, probe, probe_private);
	mutex_unlock(&markers_mutex);

	/* Disarm/refresh all call sites outside the mutex. */
	marker_update_probes();

	/* Re-lookup: the entry may have vanished while unlocked. */
	mutex_lock(&markers_mutex);
	entry = get_marker(channel, name);
	if (!entry)
		goto end;
	if (entry->rcu_pending)
		rcu_barrier_sched();
	entry->oldptr = old;
	entry->rcu_pending = 1;
	/* write rcu_pending before calling the RCU callback */
	smp_wmb();
	call_rcu_sched(&entry->rcu, free_old_closure);
	remove_marker(channel, name);	/* Ignore busy error message */
	ret = 0;
end:
	mutex_unlock(&markers_mutex);
	return ret;
}
59b161cd 866//ust// EXPORT_SYMBOL_GPL(marker_probe_unregister);
68c1021b
PMF
867
868static struct marker_entry *
869get_marker_from_private_data(marker_probe_func *probe, void *probe_private)
870{
871 struct marker_entry *entry;
872 unsigned int i;
873 struct hlist_head *head;
874 struct hlist_node *node;
875
876 for (i = 0; i < MARKER_TABLE_SIZE; i++) {
877 head = &marker_table[i];
878 hlist_for_each_entry(entry, node, head, hlist) {
879 if (!entry->ptype) {
880 if (entry->single.func == probe
881 && entry->single.probe_private
882 == probe_private)
883 return entry;
884 } else {
885 struct marker_probe_closure *closure;
886 closure = entry->multi;
887 for (i = 0; closure[i].func; i++) {
888 if (closure[i].func == probe &&
889 closure[i].probe_private
890 == probe_private)
891 return entry;
892 }
893 }
894 }
895 }
896 return NULL;
897}
898
899/**
900 * marker_probe_unregister_private_data - Disconnect a probe from a marker
901 * @probe: probe function
902 * @probe_private: probe private data
903 *
904 * Unregister a probe by providing the registered private data.
905 * Only removes the first marker found in hash table.
906 * Return 0 on success or error value.
907 * We do not need to call a synchronize_sched to make sure the probes have
908 * finished running before doing a module unload, because the module unload
909 * itself uses stop_machine(), which insures that every preempt disabled section
910 * have finished.
911 */
912int marker_probe_unregister_private_data(marker_probe_func *probe,
913 void *probe_private)
914{
915 struct marker_entry *entry;
916 int ret = 0;
917 struct marker_probe_closure *old;
918 const char *channel = NULL, *name = NULL;
919
920 mutex_lock(&markers_mutex);
921 entry = get_marker_from_private_data(probe, probe_private);
922 if (!entry) {
923 ret = -ENOENT;
924 goto end;
925 }
926 if (entry->rcu_pending)
927 rcu_barrier_sched();
928 old = marker_entry_remove_probe(entry, NULL, probe_private);
929 channel = kstrdup(entry->channel, GFP_KERNEL);
930 name = kstrdup(entry->name, GFP_KERNEL);
931 mutex_unlock(&markers_mutex);
932
933 marker_update_probes();
934
935 mutex_lock(&markers_mutex);
936 entry = get_marker(channel, name);
937 if (!entry)
938 goto end;
939 if (entry->rcu_pending)
940 rcu_barrier_sched();
941 entry->oldptr = old;
942 entry->rcu_pending = 1;
943 /* write rcu_pending before calling the RCU callback */
944 smp_wmb();
945 call_rcu_sched(&entry->rcu, free_old_closure);
946 /* Ignore busy error message */
947 remove_marker(channel, name);
948end:
949 mutex_unlock(&markers_mutex);
950 kfree(channel);
951 kfree(name);
952 return ret;
953}
59b161cd 954//ust// EXPORT_SYMBOL_GPL(marker_probe_unregister_private_data);
68c1021b
PMF
955
956/**
957 * marker_get_private_data - Get a marker's probe private data
958 * @channel: marker channel
959 * @name: marker name
960 * @probe: probe to match
961 * @num: get the nth matching probe's private data
962 *
963 * Returns the nth private data pointer (starting from 0) matching, or an
964 * ERR_PTR.
965 * Returns the private data pointer, or an ERR_PTR.
966 * The private data pointer should _only_ be dereferenced if the caller is the
967 * owner of the data, or its content could vanish. This is mostly used to
968 * confirm that a caller is the owner of a registered probe.
969 */
970void *marker_get_private_data(const char *channel, const char *name,
971 marker_probe_func *probe, int num)
972{
973 struct hlist_head *head;
974 struct hlist_node *node;
975 struct marker_entry *e;
976 size_t channel_len = strlen(channel) + 1;
977 size_t name_len = strlen(name) + 1;
978 int i;
979 u32 hash;
980
981 hash = jhash(channel, channel_len-1, 0) ^ jhash(name, name_len-1, 0);
982 head = &marker_table[hash & ((1 << MARKER_HASH_BITS)-1)];
983 hlist_for_each_entry(e, node, head, hlist) {
984 if (!strcmp(channel, e->channel) && !strcmp(name, e->name)) {
985 if (!e->ptype) {
986 if (num == 0 && e->single.func == probe)
987 return e->single.probe_private;
988 } else {
989 struct marker_probe_closure *closure;
990 int match = 0;
991 closure = e->multi;
992 for (i = 0; closure[i].func; i++) {
993 if (closure[i].func != probe)
994 continue;
995 if (match++ == num)
996 return closure[i].probe_private;
997 }
998 }
999 break;
1000 }
1001 }
1002 return ERR_PTR(-ENOENT);
1003}
59b161cd 1004//ust// EXPORT_SYMBOL_GPL(marker_get_private_data);
68c1021b
PMF
1005
1006/**
1007 * markers_compact_event_ids - Compact markers event IDs and reassign channels
1008 *
1009 * Called when no channel users are active by the channel infrastructure.
1010 * Called with lock_markers() and channel mutex held.
1011 */
59b161cd
PMF
1012//ust// void markers_compact_event_ids(void)
1013//ust// {
1014//ust// struct marker_entry *entry;
1015//ust// unsigned int i;
1016//ust// struct hlist_head *head;
1017//ust// struct hlist_node *node;
1018//ust// int ret;
1019//ust//
1020//ust// for (i = 0; i < MARKER_TABLE_SIZE; i++) {
1021//ust// head = &marker_table[i];
1022//ust// hlist_for_each_entry(entry, node, head, hlist) {
1023//ust// ret = ltt_channels_get_index_from_name(entry->channel);
1024//ust// WARN_ON(ret < 0);
1025//ust// entry->channel_id = ret;
1026//ust// ret = _ltt_channels_get_event_id(entry->channel,
1027//ust// entry->name);
1028//ust// WARN_ON(ret < 0);
1029//ust// entry->event_id = ret;
1030//ust// }
1031//ust// }
1032//ust// }
68c1021b
PMF
1033
1034#ifdef CONFIG_MODULES
1035
1036/**
1037 * marker_get_iter_range - Get a next marker iterator given a range.
1038 * @marker: current markers (in), next marker (out)
1039 * @begin: beginning of the range
1040 * @end: end of the range
1041 *
1042 * Returns whether a next marker has been found (1) or not (0).
1043 * Will return the first marker in the range if the input marker is NULL.
1044 */
int marker_get_iter_range(struct marker **marker, struct marker *begin,
	struct marker *end)
{
	struct marker *cur = *marker;

	if (!cur) {
		/* No current position: start at the range's first marker. */
		if (begin == end)
			return 0;
		*marker = begin;
		return 1;
	}
	/* Otherwise the current position must lie inside [begin, end). */
	return (cur >= begin && cur < end) ? 1 : 0;
}
59b161cd 1056//ust// EXPORT_SYMBOL_GPL(marker_get_iter_range);
68c1021b
PMF
1057
1058static void marker_get_iter(struct marker_iter *iter)
1059{
1060 int found = 0;
1061
1062 /* Core kernel markers */
1063 if (!iter->module) {
1064 found = marker_get_iter_range(&iter->marker,
1065 __start___markers, __stop___markers);
1066 if (found)
1067 goto end;
1068 }
1069 /* Markers in modules. */
1070 found = module_get_iter_markers(iter);
1071end:
1072 if (!found)
1073 marker_iter_reset(iter);
1074}
1075
/*
 * marker_iter_start - position @iter on the first valid marker.
 * Pair with marker_iter_next()/marker_iter_stop().
 */
void marker_iter_start(struct marker_iter *iter)
{
	/* marker_get_iter() finds the first marker or resets the iterator. */
	marker_get_iter(iter);
}
59b161cd 1080//ust// EXPORT_SYMBOL_GPL(marker_iter_start);
68c1021b
PMF
1081
1082void marker_iter_next(struct marker_iter *iter)
1083{
1084 iter->marker++;
1085 /*
1086 * iter->marker may be invalid because we blindly incremented it.
1087 * Make sure it is valid by marshalling on the markers, getting the
1088 * markers from following modules if necessary.
1089 */
1090 marker_get_iter(iter);
1091}
59b161cd 1092//ust// EXPORT_SYMBOL_GPL(marker_iter_next);
68c1021b
PMF
1093
/*
 * marker_iter_stop - finish an iteration started by marker_iter_start().
 * No resources are held by the iterator in this implementation; the
 * function exists for API symmetry with the kernel marker interface.
 */
void marker_iter_stop(struct marker_iter *iter)
{
}
59b161cd 1097//ust// EXPORT_SYMBOL_GPL(marker_iter_stop);
68c1021b
PMF
1098
1099void marker_iter_reset(struct marker_iter *iter)
1100{
1101 iter->module = NULL;
1102 iter->marker = NULL;
1103}
59b161cd 1104//ust// EXPORT_SYMBOL_GPL(marker_iter_reset);
68c1021b
PMF
1105
1106#ifdef CONFIG_MARKERS_USERSPACE
1107/*
1108 * must be called with current->user_markers_mutex held
1109 */
1110static void free_user_marker(char __user *state, struct hlist_head *head)
1111{
1112 struct user_marker *umark;
1113 struct hlist_node *pos, *n;
1114
1115 hlist_for_each_entry_safe(umark, pos, n, head, hlist) {
1116 if (umark->state == state) {
1117 hlist_del(&umark->hlist);
1118 kfree(umark);
1119 }
1120 }
1121}
1122
59b161cd
PMF
1123//ust// asmlinkage long sys_marker(char __user *name, char __user *format,
1124//ust// char __user *state, int reg)
1125//ust// {
1126//ust// struct user_marker *umark;
1127//ust// long len;
1128//ust// struct marker_entry *entry;
1129//ust// int ret = 0;
1130//ust//
1131//ust// printk(KERN_DEBUG "Program %s %s marker [%p, %p]\n",
1132//ust// current->comm, reg ? "registers" : "unregisters",
1133//ust// name, state);
1134//ust// if (reg) {
1135//ust// umark = kmalloc(sizeof(struct user_marker), GFP_KERNEL);
1136//ust// umark->name[MAX_USER_MARKER_NAME_LEN - 1] = '\0';
1137//ust// umark->format[MAX_USER_MARKER_FORMAT_LEN - 1] = '\0';
1138//ust// umark->state = state;
1139//ust// len = strncpy_from_user(umark->name, name,
1140//ust// MAX_USER_MARKER_NAME_LEN - 1);
1141//ust// if (len < 0) {
1142//ust// ret = -EFAULT;
1143//ust// goto error;
1144//ust// }
1145//ust// len = strncpy_from_user(umark->format, format,
1146//ust// MAX_USER_MARKER_FORMAT_LEN - 1);
1147//ust// if (len < 0) {
1148//ust// ret = -EFAULT;
1149//ust// goto error;
1150//ust// }
1151//ust// printk(KERN_DEBUG "Marker name : %s, format : %s", umark->name,
1152//ust// umark->format);
1153//ust// mutex_lock(&markers_mutex);
1154//ust// entry = get_marker("userspace", umark->name);
1155//ust// if (entry) {
1156//ust// if (entry->format &&
1157//ust// strcmp(entry->format, umark->format) != 0) {
1158//ust// printk(" error, wrong format in process %s",
1159//ust// current->comm);
1160//ust// ret = -EPERM;
1161//ust// goto error_unlock;
1162//ust// }
1163//ust// printk(" %s", !!entry->refcount
1164//ust// ? "enabled" : "disabled");
1165//ust// if (put_user(!!entry->refcount, state)) {
1166//ust// ret = -EFAULT;
1167//ust// goto error_unlock;
1168//ust// }
1169//ust// printk("\n");
1170//ust// } else {
1171//ust// printk(" disabled\n");
1172//ust// if (put_user(0, umark->state)) {
1173//ust// printk(KERN_WARNING
1174//ust// "Marker in %s caused a fault\n",
1175//ust// current->comm);
1176//ust// goto error_unlock;
1177//ust// }
1178//ust// }
1179//ust// mutex_lock(&current->group_leader->user_markers_mutex);
1180//ust// hlist_add_head(&umark->hlist,
1181//ust// &current->group_leader->user_markers);
1182//ust// current->group_leader->user_markers_sequence++;
1183//ust// mutex_unlock(&current->group_leader->user_markers_mutex);
1184//ust// mutex_unlock(&markers_mutex);
1185//ust// } else {
1186//ust// mutex_lock(&current->group_leader->user_markers_mutex);
1187//ust// free_user_marker(state,
1188//ust// &current->group_leader->user_markers);
1189//ust// current->group_leader->user_markers_sequence++;
1190//ust// mutex_unlock(&current->group_leader->user_markers_mutex);
1191//ust// }
1192//ust// goto end;
1193//ust// error_unlock:
1194//ust// mutex_unlock(&markers_mutex);
1195//ust// error:
1196//ust// kfree(umark);
1197//ust// end:
1198//ust// return ret;
1199//ust// }
1200//ust//
1201//ust// /*
1202//ust// * Types :
1203//ust// * string : 0
1204//ust// */
1205//ust// asmlinkage long sys_trace(int type, uint16_t id,
1206//ust// char __user *ubuf)
1207//ust// {
1208//ust// long ret = -EPERM;
1209//ust// char *page;
1210//ust// int len;
1211//ust//
1212//ust// switch (type) {
1213//ust// case 0: /* String */
1214//ust// ret = -ENOMEM;
1215//ust// page = (char *)__get_free_page(GFP_TEMPORARY);
1216//ust// if (!page)
1217//ust// goto string_out;
1218//ust// len = strncpy_from_user(page, ubuf, PAGE_SIZE);
1219//ust// if (len < 0) {
1220//ust// ret = -EFAULT;
1221//ust// goto string_err;
1222//ust// }
1223//ust// trace_mark(userspace, string, "string %s", page);
1224//ust// string_err:
1225//ust// free_page((unsigned long) page);
1226//ust// string_out:
1227//ust// break;
1228//ust// default:
1229//ust// break;
1230//ust// }
1231//ust// return ret;
1232//ust// }
1233
1234//ust// static void marker_update_processes(void)
1235//ust// {
1236//ust// struct task_struct *g, *t;
1237//ust//
1238//ust// /*
1239//ust// * markers_mutex is taken to protect the p->user_markers read.
1240//ust// */
1241//ust// mutex_lock(&markers_mutex);
1242//ust// read_lock(&tasklist_lock);
1243//ust// for_each_process(g) {
1244//ust// WARN_ON(!thread_group_leader(g));
1245//ust// if (hlist_empty(&g->user_markers))
1246//ust// continue;
1247//ust// if (strcmp(g->comm, "testprog") == 0)
1248//ust// printk(KERN_DEBUG "set update pending for testprog\n");
1249//ust// t = g;
1250//ust// do {
1251//ust// /* TODO : implement this thread flag in each arch. */
1252//ust// set_tsk_thread_flag(t, TIF_MARKER_PENDING);
1253//ust// } while ((t = next_thread(t)) != g);
1254//ust// }
1255//ust// read_unlock(&tasklist_lock);
1256//ust// mutex_unlock(&markers_mutex);
1257//ust// }
68c1021b
PMF
1258
1259/*
1260 * Update current process.
1261 * Note that we have to wait a whole scheduler period before we are sure that
1262 * every running userspace threads have their markers updated.
1263 * (synchronize_sched() can be used to insure this).
1264 */
void marker_update_process(void)
{
	struct user_marker *umark;
	struct hlist_node *pos;
	struct marker_entry *entry;

	/*
	 * Lock order: markers_mutex first, then the group leader's
	 * user_markers_mutex (same order as the commented-out sys_marker
	 * path above).
	 */
	mutex_lock(&markers_mutex);
	mutex_lock(&current->group_leader->user_markers_mutex);
	/* Debug aid: only trace updates for the "testprog" test binary. */
	if (strcmp(current->comm, "testprog") == 0)
		printk(KERN_DEBUG "do update pending for testprog\n");
	/* User markers are tracked per thread group, on the leader. */
	hlist_for_each_entry(umark, pos,
			&current->group_leader->user_markers, hlist) {
		printk(KERN_DEBUG "Updating marker %s in %s\n",
			umark->name, current->comm);
		entry = get_marker("userspace", umark->name);
		if (entry) {
			/*
			 * Refuse the update when the registered format
			 * disagrees with what the process declared.
			 */
			if (entry->format &&
				strcmp(entry->format, umark->format) != 0) {
				printk(KERN_WARNING
					" error, wrong format in process %s\n",
					current->comm);
				break;
			}
			/*
			 * Publish the enable state (0 or 1) into the
			 * process's user-space state variable; a fault
			 * aborts the whole update pass.
			 */
			if (put_user(!!entry->refcount, umark->state)) {
				printk(KERN_WARNING
					"Marker in %s caused a fault\n",
					current->comm);
				break;
			}
		} else {
			/* No kernel-side entry: the marker is disabled. */
			if (put_user(0, umark->state)) {
				printk(KERN_WARNING
					"Marker in %s caused a fault\n",
					current->comm);
				break;
			}
		}
	}
	/*
	 * Pending flag is cleared even when the loop broke early —
	 * NOTE(review): early-abort leaves later markers stale until the
	 * next update; confirm this is intended.
	 */
	clear_thread_flag(TIF_MARKER_PENDING);
	mutex_unlock(&current->group_leader->user_markers_mutex);
	mutex_unlock(&markers_mutex);
}
1307
1308/*
1309 * Called at process exit and upon do_execve().
1310 * We assume that when the leader exits, no more references can be done to the
1311 * leader structure by the other threads.
1312 */
1313void exit_user_markers(struct task_struct *p)
1314{
1315 struct user_marker *umark;
1316 struct hlist_node *pos, *n;
1317
1318 if (thread_group_leader(p)) {
1319 mutex_lock(&markers_mutex);
1320 mutex_lock(&p->user_markers_mutex);
1321 hlist_for_each_entry_safe(umark, pos, n, &p->user_markers,
1322 hlist)
1323 kfree(umark);
1324 INIT_HLIST_HEAD(&p->user_markers);
1325 p->user_markers_sequence++;
1326 mutex_unlock(&p->user_markers_mutex);
1327 mutex_unlock(&markers_mutex);
1328 }
1329}
1330
1331int is_marker_enabled(const char *channel, const char *name)
1332{
1333 struct marker_entry *entry;
1334
1335 mutex_lock(&markers_mutex);
1336 entry = get_marker(channel, name);
1337 mutex_unlock(&markers_mutex);
1338
1339 return entry && !!entry->refcount;
1340}
1341#endif
1342
1343int marker_module_notify(struct notifier_block *self,
1344 unsigned long val, void *data)
1345{
1346 struct module *mod = data;
1347
1348 switch (val) {
1349 case MODULE_STATE_COMING:
1350 marker_update_probe_range(mod->markers,
1351 mod->markers + mod->num_markers);
1352 break;
1353 case MODULE_STATE_GOING:
1354 marker_update_probe_range(mod->markers,
1355 mod->markers + mod->num_markers);
1356 break;
1357 }
1358 return 0;
1359}
1360
/* Module load/unload notifier; default priority within the chain. */
struct notifier_block marker_module_nb = {
	.notifier_call = marker_module_notify,
	.priority = 0,
};
1365
59b161cd
PMF
1366//ust// static int init_markers(void)
1367//ust// {
1368//ust// return register_module_notifier(&marker_module_nb);
1369//ust// }
1370//ust// __initcall(init_markers);
1371/* TODO: call marker_module_nb() when a library is linked at runtime (dlopen)? */
68c1021b
PMF
1372
1373#endif /* CONFIG_MODULES */
1374
59b161cd
PMF
1375//ust// void ltt_dump_marker_state(struct ltt_trace_struct *trace)
1376//ust// {
1377//ust// struct marker_iter iter;
1378//ust// struct ltt_probe_private_data call_data;
1379//ust// const char *channel;
1380//ust//
1381//ust// call_data.trace = trace;
1382//ust// call_data.serializer = NULL;
1383//ust//
1384//ust// marker_iter_reset(&iter);
1385//ust// marker_iter_start(&iter);
1386//ust// for (; iter.marker != NULL; marker_iter_next(&iter)) {
1387//ust// if (!_imv_read(iter.marker->state))
1388//ust// continue;
1389//ust// channel = ltt_channels_get_name_from_index(
1390//ust// iter.marker->channel_id);
1391//ust// __trace_mark(0, metadata, core_marker_id,
1392//ust// &call_data,
1393//ust// "channel %s name %s event_id %hu "
1394//ust// "int #1u%zu long #1u%zu pointer #1u%zu "
1395//ust// "size_t #1u%zu alignment #1u%u",
1396//ust// channel,
1397//ust// iter.marker->name,
1398//ust// iter.marker->event_id,
1399//ust// sizeof(int), sizeof(long),
1400//ust// sizeof(void *), sizeof(size_t),
1401//ust// ltt_get_alignment());
1402//ust// if (iter.marker->format)
1403//ust// __trace_mark(0, metadata,
1404//ust// core_marker_format,
1405//ust// &call_data,
1406//ust// "channel %s name %s format %s",
1407//ust// channel,
1408//ust// iter.marker->name,
1409//ust// iter.marker->format);
1410//ust// }
1411//ust// marker_iter_stop(&iter);
1412//ust// }
1413//ust// EXPORT_SYMBOL_GPL(ltt_dump_marker_state);
This page took 0.071739 seconds and 4 git commands to generate.