[lttng-modules.git] / discard / ltt-serialize.c
/*
 * LTTng serializing code.
 *
 * Copyright Mathieu Desnoyers, March 2007.
 *
 * Dual LGPL v2.1/GPL v2 license.
 *
 * Note the weirdness of passing a va_list, and then a pointer to a va_list,
 * to functions (related to array argument passing): va_list is implemented
 * as an array type on x86_64, but not on i386. This is why we pass a
 * va_list * to ltt_vtrace.
 */
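/*
 * Minimal sketch of the pattern this implies (illustration only, not part of
 * the build): callees take a va_list * and consume arguments through it, so
 * the same code behaves identically whether va_list is an array type
 * (x86_64) or a plain value (i386).
 *
 *      static void consume_one(va_list *args)
 *      {
 *              unsigned long v = va_arg(*args, unsigned long);
 *
 *              printk(KERN_DEBUG "arg: %lu\n", v);
 *      }
 *
 *      void caller(const char *fmt, ...)
 *      {
 *              va_list ap;
 *
 *              va_start(ap, fmt);
 *              consume_one(&ap);
 *              va_end(ap);
 *      }
 */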

#include <stdarg.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/module.h>

#include "ltt-tracer.h"

enum ltt_type {
        LTT_TYPE_SIGNED_INT,
        LTT_TYPE_UNSIGNED_INT,
        LTT_TYPE_STRING,
        LTT_TYPE_NONE,
};

#define LTT_ATTRIBUTE_NETWORK_BYTE_ORDER (1<<1)

/*
 * Stack used to keep track of string length at size calculation, passed to
 * string copy to handle racy input string updates.
 * Can be used by any context; this is ensured by putting the stack position
 * back to its original position after using it.
 */
#define TRACER_STACK_LEN (PAGE_SIZE / sizeof(unsigned long))
static DEFINE_PER_CPU(unsigned long [TRACER_STACK_LEN],
                      tracer_stack);
static DEFINE_PER_CPU(unsigned int, tracer_stack_pos);
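/*
 * Illustrative sketch of how this stack is used (assumed call sequence; see
 * serialize_trace_data() and ltt_vtrace() below):
 *
 *      pos = __get_cpu_var(tracer_stack_pos);
 *      size pass: serialize_trace_data(NULL, ...) pushes strlen(s) + 1
 *      copy pass: serialize_trace_data(buf, ...)  reads back the saved length
 *      __get_cpu_var(tracer_stack_pos) = pos;     (restore on exit)
 *
 * Reusing the saved length guarantees that a string concurrently modified
 * between the size and copy passes cannot overflow the reserved slot.
 */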

/*
 * Inspired from vsnprintf
 *
 * The serialization format string supports the basic printf format strings.
 * In addition, it defines new formats that can be used to serialize more
 * complex/non portable data structures.
 *
 * Typical use:
 *
 *   field_name %ctype
 *   field_name #tracetype %ctype
 *   field_name #tracetype %ctype1 %ctype2 ...
 *
 * A conversion is performed between the format string types supported by GCC
 * and the trace type requested. The GCC type is used to perform type checking
 * on format strings. The trace type is used to specify the exact binary
 * representation in the trace. A mapping is done from one or more GCC types
 * to one trace type. Sign extension, if required by the conversion, is
 * performed following the trace type.
 *
 * If a gcc format is not declared with a trace format, the gcc format is
 * also used as the binary representation in the trace.
 *
 * Strings are supported with %s.
 * A single tracetype (sequence) can take multiple c types as parameters.
 *
 * c types:
 *
 * see printf(3).
 *
 * Note: to write a uint32_t in a trace, the following expression is
 * recommended so it is portable:
 *
 *   ("#4u%lu", (unsigned long)var)
 *
 * trace types:
 *
 * Serialization specific formats :
 *
 * Fixed size integers
 *   #1u writes uint8_t
 *   #2u writes uint16_t
 *   #4u writes uint32_t
 *   #8u writes uint64_t
 *   #1d writes int8_t
 *   #2d writes int16_t
 *   #4d writes int32_t
 *   #8d writes int64_t
 * i.e.:
 *   #1u%lu #2u%lu #4d%lu #8d%lu #llu%hu #d%lu
 *
 * Attributes:
 *
 * n: (for network byte order)
 *   #ntracetype%ctype
 *   is written in the trace in network byte order.
 *
 * i.e.: #bn4u%lu, #n%lu, #b%u
 *
 * TODO (eventually)
 * Variable length sequence
 *   #a #tracetype1 #tracetype2 %array_ptr %elem_size %num_elems
 *   In the trace:
 *   #a specifies that this is a sequence
 *   #tracetype1 is the type of elements in the sequence
 *   #tracetype2 is the type of the element count
 *   GCC input:
 *   array_ptr is a pointer to an array that contains members of size
 *   elem_size.
 *   num_elems is the number of elements in the array.
 * i.e.: #a #lu #lu %p %lu %u
 *
 * Callback
 *   #k callback (taken from the probe data)
 *   The following % arguments are expected by the callback
 *
 * i.e.: #a #lu #lu #k %p
 *
 * Note: No conversion is done from floats to integers, nor from integers to
 * floats between c types and trace types. Conversion from double to float or
 * from float to double is also not supported.
 *
 * REMOVE
 * %*b expects sizeof(data), data
 *   where sizeof(data) is 1, 2, 4 or 8
 *
 * Fixed length struct, union or array.
 * FIXME: unable to extract those sizes statically.
 * %*r expects sizeof(*ptr), ptr
 * %*.*r expects sizeof(*ptr), __alignof__(*ptr), ptr
 * struct and unions removed.
 * Fixed length array:
 *   [%p]#a[len #tracetype]
 *   i.e.: [%p]#a[12 #lu]
 *
 * Variable length sequence
 * %*.*:*v expects sizeof(*ptr), __alignof__(*ptr), elem_num, ptr
 *   where elem_num is the number of elements in the sequence
 */
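
#if 0
/*
 * Illustrative sketch only (not built): how a probe might hand such a
 * format string to the serializer through ltt_trace(). The event layout
 * shown here ("fd", "count", "name") is made up for the example; only the
 * format syntax comes from the documentation above.
 */
static void example_format_usage(const struct marker *mdata, void *probe_data,
                                 void *call_data, int fd, size_t count,
                                 const char *name)
{
        /*
         * "#4u%zu" stores count as a uint32_t in the trace while letting gcc
         * type-check the argument as size_t; "%s" serializes the string
         * inline, including its terminating NUL.
         */
        ltt_trace(mdata, probe_data, call_data,
                  "fd %d count #4u%zu name %s", fd, count, name);
}
#endif
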
static inline
const char *parse_trace_type(const char *fmt, char *trace_size,
                             enum ltt_type *trace_type,
                             unsigned long *attributes)
{
        int qualifier;          /* 'h', 'l', or 'L' for integer fields */
                                /* 'z' support added 23/7/1999 S.H.    */
                                /* 'z' changed to 'Z' --davidm 1/25/99 */
                                /* 't' added for ptrdiff_t */

        /* parse attributes. */
repeat:
        switch (*fmt) {
        case 'n':
                *attributes |= LTT_ATTRIBUTE_NETWORK_BYTE_ORDER;
                ++fmt;
                goto repeat;
        }

        /* get the conversion qualifier */
        qualifier = -1;
        if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
            *fmt == 'Z' || *fmt == 'z' || *fmt == 't' ||
            *fmt == 'S' || *fmt == '1' || *fmt == '2' ||
            *fmt == '4' || *fmt == '8') {
                qualifier = *fmt;
                ++fmt;
                if (qualifier == 'l' && *fmt == 'l') {
                        qualifier = 'L';
                        ++fmt;
                }
        }

        switch (*fmt) {
        case 'c':
                *trace_type = LTT_TYPE_UNSIGNED_INT;
                *trace_size = sizeof(unsigned char);
                goto parse_end;
        case 's':
                *trace_type = LTT_TYPE_STRING;
                goto parse_end;
        case 'p':
                *trace_type = LTT_TYPE_UNSIGNED_INT;
                *trace_size = sizeof(void *);
                goto parse_end;
        case 'd':
        case 'i':
                *trace_type = LTT_TYPE_SIGNED_INT;
                break;
        case 'o':
        case 'u':
        case 'x':
        case 'X':
                *trace_type = LTT_TYPE_UNSIGNED_INT;
                break;
        default:
                if (!*fmt)
                        --fmt;
                goto parse_end;
        }
        switch (qualifier) {
        case 'L':
                *trace_size = sizeof(long long);
                break;
        case 'l':
                *trace_size = sizeof(long);
                break;
        case 'Z':
        case 'z':
                *trace_size = sizeof(size_t);
                break;
        case 't':
                *trace_size = sizeof(ptrdiff_t);
                break;
        case 'h':
                *trace_size = sizeof(short);
                break;
        case '1':
                *trace_size = sizeof(uint8_t);
                break;
        case '2':
                *trace_size = sizeof(uint16_t);
                break;
        case '4':
                *trace_size = sizeof(uint32_t);
                break;
        case '8':
                *trace_size = sizeof(uint64_t);
                break;
        default:
                *trace_size = sizeof(int);
        }

parse_end:
        return fmt;
}
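
/*
 * Example (behaviour derived from the parser above): for the trace type
 * string "n4u", parse_trace_type() sets LTT_ATTRIBUTE_NETWORK_BYTE_ORDER in
 * *attributes, *trace_size = sizeof(uint32_t),
 * *trace_type = LTT_TYPE_UNSIGNED_INT, and returns a pointer to the
 * final 'u'.
 */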

/*
 * Restrictions:
 * Field width and precision are *not* supported.
 * %n not supported.
 */
static inline
const char *parse_c_type(const char *fmt, char *c_size, enum ltt_type *c_type,
                         char *outfmt)
{
        int qualifier;          /* 'h', 'l', or 'L' for integer fields */
                                /* 'z' support added 23/7/1999 S.H.    */
                                /* 'z' changed to 'Z' --davidm 1/25/99 */
                                /* 't' added for ptrdiff_t */

        /* process flags : ignore standard print formats for now. */
repeat:
        switch (*fmt) {
        case '-':
        case '+':
        case ' ':
        case '#':
        case '0':
                ++fmt;
                goto repeat;
        }

        /* get the conversion qualifier */
        qualifier = -1;
        if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' ||
            *fmt == 'Z' || *fmt == 'z' || *fmt == 't' ||
            *fmt == 'S') {
                qualifier = *fmt;
                ++fmt;
                if (qualifier == 'l' && *fmt == 'l') {
                        qualifier = 'L';
                        ++fmt;
                }
        }

        if (outfmt) {
                if (qualifier != -1)
                        *outfmt++ = (char)qualifier;
                *outfmt++ = *fmt;
                *outfmt = 0;
        }

        switch (*fmt) {
        case 'c':
                *c_type = LTT_TYPE_UNSIGNED_INT;
                *c_size = sizeof(unsigned char);
                goto parse_end;
        case 's':
                *c_type = LTT_TYPE_STRING;
                goto parse_end;
        case 'p':
                *c_type = LTT_TYPE_UNSIGNED_INT;
                *c_size = sizeof(void *);
                goto parse_end;
        case 'd':
        case 'i':
                *c_type = LTT_TYPE_SIGNED_INT;
                break;
        case 'o':
        case 'u':
        case 'x':
        case 'X':
                *c_type = LTT_TYPE_UNSIGNED_INT;
                break;
        default:
                if (!*fmt)
                        --fmt;
                goto parse_end;
        }
        switch (qualifier) {
        case 'L':
                *c_size = sizeof(long long);
                break;
        case 'l':
                *c_size = sizeof(long);
                break;
        case 'Z':
        case 'z':
                *c_size = sizeof(size_t);
                break;
        case 't':
                *c_size = sizeof(ptrdiff_t);
                break;
        case 'h':
                *c_size = sizeof(short);
                break;
        default:
                *c_size = sizeof(int);
        }

parse_end:
        return fmt;
}

static inline
size_t serialize_trace_data(struct ltt_chanbuf *buf, size_t buf_offset,
                            char trace_size, enum ltt_type trace_type,
                            char c_size, enum ltt_type c_type,
                            unsigned int *stack_pos_ctx,
                            int *largest_align,
                            va_list *args)
{
        union {
                unsigned long v_ulong;
                uint64_t v_uint64;
                struct {
                        const char *s;
                        size_t len;
                } v_string;
        } tmp;

        /*
         * Be careful about sign extension here.
         * Sign extension is done with the destination (trace) type.
         */
        switch (trace_type) {
        case LTT_TYPE_SIGNED_INT:
                switch (c_size) {
                case 1:
                        tmp.v_ulong = (long)(int8_t)va_arg(*args, int);
                        break;
                case 2:
                        tmp.v_ulong = (long)(int16_t)va_arg(*args, int);
                        break;
                case 4:
                        tmp.v_ulong = (long)(int32_t)va_arg(*args, int);
                        break;
                case 8:
                        tmp.v_uint64 = va_arg(*args, int64_t);
                        break;
                default:
                        BUG();
                }
                break;
        case LTT_TYPE_UNSIGNED_INT:
                switch (c_size) {
                case 1:
                        tmp.v_ulong = (unsigned long)(uint8_t)va_arg(*args, unsigned int);
                        break;
                case 2:
                        tmp.v_ulong = (unsigned long)(uint16_t)va_arg(*args, unsigned int);
                        break;
                case 4:
                        tmp.v_ulong = (unsigned long)(uint32_t)va_arg(*args, unsigned int);
                        break;
                case 8:
                        tmp.v_uint64 = va_arg(*args, uint64_t);
                        break;
                default:
                        BUG();
                }
                break;
        case LTT_TYPE_STRING:
                tmp.v_string.s = va_arg(*args, const char *);
                if ((unsigned long)tmp.v_string.s < PAGE_SIZE)
                        tmp.v_string.s = "<NULL>";
                if (!buf) {
                        /*
                         * Reserve tracer stack entry.
                         */
                        __get_cpu_var(tracer_stack_pos)++;
                        WARN_ON_ONCE(__get_cpu_var(tracer_stack_pos)
                                     > TRACER_STACK_LEN);
                        barrier();
                        __get_cpu_var(tracer_stack)[*stack_pos_ctx] =
                                strlen(tmp.v_string.s) + 1;
                }
                tmp.v_string.len = __get_cpu_var(tracer_stack)
                                        [(*stack_pos_ctx)++];
                if (buf)
                        ltt_relay_strncpy(&buf->a, buf->a.chan, buf_offset,
                                          tmp.v_string.s, tmp.v_string.len);
                buf_offset += tmp.v_string.len;
                goto copydone;
        default:
                BUG();
        }

        /*
         * If trace_size is less than or equal to 4 bytes, there is no sign
         * extension to do because the value is already encoded in a long.
         * Therefore, we can combine signed and unsigned operations. 4-byte
         * floats also work with this, because we do a simple copy of 4 bytes
         * into 4 bytes without manipulation (and we do not support conversion
         * from integers to floats).
         * It is also the case if c_size is 8 bytes, which is the largest
         * possible integer.
         */
        if (ltt_get_alignment()) {
                buf_offset += ltt_align(buf_offset, trace_size);
                if (largest_align)
                        *largest_align = max_t(int, *largest_align, trace_size);
        }
        if (trace_size <= 4 || c_size == 8) {
                if (buf) {
                        switch (trace_size) {
                        case 1:
                                if (c_size == 8)
                                        ltt_relay_write(&buf->a, buf->a.chan,
                                                buf_offset,
                                                (uint8_t[]){ (uint8_t)tmp.v_uint64 },
                                                sizeof(uint8_t));
                                else
                                        ltt_relay_write(&buf->a, buf->a.chan,
                                                buf_offset,
                                                (uint8_t[]){ (uint8_t)tmp.v_ulong },
                                                sizeof(uint8_t));
                                break;
                        case 2:
                                if (c_size == 8)
                                        ltt_relay_write(&buf->a, buf->a.chan,
                                                buf_offset,
                                                (uint16_t[]){ (uint16_t)tmp.v_uint64 },
                                                sizeof(uint16_t));
                                else
                                        ltt_relay_write(&buf->a, buf->a.chan,
                                                buf_offset,
                                                (uint16_t[]){ (uint16_t)tmp.v_ulong },
                                                sizeof(uint16_t));
                                break;
                        case 4:
                                if (c_size == 8)
                                        ltt_relay_write(&buf->a, buf->a.chan,
                                                buf_offset,
                                                (uint32_t[]){ (uint32_t)tmp.v_uint64 },
                                                sizeof(uint32_t));
                                else
                                        ltt_relay_write(&buf->a, buf->a.chan,
                                                buf_offset,
                                                (uint32_t[]){ (uint32_t)tmp.v_ulong },
                                                sizeof(uint32_t));
                                break;
                        case 8:
                                /*
                                 * c_size cannot be other than 8 here because
                                 * trace_size > 4.
                                 */
                                ltt_relay_write(&buf->a, buf->a.chan, buf_offset,
                                        (uint64_t[]){ (uint64_t)tmp.v_uint64 },
                                        sizeof(uint64_t));
                                break;
                        default:
                                BUG();
                        }
                }
                buf_offset += trace_size;
                goto copydone;
        } else {
                /*
                 * Perform sign extension.
                 */
                if (buf) {
                        switch (trace_type) {
                        case LTT_TYPE_SIGNED_INT:
                                ltt_relay_write(&buf->a, buf->a.chan, buf_offset,
                                        (int64_t[]){ (int64_t)tmp.v_ulong },
                                        sizeof(int64_t));
                                break;
                        case LTT_TYPE_UNSIGNED_INT:
                                ltt_relay_write(&buf->a, buf->a.chan, buf_offset,
                                        (uint64_t[]){ (uint64_t)tmp.v_ulong },
                                        sizeof(uint64_t));
                                break;
                        default:
                                BUG();
                        }
                }
                buf_offset += trace_size;
                goto copydone;
        }

copydone:
        return buf_offset;
}

notrace size_t
ltt_serialize_data(struct ltt_chanbuf *buf, size_t buf_offset,
                   struct ltt_serialize_closure *closure,
                   void *serialize_private, unsigned int stack_pos_ctx,
                   int *largest_align, const char *fmt, va_list *args)
{
        char trace_size = 0, c_size = 0;  /* 0 (unset), 1, 2, 4, 8 bytes. */
        enum ltt_type trace_type = LTT_TYPE_NONE, c_type = LTT_TYPE_NONE;
        unsigned long attributes = 0;

        for (; *fmt ; ++fmt) {
                switch (*fmt) {
                case '#':
                        /* tracetypes (#) */
                        ++fmt;                  /* skip first '#' */
                        if (*fmt == '#')        /* Escaped ## */
                                break;
                        attributes = 0;
                        fmt = parse_trace_type(fmt, &trace_size, &trace_type,
                                               &attributes);
                        break;
                case '%':
                        /* c types (%) */
                        ++fmt;                  /* skip first '%' */
                        if (*fmt == '%')        /* Escaped %% */
                                break;
                        fmt = parse_c_type(fmt, &c_size, &c_type, NULL);
                        /*
                         * Output c types if no trace type has been
                         * specified.
                         */
                        if (!trace_size)
                                trace_size = c_size;
                        if (trace_type == LTT_TYPE_NONE)
                                trace_type = c_type;
                        if (c_type == LTT_TYPE_STRING)
                                trace_type = LTT_TYPE_STRING;
                        /* perform trace write */
                        buf_offset = serialize_trace_data(buf, buf_offset,
                                                          trace_size,
                                                          trace_type, c_size,
                                                          c_type,
                                                          &stack_pos_ctx,
                                                          largest_align,
                                                          args);
                        trace_size = 0;
                        c_size = 0;
                        trace_type = LTT_TYPE_NONE;
                        c_type = LTT_TYPE_NONE;
                        attributes = 0;
                        break;
                /* default is to skip the text, doing nothing */
                }
        }
        return buf_offset;
}
EXPORT_SYMBOL_GPL(ltt_serialize_data);

static inline
uint64_t unserialize_base_type(struct ltt_chanbuf *buf,
                               size_t *ppos, char trace_size,
                               enum ltt_type trace_type)
{
        uint64_t tmp;

        *ppos += ltt_align(*ppos, trace_size);
        ltt_relay_read(&buf->a, *ppos, &tmp, trace_size);
        *ppos += trace_size;

        switch (trace_type) {
        case LTT_TYPE_SIGNED_INT:
                switch (trace_size) {
                case 1:
                        return (uint64_t)*(int8_t *)&tmp;
                case 2:
                        return (uint64_t)*(int16_t *)&tmp;
                case 4:
                        return (uint64_t)*(int32_t *)&tmp;
                case 8:
                        return tmp;
                }
                break;
        case LTT_TYPE_UNSIGNED_INT:
                switch (trace_size) {
                case 1:
                        return (uint64_t)*(uint8_t *)&tmp;
                case 2:
                        return (uint64_t)*(uint16_t *)&tmp;
                case 4:
                        return (uint64_t)*(uint32_t *)&tmp;
                case 8:
                        return tmp;
                }
                break;
        default:
                break;
        }

        BUG();
        return 0;
}

static
int serialize_printf_data(struct ltt_chanbuf *buf, size_t *ppos,
                          char trace_size, enum ltt_type trace_type,
                          char c_size, enum ltt_type c_type, char *output,
                          ssize_t outlen, const char *outfmt)
{
        u64 value;
        outlen = outlen < 0 ? 0 : outlen;

        if (trace_type == LTT_TYPE_STRING) {
                size_t len = ltt_relay_read_cstr(&buf->a, *ppos, output,
                                                 outlen);
                *ppos += len + 1;
                return len;
        }

        value = unserialize_base_type(buf, ppos, trace_size, trace_type);

        if (c_size == 8)
                return snprintf(output, outlen, outfmt, value);
        else
                return snprintf(output, outlen, outfmt, (unsigned int)value);
}

/**
 * ltt_serialize_printf - Format a string and place it in a buffer
 * @buf: The ltt-relay buffer that stores binary data
 * @buf_offset: binary data's offset in @buf (should be masked to use as offset)
 * @msg_size: where the message's length is returned
 * @output: The buffer to place the result into
 * @outlen: The size of the buffer, including the trailing '\0'
 * @fmt: The format string to use
 *
 * The return value is the number of characters which would
 * be generated for the given input, excluding the trailing
 * '\0', as per ISO C99. If the return is greater than or equal to @outlen,
 * the resulting string is truncated.
 */
size_t ltt_serialize_printf(struct ltt_chanbuf *buf, unsigned long buf_offset,
                            size_t *msg_size, char *output, size_t outlen,
                            const char *fmt)
{
        char trace_size = 0, c_size = 0;  /* 0 (unset), 1, 2, 4, 8 bytes. */
        enum ltt_type trace_type = LTT_TYPE_NONE, c_type = LTT_TYPE_NONE;
        unsigned long attributes = 0;
        char outfmt[4] = "%";
        size_t outpos = 0;
        size_t len;
        size_t msgpos = buf_offset;

        for (; *fmt ; ++fmt) {
                switch (*fmt) {
                case '#':
                        /* tracetypes (#) */
                        ++fmt;                  /* skip first '#' */
                        if (*fmt == '#') {      /* Escaped ## */
                                if (outpos < outlen)
                                        output[outpos] = '#';
                                outpos++;
                                break;
                        }
                        attributes = 0;
                        fmt = parse_trace_type(fmt, &trace_size, &trace_type,
                                               &attributes);
                        break;
                case '%':
                        /* c types (%) */
                        ++fmt;                  /* skip first '%' */
                        if (*fmt == '%') {      /* Escaped %% */
                                if (outpos < outlen)
                                        output[outpos] = '%';
                                outpos++;
                                break;
                        }
                        fmt = parse_c_type(fmt, &c_size, &c_type, outfmt + 1);
                        /*
                         * Output c types if no trace type has been
                         * specified.
                         */
                        if (!trace_size)
                                trace_size = c_size;
                        if (trace_type == LTT_TYPE_NONE)
                                trace_type = c_type;
                        if (c_type == LTT_TYPE_STRING)
                                trace_type = LTT_TYPE_STRING;

                        /* perform trace printf */
                        len = serialize_printf_data(buf, &msgpos, trace_size,
                                                    trace_type, c_size, c_type,
                                                    output + outpos,
                                                    outlen - outpos, outfmt);
                        outpos += len;
                        trace_size = 0;
                        c_size = 0;
                        trace_type = LTT_TYPE_NONE;
                        c_type = LTT_TYPE_NONE;
                        attributes = 0;
                        break;
                default:
                        if (outpos < outlen)
                                output[outpos] = *fmt;
                        outpos++;
                        break;
                }
        }
        if (msg_size)
                *msg_size = (size_t)(msgpos - buf_offset);
        /*
         * Make sure we end output with terminating \0 when truncated.
         */
        if (outpos >= outlen + 1)
                output[outlen] = '\0';
        return outpos;
}
EXPORT_SYMBOL_GPL(ltt_serialize_printf);
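
#if 0
/*
 * Illustrative sketch only (not built): pretty-printing an event payload on
 * the read side with ltt_serialize_printf(). The reader that locates 'buf'
 * and 'payload_offset', and the format string itself, are assumptions made
 * for the example; the format must match the one used when the event was
 * traced.
 */
static void example_print_event(struct ltt_chanbuf *buf,
                                unsigned long payload_offset)
{
        char text[128];
        size_t payload_size;
        size_t len;

        len = ltt_serialize_printf(buf, payload_offset, &payload_size,
                                   text, sizeof(text),
                                   "fd %d count #4u%zu name %s");
        /* As with snprintf(), len >= sizeof(text) means the text was cut. */
        if (len >= sizeof(text))
                printk(KERN_DEBUG "event truncated (%zu payload bytes): %s\n",
                       payload_size, text);
        else
                printk(KERN_DEBUG "event (%zu payload bytes): %s\n",
                       payload_size, text);
}
#endif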

#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS

unsigned int ltt_fmt_largest_align(size_t align_drift, const char *fmt)
{
        char trace_size = 0, c_size = 0;
        enum ltt_type trace_type = LTT_TYPE_NONE, c_type = LTT_TYPE_NONE;
        unsigned long attributes = 0;
        int largest_align = 1;

        for (; *fmt ; ++fmt) {
                switch (*fmt) {
                case '#':
                        /* tracetypes (#) */
                        ++fmt;                  /* skip first '#' */
                        if (*fmt == '#')        /* Escaped ## */
                                break;
                        attributes = 0;
                        fmt = parse_trace_type(fmt, &trace_size, &trace_type,
                                               &attributes);

                        largest_align = max_t(int, largest_align, trace_size);
                        if (largest_align >= ltt_get_alignment())
                                goto exit;
                        break;
                case '%':
                        /* c types (%) */
                        ++fmt;                  /* skip first '%' */
                        if (*fmt == '%')        /* Escaped %% */
                                break;
                        fmt = parse_c_type(fmt, &c_size, &c_type, NULL);
                        /*
                         * Output c types if no trace type has been
                         * specified.
                         */
                        if (!trace_size)
                                trace_size = c_size;
                        if (trace_type == LTT_TYPE_NONE)
                                trace_type = c_type;
                        if (c_type == LTT_TYPE_STRING)
                                trace_type = LTT_TYPE_STRING;

                        largest_align = max_t(int, largest_align, trace_size);
                        if (largest_align >= ltt_get_alignment())
                                goto exit;

                        trace_size = 0;
                        c_size = 0;
                        trace_type = LTT_TYPE_NONE;
                        c_type = LTT_TYPE_NONE;
                        break;
                }
        }

exit:
        largest_align = min_t(int, largest_align, ltt_get_alignment());
        return (largest_align - align_drift) & (largest_align - 1);
}
EXPORT_SYMBOL_GPL(ltt_fmt_largest_align);
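
/*
 * Worked example (derived from the expression above): with
 * ltt_get_alignment() == 8, a format whose largest field is a #4u
 * (largest_align == 4) and an align_drift of 2 gives
 * (4 - 2) & (4 - 1) = 2 bytes of padding before the payload.
 */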

#endif

/*
 * Calculate data size
 * Assume that the padding for alignment starts at a sizeof(void *) address.
 */
static notrace
size_t ltt_get_data_size(struct ltt_serialize_closure *closure,
                         void *serialize_private, unsigned int stack_pos_ctx,
                         int *largest_align, const char *fmt, va_list *args)
{
        ltt_serialize_cb cb = closure->callbacks[0];
        closure->cb_idx = 0;
        return (size_t)cb(NULL, 0, closure, serialize_private, stack_pos_ctx,
                          largest_align, fmt, args);
}

static notrace
void ltt_write_event_data(struct ltt_chanbuf *buf, size_t buf_offset,
                          struct ltt_serialize_closure *closure,
                          void *serialize_private, unsigned int stack_pos_ctx,
                          int largest_align, const char *fmt, va_list *args)
{
        ltt_serialize_cb cb = closure->callbacks[0];
        closure->cb_idx = 0;
        buf_offset += ltt_align(buf_offset, largest_align);
        cb(buf, buf_offset, closure, serialize_private, stack_pos_ctx, NULL,
           fmt, args);
}

notrace
void ltt_vtrace(const struct marker *mdata, void *probe_data, void *call_data,
                const char *fmt, va_list *args)
{
        int largest_align, ret;
        struct ltt_active_marker *pdata;
        uint16_t eID;
        size_t data_size, slot_size;
        unsigned int chan_index;
        struct ltt_chanbuf *buf;
        struct ltt_chan *chan;
        struct ltt_trace *trace, *dest_trace = NULL;
        uint64_t tsc;
        long buf_offset;
        va_list args_copy;
        struct ltt_serialize_closure closure;
        struct ltt_probe_private_data *private_data = call_data;
        void *serialize_private = NULL;
        int cpu;
        unsigned int rflags;
        unsigned int stack_pos_ctx;

        /*
         * This test is useful for quickly exiting static tracing when no trace
         * is active. We expect to have an active trace when we get here.
         */
        if (unlikely(ltt_traces.num_active_traces == 0))
                return;

        rcu_read_lock_sched_notrace();
        cpu = smp_processor_id();
        __get_cpu_var(ltt_nesting)++;
        stack_pos_ctx = __get_cpu_var(tracer_stack_pos);
        /*
         * asm volatile and "memory" clobber prevent the compiler from moving
         * instructions out of the ltt nesting count. This is required to
         * ensure that probe side-effects which can cause recursion (e.g.
         * unforeseen traps, divisions by 0, ...) are triggered within the
         * incremented nesting count section.
         */
        barrier();
        pdata = (struct ltt_active_marker *)probe_data;
        eID = mdata->event_id;
        chan_index = mdata->channel_id;
        closure.callbacks = pdata->probe->callbacks;

        if (unlikely(private_data)) {
                dest_trace = private_data->trace;
                if (private_data->serializer)
                        closure.callbacks = &private_data->serializer;
                serialize_private = private_data->serialize_private;
        }

        va_copy(args_copy, *args);
        /*
         * Assumes event payload to start on largest_align alignment.
         */
        largest_align = 1;      /* must be non-zero for ltt_align */
        data_size = ltt_get_data_size(&closure, serialize_private,
                                      stack_pos_ctx, &largest_align,
                                      fmt, &args_copy);
        largest_align = min_t(int, largest_align, sizeof(void *));
        va_end(args_copy);

        /* Iterate on each trace */
        list_for_each_entry_rcu(trace, &ltt_traces.head, list) {
                /*
                 * Expect the filter to filter out events. If we get here,
                 * we went through tracepoint activation as a first step.
                 */
                if (unlikely(dest_trace && trace != dest_trace))
                        continue;
                if (unlikely(!trace->active))
                        continue;
                if (unlikely(!ltt_run_filter(trace, eID)))
                        continue;
#ifdef LTT_DEBUG_EVENT_SIZE
                rflags = LTT_RFLAG_ID_SIZE;
#else
                if (unlikely(eID >= LTT_FREE_EVENTS))
                        rflags = LTT_RFLAG_ID;
                else
                        rflags = 0;
#endif
                /*
                 * Skip channels added after trace creation.
                 */
                if (unlikely(chan_index >= trace->nr_channels))
                        continue;
                chan = &trace->channels[chan_index];
                if (!chan->active)
                        continue;

                /* reserve space : header and data */
                ret = ltt_reserve_slot(chan, trace, data_size, largest_align,
                                       cpu, &buf, &slot_size, &buf_offset,
                                       &tsc, &rflags);
                if (unlikely(ret < 0))
                        continue; /* buffer full */

                va_copy(args_copy, *args);
                /* Out-of-order write : header and data */
                buf_offset = ltt_write_event_header(&buf->a, &chan->a,
                                                    buf_offset, eID, data_size,
                                                    tsc, rflags);
                ltt_write_event_data(buf, buf_offset, &closure,
                                     serialize_private, stack_pos_ctx,
                                     largest_align, fmt, &args_copy);
                va_end(args_copy);
                /* Out-of-order commit */
                ltt_commit_slot(buf, chan, buf_offset, data_size, slot_size);
        }
        /*
         * asm volatile and "memory" clobber prevent the compiler from moving
         * instructions out of the ltt nesting count. This is required to
         * ensure that probe side-effects which can cause recursion (e.g.
         * unforeseen traps, divisions by 0, ...) are triggered within the
         * incremented nesting count section.
         */
        barrier();
        __get_cpu_var(tracer_stack_pos) = stack_pos_ctx;
        __get_cpu_var(ltt_nesting)--;
        rcu_read_unlock_sched_notrace();
}
EXPORT_SYMBOL_GPL(ltt_vtrace);

notrace
void ltt_trace(const struct marker *mdata, void *probe_data, void *call_data,
               const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        ltt_vtrace(mdata, probe_data, call_data, fmt, &args);
        va_end(args);
}
EXPORT_SYMBOL_GPL(ltt_trace);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("Linux Trace Toolkit Next Generation Serializer");