Fix: loglevel and model_emf_uri build fix
[lttng-ust.git] / include / lttng / ust-tracepoint-event.h
1 /*
2 * Copyright (c) 2011-2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a copy
5 * of this software and associated documentation files (the "Software"), to deal
6 * in the Software without restriction, including without limitation the rights
7 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
8 * copies of the Software, and to permit persons to whom the Software is
9 * furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
19 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
20 * SOFTWARE.
21 */
22
23 #include <stdio.h>
24 #include <stdlib.h>
25 #include <urcu/compiler.h>
26 #include <urcu/rculist.h>
27 #include <lttng/ust-events.h>
28 #include <lttng/ringbuffer-config.h>
29 #include <lttng/ust-compiler.h>
30 #include <lttng/tracepoint.h>
31 #include <byteswap.h>
32 #include <string.h>
33
34 #define __LTTNG_UST_NULL_STRING "(null)"
35
36 #undef tp_list_for_each_entry_rcu
37 #define tp_list_for_each_entry_rcu(pos, head, member) \
38 for (pos = cds_list_entry(tp_rcu_dereference_bp((head)->next), __typeof__(*pos), member); \
39 &pos->member != (head); \
40 pos = cds_list_entry(tp_rcu_dereference_bp(pos->member.next), __typeof__(*pos), member))
41
42 /*
43 * TRACEPOINT_EVENT_CLASS declares a class of tracepoints receiving the
44 * same arguments and having the same field layout.
45 *
46 * TRACEPOINT_EVENT_INSTANCE declares an instance of a tracepoint, with
47 * its own provider and name. It refers to a class (template).
48 *
49  * TRACEPOINT_EVENT declares both a class and an instance and does a
50 * direct mapping from the instance to the class.
51 */
52
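/*
 * A minimal usage sketch, assuming a hypothetical provider "my_provider"
 * (all provider, event, argument and field names below are illustrative
 * only, not part of this header):
 *
 *   TRACEPOINT_EVENT_CLASS(my_provider, my_class,
 *       TP_ARGS(int, value, const char *, msg),
 *       TP_FIELDS(
 *           ctf_integer(int, value_field, value)
 *           ctf_string(msg_field, msg)
 *       )
 *   )
 *
 *   TRACEPOINT_EVENT_INSTANCE(my_provider, my_class, my_event_a,
 *       TP_ARGS(int, value, const char *, msg))
 *   TRACEPOINT_EVENT_INSTANCE(my_provider, my_class, my_event_b,
 *       TP_ARGS(int, value, const char *, msg))
 *
 *   TRACEPOINT_EVENT(my_provider, my_standalone_event,
 *       TP_ARGS(int, value),
 *       TP_FIELDS(ctf_integer(int, value_field, value)))
 */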
53 #undef TRACEPOINT_EVENT
54 #define TRACEPOINT_EVENT(_provider, _name, _args, _fields) \
55 TRACEPOINT_EVENT_CLASS(_provider, _name, \
56 _TP_PARAMS(_args), \
57 _TP_PARAMS(_fields)) \
58 TRACEPOINT_EVENT_INSTANCE(_provider, _name, _name, \
59 _TP_PARAMS(_args))
60
61 /* Helpers */
62 #define _TP_ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
63
64 #define _tp_max_t(type, x, y) \
65 ({ \
66 type __max1 = (x); \
67 type __max2 = (y); \
68 __max1 > __max2 ? __max1: __max2; \
69 })
70
71 /*
72 * Stage 0 of tracepoint event generation.
73 *
74  * Check that each TRACEPOINT_EVENT provider argument matches the
75 * TRACEPOINT_PROVIDER by creating dummy callbacks.
76 */
77
78 /* Reset all macros within TRACEPOINT_EVENT */
79 #include <lttng/ust-tracepoint-event-reset.h>
80
81 static inline lttng_ust_notrace
82 void _TP_COMBINE_TOKENS(__tracepoint_provider_mismatch_, TRACEPOINT_PROVIDER)(void);
83 static inline
84 void _TP_COMBINE_TOKENS(__tracepoint_provider_mismatch_, TRACEPOINT_PROVIDER)(void)
85 {
86 }
87
88 #undef TRACEPOINT_EVENT_CLASS
89 #define TRACEPOINT_EVENT_CLASS(_provider, _name, _args, _fields) \
90 __tracepoint_provider_mismatch_##_provider();
91
92 #undef TRACEPOINT_EVENT_INSTANCE
93 #define TRACEPOINT_EVENT_INSTANCE(_provider, _template, _name, _args) \
94 __tracepoint_provider_mismatch_##_provider();
95
96 static inline lttng_ust_notrace
97 void _TP_COMBINE_TOKENS(__tracepoint_provider_check_, TRACEPOINT_PROVIDER)(void);
98 static inline
99 void _TP_COMBINE_TOKENS(__tracepoint_provider_check_, TRACEPOINT_PROVIDER)(void)
100 {
101 #include TRACEPOINT_INCLUDE
102 }
103
104 /*
105 * Stage 0.1 of tracepoint event generation.
106 *
107 * Check that each TRACEPOINT_EVENT provider:name does not exceed the
108 * tracepoint name length limit.
109 */
110
111 /* Reset all macros within TRACEPOINT_EVENT */
112 #include <lttng/ust-tracepoint-event-reset.h>
113
114 #undef TRACEPOINT_EVENT_INSTANCE
115 #define TRACEPOINT_EVENT_INSTANCE(_provider, _template, _name, _args) \
116 static const char \
117 __tp_name_len_check##_provider##___##_name[LTTNG_UST_SYM_NAME_LEN] \
118 __attribute__((unused)) = \
119 #_provider ":" #_name;
120
121 #include TRACEPOINT_INCLUDE
122
123 /*
124 * Stage 0.9 of tracepoint event generation
125 *
126 * Unfolding the enums
127 */
128 #include <lttng/ust-tracepoint-event-reset.h>
129
130 /* Enumeration entry (single value) */
131 #undef ctf_enum_value
132 #define ctf_enum_value(_string, _value) \
133 { \
134 .start = { \
135 .value = lttng_is_signed_type(__typeof__(_value)) ? \
136 (long long) (_value) : (_value), \
137 .signedness = lttng_is_signed_type(__typeof__(_value)), \
138 }, \
139 .end = { \
140 .value = lttng_is_signed_type(__typeof__(_value)) ? \
141 (long long) (_value) : (_value), \
142 .signedness = lttng_is_signed_type(__typeof__(_value)), \
143 }, \
144 .string = (_string), \
145 },
146
147 /* Enumeration entry (range) */
148 #undef ctf_enum_range
149 #define ctf_enum_range(_string, _range_start, _range_end) \
150 { \
151 .start = { \
152 .value = lttng_is_signed_type(__typeof__(_range_start)) ? \
153 (long long) (_range_start) : (_range_start), \
154 .signedness = lttng_is_signed_type(__typeof__(_range_start)), \
155 }, \
156 .end = { \
157 .value = lttng_is_signed_type(__typeof__(_range_end)) ? \
158 (long long) (_range_end) : (_range_end), \
159 .signedness = lttng_is_signed_type(__typeof__(_range_end)), \
160 }, \
161 .string = (_string), \
162 },
163
164 /* Enumeration entry (automatic value; follows the rules of CTF) */
165 #undef ctf_enum_auto
166 #define ctf_enum_auto(_string) \
167 { \
168 .start = { \
169 .value = -1ULL, \
170 .signedness = 0, \
171 }, \
172 .end = { \
173 .value = -1ULL, \
174 .signedness = 0, \
175 }, \
176 .string = (_string), \
177 .u = { \
178 .extra = { \
179 .options = LTTNG_ENUM_ENTRY_OPTION_IS_AUTO, \
180 }, \
181 }, \
182 },
183
184 #undef TP_ENUM_VALUES
185 #define TP_ENUM_VALUES(...) \
186 __VA_ARGS__
187
188 #undef TRACEPOINT_ENUM
189 #define TRACEPOINT_ENUM(_provider, _name, _values) \
190 const struct lttng_enum_entry __enum_values__##_provider##_##_name[] = { \
191 _values \
192 ctf_enum_value("", 0) /* Dummy, 0-len array forbidden by C99. */ \
193 };
194
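/*
 * A minimal sketch of how a provider header declares an enumeration and
 * references it from an event field, assuming a hypothetical provider
 * "my_provider" (all names below are illustrative only):
 *
 *   TRACEPOINT_ENUM(my_provider, my_enum,
 *       TP_ENUM_VALUES(
 *           ctf_enum_value("ZERO", 0)
 *           ctf_enum_value("ONE", 1)
 *           ctf_enum_range("A_RANGE", 100, 200)
 *           ctf_enum_auto("NEXT")
 *       )
 *   )
 *
 *   TRACEPOINT_EVENT(my_provider, my_event,
 *       TP_ARGS(int, state),
 *       TP_FIELDS(
 *           ctf_enum(my_provider, my_enum, int, state_field, state)
 *       )
 *   )
 */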
195 #include TRACEPOINT_INCLUDE
196
197 /*
198 * Stage 1 of tracepoint event generation.
199 *
200 * Create event field type metadata section.
201  * Each event produces an array of fields.
202 */
203
204 /* Reset all macros within TRACEPOINT_EVENT */
205 #include <lttng/ust-tracepoint-event-reset.h>
206 #include <lttng/ust-tracepoint-event-write.h>
207 #include <lttng/ust-tracepoint-event-nowrite.h>
208
209 #undef _ctf_integer_ext
210 #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _nowrite) \
211 { \
212 .name = #_item, \
213 .type = __type_integer(_type, _byte_order, _base, none),\
214 .nowrite = _nowrite, \
215 },
216
217 #undef _ctf_float
218 #define _ctf_float(_type, _item, _src, _nowrite) \
219 { \
220 .name = #_item, \
221 .type = __type_float(_type), \
222 .nowrite = _nowrite, \
223 },
224
225 #undef _ctf_array_encoded
226 #define _ctf_array_encoded(_type, _item, _src, _byte_order, \
227 _length, _encoding, _nowrite, \
228 _elem_type_base) \
229 { \
230 .name = #_item, \
231 .type = \
232 { \
233 .atype = atype_array, \
234 .u = \
235 { \
236 .array = \
237 { \
238 .elem_type = __type_integer(_type, _byte_order, _elem_type_base, _encoding), \
239 .length = _length, \
240 } \
241 } \
242 }, \
243 .nowrite = _nowrite, \
244 },
245
246 #undef _ctf_sequence_encoded
247 #define _ctf_sequence_encoded(_type, _item, _src, _byte_order, \
248 _length_type, _src_length, _encoding, _nowrite, \
249 _elem_type_base) \
250 { \
251 .name = #_item, \
252 .type = \
253 { \
254 .atype = atype_sequence, \
255 .u = \
256 { \
257 .sequence = \
258 { \
259 .length_type = __type_integer(_length_type, BYTE_ORDER, 10, none), \
260 .elem_type = __type_integer(_type, _byte_order, _elem_type_base, _encoding), \
261 }, \
262 }, \
263 }, \
264 .nowrite = _nowrite, \
265 },
266
267 #undef _ctf_string
268 #define _ctf_string(_item, _src, _nowrite) \
269 { \
270 .name = #_item, \
271 .type = \
272 { \
273 .atype = atype_string, \
274 .u = \
275 { \
276 .basic = { .string = { .encoding = lttng_encode_UTF8 } } \
277 }, \
278 }, \
279 .nowrite = _nowrite, \
280 },
281
282 #undef _ctf_enum
283 #define _ctf_enum(_provider, _name, _type, _item, _src, _nowrite) \
284 { \
285 .name = #_item, \
286 .type = { \
287 .atype = atype_enum, \
288 .u = { \
289 .basic = { \
290 .enumeration = { \
291 .desc = &__enum_##_provider##_##_name, \
292 .container_type = { \
293 .size = sizeof(_type) * CHAR_BIT, \
294 .alignment = lttng_alignof(_type) * CHAR_BIT, \
295 .signedness = lttng_is_signed_type(_type), \
296 .reverse_byte_order = 0, \
297 .base = 10, \
298 .encoding = lttng_encode_none, \
299 }, \
300 }, \
301 }, \
302 }, \
303 }, \
304 .nowrite = _nowrite, \
305 },
306
307 #undef TP_FIELDS
308 #define TP_FIELDS(...) __VA_ARGS__ /* Only one used in this phase */
309
310 #undef TRACEPOINT_EVENT_CLASS
311 #define TRACEPOINT_EVENT_CLASS(_provider, _name, _args, _fields) \
312 static const struct lttng_event_field __event_fields___##_provider##___##_name[] = { \
313 _fields \
314 ctf_integer(int, dummy, 0) /* Dummy, C99 forbids 0-len array. */ \
315 };
316
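/*
 * A rough sketch of what this stage expands to for a hypothetical
 * my_provider:my_event with one integer field (names are illustrative,
 * and the type initializers are abbreviated):
 *
 *   static const struct lttng_event_field
 *   __event_fields___my_provider___my_event[] = {
 *       {
 *           .name = "value_field",
 *           .type = __type_integer(int, BYTE_ORDER, 10, none),
 *           .nowrite = 0,
 *       },
 *       { .name = "dummy", ... },   <- trailing dummy entry (C99 forbids
 *                                      0-length arrays)
 *   };
 */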
317 #undef TRACEPOINT_ENUM
318 #define TRACEPOINT_ENUM(_provider, _name, _values) \
319 static const struct lttng_enum_desc __enum_##_provider##_##_name = { \
320 .name = #_provider "_" #_name, \
321 .entries = __enum_values__##_provider##_##_name, \
322 .nr_entries = _TP_ARRAY_SIZE(__enum_values__##_provider##_##_name) - 1, \
323 };
324
325 #include TRACEPOINT_INCLUDE
326
327 /*
328 * Stage 2 of tracepoint event generation.
329 *
330 * Create probe callback prototypes.
331 */
332
333 /* Reset all macros within TRACEPOINT_EVENT */
334 #include <lttng/ust-tracepoint-event-reset.h>
335
336 #undef TP_ARGS
337 #define TP_ARGS(...) __VA_ARGS__
338
339 #undef TRACEPOINT_EVENT_CLASS
340 #define TRACEPOINT_EVENT_CLASS(_provider, _name, _args, _fields) \
341 static void __event_probe__##_provider##___##_name(_TP_ARGS_DATA_PROTO(_args));
342
343 #include TRACEPOINT_INCLUDE
344
345 /*
346 * Stage 3.0 of tracepoint event generation.
347 *
348  * Create a static inline function that calculates the event size.
349 */
350
351 /* Reset all macros within TRACEPOINT_EVENT */
352 #include <lttng/ust-tracepoint-event-reset.h>
353 #include <lttng/ust-tracepoint-event-write.h>
354
355 #undef _ctf_integer_ext
356 #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _nowrite) \
357 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
358 __event_len += sizeof(_type);
359
360 #undef _ctf_float
361 #define _ctf_float(_type, _item, _src, _nowrite) \
362 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
363 __event_len += sizeof(_type);
364
365 #undef _ctf_array_encoded
366 #define _ctf_array_encoded(_type, _item, _src, _byte_order, _length, _encoding, \
367 _nowrite, _elem_type_base) \
368 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
369 __event_len += sizeof(_type) * (_length);
370
371 #undef _ctf_sequence_encoded
372 #define _ctf_sequence_encoded(_type, _item, _src, _byte_order, _length_type, \
373 _src_length, _encoding, _nowrite, _elem_type_base) \
374 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_length_type)); \
375 __event_len += sizeof(_length_type); \
376 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
377 __dynamic_len[__dynamic_len_idx] = (_src_length); \
378 __event_len += sizeof(_type) * __dynamic_len[__dynamic_len_idx]; \
379 __dynamic_len_idx++;
380
381 #undef _ctf_string
382 #define _ctf_string(_item, _src, _nowrite) \
383 __event_len += __dynamic_len[__dynamic_len_idx++] = \
384 strlen((_src) ? (_src) : __LTTNG_UST_NULL_STRING) + 1;
385
386 #undef _ctf_enum
387 #define _ctf_enum(_provider, _name, _type, _item, _src, _nowrite) \
388 _ctf_integer_ext(_type, _item, _src, BYTE_ORDER, 10, _nowrite)
389
390 #undef TP_ARGS
391 #define TP_ARGS(...) __VA_ARGS__
392
393 #undef TP_FIELDS
394 #define TP_FIELDS(...) __VA_ARGS__
395
396 #undef TRACEPOINT_EVENT_CLASS
397 #define TRACEPOINT_EVENT_CLASS(_provider, _name, _args, _fields) \
398 static inline lttng_ust_notrace \
399 size_t __event_get_size__##_provider##___##_name(size_t *__dynamic_len, _TP_ARGS_DATA_PROTO(_args)); \
400 static inline \
401 size_t __event_get_size__##_provider##___##_name(size_t *__dynamic_len, _TP_ARGS_DATA_PROTO(_args)) \
402 { \
403 size_t __event_len = 0; \
404 unsigned int __dynamic_len_idx = 0; \
405 \
406 if (0) \
407 (void) __dynamic_len_idx; /* don't warn if unused */ \
408 _fields \
409 return __event_len; \
410 }
411
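/*
 * A rough sketch of the function this stage generates for a hypothetical
 * my_provider:my_event with one integer field and one string field
 * (argument and field names are illustrative; the real parameter list is
 * produced by _TP_ARGS_DATA_PROTO()):
 *
 *   static inline
 *   size_t __event_get_size__my_provider___my_event(size_t *__dynamic_len,
 *           void *__tp_data, int value, const char *msg)
 *   {
 *       size_t __event_len = 0;
 *       unsigned int __dynamic_len_idx = 0;
 *
 *       __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(int));
 *       __event_len += sizeof(int);
 *       __event_len += __dynamic_len[__dynamic_len_idx++] =
 *               strlen(msg ? msg : __LTTNG_UST_NULL_STRING) + 1;
 *       return __event_len;
 *   }
 */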
412 #include TRACEPOINT_INCLUDE
413
414 /*
415 * Stage 3.1 of tracepoint event generation.
416 *
417  * Create a static inline function that lays out the filter stack data.
418 * We make both write and nowrite data available to the filter.
419 */
420
421 /* Reset all macros within TRACEPOINT_EVENT */
422 #include <lttng/ust-tracepoint-event-reset.h>
423 #include <lttng/ust-tracepoint-event-write.h>
424 #include <lttng/ust-tracepoint-event-nowrite.h>
425
426 #undef _ctf_integer_ext
427 #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _nowrite) \
428 if (lttng_is_signed_type(_type)) { \
429 int64_t __ctf_tmp_int64; \
430 switch (sizeof(_type)) { \
431 case 1: \
432 { \
433 union { _type t; int8_t v; } __tmp = { (_type) (_src) }; \
434 __ctf_tmp_int64 = (int64_t) __tmp.v; \
435 break; \
436 } \
437 case 2: \
438 { \
439 union { _type t; int16_t v; } __tmp = { (_type) (_src) }; \
440 if (_byte_order != BYTE_ORDER) \
441 __tmp.v = bswap_16(__tmp.v); \
442 __ctf_tmp_int64 = (int64_t) __tmp.v; \
443 break; \
444 } \
445 case 4: \
446 { \
447 union { _type t; int32_t v; } __tmp = { (_type) (_src) }; \
448 if (_byte_order != BYTE_ORDER) \
449 __tmp.v = bswap_32(__tmp.v); \
450 __ctf_tmp_int64 = (int64_t) __tmp.v; \
451 break; \
452 } \
453 case 8: \
454 { \
455 union { _type t; int64_t v; } __tmp = { (_type) (_src) }; \
456 if (_byte_order != BYTE_ORDER) \
457 __tmp.v = bswap_64(__tmp.v); \
458 __ctf_tmp_int64 = (int64_t) __tmp.v; \
459 break; \
460 } \
461 default: \
462 abort(); \
463 }; \
464 memcpy(__stack_data, &__ctf_tmp_int64, sizeof(int64_t)); \
465 } else { \
466 uint64_t __ctf_tmp_uint64; \
467 switch (sizeof(_type)) { \
468 case 1: \
469 { \
470 union { _type t; uint8_t v; } __tmp = { (_type) (_src) }; \
471 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
472 break; \
473 } \
474 case 2: \
475 { \
476 union { _type t; uint16_t v; } __tmp = { (_type) (_src) }; \
477 if (_byte_order != BYTE_ORDER) \
478 __tmp.v = bswap_16(__tmp.v); \
479 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
480 break; \
481 } \
482 case 4: \
483 { \
484 union { _type t; uint32_t v; } __tmp = { (_type) (_src) }; \
485 if (_byte_order != BYTE_ORDER) \
486 __tmp.v = bswap_32(__tmp.v); \
487 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
488 break; \
489 } \
490 case 8: \
491 { \
492 union { _type t; uint64_t v; } __tmp = { (_type) (_src) }; \
493 if (_byte_order != BYTE_ORDER) \
494 __tmp.v = bswap_64(__tmp.v); \
495 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
496 break; \
497 } \
498 default: \
499 abort(); \
500 }; \
501 memcpy(__stack_data, &__ctf_tmp_uint64, sizeof(uint64_t)); \
502 } \
503 __stack_data += sizeof(int64_t);
504
505 #undef _ctf_float
506 #define _ctf_float(_type, _item, _src, _nowrite) \
507 { \
508 double __ctf_tmp_double = (double) (_type) (_src); \
509 memcpy(__stack_data, &__ctf_tmp_double, sizeof(double)); \
510 __stack_data += sizeof(double); \
511 }
512
513 #undef _ctf_array_encoded
514 #define _ctf_array_encoded(_type, _item, _src, _byte_order, _length, \
515 _encoding, _nowrite, _elem_type_base) \
516 { \
517 unsigned long __ctf_tmp_ulong = (unsigned long) (_length); \
518 const void *__ctf_tmp_ptr = (_src); \
519 memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \
520 __stack_data += sizeof(unsigned long); \
521 memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
522 __stack_data += sizeof(void *); \
523 }
524
525 #undef _ctf_sequence_encoded
526 #define _ctf_sequence_encoded(_type, _item, _src, _byte_order, _length_type, \
527 _src_length, _encoding, _nowrite, _elem_type_base) \
528 { \
529 unsigned long __ctf_tmp_ulong = (unsigned long) (_src_length); \
530 const void *__ctf_tmp_ptr = (_src); \
531 memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \
532 __stack_data += sizeof(unsigned long); \
533 memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
534 __stack_data += sizeof(void *); \
535 }
536
537 #undef _ctf_string
538 #define _ctf_string(_item, _src, _nowrite) \
539 { \
540 const void *__ctf_tmp_ptr = \
541 ((_src) ? (_src) : __LTTNG_UST_NULL_STRING); \
542 memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
543 __stack_data += sizeof(void *); \
544 }
545
546 #undef _ctf_enum
547 #define _ctf_enum(_provider, _name, _type, _item, _src, _nowrite) \
548 _ctf_integer_ext(_type, _item, _src, BYTE_ORDER, 10, _nowrite)
549
550 #undef TP_ARGS
551 #define TP_ARGS(...) __VA_ARGS__
552
553 #undef TP_FIELDS
554 #define TP_FIELDS(...) __VA_ARGS__
555
556 #undef TRACEPOINT_EVENT_CLASS
557 #define TRACEPOINT_EVENT_CLASS(_provider, _name, _args, _fields) \
558 static inline \
559 void __event_prepare_filter_stack__##_provider##___##_name(char *__stack_data,\
560 _TP_ARGS_DATA_PROTO(_args)) \
561 { \
562 _fields \
563 }
564
565 #include TRACEPOINT_INCLUDE
566
567 /*
568 * Stage 4 of tracepoint event generation.
569 *
570  * Create a static inline function that calculates the event payload alignment.
571 */
572
573 /* Reset all macros within TRACEPOINT_EVENT */
574 #include <lttng/ust-tracepoint-event-reset.h>
575 #include <lttng/ust-tracepoint-event-write.h>
576
577 #undef _ctf_integer_ext
578 #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _nowrite) \
579 __event_align = _tp_max_t(size_t, __event_align, lttng_alignof(_type));
580
581 #undef _ctf_float
582 #define _ctf_float(_type, _item, _src, _nowrite) \
583 __event_align = _tp_max_t(size_t, __event_align, lttng_alignof(_type));
584
585 #undef _ctf_array_encoded
586 #define _ctf_array_encoded(_type, _item, _src, _byte_order, _length, \
587 _encoding, _nowrite, _elem_type_base) \
588 __event_align = _tp_max_t(size_t, __event_align, lttng_alignof(_type));
589
590 #undef _ctf_sequence_encoded
591 #define _ctf_sequence_encoded(_type, _item, _src, _byte_order, _length_type, \
592 _src_length, _encoding, _nowrite, _elem_type_base) \
593 __event_align = _tp_max_t(size_t, __event_align, lttng_alignof(_length_type)); \
594 __event_align = _tp_max_t(size_t, __event_align, lttng_alignof(_type));
595
596 #undef _ctf_string
597 #define _ctf_string(_item, _src, _nowrite)
598
599 #undef _ctf_enum
600 #define _ctf_enum(_provider, _name, _type, _item, _src, _nowrite) \
601 _ctf_integer_ext(_type, _item, _src, BYTE_ORDER, 10, _nowrite)
602
603 #undef TP_ARGS
604 #define TP_ARGS(...) __VA_ARGS__
605
606 #undef TP_FIELDS
607 #define TP_FIELDS(...) __VA_ARGS__
608
609 #undef TRACEPOINT_EVENT_CLASS
610 #define TRACEPOINT_EVENT_CLASS(_provider, _name, _args, _fields) \
611 static inline lttng_ust_notrace \
612 size_t __event_get_align__##_provider##___##_name(_TP_ARGS_PROTO(_args)); \
613 static inline \
614 size_t __event_get_align__##_provider##___##_name(_TP_ARGS_PROTO(_args)) \
615 { \
616 size_t __event_align = 1; \
617 _fields \
618 return __event_align; \
619 }
620
621 #include TRACEPOINT_INCLUDE
622
623
624 /*
625 * Stage 5 of tracepoint event generation.
626 *
627 * Create the probe function. This function calls event size calculation
628 * and writes event data into the buffer.
629 */
630
631 /* Reset all macros within TRACEPOINT_EVENT */
632 #include <lttng/ust-tracepoint-event-reset.h>
633 #include <lttng/ust-tracepoint-event-write.h>
634
635 #undef _ctf_integer_ext
636 #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _nowrite) \
637 { \
638 _type __tmp = (_src); \
639 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__tmp));\
640 __chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp));\
641 }
642
643 #undef _ctf_float
644 #define _ctf_float(_type, _item, _src, _nowrite) \
645 { \
646 _type __tmp = (_src); \
647 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__tmp));\
648 __chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp));\
649 }
650
651 #undef _ctf_array_encoded
652 #define _ctf_array_encoded(_type, _item, _src, _byte_order, _length, \
653 _encoding, _nowrite, _elem_type_base) \
654 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
655 __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length));
656
657 #undef _ctf_sequence_encoded
658 #define _ctf_sequence_encoded(_type, _item, _src, _byte_order, _length_type, \
659 _src_length, _encoding, _nowrite, _elem_type_base) \
660 { \
661 _length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx]; \
662 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
663 __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
664 } \
665 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
666 __chan->ops->event_write(&__ctx, _src, \
667 sizeof(_type) * __get_dynamic_len(dest));
668
669 /*
670 * __chan->ops->u.has_strcpy is a flag letting us know if the LTTng-UST
671 * tracepoint provider ABI implements event_strcpy. This dynamic check
672 * can be removed when the tracepoint provider ABI moves to 2.
673 */
674 #if (LTTNG_UST_PROVIDER_MAJOR > 1)
675 #error "Tracepoint probe provider major version has changed. Please remove dynamic check for has_strcpy."
676 #endif
677
678 #undef _ctf_string
679 #define _ctf_string(_item, _src, _nowrite) \
680 { \
681 const char *__ctf_tmp_string = \
682 ((_src) ? (_src) : __LTTNG_UST_NULL_STRING); \
683 lib_ring_buffer_align_ctx(&__ctx, \
684 lttng_alignof(*__ctf_tmp_string)); \
685 if (__chan->ops->u.has_strcpy) \
686 __chan->ops->event_strcpy(&__ctx, __ctf_tmp_string, \
687 __get_dynamic_len(dest)); \
688 else \
689 __chan->ops->event_write(&__ctx, __ctf_tmp_string, \
690 __get_dynamic_len(dest)); \
691 }
692
693
694 #undef _ctf_enum
695 #define _ctf_enum(_provider, _name, _type, _item, _src, _nowrite) \
696 _ctf_integer_ext(_type, _item, _src, BYTE_ORDER, 10, _nowrite)
697
698 /* Beware: this getter actually consumes the dynamic length value (it increments the index). */
699 #undef __get_dynamic_len
700 #define __get_dynamic_len(field) __stackvar.__dynamic_len[__dynamic_len_idx++]
701
702 #undef TP_ARGS
703 #define TP_ARGS(...) __VA_ARGS__
704
705 #undef TP_FIELDS
706 #define TP_FIELDS(...) __VA_ARGS__
707
708 /*
709  * For state dump, check that the "session" argument (mandatory) matches the
710  * session this event belongs to. This ensures that we write state dump data only
711 * into the started session, not into all sessions.
712 */
713 #undef _TP_SESSION_CHECK
714 #ifdef TP_SESSION_CHECK
715 #define _TP_SESSION_CHECK(session, csession) (session == csession)
716 #else /* TP_SESSION_CHECK */
717 #define _TP_SESSION_CHECK(session, csession) 1
718 #endif /* TP_SESSION_CHECK */
719
720 /*
721 * Use of __builtin_return_address(0) sometimes seems to cause stack
722 * corruption on 32-bit PowerPC. Disable this feature on that
723 * architecture for now by always using the NULL value for the ip
724 * context.
725 */
726 #undef _TP_IP_PARAM
727 #ifdef TP_IP_PARAM
728 #define _TP_IP_PARAM(x) (x)
729 #else /* TP_IP_PARAM */
730
731 #if defined(__PPC__) && !defined(__PPC64__)
732 #define _TP_IP_PARAM(x) NULL
733 #else /* #if defined(__PPC__) && !defined(__PPC64__) */
734 #define _TP_IP_PARAM(x) __builtin_return_address(0)
735 #endif /* #else #if defined(__PPC__) && !defined(__PPC64__) */
736
737 #endif /* TP_IP_PARAM */
738
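/*
 * A hedged sketch of how a probe provider would opt into these checks:
 * both macros are tested with #ifdef above, so a provider that needs them
 * defines them before this header is pulled in (typically before the
 * TRACEPOINT_CREATE_PROBES include chain). TP_IP_PARAM must name an
 * expression visible in the probe, usually one of the TP_ARGS()
 * arguments; with TP_SESSION_CHECK the event must receive a "session"
 * argument so the generated probe can compare it. The provider header
 * name below is hypothetical.
 *
 *   #define TP_SESSION_CHECK
 *   #define TP_IP_PARAM ip
 *   #define TRACEPOINT_CREATE_PROBES
 *   #include "my_provider.h"
 */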
739 /*
740  * Use twice the size for the filter stack data to hold a size and a pointer
741  * for each field (worst case). For integers, the maximum size required is
742  * 64-bit. Same for double-precision floats. Those fit within
743  * 2*sizeof(unsigned long) for all supported architectures.
744  * The record decision is the union (||) of all filter runtimes in the list.
745 */
746 #undef TRACEPOINT_EVENT_CLASS
747 #define TRACEPOINT_EVENT_CLASS(_provider, _name, _args, _fields) \
748 static lttng_ust_notrace \
749 void __event_probe__##_provider##___##_name(_TP_ARGS_DATA_PROTO(_args)); \
750 static \
751 void __event_probe__##_provider##___##_name(_TP_ARGS_DATA_PROTO(_args)) \
752 { \
753 struct lttng_event *__event = (struct lttng_event *) __tp_data; \
754 struct lttng_channel *__chan = __event->chan; \
755 struct lttng_ust_lib_ring_buffer_ctx __ctx; \
756 struct lttng_stack_ctx __lttng_ctx; \
757 size_t __event_len, __event_align; \
758 size_t __dynamic_len_idx = 0; \
759 union { \
760 size_t __dynamic_len[_TP_ARRAY_SIZE(__event_fields___##_provider##___##_name) - 1]; \
761 char __filter_stack_data[2 * sizeof(unsigned long) * (_TP_ARRAY_SIZE(__event_fields___##_provider##___##_name) - 1)]; \
762 } __stackvar; \
763 int __ret; \
764 \
765 if (0) \
766 (void) __dynamic_len_idx; /* don't warn if unused */ \
767 if (!_TP_SESSION_CHECK(session, __chan->session)) \
768 return; \
769 if (caa_unlikely(!CMM_ACCESS_ONCE(__chan->session->active))) \
770 return; \
771 if (caa_unlikely(!CMM_ACCESS_ONCE(__chan->enabled))) \
772 return; \
773 if (caa_unlikely(!CMM_ACCESS_ONCE(__event->enabled))) \
774 return; \
775 if (caa_unlikely(!TP_RCU_LINK_TEST())) \
776 return; \
777 if (caa_unlikely(!cds_list_empty(&__event->bytecode_runtime_head))) { \
778 struct lttng_bytecode_runtime *bc_runtime; \
779 int __filter_record = __event->has_enablers_without_bytecode; \
780 \
781 __event_prepare_filter_stack__##_provider##___##_name(__stackvar.__filter_stack_data, \
782 _TP_ARGS_DATA_VAR(_args)); \
783 tp_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \
784 if (caa_unlikely(bc_runtime->filter(bc_runtime, \
785 __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \
786 __filter_record = 1; \
787 } \
788 if (caa_likely(!__filter_record)) \
789 return; \
790 } \
791 __event_len = __event_get_size__##_provider##___##_name(__stackvar.__dynamic_len, \
792 _TP_ARGS_DATA_VAR(_args)); \
793 __event_align = __event_get_align__##_provider##___##_name(_TP_ARGS_VAR(_args)); \
794 memset(&__lttng_ctx, 0, sizeof(__lttng_ctx)); \
795 __lttng_ctx.event = __event; \
796 __lttng_ctx.chan_ctx = tp_rcu_dereference_bp(__chan->ctx); \
797 __lttng_ctx.event_ctx = tp_rcu_dereference_bp(__event->ctx); \
798 lib_ring_buffer_ctx_init(&__ctx, __chan->chan, __event, __event_len, \
799 __event_align, -1, __chan->handle, &__lttng_ctx); \
800 __ctx.ip = _TP_IP_PARAM(TP_IP_PARAM); \
801 __ret = __chan->ops->event_reserve(&__ctx, __event->id); \
802 if (__ret < 0) \
803 return; \
804 _fields \
805 __chan->ops->event_commit(&__ctx); \
806 }
807
808 #include TRACEPOINT_INCLUDE
809
810 #undef __get_dynamic_len
811
812 /*
813 * Stage 5.1 of tracepoint event generation.
814 *
815 * Create probe signature
816 */
817
818 /* Reset all macros within TRACEPOINT_EVENT */
819 #include <lttng/ust-tracepoint-event-reset.h>
820
821 #undef TP_ARGS
822 #define TP_ARGS(...) __VA_ARGS__
823
824 #define _TP_EXTRACT_STRING2(...) #__VA_ARGS__
825
826 #undef TRACEPOINT_EVENT_CLASS
827 #define TRACEPOINT_EVENT_CLASS(_provider, _name, _args, _fields) \
828 static const char __tp_event_signature___##_provider##___##_name[] = \
829 _TP_EXTRACT_STRING2(_args);
830
831 #include TRACEPOINT_INCLUDE
832
833 #undef _TP_EXTRACT_STRING2
834
835 /*
836 * Stage 6 of tracepoint event generation.
837 *
838 * Tracepoint loglevel mapping definition generation. We generate a
839  * symbol for each provider/event mapping to ensure at most a 1-to-1
840  * mapping between events and loglevels. If the symbol is repeated,
841 * the compiler will complain.
842 */
843
844 /* Reset all macros within TRACEPOINT_EVENT */
845 #include <lttng/ust-tracepoint-event-reset.h>
846
847 /*
848 * Declare _loglevel___##__provider##___##__name as non-static, with
849  * hidden visibility for C++ handling of weakref. We do a weakref to the
850 * symbol in a later stage, which requires that the symbol is not
851 * mangled.
852 */
853 #ifdef __cplusplus
854 #define LTTNG_TP_EXTERN_C extern "C"
855 #else
856 #define LTTNG_TP_EXTERN_C
857 #endif
858
859 #undef TRACEPOINT_LOGLEVEL
860 #define TRACEPOINT_LOGLEVEL(__provider, __name, __loglevel) \
861 static const int _loglevel_value___##__provider##___##__name = __loglevel; \
862 LTTNG_TP_EXTERN_C const int *_loglevel___##__provider##___##__name \
863 __attribute__((visibility("hidden"))) = \
864 &_loglevel_value___##__provider##___##__name;
865
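/*
 * A minimal usage sketch, assuming a hypothetical provider and event;
 * TRACE_INFO is one of the standard LTTng-UST loglevel constants:
 *
 *   TRACEPOINT_LOGLEVEL(my_provider, my_event, TRACE_INFO)
 */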
866 #include TRACEPOINT_INCLUDE
867
868 #undef LTTNG_TP_EXTERN_C
869
870 /*
871 * Stage 6.1 of tracepoint event generation.
872 *
873  * Tracepoint model EMF URI info.
874 */
875
876 /* Reset all macros within TRACEPOINT_EVENT */
877 #include <lttng/ust-tracepoint-event-reset.h>
878
879 /*
880 * Declare _model_emf_uri___##__provider##___##__name as non-static,
881  * with hidden visibility for C++ handling of weakref. We do a weakref
882 * to the symbol in a later stage, which requires that the symbol is not
883 * mangled.
884 */
885 #ifdef __cplusplus
886 #define LTTNG_TP_EXTERN_C extern "C"
887 #else
888 #define LTTNG_TP_EXTERN_C
889 #endif
890
891 #undef TRACEPOINT_MODEL_EMF_URI
892 #define TRACEPOINT_MODEL_EMF_URI(__provider, __name, __uri) \
893 LTTNG_TP_EXTERN_C const char *_model_emf_uri___##__provider##___##__name \
894 __attribute__((visibility("hidden"))) = __uri; \
895
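/*
 * A minimal usage sketch, assuming a hypothetical provider, event and URI:
 *
 *   TRACEPOINT_MODEL_EMF_URI(my_provider, my_event,
 *       "http://example.com/path_to_model?q=my_event")
 */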
896 #include TRACEPOINT_INCLUDE
897
898 #undef LTTNG_TP_EXTERN_C
899
900 /*
901 * Stage 7.1 of tracepoint event generation.
902 *
903  * Create event description structures. We use a weakref because
904  * loglevels are optional. If not declared, the event will point to
905  * a loglevel that contains NULL.
906 */
907
908 /* Reset all macros within TRACEPOINT_EVENT */
909 #include <lttng/ust-tracepoint-event-reset.h>
910
911 #undef TRACEPOINT_EVENT_INSTANCE
912 #define TRACEPOINT_EVENT_INSTANCE(_provider, _template, _name, _args) \
913 static const int * \
914 __ref_loglevel___##_provider##___##_name \
915 __attribute__((weakref ("_loglevel___" #_provider "___" #_name))); \
916 static const char * \
917 __ref_model_emf_uri___##_provider##___##_name \
918 __attribute__((weakref ("_model_emf_uri___" #_provider "___" #_name)));\
919 static const struct lttng_event_desc __event_desc___##_provider##_##_name = { \
920 .name = #_provider ":" #_name, \
921 .probe_callback = (void (*)(void)) &__event_probe__##_provider##___##_template,\
922 .ctx = NULL, \
923 .fields = __event_fields___##_provider##___##_template, \
924 .nr_fields = _TP_ARRAY_SIZE(__event_fields___##_provider##___##_template) - 1, \
925 .loglevel = &__ref_loglevel___##_provider##___##_name, \
926 .signature = __tp_event_signature___##_provider##___##_template, \
927 .u = { \
928 .ext = { \
929 .model_emf_uri = &__ref_model_emf_uri___##_provider##___##_name, \
930 }, \
931 }, \
932 };
933
934 #include TRACEPOINT_INCLUDE
935
936 /*
937 * Stage 7.2 of tracepoint event generation.
938 *
939 * Create array of events.
940 */
941
942 /* Reset all macros within TRACEPOINT_EVENT */
943 #include <lttng/ust-tracepoint-event-reset.h>
944
945 #undef TRACEPOINT_EVENT_INSTANCE
946 #define TRACEPOINT_EVENT_INSTANCE(_provider, _template, _name, _args) \
947 &__event_desc___##_provider##_##_name,
948
949 static const struct lttng_event_desc *_TP_COMBINE_TOKENS(__event_desc___, TRACEPOINT_PROVIDER)[] = {
950 #include TRACEPOINT_INCLUDE
951 NULL, /* Dummy, C99 forbids 0-len array. */
952 };
953
954
955 /*
956 * Stage 8 of tracepoint event generation.
957 *
958 * Create a toplevel descriptor for the whole probe.
959 */
960
961 /* non-const because list head will be modified when registered. */
962 static struct lttng_probe_desc _TP_COMBINE_TOKENS(__probe_desc___, TRACEPOINT_PROVIDER) = {
963 .provider = __tp_stringify(TRACEPOINT_PROVIDER),
964 .event_desc = _TP_COMBINE_TOKENS(__event_desc___, TRACEPOINT_PROVIDER),
965 .nr_events = _TP_ARRAY_SIZE(_TP_COMBINE_TOKENS(__event_desc___, TRACEPOINT_PROVIDER)) - 1,
966 .head = { NULL, NULL },
967 .lazy_init_head = { NULL, NULL },
968 .lazy = 0,
969 .major = LTTNG_UST_PROVIDER_MAJOR,
970 .minor = LTTNG_UST_PROVIDER_MINOR,
971 };
972
973 static int _TP_COMBINE_TOKENS(__probe_register_refcount___, TRACEPOINT_PROVIDER);
974
975 /*
976 * Stage 9 of tracepoint event generation.
977 *
978 * Register/unregister probes at module load/unload.
979 *
980 * Generate the constructor as an externally visible symbol for use when
981 * linking the probe statically.
982 *
983 * Register refcount is protected by libc dynamic loader mutex.
984 */
985
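/*
 * A hedged sketch of how these constructors are typically instantiated:
 * exactly one compilation unit of the application or library defines
 * TRACEPOINT_CREATE_PROBES and TRACEPOINT_DEFINE before including the
 * provider header, which in turn pulls in this file. The file name
 * "my_provider.h" is hypothetical.
 *
 *   #define TRACEPOINT_CREATE_PROBES
 *   #define TRACEPOINT_DEFINE
 *   #include "my_provider.h"
 */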
986 /* Reset all macros within TRACEPOINT_EVENT */
987 #include <lttng/ust-tracepoint-event-reset.h>
988 static void lttng_ust_notrace __attribute__((constructor))
989 _TP_COMBINE_TOKENS(__lttng_events_init__, TRACEPOINT_PROVIDER)(void);
990 static void
991 _TP_COMBINE_TOKENS(__lttng_events_init__, TRACEPOINT_PROVIDER)(void)
992 {
993 int ret;
994
995 if (_TP_COMBINE_TOKENS(__probe_register_refcount___,
996 TRACEPOINT_PROVIDER)++) {
997 return;
998 }
999 /*
1000 * __tracepoint_provider_check_ ## TRACEPOINT_PROVIDER() is a
1001  * static inline function that ensures every probe PROVIDER
1002  * argument matches the provider within which it appears. It
1003 * calls empty static inline functions, and therefore has no
1004 * runtime effect. However, if it detects an error, a linker
1005 * error will appear.
1006 */
1007 _TP_COMBINE_TOKENS(__tracepoint_provider_check_, TRACEPOINT_PROVIDER)();
1008 ret = lttng_probe_register(&_TP_COMBINE_TOKENS(__probe_desc___, TRACEPOINT_PROVIDER));
1009 if (ret) {
1010 fprintf(stderr, "LTTng-UST: Error (%d) while registering tracepoint probe. Duplicate registration of tracepoint probes having the same name is not allowed.\n", ret);
1011 abort();
1012 }
1013 }
1014
1015 static void lttng_ust_notrace __attribute__((destructor))
1016 _TP_COMBINE_TOKENS(__lttng_events_exit__, TRACEPOINT_PROVIDER)(void);
1017 static void
1018 _TP_COMBINE_TOKENS(__lttng_events_exit__, TRACEPOINT_PROVIDER)(void)
1019 {
1020 if (--_TP_COMBINE_TOKENS(__probe_register_refcount___,
1021 TRACEPOINT_PROVIDER)) {
1022 return;
1023 }
1024 lttng_probe_unregister(&_TP_COMBINE_TOKENS(__probe_desc___, TRACEPOINT_PROVIDER));
1025 }
1026
1027 int _TP_COMBINE_TOKENS(__tracepoint_provider_, TRACEPOINT_PROVIDER);