1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng/tracepoint-event-impl.h
4 *
5 * Copyright (C) 2009 Steven Rostedt <rostedt@goodmis.org>
6 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 */
8
9 #include <linux/uaccess.h>
10 #include <linux/debugfs.h>
11 #include <linux/rculist.h>
12 #include <asm/byteorder.h>
13 #include <linux/swab.h>
14
15 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
16 #include <ringbuffer/frontend_types.h>
17 #include <ringbuffer/backend.h>
18 #include <wrapper/rcu.h>
19 #include <wrapper/user_namespace.h>
20 #include <lttng/types.h>
21 #include <lttng/probe-user.h>
22 #include <lttng/events.h>
23 #include <lttng/tracer-core.h>
24 #include <lttng/tp-mempool.h>
25
26 #define __LTTNG_NULL_STRING "(null)"
27
28 #undef PARAMS
29 #define PARAMS(args...) args
30
31 /*
32 * Macro declarations used for all stages.
33 */
34
35 /*
36 * LTTng name mapping macros. LTTng remaps some of the kernel events to
37 * enforce namespacing.
38 */
39 #undef LTTNG_TRACEPOINT_EVENT_MAP
40 #define LTTNG_TRACEPOINT_EVENT_MAP(name, map, proto, args, fields) \
41 LTTNG_TRACEPOINT_EVENT_CLASS(map, \
42 PARAMS(proto), \
43 PARAMS(args), \
44 PARAMS(fields)) \
45 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args))
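
/*
 * Example (illustrative sketch, not part of the original header): an
 * instrumentation header can remap the kernel "kmalloc" tracepoint to
 * the namespaced LTTng event name "kmem_kmalloc". The field list below
 * is simplified for the sake of the example:
 *
 *   LTTNG_TRACEPOINT_EVENT_MAP(kmalloc, kmem_kmalloc,
 *       TP_PROTO(unsigned long call_site, const void *ptr, size_t bytes_req),
 *       TP_ARGS(call_site, ptr, bytes_req),
 *       TP_FIELDS(
 *           ctf_integer_hex(unsigned long, call_site, call_site)
 *           ctf_integer_hex(unsigned long, ptr, (unsigned long) ptr)
 *           ctf_integer(size_t, bytes_req, bytes_req)
 *       ))
 */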
46
47 #undef LTTNG_TRACEPOINT_EVENT_MAP_NOARGS
48 #define LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, map, fields) \
49 LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(map, \
50 PARAMS(fields)) \
51 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(map, name, map)
52
53 #undef LTTNG_TRACEPOINT_EVENT_CODE_MAP
54 #define LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, map, proto, args, _locvar, _code_pre, fields, _code_post) \
55 LTTNG_TRACEPOINT_EVENT_CLASS_CODE(map, \
56 PARAMS(proto), \
57 PARAMS(args), \
58 PARAMS(_locvar), \
59 PARAMS(_code_pre), \
60 PARAMS(fields), \
61 PARAMS(_code_post)) \
62 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args))
63
64 #undef LTTNG_TRACEPOINT_EVENT_CODE
65 #define LTTNG_TRACEPOINT_EVENT_CODE(name, proto, args, _locvar, _code_pre, fields, _code_post) \
66 LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, name, \
67 PARAMS(proto), \
68 PARAMS(args), \
69 PARAMS(_locvar), \
70 PARAMS(_code_pre), \
71 PARAMS(fields), \
72 PARAMS(_code_post))
73
74 /*
75 * LTTNG_TRACEPOINT_EVENT_CLASS can be used to add a generic function
76 * handler for events that share the same parameters and differ only in
77 * the tracepoint name. Each such tracepoint is then defined with
78 * LTTNG_TRACEPOINT_EVENT_INSTANCE, which maps the
79 * LTTNG_TRACEPOINT_EVENT_CLASS to the tracepoint.
80 *
81 * LTTNG_TRACEPOINT_EVENT is a one-to-one mapping between a tracepoint
82 * and a template.
83 */
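
/*
 * Example (illustrative sketch with hypothetical names): two events
 * sharing one class. Both instances reuse the probe and field layout
 * generated once for the "my_event_class" template:
 *
 *   LTTNG_TRACEPOINT_EVENT_CLASS(my_event_class,
 *       TP_PROTO(int id),
 *       TP_ARGS(id),
 *       TP_FIELDS(ctf_integer(int, id, id)))
 *
 *   LTTNG_TRACEPOINT_EVENT_INSTANCE(my_event_class, my_event_begin,
 *       TP_PROTO(int id), TP_ARGS(id))
 *   LTTNG_TRACEPOINT_EVENT_INSTANCE(my_event_class, my_event_end,
 *       TP_PROTO(int id), TP_ARGS(id))
 */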
84
85 #undef LTTNG_TRACEPOINT_EVENT
86 #define LTTNG_TRACEPOINT_EVENT(name, proto, args, fields) \
87 LTTNG_TRACEPOINT_EVENT_MAP(name, name, \
88 PARAMS(proto), \
89 PARAMS(args), \
90 PARAMS(fields))
91
92 #undef LTTNG_TRACEPOINT_EVENT_NOARGS
93 #define LTTNG_TRACEPOINT_EVENT_NOARGS(name, fields) \
94 LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, name, PARAMS(fields))
95
96 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE
97 #define LTTNG_TRACEPOINT_EVENT_INSTANCE(template, name, proto, args) \
98 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(template, name, name, PARAMS(proto), PARAMS(args))
99
100 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS
101 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(template, name) \
102 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(template, name, name)
103
104 #undef LTTNG_TRACEPOINT_EVENT_CLASS
105 #define LTTNG_TRACEPOINT_EVENT_CLASS(_name, _proto, _args, _fields) \
106 LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, PARAMS(_proto), PARAMS(_args), , , \
107 PARAMS(_fields), )
108
109 #undef LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS
110 #define LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \
111 LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, , , PARAMS(_fields), )
112
113
114 /*
115 * Stage 1 of the trace events.
116 *
117 * Create a dummy trace call for each event, verifying that the LTTng
118 * module instrumentation headers match the kernel arguments. These
119 * calls are optimized out by the compiler.
120 */
121
122 /* Reset all macros within TRACEPOINT_EVENT */
123 #include <lttng/events-reset.h>
124
125 #undef TP_PROTO
126 #define TP_PROTO(...) __VA_ARGS__
127
128 #undef TP_ARGS
129 #define TP_ARGS(...) __VA_ARGS__
130
131 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
132 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
133 void trace_##_name(_proto);
134
135 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
136 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
137 void trace_##_name(void);
138
139 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
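
/*
 * For the hypothetical "my_event_begin" instance sketched above, stage 1
 * emits:
 *
 *   void trace_my_event_begin(int id);
 *
 * A prototype that diverges from the kernel's own trace_my_event_begin()
 * declaration produces a conflicting redeclaration and fails the build.
 */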
140
141 /*
142 * Stage 1.1 of the trace events.
143 *
144 * Create dummy trace prototypes for each event class and for each
145 * template used. This allows checking that the prototypes of a class
146 * and of the instances using that class actually match.
147 */
148
149 #include <lttng/events-reset.h> /* Reset all macros within TRACE_EVENT */
150
151 #undef TP_PROTO
152 #define TP_PROTO(...) __VA_ARGS__
153
154 #undef TP_ARGS
155 #define TP_ARGS(...) __VA_ARGS__
156
157 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
158 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
159 void __event_template_proto___##_template(_proto);
160
161 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
162 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
163 void __event_template_proto___##_template(void);
164
165 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
166 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
167 void __event_template_proto___##_name(_proto);
168
169 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
170 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
171 void __event_template_proto___##_name(void);
172
173 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
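
/*
 * Continuing the sketch: the class and each of its instances declare
 *
 *   void __event_template_proto___my_event_class(int id);
 *
 * so an instance whose TP_PROTO diverges from its class produces a
 * conflicting redeclaration and fails the build.
 */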
174
175 /*
176 * Stage 1.2 of tracepoint event generation
177 *
178 * Unfolding the enums
179 */
180 #include <lttng/events-reset.h> /* Reset all macros within TRACE_EVENT */
181
182 /* Enumeration entry (single value) */
183 #undef ctf_enum_value
184 #define ctf_enum_value(_string, _value) \
185 { \
186 .start = { \
187 .signedness = lttng_is_signed_type(__typeof__(_value)), \
188 .value = lttng_is_signed_type(__typeof__(_value)) ? \
189 (long long) (_value) : (_value), \
190 }, \
191 .end = { \
192 .signedness = lttng_is_signed_type(__typeof__(_value)), \
193 .value = lttng_is_signed_type(__typeof__(_value)) ? \
194 (long long) (_value) : (_value), \
195 }, \
196 .string = (_string), \
197 },
198
199 /* Enumeration entry (range) */
200 #undef ctf_enum_range
201 #define ctf_enum_range(_string, _range_start, _range_end) \
202 { \
203 .start = { \
204 .signedness = lttng_is_signed_type(__typeof__(_range_start)), \
205 .value = lttng_is_signed_type(__typeof__(_range_start)) ? \
206 (long long) (_range_start) : (_range_start), \
207 }, \
208 .end = { \
209 .signedness = lttng_is_signed_type(__typeof__(_range_end)), \
210 .value = lttng_is_signed_type(__typeof__(_range_end)) ? \
211 (long long) (_range_end) : (_range_end), \
212 }, \
213 .string = (_string), \
214 },
215
216 /* Enumeration entry (automatic value; follows the rules of CTF) */
217 #undef ctf_enum_auto
218 #define ctf_enum_auto(_string) \
219 { \
220 .start = { \
221 .signedness = -1, \
222 .value = -1, \
223 }, \
224 .end = { \
225 .signedness = -1, \
226 .value = -1, \
227 }, \
228 .string = (_string), \
229 .options = { \
230 .is_auto = 1, \
231 } \
232 },
233
234 #undef TP_ENUM_VALUES
235 #define TP_ENUM_VALUES(...) \
236 __VA_ARGS__
237
238 #undef LTTNG_TRACEPOINT_ENUM
239 #define LTTNG_TRACEPOINT_ENUM(_name, _values) \
240 const struct lttng_enum_entry __enum_values__##_name[] = { \
241 _values \
242 };
243
244 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
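
/*
 * Example (illustrative sketch with hypothetical names):
 *
 *   LTTNG_TRACEPOINT_ENUM(my_colors,
 *       TP_ENUM_VALUES(
 *           ctf_enum_value("RED", 0)
 *           ctf_enum_range("WARM", 1, 5)
 *           ctf_enum_auto("NEXT")
 *       ))
 *
 * expands to a "const struct lttng_enum_entry __enum_values__my_colors[]"
 * array holding one { .start, .end, .string } entry per ctf_enum_* item.
 */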
245
246 /*
247 * Stage 2 of the trace events.
248 *
249 * Create the event field type metadata section.
250 * Each event produces an array of fields.
251 */
252
253 /* Reset all macros within TRACEPOINT_EVENT */
254 #include <lttng/events-reset.h>
255 #include <lttng/events-write.h>
256 #include <lttng/events-nowrite.h>
257
258 #undef _ctf_integer_ext
259 #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
260 { \
261 .name = #_item, \
262 .type = __type_integer(_type, 0, 0, -1, _byte_order, _base, none), \
263 .nowrite = _nowrite, \
264 .user = _user, \
265 .nofilter = 0, \
266 },
267
268 #undef _ctf_array_encoded
269 #define _ctf_array_encoded(_type, _item, _src, _length, \
270 _encoding, _byte_order, _elem_type_base, _user, _nowrite) \
271 { \
272 .name = #_item, \
273 .type = \
274 { \
275 .atype = atype_array_nestable, \
276 .u = \
277 { \
278 .array_nestable = \
279 { \
280 .elem_type = __LTTNG_COMPOUND_LITERAL(struct lttng_type, \
281 __type_integer(_type, 0, 0, -1, _byte_order, _elem_type_base, _encoding)), \
282 .length = _length, \
283 .alignment = 0, \
284 } \
285 } \
286 }, \
287 .nowrite = _nowrite, \
288 .user = _user, \
289 .nofilter = 0, \
290 },
291
292 #undef _ctf_array_bitfield
293 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
294 { \
295 .name = #_item, \
296 .type = \
297 { \
298 .atype = atype_array_nestable, \
299 .u = \
300 { \
301 .array_nestable = \
302 { \
303 .elem_type = __LTTNG_COMPOUND_LITERAL(struct lttng_type, \
304 __type_integer(_type, 1, 1, 0, __LITTLE_ENDIAN, 10, none)), \
305 .length = (_length) * sizeof(_type) * CHAR_BIT, \
306 .alignment = lttng_alignof(_type), \
307 } \
308 } \
309 }, \
310 .nowrite = _nowrite, \
311 .user = _user, \
312 .nofilter = 0, \
313 },
314
315
316 #undef _ctf_sequence_encoded
317 #define _ctf_sequence_encoded(_type, _item, _src, \
318 _length_type, _src_length, _encoding, \
319 _byte_order, _elem_type_base, _user, _nowrite) \
320 { \
321 .name = "_" #_item "_length", \
322 .type = __type_integer(_length_type, 0, 0, -1, __BYTE_ORDER, 10, none), \
323 .nowrite = _nowrite, \
324 .nofilter = 1, \
325 }, \
326 { \
327 .name = #_item, \
328 .type = \
329 { \
330 .atype = atype_sequence_nestable, \
331 .u = \
332 { \
333 .sequence_nestable = \
334 { \
335 .length_name = "_" #_item "_length", \
336 .elem_type = __LTTNG_COMPOUND_LITERAL(struct lttng_type, \
337 __type_integer(_type, 0, 0, -1, _byte_order, _elem_type_base, _encoding)), \
338 .alignment = 0, \
339 }, \
340 }, \
341 }, \
342 .nowrite = _nowrite, \
343 .user = _user, \
344 .nofilter = 0, \
345 },
346
347 #undef _ctf_sequence_bitfield
348 #define _ctf_sequence_bitfield(_type, _item, _src, \
349 _length_type, _src_length, \
350 _user, _nowrite) \
351 { \
352 .name = "_" #_item "_length", \
353 .type = __type_integer(_length_type, 0, 0, -1, __BYTE_ORDER, 10, none), \
354 .nowrite = _nowrite, \
355 .nofilter = 1, \
356 }, \
357 { \
358 .name = #_item, \
359 .type = \
360 { \
361 .atype = atype_sequence_nestable, \
362 .u = \
363 { \
364 .sequence_nestable = \
365 { \
366 .length_name = "_" #_item "_length", \
367 .elem_type = __LTTNG_COMPOUND_LITERAL(struct lttng_type, \
368 __type_integer(_type, 1, 1, 0, __LITTLE_ENDIAN, 10, none)), \
369 .alignment = lttng_alignof(_type), \
370 }, \
371 }, \
372 }, \
373 .nowrite = _nowrite, \
374 .user = _user, \
375 .nofilter = 0, \
376 },
377
378 #undef _ctf_string
379 #define _ctf_string(_item, _src, _user, _nowrite) \
380 { \
381 .name = #_item, \
382 .type = \
383 { \
384 .atype = atype_string, \
385 .u = \
386 { \
387 .string = { .encoding = lttng_encode_UTF8 }, \
388 }, \
389 }, \
390 .nowrite = _nowrite, \
391 .user = _user, \
392 .nofilter = 0, \
393 },
394
395 #undef _ctf_enum
396 #define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
397 { \
398 .name = #_item, \
399 .type = { \
400 .atype = atype_enum_nestable, \
401 .u = { \
402 .enum_nestable = { \
403 .desc = &__enum_##_name, \
404 .container_type = __LTTNG_COMPOUND_LITERAL(struct lttng_type, \
405 __type_integer(_type, 0, 0, -1, __BYTE_ORDER, 10, none)), \
406 }, \
407 }, \
408 }, \
409 .nowrite = _nowrite, \
410 .user = _user, \
411 .nofilter = 0, \
412 },
413
414 #undef ctf_custom_field
415 #define ctf_custom_field(_type, _item, _code) \
416 { \
417 .name = #_item, \
418 .type = _type, \
419 .nowrite = 0, \
420 .user = 0, \
421 .nofilter = 1, \
422 },
423
424 #undef ctf_custom_type
425 #define ctf_custom_type(...) __VA_ARGS__
426
427 #undef TP_FIELDS
428 #define TP_FIELDS(...) __VA_ARGS__ /* Only one used in this phase */
429
430 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
431 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
432 static const struct lttng_event_field __event_fields___##_name[] = { \
433 _fields \
434 };
435
436 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
437 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
438 LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, PARAMS(_fields), _code_post)
439
440 #undef LTTNG_TRACEPOINT_ENUM
441 #define LTTNG_TRACEPOINT_ENUM(_name, _values) \
442 static const struct lttng_enum_desc __enum_##_name = { \
443 .name = #_name, \
444 .entries = __enum_values__##_name, \
445 .nr_entries = ARRAY_SIZE(__enum_values__##_name), \
446 };
447
448 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
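
/*
 * For the hypothetical "my_event_class" sketched earlier, stage 2 emits:
 *
 *   static const struct lttng_event_field __event_fields___my_event_class[] = {
 *       {
 *           .name = "id",
 *           .type = __type_integer(int, 0, 0, -1, __BYTE_ORDER, 10, none),
 *           .nowrite = 0,
 *           .user = 0,
 *           .nofilter = 0,
 *       },
 *   };
 */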
449
450 /*
451 * Stage 3 of the trace events.
452 *
453 * Create probe callback prototypes.
454 */
455
456 /* Reset all macros within TRACEPOINT_EVENT */
457 #include <lttng/events-reset.h>
458
459 #undef TP_PROTO
460 #define TP_PROTO(...) __VA_ARGS__
461
462 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
463 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
464 static void __event_probe__##_name(void *__data, _proto);
465
466 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
467 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
468 static void __event_probe__##_name(void *__data);
469
470 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
471
472 /*
473 * Stage 4 of the trace events.
474 *
475 * Create a static inline function that calculates the event size.
476 */
477
478 /* Reset all macros within TRACEPOINT_EVENT */
479 #include <lttng/events-reset.h>
480 #include <lttng/events-write.h>
481
482 #undef _ctf_integer_ext
483 #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
484 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
485 __event_len += sizeof(_type);
486
487 #undef _ctf_array_encoded
488 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
489 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
490 __event_len += sizeof(_type) * (_length);
491
492 #undef _ctf_array_bitfield
493 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
494 _ctf_array_encoded(_type, _item, _src, _length, none, __LITTLE_ENDIAN, 0, _user, _nowrite)
495
496 #undef _ctf_sequence_encoded
497 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
498 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
499 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_length_type)); \
500 __event_len += sizeof(_length_type); \
501 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
502 { \
503 size_t __seqlen = (_src_length); \
504 \
505 if (unlikely(++this_cpu_ptr(&lttng_dynamic_len_stack)->offset >= LTTNG_DYNAMIC_LEN_STACK_SIZE)) \
506 goto error; \
507 barrier(); /* reserve before use. */ \
508 this_cpu_ptr(&lttng_dynamic_len_stack)->stack[this_cpu_ptr(&lttng_dynamic_len_stack)->offset - 1] = __seqlen; \
509 __event_len += sizeof(_type) * __seqlen; \
510 }
511
512 #undef _ctf_sequence_bitfield
513 #define _ctf_sequence_bitfield(_type, _item, _src, \
514 _length_type, _src_length, \
515 _user, _nowrite) \
516 _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
517 none, __LITTLE_ENDIAN, 10, _user, _nowrite)
518
519 /*
520 * ctf_user_string includes the \0. If it returns 0, it faulted, so we
521 * set the size to 1 (\0 only).
522 */
523 #undef _ctf_string
524 #define _ctf_string(_item, _src, _user, _nowrite) \
525 if (unlikely(++this_cpu_ptr(&lttng_dynamic_len_stack)->offset >= LTTNG_DYNAMIC_LEN_STACK_SIZE)) \
526 goto error; \
527 barrier(); /* reserve before use. */ \
528 if (_user) { \
529 __event_len += this_cpu_ptr(&lttng_dynamic_len_stack)->stack[this_cpu_ptr(&lttng_dynamic_len_stack)->offset - 1] = \
530 max_t(size_t, lttng_strlen_user_inatomic(_src), 1); \
531 } else { \
532 __event_len += this_cpu_ptr(&lttng_dynamic_len_stack)->stack[this_cpu_ptr(&lttng_dynamic_len_stack)->offset - 1] = \
533 strlen((_src) ? (_src) : __LTTNG_NULL_STRING) + 1; \
534 }
535
536 #undef _ctf_enum
537 #define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
538 _ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)
539
540 #undef ctf_align
541 #define ctf_align(_type) \
542 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type));
543
544 #undef ctf_custom_field
545 #define ctf_custom_field(_type, _item, _code) \
546 { \
547 _code \
548 }
549
550 #undef ctf_custom_code
551 #define ctf_custom_code(...) __VA_ARGS__
552
553 #undef TP_PROTO
554 #define TP_PROTO(...) __VA_ARGS__
555
556 #undef TP_FIELDS
557 #define TP_FIELDS(...) __VA_ARGS__
558
559 #undef TP_locvar
560 #define TP_locvar(...) __VA_ARGS__
561
562 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
563 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
564 static inline ssize_t __event_get_size__##_name(void *__tp_locvar, _proto) \
565 { \
566 size_t __event_len = 0; \
567 unsigned int __dynamic_len_idx __attribute__((unused)) = 0; \
568 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
569 \
570 _fields \
571 return __event_len; \
572 \
573 error: \
574 __attribute__((unused)); \
575 return -1; \
576 }
577
578 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
579 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
580 static inline ssize_t __event_get_size__##_name(void *__tp_locvar) \
581 { \
582 size_t __event_len = 0; \
583 unsigned int __dynamic_len_idx __attribute__((unused)) = 0; \
584 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
585 \
586 _fields \
587 return __event_len; \
588 \
589 error: \
590 __attribute__((unused)); \
591 return -1; \
592 }
593
594 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
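
/*
 * For the hypothetical "my_event_class" sketched earlier, the generated
 * size function reduces to:
 *
 *   static inline ssize_t __event_get_size__my_event_class(void *__tp_locvar, int id)
 *   {
 *       size_t __event_len = 0;
 *
 *       __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(int));
 *       __event_len += sizeof(int);
 *       return __event_len;
 *   }
 *
 * Variable-size fields (sequences, strings) additionally push their
 * length onto the per-cpu lttng_dynamic_len_stack so stage 6 can reuse
 * it when serializing.
 */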
595
596
597 /*
598 * Stage 4.1 of tracepoint event generation.
599 *
600 * Create a static inline function that lays out the filter stack data.
601 * We make both write and nowrite data available to the filter.
602 */
603
604 /* Reset all macros within TRACEPOINT_EVENT */
605 #include <lttng/events-reset.h>
606 #include <lttng/events-write.h>
607 #include <lttng/events-nowrite.h>
608
609 #undef _ctf_integer_ext_fetched
610 #define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
611 if (lttng_is_signed_type(_type)) { \
612 int64_t __ctf_tmp_int64; \
613 switch (sizeof(_type)) { \
614 case 1: \
615 { \
616 union { _type t; int8_t v; } __tmp = { (_type) (_src) }; \
617 __ctf_tmp_int64 = (int64_t) __tmp.v; \
618 break; \
619 } \
620 case 2: \
621 { \
622 union { _type t; int16_t v; } __tmp = { (_type) (_src) }; \
623 if (_byte_order != __BYTE_ORDER) \
624 __swab16s(&__tmp.v); \
625 __ctf_tmp_int64 = (int64_t) __tmp.v; \
626 break; \
627 } \
628 case 4: \
629 { \
630 union { _type t; int32_t v; } __tmp = { (_type) (_src) }; \
631 if (_byte_order != __BYTE_ORDER) \
632 __swab32s(&__tmp.v); \
633 __ctf_tmp_int64 = (int64_t) __tmp.v; \
634 break; \
635 } \
636 case 8: \
637 { \
638 union { _type t; int64_t v; } __tmp = { (_type) (_src) }; \
639 if (_byte_order != __BYTE_ORDER) \
640 __swab64s(&__tmp.v); \
641 __ctf_tmp_int64 = (int64_t) __tmp.v; \
642 break; \
643 } \
644 default: \
645 BUG_ON(1); \
646 }; \
647 memcpy(__stack_data, &__ctf_tmp_int64, sizeof(int64_t)); \
648 } else { \
649 uint64_t __ctf_tmp_uint64; \
650 switch (sizeof(_type)) { \
651 case 1: \
652 { \
653 union { _type t; uint8_t v; } __tmp = { (_type) (_src) }; \
654 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
655 break; \
656 } \
657 case 2: \
658 { \
659 union { _type t; uint16_t v; } __tmp = { (_type) (_src) }; \
660 if (_byte_order != __BYTE_ORDER) \
661 __swab16s(&__tmp.v); \
662 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
663 break; \
664 } \
665 case 4: \
666 { \
667 union { _type t; uint32_t v; } __tmp = { (_type) (_src) }; \
668 if (_byte_order != __BYTE_ORDER) \
669 __swab32s(&__tmp.v); \
670 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
671 break; \
672 } \
673 case 8: \
674 { \
675 union { _type t; uint64_t v; } __tmp = { (_type) (_src) }; \
676 if (_byte_order != __BYTE_ORDER) \
677 __swab64s(&__tmp.v); \
678 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
679 break; \
680 } \
681 default: \
682 BUG_ON(1); \
683 }; \
684 memcpy(__stack_data, &__ctf_tmp_uint64, sizeof(uint64_t)); \
685 } \
686 __stack_data += sizeof(int64_t);
687
688 #undef _ctf_integer_ext_isuser0
689 #define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \
690 _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite)
691
692 #undef _ctf_integer_ext_isuser1
693 #define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \
694 { \
695 union { \
696 char __array[sizeof(_user_src)]; \
697 __typeof__(_user_src) __v; \
698 } __tmp_fetch; \
699 if (lib_ring_buffer_copy_from_user_check_nofault(__tmp_fetch.__array, \
700 &(_user_src), sizeof(_user_src))) \
701 memset(__tmp_fetch.__array, 0, sizeof(__tmp_fetch.__array)); \
702 _ctf_integer_ext_fetched(_type, _item, __tmp_fetch.__v, _byte_order, _base, _nowrite) \
703 }
704
705 #undef _ctf_integer_ext
706 #define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \
707 _ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite)
708
709 #undef _ctf_array_encoded
710 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
711 { \
712 unsigned long __ctf_tmp_ulong = (unsigned long) (_length); \
713 const void *__ctf_tmp_ptr = (_src); \
714 memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \
715 __stack_data += sizeof(unsigned long); \
716 memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
717 __stack_data += sizeof(void *); \
718 }
719
720 #undef _ctf_array_bitfield
721 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
722 _ctf_array_encoded(_type, _item, _src, _length, none, __LITTLE_ENDIAN, 0, _user, _nowrite)
723
724 #undef _ctf_sequence_encoded
725 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
726 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
727 { \
728 unsigned long __ctf_tmp_ulong = (unsigned long) (_src_length); \
729 const void *__ctf_tmp_ptr = (_src); \
730 memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \
731 __stack_data += sizeof(unsigned long); \
732 memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
733 __stack_data += sizeof(void *); \
734 }
735
736 #undef _ctf_sequence_bitfield
737 #define _ctf_sequence_bitfield(_type, _item, _src, \
738 _length_type, _src_length, \
739 _user, _nowrite) \
740 _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
741 none, __LITTLE_ENDIAN, 10, _user, _nowrite)
742
743 #undef _ctf_string
744 #define _ctf_string(_item, _src, _user, _nowrite) \
745 { \
746 const void *__ctf_tmp_ptr = \
747 ((_src) ? (_src) : __LTTNG_NULL_STRING); \
748 memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
749 __stack_data += sizeof(void *); \
750 }
751
752 #undef _ctf_enum
753 #define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
754 _ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)
755
756 #undef TP_PROTO
757 #define TP_PROTO(...) __VA_ARGS__
758
759 #undef TP_FIELDS
760 #define TP_FIELDS(...) __VA_ARGS__
761
762 #undef TP_locvar
763 #define TP_locvar(...) __VA_ARGS__
764
765 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
766 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
767 static inline \
768 void __event_prepare_filter_stack__##_name(char *__stack_data, \
769 void *__tp_locvar) \
770 { \
771 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
772 \
773 _fields \
774 }
775
776 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
777 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
778 static inline \
779 void __event_prepare_filter_stack__##_name(char *__stack_data, \
780 void *__tp_locvar, _proto) \
781 { \
782 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
783 \
784 _fields \
785 }
786
787 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
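
/*
 * Resulting stack layout (sketch): each integer or enum field occupies
 * 64 bits of __stack_data, sign- or zero-extended and byteswapped to
 * host order when needed; arrays and sequences push an unsigned long
 * element count followed by a data pointer; strings push a pointer,
 * with NULL replaced by the "(null)" literal.
 */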
788
789 /*
790 * Stage 5 of the trace events.
791 *
792 * Create a static inline function that calculates the event payload alignment.
793 */
794
795 /* Reset all macros within TRACEPOINT_EVENT */
796 #include <lttng/events-reset.h>
797 #include <lttng/events-write.h>
798
799 #undef _ctf_integer_ext
800 #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
801 __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
802
803 #undef _ctf_array_encoded
804 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
805 __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
806
807 #undef _ctf_array_bitfield
808 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
809 _ctf_array_encoded(_type, _item, _src, _length, none, __LITTLE_ENDIAN, 0, _user, _nowrite)
810
811 #undef _ctf_sequence_encoded
812 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
813 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
814 __event_align = max_t(size_t, __event_align, lttng_alignof(_length_type)); \
815 __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
816
817 #undef _ctf_sequence_bitfield
818 #define _ctf_sequence_bitfield(_type, _item, _src, \
819 _length_type, _src_length, \
820 _user, _nowrite) \
821 _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
822 none, __LITTLE_ENDIAN, 10, _user, _nowrite)
823
824 #undef _ctf_string
825 #define _ctf_string(_item, _src, _user, _nowrite)
826
827 #undef _ctf_enum
828 #define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
829 _ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)
830
831 #undef ctf_align
832 #define ctf_align(_type) \
833 __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
834
835 #undef TP_PROTO
836 #define TP_PROTO(...) __VA_ARGS__
837
838 #undef TP_FIELDS
839 #define TP_FIELDS(...) __VA_ARGS__
840
841 #undef TP_locvar
842 #define TP_locvar(...) __VA_ARGS__
843
844 #undef ctf_custom_field
845 #define ctf_custom_field(_type, _item, _code) _code
846
847 #undef ctf_custom_code
848 #define ctf_custom_code(...) \
849 { \
850 __VA_ARGS__ \
851 }
852
853 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
854 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
855 static inline size_t __event_get_align__##_name(void *__tp_locvar, _proto) \
856 { \
857 size_t __event_align = 1; \
858 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
859 \
860 _fields \
861 return __event_align; \
862 }
863
864 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
865 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
866 static inline size_t __event_get_align__##_name(void *__tp_locvar) \
867 { \
868 size_t __event_align = 1; \
869 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
870 \
871 _fields \
872 return __event_align; \
873 }
874
875 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
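
/*
 * For the hypothetical "my_event_class" sketched earlier, this reduces
 * to:
 *
 *   static inline size_t __event_get_align__my_event_class(void *__tp_locvar, int id)
 *   {
 *       size_t __event_align = 1;
 *
 *       __event_align = max_t(size_t, __event_align, lttng_alignof(int));
 *       return __event_align;
 *   }
 */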
876
877 /*
878 * Stage 6 of tracepoint event generation.
879 *
880 * Create the probe function. This function computes the event size and
881 * writes the event data into the ring buffer.
882 */
883
884 /* Reset all macros within TRACEPOINT_EVENT */
885 #include <lttng/events-reset.h>
886 #include <lttng/events-write.h>
887
888 #undef _ctf_integer_ext_fetched
889 #define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
890 { \
891 _type __tmp = _src; \
892 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__tmp));\
893 __chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp));\
894 }
895
896 #undef _ctf_integer_ext_isuser0
897 #define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \
898 _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite)
899
900 #undef _ctf_integer_ext_isuser1
901 #define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \
902 { \
903 union { \
904 char __array[sizeof(_user_src)]; \
905 __typeof__(_user_src) __v; \
906 } __tmp_fetch; \
907 if (lib_ring_buffer_copy_from_user_check_nofault(__tmp_fetch.__array, \
908 &(_user_src), sizeof(_user_src))) \
909 memset(__tmp_fetch.__array, 0, sizeof(__tmp_fetch.__array)); \
910 _ctf_integer_ext_fetched(_type, _item, __tmp_fetch.__v, _byte_order, _base, _nowrite) \
911 }
912
913 #undef _ctf_integer_ext
914 #define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \
915 _ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite)
916
917 #undef _ctf_array_encoded
918 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
919 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
920 if (_user) { \
921 __chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \
922 } else { \
923 __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \
924 }
925
926 #if (__BYTE_ORDER == __LITTLE_ENDIAN)
927 #undef _ctf_array_bitfield
928 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
929 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
930 if (_user) { \
931 __chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \
932 } else { \
933 __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \
934 }
935 #else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
936 /*
937 * For big endian, we need to byteswap into little endian.
938 */
939 #undef _ctf_array_bitfield
940 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
941 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
942 { \
943 size_t _i; \
944 \
945 for (_i = 0; _i < (_length); _i++) { \
946 _type _tmp; \
947 \
948 if (_user) { \
949 if (get_user(_tmp, (_type *) _src + _i)) \
950 _tmp = 0; \
951 } else { \
952 _tmp = ((_type *) _src)[_i]; \
953 } \
954 switch (sizeof(_type)) { \
955 case 1: \
956 break; \
957 case 2: \
958 _tmp = cpu_to_le16(_tmp); \
959 break; \
960 case 4: \
961 _tmp = cpu_to_le32(_tmp); \
962 break; \
963 case 8: \
964 _tmp = cpu_to_le64(_tmp); \
965 break; \
966 default: \
967 BUG_ON(1); \
968 } \
969 __chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \
970 } \
971 }
972 #endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
973
974 #undef _ctf_sequence_encoded
975 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
976 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
977 { \
978 _length_type __tmpl = this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx]; \
979 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
980 __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
981 } \
982 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
983 if (_user) { \
984 __chan->ops->event_write_from_user(&__ctx, _src, \
985 sizeof(_type) * __get_dynamic_len(dest)); \
986 } else { \
987 __chan->ops->event_write(&__ctx, _src, \
988 sizeof(_type) * __get_dynamic_len(dest)); \
989 }
990
991 #if (__BYTE_ORDER == __LITTLE_ENDIAN)
992 #undef _ctf_sequence_bitfield
993 #define _ctf_sequence_bitfield(_type, _item, _src, \
994 _length_type, _src_length, \
995 _user, _nowrite) \
996 { \
997 _length_type __tmpl = this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \
998 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
999 __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
1000 } \
1001 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
1002 if (_user) { \
1003 __chan->ops->event_write_from_user(&__ctx, _src, \
1004 sizeof(_type) * __get_dynamic_len(dest)); \
1005 } else { \
1006 __chan->ops->event_write(&__ctx, _src, \
1007 sizeof(_type) * __get_dynamic_len(dest)); \
1008 }
1009 #else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
1010 /*
1011 * For big endian, we need to byteswap into little endian.
1012 */
1013 #undef _ctf_sequence_bitfield
1014 #define _ctf_sequence_bitfield(_type, _item, _src, \
1015 _length_type, _src_length, \
1016 _user, _nowrite) \
1017 { \
1018 _length_type __tmpl = this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \
1019 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
1020 __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
1021 } \
1022 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
1023 { \
1024 size_t _i, _length; \
1025 \
1026 _length = __get_dynamic_len(dest); \
1027 for (_i = 0; _i < _length; _i++) { \
1028 _type _tmp; \
1029 \
1030 if (_user) { \
1031 if (get_user(_tmp, (_type *) _src + _i)) \
1032 _tmp = 0; \
1033 } else { \
1034 _tmp = ((_type *) _src)[_i]; \
1035 } \
1036 switch (sizeof(_type)) { \
1037 case 1: \
1038 break; \
1039 case 2: \
1040 _tmp = cpu_to_le16(_tmp); \
1041 break; \
1042 case 4: \
1043 _tmp = cpu_to_le32(_tmp); \
1044 break; \
1045 case 8: \
1046 _tmp = cpu_to_le64(_tmp); \
1047 break; \
1048 default: \
1049 BUG_ON(1); \
1050 } \
1051 __chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \
1052 } \
1053 }
1054 #endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
1055
1056 #undef _ctf_string
1057 #define _ctf_string(_item, _src, _user, _nowrite) \
1058 if (_user) { \
1059 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(*(_src))); \
1060 __chan->ops->event_strcpy_from_user(&__ctx, _src, \
1061 __get_dynamic_len(dest)); \
1062 } else { \
1063 const char *__ctf_tmp_string = \
1064 ((_src) ? (_src) : __LTTNG_NULL_STRING); \
1065 lib_ring_buffer_align_ctx(&__ctx, \
1066 lttng_alignof(*__ctf_tmp_string)); \
1067 __chan->ops->event_strcpy(&__ctx, __ctf_tmp_string, \
1068 __get_dynamic_len(dest)); \
1069 }
1070
1071 #undef _ctf_enum
1072 #define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
1073 _ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)
1074
1075 #undef ctf_align
1076 #define ctf_align(_type) \
1077 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type));
1078
1079 #undef ctf_custom_field
1080 #define ctf_custom_field(_type, _item, _code) _code
1081
1082 #undef ctf_custom_code
1083 #define ctf_custom_code(...) \
1084 { \
1085 __VA_ARGS__ \
1086 }
1087
1088 /* Beware: this "get len" actually consumes the length value. */
1089 #undef __get_dynamic_len
1090 #define __get_dynamic_len(field) this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx++]
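
/*
 * Stage 4 pushed one length per variable-size field, in field order;
 * the stage 6 serialization below pops them back in the same order.
 * For example, a sequence followed by a string consumes
 * stack[__dynamic_len_idx] for the sequence element count, then
 * stack[__dynamic_len_idx + 1] for the string length (including \0).
 */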
1091
1092 #undef TP_PROTO
1093 #define TP_PROTO(...) __VA_ARGS__
1094
1095 #undef TP_ARGS
1096 #define TP_ARGS(...) __VA_ARGS__
1097
1098 #undef TP_FIELDS
1099 #define TP_FIELDS(...) __VA_ARGS__
1100
1101 #undef TP_locvar
1102 #define TP_locvar(...) __VA_ARGS__
1103
1104 #undef TP_code_pre
1105 #define TP_code_pre(...) __VA_ARGS__
1106
1107 #undef TP_code_post
1108 #define TP_code_post(...) __VA_ARGS__
1109
1110 /*
1111 * For the state dump, check that the mandatory "session" argument matches
1112 * the session this event belongs to. This ensures that we write state dump
1113 * data only into the session being started, not into all sessions.
1114 */
1115 #ifdef TP_SESSION_CHECK
1116 #define _TP_SESSION_CHECK(session, csession) (session == csession)
1117 #else /* TP_SESSION_CHECK */
1118 #define _TP_SESSION_CHECK(session, csession) 1
1119 #endif /* TP_SESSION_CHECK */
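
/*
 * Sketch: a state dump probe provider defines TP_SESSION_CHECK before
 * pulling in this implementation, and each of its events receives the
 * target session as a tracepoint argument named "session", which the
 * probe body below compares against its own __session.
 */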
1120
1121 /*
1122 * Use twice the size for the filter stack data to hold a size and a
1123 * pointer for each field (worst case). For integers, the maximum size
1124 * required is 64-bit; the same holds for double-precision floats. Both
1125 * fit within 2*sizeof(unsigned long) on all supported architectures.
1126 * Perform a union (logical OR) of the filter runtime list results.
1127 */
1128 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
1129 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
1130 static void __event_probe__##_name(void *__data, _proto) \
1131 { \
1132 struct probe_local_vars { _locvar }; \
1133 struct lttng_event *__event = __data; \
1134 struct lttng_probe_ctx __lttng_probe_ctx = { \
1135 .event = __event, \
1136 .interruptible = !irqs_disabled(), \
1137 }; \
1138 struct lttng_channel *__chan = __event->chan; \
1139 struct lttng_session *__session = __chan->session; \
1140 struct lib_ring_buffer_ctx __ctx; \
1141 ssize_t __event_len; \
1142 size_t __event_align; \
1143 size_t __orig_dynamic_len_offset, __dynamic_len_idx __attribute__((unused)); \
1144 union { \
1145 size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
1146 char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
1147 } __stackvar; \
1148 int __ret; \
1149 struct probe_local_vars __tp_locvar; \
1150 struct probe_local_vars *tp_locvar __attribute__((unused)) = \
1151 &__tp_locvar; \
1152 struct lttng_id_tracker_rcu *__lf; \
1153 \
1154 if (!_TP_SESSION_CHECK(session, __session)) \
1155 return; \
1156 if (unlikely(!LTTNG_READ_ONCE(__session->active))) \
1157 return; \
1158 if (unlikely(!LTTNG_READ_ONCE(__chan->enabled))) \
1159 return; \
1160 if (unlikely(!LTTNG_READ_ONCE(__event->enabled))) \
1161 return; \
1162 __lf = lttng_rcu_dereference(__session->pid_tracker.p); \
1163 if (__lf && likely(!lttng_id_tracker_lookup(__lf, current->tgid))) \
1164 return; \
1165 __lf = lttng_rcu_dereference(__session->vpid_tracker.p); \
1166 if (__lf && likely(!lttng_id_tracker_lookup(__lf, task_tgid_vnr(current)))) \
1167 return; \
1168 __lf = lttng_rcu_dereference(__session->uid_tracker.p); \
1169 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1170 lttng_current_uid()))) \
1171 return; \
1172 __lf = lttng_rcu_dereference(__session->vuid_tracker.p); \
1173 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1174 lttng_current_vuid()))) \
1175 return; \
1176 __lf = lttng_rcu_dereference(__session->gid_tracker.p); \
1177 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1178 lttng_current_gid()))) \
1179 return; \
1180 __lf = lttng_rcu_dereference(__session->vgid_tracker.p); \
1181 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1182 lttng_current_vgid()))) \
1183 return; \
1184 __orig_dynamic_len_offset = this_cpu_ptr(&lttng_dynamic_len_stack)->offset; \
1185 __dynamic_len_idx = __orig_dynamic_len_offset; \
1186 _code_pre \
1187 if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \
1188 struct lttng_bytecode_runtime *bc_runtime; \
1189 int __filter_record = __event->has_enablers_without_bytecode; \
1190 \
1191 __event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \
1192 tp_locvar, _args); \
1193 lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \
1194 if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
1195 __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) { \
1196 __filter_record = 1; \
1197 break; \
1198 } \
1199 } \
1200 if (likely(!__filter_record)) \
1201 goto __post; \
1202 } \
1203 __event_len = __event_get_size__##_name(tp_locvar, _args); \
1204 if (unlikely(__event_len < 0)) { \
1205 lib_ring_buffer_lost_event_too_big(__chan->chan); \
1206 goto __post; \
1207 } \
1208 __event_align = __event_get_align__##_name(tp_locvar, _args); \
1209 lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \
1210 __event_align, -1); \
1211 __ret = __chan->ops->event_reserve(&__ctx, __event->id); \
1212 if (__ret < 0) \
1213 goto __post; \
1214 _fields \
1215 __chan->ops->event_commit(&__ctx); \
1216 __post: \
1217 _code_post \
1218 barrier(); /* use before un-reserve. */ \
1219 this_cpu_ptr(&lttng_dynamic_len_stack)->offset = __orig_dynamic_len_offset; \
1220 return; \
1221 }
1222
1223 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
1224 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
1225 static void __event_probe__##_name(void *__data) \
1226 { \
1227 struct probe_local_vars { _locvar }; \
1228 struct lttng_event *__event = __data; \
1229 struct lttng_probe_ctx __lttng_probe_ctx = { \
1230 .event = __event, \
1231 .interruptible = !irqs_disabled(), \
1232 }; \
1233 struct lttng_channel *__chan = __event->chan; \
1234 struct lttng_session *__session = __chan->session; \
1235 struct lib_ring_buffer_ctx __ctx; \
1236 ssize_t __event_len; \
1237 size_t __event_align; \
1238 size_t __orig_dynamic_len_offset, __dynamic_len_idx __attribute__((unused)); \
1239 union { \
1240 size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
1241 char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
1242 } __stackvar; \
1243 int __ret; \
1244 struct probe_local_vars __tp_locvar; \
1245 struct probe_local_vars *tp_locvar __attribute__((unused)) = \
1246 &__tp_locvar; \
1247 struct lttng_id_tracker_rcu *__lf; \
1248 \
1249 if (!_TP_SESSION_CHECK(session, __session)) \
1250 return; \
1251 if (unlikely(!LTTNG_READ_ONCE(__session->active))) \
1252 return; \
1253 if (unlikely(!LTTNG_READ_ONCE(__chan->enabled))) \
1254 return; \
1255 if (unlikely(!LTTNG_READ_ONCE(__event->enabled))) \
1256 return; \
1257 __lf = lttng_rcu_dereference(__session->pid_tracker.p); \
1258 if (__lf && likely(!lttng_id_tracker_lookup(__lf, current->tgid))) \
1259 return; \
1260 __lf = lttng_rcu_dereference(__session->vpid_tracker.p); \
1261 if (__lf && likely(!lttng_id_tracker_lookup(__lf, task_tgid_vnr(current)))) \
1262 return; \
1263 __lf = lttng_rcu_dereference(__session->uid_tracker.p); \
1264 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1265 lttng_current_uid()))) \
1266 return; \
1267 __lf = lttng_rcu_dereference(__session->vuid_tracker.p); \
1268 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1269 lttng_current_vuid()))) \
1270 return; \
1271 __lf = lttng_rcu_dereference(__session->gid_tracker.p); \
1272 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1273 lttng_current_gid()))) \
1274 return; \
1275 __lf = lttng_rcu_dereference(__session->vgid_tracker.p); \
1276 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1277 lttng_current_vgid()))) \
1278 return; \
1279 __orig_dynamic_len_offset = this_cpu_ptr(&lttng_dynamic_len_stack)->offset; \
1280 __dynamic_len_idx = __orig_dynamic_len_offset; \
1281 _code_pre \
1282 if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \
1283 struct lttng_bytecode_runtime *bc_runtime; \
1284 int __filter_record = __event->has_enablers_without_bytecode; \
1285 \
1286 __event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \
1287 tp_locvar); \
1288 lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \
1289 if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
1290 __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) { \
1291 __filter_record = 1; \
1292 break; \
1293 } \
1294 } \
1295 if (likely(!__filter_record)) \
1296 goto __post; \
1297 } \
1298 __event_len = __event_get_size__##_name(tp_locvar); \
1299 if (unlikely(__event_len < 0)) { \
1300 lib_ring_buffer_lost_event_too_big(__chan->chan); \
1301 goto __post; \
1302 } \
1303 __event_align = __event_get_align__##_name(tp_locvar); \
1304 lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \
1305 __event_align, -1); \
1306 __ret = __chan->ops->event_reserve(&__ctx, __event->id); \
1307 if (__ret < 0) \
1308 goto __post; \
1309 _fields \
1310 __chan->ops->event_commit(&__ctx); \
1311 __post: \
1312 _code_post \
1313 barrier(); /* use before un-reserve. */ \
1314 this_cpu_ptr(&lttng_dynamic_len_stack)->offset = __orig_dynamic_len_offset; \
1315 return; \
1316 }
1317
1318 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1319
1320 #undef __get_dynamic_len
1321
1322 /*
1323 * Stage 7 of the trace events.
1324 *
1325 * Create event descriptions.
1326 */
1327
1328 /* Named field types must be defined in lttng-types.h */
1329
1330 #include <lttng/events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1331
1332 #ifndef TP_PROBE_CB
1333 #define TP_PROBE_CB(_template) &__event_probe__##_template
1334 #endif
1335
1336 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
1337 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
1338 static const struct lttng_event_desc __event_desc___##_map = { \
1339 .fields = __event_fields___##_template, \
1340 .name = #_map, \
1341 .kname = #_name, \
1342 .probe_callback = (void *) TP_PROBE_CB(_template), \
1343 .nr_fields = ARRAY_SIZE(__event_fields___##_template), \
1344 .owner = THIS_MODULE, \
1345 };
1346
1347 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
1348 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
1349 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map)
1350
1351 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
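
/*
 * For the "kmalloc" -> "kmem_kmalloc" mapping sketched at the top of
 * this file, stage 7 emits:
 *
 *   static const struct lttng_event_desc __event_desc___kmem_kmalloc = {
 *       .fields = __event_fields___kmem_kmalloc,
 *       .name = "kmem_kmalloc",
 *       .kname = "kmalloc",
 *       .probe_callback = (void *) &__event_probe__kmem_kmalloc,
 *       .nr_fields = ARRAY_SIZE(__event_fields___kmem_kmalloc),
 *       .owner = THIS_MODULE,
 *   };
 */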
1352
1353 /*
1354 * Stage 8 of the trace events.
1355 *
1356 * Create an array of event description pointers.
1357 */
1358
1359 #include <lttng/events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1360
1361 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
1362 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
1363 &__event_desc___##_map,
1364
1365 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
1366 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
1367 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map)
1368
1369 #define TP_ID1(_token, _system) _token##_system
1370 #define TP_ID(_token, _system) TP_ID1(_token, _system)
1371
1372 static const struct lttng_event_desc *TP_ID(__event_desc___, TRACE_SYSTEM)[] = {
1373 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1374 };
1375
1376 #undef TP_ID1
1377 #undef TP_ID
1378
1379 /*
1380 * Stage 9 of the trace events.
1381 *
1382 * Create a top-level descriptor for the whole probe.
1383 */
1384
1385 #define TP_ID1(_token, _system) _token##_system
1386 #define TP_ID(_token, _system) TP_ID1(_token, _system)
1387
1388 /* Non-const because the list head is modified upon registration. */
1389 static __used struct lttng_probe_desc TP_ID(__probe_desc___, TRACE_SYSTEM) = {
1390 .provider = __stringify(TRACE_SYSTEM),
1391 .event_desc = TP_ID(__event_desc___, TRACE_SYSTEM),
1392 .nr_events = ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)),
1393 .head = { NULL, NULL },
1394 .lazy_init_head = { NULL, NULL },
1395 .lazy = 0,
1396 };
1397
1398 #undef TP_ID1
1399 #undef TP_ID
1400
1401 /*
1402 * Stage 10 of the trace events.
1403 *
1404 * Register/unregister probes at module load/unload.
1405 */
1406
1407 #include <lttng/events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1408
1409 #define TP_ID1(_token, _system) _token##_system
1410 #define TP_ID(_token, _system) TP_ID1(_token, _system)
1411 #define module_init_eval1(_token, _system) module_init(_token##_system)
1412 #define module_init_eval(_token, _system) module_init_eval1(_token, _system)
1413 #define module_exit_eval1(_token, _system) module_exit(_token##_system)
1414 #define module_exit_eval(_token, _system) module_exit_eval1(_token, _system)
1415
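/*
 * Providers that register the probe themselves (e.g. from their own
 * module init function) can define TP_MODULE_NOINIT to skip generating
 * the init/exit functions; defining only TP_MODULE_NOAUTOLOAD keeps the
 * functions but skips the module_init()/module_exit() hookup.
 */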
1416 #ifndef TP_MODULE_NOINIT
1417 static int TP_ID(__lttng_events_init__, TRACE_SYSTEM)(void)
1418 {
1419 wrapper_vmalloc_sync_mappings();
1420 return lttng_probe_register(&TP_ID(__probe_desc___, TRACE_SYSTEM));
1421 }
1422
1423 static void TP_ID(__lttng_events_exit__, TRACE_SYSTEM)(void)
1424 {
1425 lttng_probe_unregister(&TP_ID(__probe_desc___, TRACE_SYSTEM));
1426 }
1427
1428 #ifndef TP_MODULE_NOAUTOLOAD
1429 module_init_eval(__lttng_events_init__, TRACE_SYSTEM);
1430 module_exit_eval(__lttng_events_exit__, TRACE_SYSTEM);
1431 #endif
1432
1433 #endif
1434
1435 #undef module_init_eval
1436 #undef module_exit_eval
1437 #undef TP_ID1
1438 #undef TP_ID
1439
1440 #undef TP_PROTO
1441 #undef TP_ARGS