1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng-tracepoint-event-impl.h
4 *
5 * Copyright (C) 2009 Steven Rostedt <rostedt@goodmis.org>
6 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 */
8
9 #include <linux/uaccess.h>
10 #include <linux/debugfs.h>
11 #include <linux/rculist.h>
12 #include <asm/byteorder.h>
13 #include <linux/swab.h>
14
15 #include <probes/lttng.h>
16 #include <probes/lttng-types.h>
17 #include <probes/lttng-probe-user.h>
18 #include <include/ringbuffer/frontend_types.h>
19 #include <include/ringbuffer/backend.h>
20 #include <wrapper/user_namespace.h>
21 #include <lttng-events.h>
22 #include <lttng-tracer-core.h>
23 #include <lttng-tp-mempool.h>
24
25 #define __LTTNG_NULL_STRING "(null)"
26
27 /*
28 * Macro declarations used for all stages.
29 */
30
31 /*
32 * LTTng name mapping macros. LTTng remaps some of the kernel events to
33 * enforce name-spacing.
34 */
35 #undef LTTNG_TRACEPOINT_EVENT_MAP
36 #define LTTNG_TRACEPOINT_EVENT_MAP(name, map, proto, args, fields) \
37 LTTNG_TRACEPOINT_EVENT_CLASS(map, \
38 PARAMS(proto), \
39 PARAMS(args), \
40 PARAMS(fields)) \
41 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args))
42
43 #undef LTTNG_TRACEPOINT_EVENT_MAP_NOARGS
44 #define LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, map, fields) \
45 LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(map, \
46 PARAMS(fields)) \
47 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(map, name, map)
48
49 #undef LTTNG_TRACEPOINT_EVENT_CODE_MAP
50 #define LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, map, proto, args, _locvar, _code_pre, fields, _code_post) \
51 LTTNG_TRACEPOINT_EVENT_CLASS_CODE(map, \
52 PARAMS(proto), \
53 PARAMS(args), \
54 PARAMS(_locvar), \
55 PARAMS(_code_pre), \
56 PARAMS(fields), \
57 PARAMS(_code_post)) \
58 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args))
59
60 #undef LTTNG_TRACEPOINT_EVENT_CODE
61 #define LTTNG_TRACEPOINT_EVENT_CODE(name, proto, args, _locvar, _code_pre, fields, _code_post) \
62 LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, name, \
63 PARAMS(proto), \
64 PARAMS(args), \
65 PARAMS(_locvar), \
66 PARAMS(_code_pre), \
67 PARAMS(fields), \
68 PARAMS(_code_post))
69
70 /*
71  * LTTNG_TRACEPOINT_EVENT_CLASS can be used to add a generic function
72  * handler for events. That is, it can be used when all events have the
73  * same parameters and just have distinct tracepoints. Each tracepoint
74  * can be defined with LTTNG_TRACEPOINT_EVENT_INSTANCE, which maps the
75  * LTTNG_TRACEPOINT_EVENT_CLASS to the tracepoint.
76  *
77  * LTTNG_TRACEPOINT_EVENT is a one-to-one mapping between tracepoint and
78 * template.
79 */
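/*
 * Illustrative sketch (hypothetical event and field names, not part of this
 * header): an instrumentation header included through this file typically
 * declares a class shared by several events, then maps instances onto it:
 *
 *   LTTNG_TRACEPOINT_EVENT_CLASS(my_wakeup_template,
 *       TP_PROTO(struct task_struct *p),
 *       TP_ARGS(p),
 *       TP_FIELDS(
 *           ctf_integer(pid_t, tid, p->pid)
 *           ctf_string(comm, p->comm)
 *       )
 *   )
 *
 *   LTTNG_TRACEPOINT_EVENT_INSTANCE(my_wakeup_template, my_wakeup,
 *       TP_PROTO(struct task_struct *p),
 *       TP_ARGS(p))
 *
 * LTTNG_TRACEPOINT_EVENT(name, proto, args, fields) is then simply a class
 * with a single instance of the same name, as defined just below.
 */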
80
81 #undef LTTNG_TRACEPOINT_EVENT
82 #define LTTNG_TRACEPOINT_EVENT(name, proto, args, fields) \
83 LTTNG_TRACEPOINT_EVENT_MAP(name, name, \
84 PARAMS(proto), \
85 PARAMS(args), \
86 PARAMS(fields))
87
88 #undef LTTNG_TRACEPOINT_EVENT_NOARGS
89 #define LTTNG_TRACEPOINT_EVENT_NOARGS(name, fields) \
90 LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, name, PARAMS(fields))
91
92 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE
93 #define LTTNG_TRACEPOINT_EVENT_INSTANCE(template, name, proto, args) \
94 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(template, name, name, PARAMS(proto), PARAMS(args))
95
96 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS
97 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(template, name) \
98 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(template, name, name)
99
100 #undef LTTNG_TRACEPOINT_EVENT_CLASS
101 #define LTTNG_TRACEPOINT_EVENT_CLASS(_name, _proto, _args, _fields) \
102 LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, PARAMS(_proto), PARAMS(_args), , , \
103 PARAMS(_fields), )
104
105 #undef LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS
106 #define LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \
107 LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, , , PARAMS(_fields), )
108
109
110 /*
111 * Stage 1 of the trace events.
112 *
113  * Create dummy trace calls for each event, verifying that the LTTng module
114  * instrumentation headers match the kernel arguments. These calls will be
115  * optimized out by the compiler.
116 */
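/*
 * For the hypothetical my_wakeup instance sketched above, this stage emits
 *
 *   void trace_my_wakeup(struct task_struct *p);
 *
 * which must agree with the trace_my_wakeup() static inline generated by the
 * kernel's own tracepoint headers; any prototype mismatch becomes a compile
 * error.
 */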
117
118 /* Reset all macros within TRACEPOINT_EVENT */
119 #include <probes/lttng-events-reset.h>
120
121 #undef TP_PROTO
122 #define TP_PROTO(...) __VA_ARGS__
123
124 #undef TP_ARGS
125 #define TP_ARGS(...) __VA_ARGS__
126
127 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
128 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
129 void trace_##_name(_proto);
130
131 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
132 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
133 void trace_##_name(void);
134
135 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
136
137 /*
138 * Stage 1.1 of the trace events.
139 *
140 * Create dummy trace prototypes for each event class, and for each used
141 * template. This will allow checking whether the prototypes from the
142 * class and the instance using the class actually match.
143 */
144
145 #include <probes/lttng-events-reset.h> /* Reset all macros within TRACE_EVENT */
146
147 #undef TP_PROTO
148 #define TP_PROTO(...) __VA_ARGS__
149
150 #undef TP_ARGS
151 #define TP_ARGS(...) __VA_ARGS__
152
153 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
154 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
155 void __event_template_proto___##_template(_proto);
156
157 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
158 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
159 void __event_template_proto___##_template(void);
160
161 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
162 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
163 void __event_template_proto___##_name(_proto);
164
165 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
166 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
167 void __event_template_proto___##_name(void);
168
169 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
170
171 /*
172 * Stage 1.2 of tracepoint event generation
173 *
174 * Unfolding the enums
175 */
176 #include <probes/lttng-events-reset.h> /* Reset all macros within TRACE_EVENT */
177
178 /* Enumeration entry (single value) */
179 #undef ctf_enum_value
180 #define ctf_enum_value(_string, _value) \
181 { \
182 .start = { \
183 .signedness = lttng_is_signed_type(__typeof__(_value)), \
184 .value = lttng_is_signed_type(__typeof__(_value)) ? \
185 (long long) (_value) : (_value), \
186 }, \
187 .end = { \
188 .signedness = lttng_is_signed_type(__typeof__(_value)), \
189 .value = lttng_is_signed_type(__typeof__(_value)) ? \
190 (long long) (_value) : (_value), \
191 }, \
192 .string = (_string), \
193 },
194
195 /* Enumeration entry (range) */
196 #undef ctf_enum_range
197 #define ctf_enum_range(_string, _range_start, _range_end) \
198 { \
199 .start = { \
200 .signedness = lttng_is_signed_type(__typeof__(_range_start)), \
201 .value = lttng_is_signed_type(__typeof__(_range_start)) ? \
202 (long long) (_range_start) : (_range_start), \
203 }, \
204 .end = { \
205 .signedness = lttng_is_signed_type(__typeof__(_range_end)), \
206 .value = lttng_is_signed_type(__typeof__(_range_end)) ? \
207 (long long) (_range_end) : (_range_end), \
208 }, \
209 .string = (_string), \
210 },
211
212 /* Enumeration entry (automatic value; follows the rules of CTF) */
213 #undef ctf_enum_auto
214 #define ctf_enum_auto(_string) \
215 { \
216 .start = { \
217 .signedness = -1, \
218 .value = -1, \
219 }, \
220 .end = { \
221 .signedness = -1, \
222 .value = -1, \
223 }, \
224 .string = (_string), \
225 .options = { \
226 .is_auto = 1, \
227 } \
228 },
229
230 #undef TP_ENUM_VALUES
231 #define TP_ENUM_VALUES(...) \
232 __VA_ARGS__
233
234 #undef LTTNG_TRACEPOINT_ENUM
235 #define LTTNG_TRACEPOINT_ENUM(_name, _values) \
236 const struct lttng_enum_entry __enum_values__##_name[] = { \
237 _values \
238 };
239
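/*
 * Illustrative sketch (hypothetical names): an instrumentation header
 * declares the entries of an enumeration once, e.g.
 *
 *   LTTNG_TRACEPOINT_ENUM(my_state,
 *       TP_ENUM_VALUES(
 *           ctf_enum_value("RUNNING", 0)
 *           ctf_enum_range("BLOCKED", 1, 7)
 *           ctf_enum_auto("UNKNOWN")
 *       )
 *   )
 *
 * which this stage expands into the __enum_values__my_state entry array.
 * Stage 2 below attaches the matching lttng_enum_desc, and fields reference
 * it with ctf_enum(my_state, ...).
 */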
240 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
241
242 /*
243 * Stage 2 of the trace events.
244 *
245  * Create the event field type metadata section.
246  * Each event produces an array of fields.
247 */
248
249 /* Reset all macros within TRACEPOINT_EVENT */
250 #include <probes/lttng-events-reset.h>
251 #include <probes/lttng-events-write.h>
252 #include <probes/lttng-events-nowrite.h>
253
254 #undef _ctf_integer_ext
255 #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
256 { \
257 .name = #_item, \
258 .type = __type_integer(_type, 0, 0, -1, _byte_order, _base, none), \
259 .nowrite = _nowrite, \
260 .user = _user, \
261 .nofilter = 0, \
262 },
263
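/*
 * The public ctf_*() field macros map onto these _ctf_*() implementations
 * through probes/lttng-events-write.h and probes/lttng-events-nowrite.h.
 * As a sketch (see those headers for the authoritative definitions),
 * ctf_integer(_type, _item, _src) is expected to resolve to
 *
 *   _ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, 0, 0)
 *
 * i.e. native byte order, base 10, kernel-space source, written to the trace.
 */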
264 #undef _ctf_array_encoded
265 #define _ctf_array_encoded(_type, _item, _src, _length, \
266 _encoding, _byte_order, _elem_type_base, _user, _nowrite) \
267 { \
268 .name = #_item, \
269 .type = \
270 { \
271 .atype = atype_array_nestable, \
272 .u = \
273 { \
274 .array_nestable = \
275 { \
276 .elem_type = __LTTNG_COMPOUND_LITERAL(struct lttng_type, \
277 __type_integer(_type, 0, 0, -1, _byte_order, _elem_type_base, _encoding)), \
278 .length = _length, \
279 .alignment = 0, \
280 } \
281 } \
282 }, \
283 .nowrite = _nowrite, \
284 .user = _user, \
285 .nofilter = 0, \
286 },
287
288 #undef _ctf_array_bitfield
289 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
290 { \
291 .name = #_item, \
292 .type = \
293 { \
294 .atype = atype_array_nestable, \
295 .u = \
296 { \
297 .array_nestable = \
298 { \
299 .elem_type = __LTTNG_COMPOUND_LITERAL(struct lttng_type, \
300 __type_integer(_type, 1, 1, 0, __LITTLE_ENDIAN, 10, none)), \
301 .length = (_length) * sizeof(_type) * CHAR_BIT, \
302 .alignment = lttng_alignof(_type), \
303 } \
304 } \
305 }, \
306 .nowrite = _nowrite, \
307 .user = _user, \
308 .nofilter = 0, \
309 },
310
311
312 #undef _ctf_sequence_encoded
313 #define _ctf_sequence_encoded(_type, _item, _src, \
314 _length_type, _src_length, _encoding, \
315 _byte_order, _elem_type_base, _user, _nowrite) \
316 { \
317 .name = "_" #_item "_length", \
318 .type = __type_integer(_length_type, 0, 0, -1, __BYTE_ORDER, 10, none), \
319 .nowrite = _nowrite, \
320 .nofilter = 1, \
321 }, \
322 { \
323 .name = #_item, \
324 .type = \
325 { \
326 .atype = atype_sequence_nestable, \
327 .u = \
328 { \
329 .sequence_nestable = \
330 { \
331 .length_name = "_" #_item "_length", \
332 .elem_type = __LTTNG_COMPOUND_LITERAL(struct lttng_type, \
333 __type_integer(_type, 0, 0, -1, _byte_order, _elem_type_base, _encoding)), \
334 .alignment = 0, \
335 }, \
336 }, \
337 }, \
338 .nowrite = _nowrite, \
339 .user = _user, \
340 .nofilter = 0, \
341 },
342
343 #undef _ctf_sequence_bitfield
344 #define _ctf_sequence_bitfield(_type, _item, _src, \
345 _length_type, _src_length, \
346 _user, _nowrite) \
347 { \
348 .name = "_" #_item "_length", \
349 .type = __type_integer(_length_type, 0, 0, -1, __BYTE_ORDER, 10, none), \
350 .nowrite = _nowrite, \
351 .nofilter = 1, \
352 }, \
353 { \
354 .name = #_item, \
355 .type = \
356 { \
357 .atype = atype_sequence_nestable, \
358 .u = \
359 { \
360 .sequence_nestable = \
361 { \
362 .length_name = "_" #_item "_length", \
363 .elem_type = __LTTNG_COMPOUND_LITERAL(struct lttng_type, \
364 __type_integer(_type, 1, 1, 0, __LITTLE_ENDIAN, 10, none)), \
365 .alignment = lttng_alignof(_type), \
366 }, \
367 }, \
368 }, \
369 .nowrite = _nowrite, \
370 .user = _user, \
371 .nofilter = 0, \
372 },
373
374 #undef _ctf_string
375 #define _ctf_string(_item, _src, _user, _nowrite) \
376 { \
377 .name = #_item, \
378 .type = \
379 { \
380 .atype = atype_string, \
381 .u = \
382 { \
383 .string = { .encoding = lttng_encode_UTF8 }, \
384 }, \
385 }, \
386 .nowrite = _nowrite, \
387 .user = _user, \
388 .nofilter = 0, \
389 },
390
391 #undef _ctf_enum
392 #define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
393 { \
394 .name = #_item, \
395 .type = { \
396 .atype = atype_enum_nestable, \
397 .u = { \
398 .enum_nestable = { \
399 .desc = &__enum_##_name, \
400 .container_type = __LTTNG_COMPOUND_LITERAL(struct lttng_type, \
401 __type_integer(_type, 0, 0, -1, __BYTE_ORDER, 10, none)), \
402 }, \
403 }, \
404 }, \
405 .nowrite = _nowrite, \
406 .user = _user, \
407 .nofilter = 0, \
408 },
409
410 #undef ctf_custom_field
411 #define ctf_custom_field(_type, _item, _code) \
412 { \
413 .name = #_item, \
414 .type = _type, \
415 .nowrite = 0, \
416 .user = 0, \
417 .nofilter = 1, \
418 },
419
420 #undef ctf_custom_type
421 #define ctf_custom_type(...) __VA_ARGS__
422
423 #undef TP_FIELDS
424 #define TP_FIELDS(...) __VA_ARGS__ /* Only one used in this phase */
425
426 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
427 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
428 static const struct lttng_event_field __event_fields___##_name[] = { \
429 _fields \
430 };
431
432 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
433 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
434 LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, PARAMS(_fields), _code_post)
435
436 #undef LTTNG_TRACEPOINT_ENUM
437 #define LTTNG_TRACEPOINT_ENUM(_name, _values) \
438 static const struct lttng_enum_desc __enum_##_name = { \
439 .name = #_name, \
440 .entries = __enum_values__##_name, \
441 .nr_entries = ARRAY_SIZE(__enum_values__##_name), \
442 };
443
444 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
445
446 /*
447 * Stage 3 of the trace events.
448 *
449 * Create probe callback prototypes.
450 */
451
452 /* Reset all macros within TRACEPOINT_EVENT */
453 #include <probes/lttng-events-reset.h>
454
455 #undef TP_PROTO
456 #define TP_PROTO(...) __VA_ARGS__
457
458 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
459 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
460 static void __event_probe__##_name(void *__data, _proto);
461
462 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
463 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
464 static void __event_probe__##_name(void *__data);
465
466 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
467
468 /*
469 * Stage 4 of the trace events.
470 *
471 * Create static inline function that calculates event size.
472 */
473
474 /* Reset all macros within TRACEPOINT_EVENT */
475 #include <probes/lttng-events-reset.h>
476 #include <probes/lttng-events-write.h>
477
478 #undef _ctf_integer_ext
479 #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
480 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
481 __event_len += sizeof(_type);
482
483 #undef _ctf_array_encoded
484 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
485 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
486 __event_len += sizeof(_type) * (_length);
487
488 #undef _ctf_array_bitfield
489 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
490 _ctf_array_encoded(_type, _item, _src, _length, none, __LITTLE_ENDIAN, 0, _user, _nowrite)
491
492 #undef _ctf_sequence_encoded
493 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
494 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
495 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_length_type)); \
496 __event_len += sizeof(_length_type); \
497 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
498 { \
499 size_t __seqlen = (_src_length); \
500 \
501 if (unlikely(++this_cpu_ptr(&lttng_dynamic_len_stack)->offset >= LTTNG_DYNAMIC_LEN_STACK_SIZE)) \
502 goto error; \
503 barrier(); /* reserve before use. */ \
504 this_cpu_ptr(&lttng_dynamic_len_stack)->stack[this_cpu_ptr(&lttng_dynamic_len_stack)->offset - 1] = __seqlen; \
505 __event_len += sizeof(_type) * __seqlen; \
506 }
507
508 #undef _ctf_sequence_bitfield
509 #define _ctf_sequence_bitfield(_type, _item, _src, \
510 _length_type, _src_length, \
511 _user, _nowrite) \
512 _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
513 none, __LITTLE_ENDIAN, 10, _user, _nowrite)
514
515 /*
516  * ctf_user_string includes the \0 terminator. If lttng_strlen_user_inatomic()
517  * returns 0, it faulted, so we set the size to 1 (\0 only).
518 */
519 #undef _ctf_string
520 #define _ctf_string(_item, _src, _user, _nowrite) \
521 if (unlikely(++this_cpu_ptr(&lttng_dynamic_len_stack)->offset >= LTTNG_DYNAMIC_LEN_STACK_SIZE)) \
522 goto error; \
523 barrier(); /* reserve before use. */ \
524 if (_user) { \
525 __event_len += this_cpu_ptr(&lttng_dynamic_len_stack)->stack[this_cpu_ptr(&lttng_dynamic_len_stack)->offset - 1] = \
526 max_t(size_t, lttng_strlen_user_inatomic(_src), 1); \
527 } else { \
528 __event_len += this_cpu_ptr(&lttng_dynamic_len_stack)->stack[this_cpu_ptr(&lttng_dynamic_len_stack)->offset - 1] = \
529 strlen((_src) ? (_src) : __LTTNG_NULL_STRING) + 1; \
530 }
531
532 #undef _ctf_enum
533 #define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
534 _ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)
535
536 #undef ctf_align
537 #define ctf_align(_type) \
538 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type));
539
540 #undef ctf_custom_field
541 #define ctf_custom_field(_type, _item, _code) \
542 { \
543 _code \
544 }
545
546 #undef ctf_custom_code
547 #define ctf_custom_code(...) __VA_ARGS__
548
549 #undef TP_PROTO
550 #define TP_PROTO(...) __VA_ARGS__
551
552 #undef TP_FIELDS
553 #define TP_FIELDS(...) __VA_ARGS__
554
555 #undef TP_locvar
556 #define TP_locvar(...) __VA_ARGS__
557
558 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
559 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
560 static inline ssize_t __event_get_size__##_name(void *__tp_locvar, _proto) \
561 { \
562 size_t __event_len = 0; \
563 unsigned int __dynamic_len_idx __attribute__((unused)) = 0; \
564 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
565 \
566 _fields \
567 return __event_len; \
568 \
569 error: \
570 __attribute__((unused)); \
571 return -1; \
572 }
573
574 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
575 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
576 static inline ssize_t __event_get_size__##_name(void *__tp_locvar) \
577 { \
578 size_t __event_len = 0; \
579 unsigned int __dynamic_len_idx __attribute__((unused)) = 0; \
580 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
581 \
582 _fields \
583 return __event_len; \
584 \
585 error: \
586 __attribute__((unused)); \
587 return -1; \
588 }
589
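/*
 * As a sketch, for the hypothetical my_wakeup event above, the generated
 * size function is roughly
 *
 *   static inline ssize_t __event_get_size__my_wakeup_template(
 *           void *__tp_locvar, struct task_struct *p)
 *   {
 *       size_t __event_len = 0;
 *
 *       __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(pid_t));
 *       __event_len += sizeof(pid_t);
 *       // dynamic part: push strlen(p->comm) + 1 on the per-cpu
 *       // lttng_dynamic_len_stack and add it to __event_len
 *       return __event_len;
 *   }
 */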
590 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
591
592
593 /*
594 * Stage 4.1 of tracepoint event generation.
595 *
596  * Create static inline function that lays out the filter stack data.
597 * We make both write and nowrite data available to the filter.
598 */
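/*
 * The resulting layout is position-based. For the hypothetical my_wakeup
 * fields above, on a 64-bit architecture the filter bytecode reads, in
 * field order:
 *
 *   offset 0: int64_t tid       (integers are widened to 64-bit)
 *   offset 8: const char *comm  (strings are passed by pointer)
 *
 * Arrays and sequences take an unsigned long length followed by a pointer,
 * so each field needs at most a size plus a pointer on the stack.
 */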
599
600 /* Reset all macros within TRACEPOINT_EVENT */
601 #include <probes/lttng-events-reset.h>
602 #include <probes/lttng-events-write.h>
603 #include <probes/lttng-events-nowrite.h>
604
605 #undef _ctf_integer_ext_fetched
606 #define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
607 if (lttng_is_signed_type(_type)) { \
608 int64_t __ctf_tmp_int64; \
609 switch (sizeof(_type)) { \
610 case 1: \
611 { \
612 union { _type t; int8_t v; } __tmp = { (_type) (_src) }; \
613 __ctf_tmp_int64 = (int64_t) __tmp.v; \
614 break; \
615 } \
616 case 2: \
617 { \
618 union { _type t; int16_t v; } __tmp = { (_type) (_src) }; \
619 if (_byte_order != __BYTE_ORDER) \
620 __swab16s(&__tmp.v); \
621 __ctf_tmp_int64 = (int64_t) __tmp.v; \
622 break; \
623 } \
624 case 4: \
625 { \
626 union { _type t; int32_t v; } __tmp = { (_type) (_src) }; \
627 if (_byte_order != __BYTE_ORDER) \
628 __swab32s(&__tmp.v); \
629 __ctf_tmp_int64 = (int64_t) __tmp.v; \
630 break; \
631 } \
632 case 8: \
633 { \
634 union { _type t; int64_t v; } __tmp = { (_type) (_src) }; \
635 if (_byte_order != __BYTE_ORDER) \
636 __swab64s(&__tmp.v); \
637 __ctf_tmp_int64 = (int64_t) __tmp.v; \
638 break; \
639 } \
640 default: \
641 BUG_ON(1); \
642 }; \
643 memcpy(__stack_data, &__ctf_tmp_int64, sizeof(int64_t)); \
644 } else { \
645 uint64_t __ctf_tmp_uint64; \
646 switch (sizeof(_type)) { \
647 case 1: \
648 { \
649 union { _type t; uint8_t v; } __tmp = { (_type) (_src) }; \
650 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
651 break; \
652 } \
653 case 2: \
654 { \
655 union { _type t; uint16_t v; } __tmp = { (_type) (_src) }; \
656 if (_byte_order != __BYTE_ORDER) \
657 __swab16s(&__tmp.v); \
658 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
659 break; \
660 } \
661 case 4: \
662 { \
663 union { _type t; uint32_t v; } __tmp = { (_type) (_src) }; \
664 if (_byte_order != __BYTE_ORDER) \
665 __swab32s(&__tmp.v); \
666 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
667 break; \
668 } \
669 case 8: \
670 { \
671 union { _type t; uint64_t v; } __tmp = { (_type) (_src) }; \
672 if (_byte_order != __BYTE_ORDER) \
673 __swab64s(&__tmp.v); \
674 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
675 break; \
676 } \
677 default: \
678 BUG_ON(1); \
679 }; \
680 memcpy(__stack_data, &__ctf_tmp_uint64, sizeof(uint64_t)); \
681 } \
682 __stack_data += sizeof(int64_t);
683
684 #undef _ctf_integer_ext_isuser0
685 #define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \
686 _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite)
687
688 #undef _ctf_integer_ext_isuser1
689 #define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \
690 { \
691 union { \
692 char __array[sizeof(_user_src)]; \
693 __typeof__(_user_src) __v; \
694 } __tmp_fetch; \
695 if (lib_ring_buffer_copy_from_user_check_nofault(__tmp_fetch.__array, \
696 &(_user_src), sizeof(_user_src))) \
697 memset(__tmp_fetch.__array, 0, sizeof(__tmp_fetch.__array)); \
698 _ctf_integer_ext_fetched(_type, _item, __tmp_fetch.__v, _byte_order, _base, _nowrite) \
699 }
700
701 #undef _ctf_integer_ext
702 #define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \
703 _ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite)
704
705 #undef _ctf_array_encoded
706 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
707 { \
708 unsigned long __ctf_tmp_ulong = (unsigned long) (_length); \
709 const void *__ctf_tmp_ptr = (_src); \
710 memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \
711 __stack_data += sizeof(unsigned long); \
712 memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
713 __stack_data += sizeof(void *); \
714 }
715
716 #undef _ctf_array_bitfield
717 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
718 _ctf_array_encoded(_type, _item, _src, _length, none, __LITTLE_ENDIAN, 0, _user, _nowrite)
719
720 #undef _ctf_sequence_encoded
721 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
722 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
723 { \
724 unsigned long __ctf_tmp_ulong = (unsigned long) (_src_length); \
725 const void *__ctf_tmp_ptr = (_src); \
726 memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \
727 __stack_data += sizeof(unsigned long); \
728 memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
729 __stack_data += sizeof(void *); \
730 }
731
732 #undef _ctf_sequence_bitfield
733 #define _ctf_sequence_bitfield(_type, _item, _src, \
734 _length_type, _src_length, \
735 _user, _nowrite) \
736 _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
737 none, __LITTLE_ENDIAN, 10, _user, _nowrite)
738
739 #undef _ctf_string
740 #define _ctf_string(_item, _src, _user, _nowrite) \
741 { \
742 const void *__ctf_tmp_ptr = \
743 ((_src) ? (_src) : __LTTNG_NULL_STRING); \
744 memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
745 __stack_data += sizeof(void *); \
746 }
747
748 #undef _ctf_enum
749 #define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
750 _ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)
751
752 #undef TP_PROTO
753 #define TP_PROTO(...) __VA_ARGS__
754
755 #undef TP_FIELDS
756 #define TP_FIELDS(...) __VA_ARGS__
757
758 #undef TP_locvar
759 #define TP_locvar(...) __VA_ARGS__
760
761 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
762 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
763 static inline \
764 void __event_prepare_filter_stack__##_name(char *__stack_data, \
765 void *__tp_locvar) \
766 { \
767 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
768 \
769 _fields \
770 }
771
772 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
773 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
774 static inline \
775 void __event_prepare_filter_stack__##_name(char *__stack_data, \
776 void *__tp_locvar, _proto) \
777 { \
778 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
779 \
780 _fields \
781 }
782
783 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
784
785 /*
786 * Stage 5 of the trace events.
787 *
788 * Create static inline function that calculates event payload alignment.
789 */
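/*
 * The event alignment is simply the maximum of its field alignments. For
 * example, an event carrying a uint64_t field and a uint8_t field ends up
 * with __event_align == 8 on architectures where lttng_alignof(uint64_t)
 * is 8.
 */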
790
791 /* Reset all macros within TRACEPOINT_EVENT */
792 #include <probes/lttng-events-reset.h>
793 #include <probes/lttng-events-write.h>
794
795 #undef _ctf_integer_ext
796 #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
797 __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
798
799 #undef _ctf_array_encoded
800 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
801 __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
802
803 #undef _ctf_array_bitfield
804 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
805 _ctf_array_encoded(_type, _item, _src, _length, none, __LITTLE_ENDIAN, 0, _user, _nowrite)
806
807 #undef _ctf_sequence_encoded
808 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
809 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
810 __event_align = max_t(size_t, __event_align, lttng_alignof(_length_type)); \
811 __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
812
813 #undef _ctf_sequence_bitfield
814 #define _ctf_sequence_bitfield(_type, _item, _src, \
815 _length_type, _src_length, \
816 _user, _nowrite) \
817 _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
818 none, __LITTLE_ENDIAN, 10, _user, _nowrite)
819
820 #undef _ctf_string
821 #define _ctf_string(_item, _src, _user, _nowrite)
822
823 #undef _ctf_enum
824 #define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
825 _ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)
826
827 #undef ctf_align
828 #define ctf_align(_type) \
829 __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
830
831 #undef TP_PROTO
832 #define TP_PROTO(...) __VA_ARGS__
833
834 #undef TP_FIELDS
835 #define TP_FIELDS(...) __VA_ARGS__
836
837 #undef TP_locvar
838 #define TP_locvar(...) __VA_ARGS__
839
840 #undef ctf_custom_field
841 #define ctf_custom_field(_type, _item, _code) _code
842
843 #undef ctf_custom_code
844 #define ctf_custom_code(...) \
845 { \
846 __VA_ARGS__ \
847 }
848
849 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
850 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
851 static inline size_t __event_get_align__##_name(void *__tp_locvar, _proto) \
852 { \
853 size_t __event_align = 1; \
854 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
855 \
856 _fields \
857 return __event_align; \
858 }
859
860 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
861 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
862 static inline size_t __event_get_align__##_name(void *__tp_locvar) \
863 { \
864 size_t __event_align = 1; \
865 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
866 \
867 _fields \
868 return __event_align; \
869 }
870
871 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
872
873 /*
874 * Stage 6 of tracepoint event generation.
875 *
876  * Create the probe function. This function performs the event size
877  * calculation and writes the event data into the buffer.
878 */
879
880 /* Reset all macros within TRACEPOINT_EVENT */
881 #include <probes/lttng-events-reset.h>
882 #include <probes/lttng-events-write.h>
883
884 #undef _ctf_integer_ext_fetched
885 #define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
886 { \
887 _type __tmp = _src; \
888 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__tmp));\
889 __chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp));\
890 }
891
892 #undef _ctf_integer_ext_isuser0
893 #define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \
894 _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite)
895
896 #undef _ctf_integer_ext_isuser1
897 #define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \
898 { \
899 union { \
900 char __array[sizeof(_user_src)]; \
901 __typeof__(_user_src) __v; \
902 } __tmp_fetch; \
903 if (lib_ring_buffer_copy_from_user_check_nofault(__tmp_fetch.__array, \
904 &(_user_src), sizeof(_user_src))) \
905 memset(__tmp_fetch.__array, 0, sizeof(__tmp_fetch.__array)); \
906 _ctf_integer_ext_fetched(_type, _item, __tmp_fetch.__v, _byte_order, _base, _nowrite) \
907 }
908
909 #undef _ctf_integer_ext
910 #define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \
911 _ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite)
912
913 #undef _ctf_array_encoded
914 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
915 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
916 if (_user) { \
917 __chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \
918 } else { \
919 __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \
920 }
921
922 #if (__BYTE_ORDER == __LITTLE_ENDIAN)
923 #undef _ctf_array_bitfield
924 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
925 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
926 if (_user) { \
927 __chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \
928 } else { \
929 __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \
930 }
931 #else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
932 /*
933 * For big endian, we need to byteswap into little endian.
934 */
935 #undef _ctf_array_bitfield
936 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
937 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
938 { \
939 size_t _i; \
940 \
941 for (_i = 0; _i < (_length); _i++) { \
942 _type _tmp; \
943 \
944 if (_user) { \
945 if (get_user(_tmp, (_type *) _src + _i)) \
946 _tmp = 0; \
947 } else { \
948 _tmp = ((_type *) _src)[_i]; \
949 } \
950 switch (sizeof(_type)) { \
951 case 1: \
952 break; \
953 case 2: \
954 _tmp = cpu_to_le16(_tmp); \
955 break; \
956 case 4: \
957 _tmp = cpu_to_le32(_tmp); \
958 break; \
959 case 8: \
960 _tmp = cpu_to_le64(_tmp); \
961 break; \
962 default: \
963 BUG_ON(1); \
964 } \
965 __chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \
966 } \
967 }
968 #endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
969
970 #undef _ctf_sequence_encoded
971 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
972 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
973 { \
974 _length_type __tmpl = this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx]; \
975 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
976 __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
977 } \
978 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
979 if (_user) { \
980 __chan->ops->event_write_from_user(&__ctx, _src, \
981 sizeof(_type) * __get_dynamic_len(dest)); \
982 } else { \
983 __chan->ops->event_write(&__ctx, _src, \
984 sizeof(_type) * __get_dynamic_len(dest)); \
985 }
986
987 #if (__BYTE_ORDER == __LITTLE_ENDIAN)
988 #undef _ctf_sequence_bitfield
989 #define _ctf_sequence_bitfield(_type, _item, _src, \
990 _length_type, _src_length, \
991 _user, _nowrite) \
992 { \
993 _length_type __tmpl = this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \
994 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
995 __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
996 } \
997 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
998 if (_user) { \
999 __chan->ops->event_write_from_user(&__ctx, _src, \
1000 sizeof(_type) * __get_dynamic_len(dest)); \
1001 } else { \
1002 __chan->ops->event_write(&__ctx, _src, \
1003 sizeof(_type) * __get_dynamic_len(dest)); \
1004 }
1005 #else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
1006 /*
1007 * For big endian, we need to byteswap into little endian.
1008 */
1009 #undef _ctf_sequence_bitfield
1010 #define _ctf_sequence_bitfield(_type, _item, _src, \
1011 _length_type, _src_length, \
1012 _user, _nowrite) \
1013 { \
1014 _length_type __tmpl = this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \
1015 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
1016 __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
1017 } \
1018 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
1019 { \
1020 size_t _i, _length; \
1021 \
1022 _length = __get_dynamic_len(dest); \
1023 for (_i = 0; _i < _length; _i++) { \
1024 _type _tmp; \
1025 \
1026 if (_user) { \
1027 if (get_user(_tmp, (_type *) _src + _i)) \
1028 _tmp = 0; \
1029 } else { \
1030 _tmp = ((_type *) _src)[_i]; \
1031 } \
1032 switch (sizeof(_type)) { \
1033 case 1: \
1034 break; \
1035 case 2: \
1036 _tmp = cpu_to_le16(_tmp); \
1037 break; \
1038 case 4: \
1039 _tmp = cpu_to_le32(_tmp); \
1040 break; \
1041 case 8: \
1042 _tmp = cpu_to_le64(_tmp); \
1043 break; \
1044 default: \
1045 BUG_ON(1); \
1046 } \
1047 __chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \
1048 } \
1049 }
1050 #endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
1051
1052 #undef _ctf_string
1053 #define _ctf_string(_item, _src, _user, _nowrite) \
1054 if (_user) { \
1055 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(*(_src))); \
1056 __chan->ops->event_strcpy_from_user(&__ctx, _src, \
1057 __get_dynamic_len(dest)); \
1058 } else { \
1059 const char *__ctf_tmp_string = \
1060 ((_src) ? (_src) : __LTTNG_NULL_STRING); \
1061 lib_ring_buffer_align_ctx(&__ctx, \
1062 lttng_alignof(*__ctf_tmp_string)); \
1063 __chan->ops->event_strcpy(&__ctx, __ctf_tmp_string, \
1064 __get_dynamic_len(dest)); \
1065 }
1066
1067 #undef _ctf_enum
1068 #define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
1069 _ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)
1070
1071 #undef ctf_align
1072 #define ctf_align(_type) \
1073 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type));
1074
1075 #undef ctf_custom_field
1076 #define ctf_custom_field(_type, _item, _code) _code
1077
1078 #undef ctf_custom_code
1079 #define ctf_custom_code(...) \
1080 { \
1081 __VA_ARGS__ \
1082 }
1083
1084 /* Beware: __get_dynamic_len() actually consumes the len value. */
1085 #undef __get_dynamic_len
1086 #define __get_dynamic_len(field) this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx++]
1087
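/*
 * Pairing sketch: for the hypothetical my_wakeup event, stage 4 pushed one
 * entry (the string length) on the per-cpu lttng_dynamic_len_stack; the
 * _fields expansion below pops it back in the same field order:
 *
 *   size stage:  stack[idx++] = strlen(p->comm) + 1;
 *   write stage: event_strcpy(..., __get_dynamic_len(dest));
 *
 * Push and pop counts must match, which is why the probe restores
 * __orig_dynamic_len_offset before returning.
 */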
1088 #undef TP_PROTO
1089 #define TP_PROTO(...) __VA_ARGS__
1090
1091 #undef TP_ARGS
1092 #define TP_ARGS(...) __VA_ARGS__
1093
1094 #undef TP_FIELDS
1095 #define TP_FIELDS(...) __VA_ARGS__
1096
1097 #undef TP_locvar
1098 #define TP_locvar(...) __VA_ARGS__
1099
1100 #undef TP_code_pre
1101 #define TP_code_pre(...) __VA_ARGS__
1102
1103 #undef TP_code_post
1104 #define TP_code_post(...) __VA_ARGS__
1105
1106 /*
1107  * For the state dump, check that the "session" argument (mandatory) matches
1108  * the session this event belongs to. This ensures that we write state dump
1109  * data only into the started session, not into all sessions.
1110 */
1111 #ifdef TP_SESSION_CHECK
1112 #define _TP_SESSION_CHECK(session, csession) (session == csession)
1113 #else /* TP_SESSION_CHECK */
1114 #define _TP_SESSION_CHECK(session, csession) 1
1115 #endif /* TP_SESSION_CHECK */
1116
1117 /*
1118  * Use twice the size for the filter stack data to hold a size and a pointer
1119  * for each field (worst case). For integers, the maximum size required is
1120  * 64-bit; the same holds for double-precision floats. Both fit within
1121  * 2*sizeof(unsigned long) on all supported architectures. The filter runtime
1122  * list is evaluated as a union (logical ||): record if any filter matches.
1123 */
1124 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
1125 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
1126 static void __event_probe__##_name(void *__data, _proto) \
1127 { \
1128 struct probe_local_vars { _locvar }; \
1129 struct lttng_event *__event = __data; \
1130 struct lttng_probe_ctx __lttng_probe_ctx = { \
1131 .event = __event, \
1132 .interruptible = !irqs_disabled(), \
1133 }; \
1134 struct lttng_channel *__chan = __event->chan; \
1135 struct lttng_session *__session = __chan->session; \
1136 struct lib_ring_buffer_ctx __ctx; \
1137 ssize_t __event_len; \
1138 size_t __event_align; \
1139 size_t __orig_dynamic_len_offset, __dynamic_len_idx __attribute__((unused)); \
1140 union { \
1141 size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
1142 char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
1143 } __stackvar; \
1144 int __ret; \
1145 struct probe_local_vars __tp_locvar; \
1146 struct probe_local_vars *tp_locvar __attribute__((unused)) = \
1147 &__tp_locvar; \
1148 struct lttng_id_tracker_rcu *__lf; \
1149 \
1150 if (!_TP_SESSION_CHECK(session, __session)) \
1151 return; \
1152 if (unlikely(!READ_ONCE(__session->active))) \
1153 return; \
1154 if (unlikely(!READ_ONCE(__chan->enabled))) \
1155 return; \
1156 if (unlikely(!READ_ONCE(__event->enabled))) \
1157 return; \
1158 __lf = rcu_dereference_raw_check(__session->pid_tracker.p); \
1159 if (__lf && likely(!lttng_id_tracker_lookup(__lf, current->tgid))) \
1160 return; \
1161 __lf = rcu_dereference_raw_check(__session->vpid_tracker.p); \
1162 if (__lf && likely(!lttng_id_tracker_lookup(__lf, task_tgid_vnr(current)))) \
1163 return; \
1164 __lf = rcu_dereference_raw_check(__session->uid_tracker.p); \
1165 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1166 lttng_current_uid()))) \
1167 return; \
1168 __lf = rcu_dereference_raw_check(__session->vuid_tracker.p); \
1169 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1170 lttng_current_vuid()))) \
1171 return; \
1172 __lf = rcu_dereference_raw_check(__session->gid_tracker.p); \
1173 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1174 lttng_current_gid()))) \
1175 return; \
1176 __lf = rcu_dereference_raw_check(__session->vgid_tracker.p); \
1177 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1178 lttng_current_vgid()))) \
1179 return; \
1180 __orig_dynamic_len_offset = this_cpu_ptr(&lttng_dynamic_len_stack)->offset; \
1181 __dynamic_len_idx = __orig_dynamic_len_offset; \
1182 _code_pre \
1183 if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \
1184 struct lttng_bytecode_runtime *bc_runtime; \
1185 int __filter_record = __event->has_enablers_without_bytecode; \
1186 \
1187 __event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \
1188 tp_locvar, _args); \
1189 list_for_each_entry_rcu_notrace(bc_runtime, &__event->bytecode_runtime_head, node) { \
1190 if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
1191 __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) { \
1192 __filter_record = 1; \
1193 break; \
1194 } \
1195 } \
1196 if (likely(!__filter_record)) \
1197 goto __post; \
1198 } \
1199 __event_len = __event_get_size__##_name(tp_locvar, _args); \
1200 if (unlikely(__event_len < 0)) { \
1201 lib_ring_buffer_lost_event_too_big(__chan->chan); \
1202 goto __post; \
1203 } \
1204 __event_align = __event_get_align__##_name(tp_locvar, _args); \
1205 lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \
1206 __event_align, -1); \
1207 __ret = __chan->ops->event_reserve(&__ctx, __event->id); \
1208 if (__ret < 0) \
1209 goto __post; \
1210 _fields \
1211 __chan->ops->event_commit(&__ctx); \
1212 __post: \
1213 _code_post \
1214 barrier(); /* use before un-reserve. */ \
1215 this_cpu_ptr(&lttng_dynamic_len_stack)->offset = __orig_dynamic_len_offset; \
1216 return; \
1217 }
1218
1219 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
1220 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
1221 static void __event_probe__##_name(void *__data) \
1222 { \
1223 struct probe_local_vars { _locvar }; \
1224 struct lttng_event *__event = __data; \
1225 struct lttng_probe_ctx __lttng_probe_ctx = { \
1226 .event = __event, \
1227 .interruptible = !irqs_disabled(), \
1228 }; \
1229 struct lttng_channel *__chan = __event->chan; \
1230 struct lttng_session *__session = __chan->session; \
1231 struct lib_ring_buffer_ctx __ctx; \
1232 ssize_t __event_len; \
1233 size_t __event_align; \
1234 size_t __orig_dynamic_len_offset, __dynamic_len_idx __attribute__((unused)); \
1235 union { \
1236 size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
1237 char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
1238 } __stackvar; \
1239 int __ret; \
1240 struct probe_local_vars __tp_locvar; \
1241 struct probe_local_vars *tp_locvar __attribute__((unused)) = \
1242 &__tp_locvar; \
1243 struct lttng_id_tracker_rcu *__lf; \
1244 \
1245 if (!_TP_SESSION_CHECK(session, __session)) \
1246 return; \
1247 if (unlikely(!READ_ONCE(__session->active))) \
1248 return; \
1249 if (unlikely(!READ_ONCE(__chan->enabled))) \
1250 return; \
1251 if (unlikely(!READ_ONCE(__event->enabled))) \
1252 return; \
1253 __lf = rcu_dereference_raw_check(__session->pid_tracker.p); \
1254 if (__lf && likely(!lttng_id_tracker_lookup(__lf, current->tgid))) \
1255 return; \
1256 __lf = rcu_dereference_raw_check(__session->vpid_tracker.p); \
1257 if (__lf && likely(!lttng_id_tracker_lookup(__lf, task_tgid_vnr(current)))) \
1258 return; \
1259 __lf = rcu_dereference_raw_check(__session->uid_tracker.p); \
1260 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1261 lttng_current_uid()))) \
1262 return; \
1263 __lf = rcu_dereference_raw_check(__session->vuid_tracker.p); \
1264 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1265 lttng_current_vuid()))) \
1266 return; \
1267 __lf = rcu_dereference_raw_check(__session->gid_tracker.p); \
1268 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1269 lttng_current_gid()))) \
1270 return; \
1271 __lf = rcu_dereference_raw_check(__session->vgid_tracker.p); \
1272 if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
1273 lttng_current_vgid()))) \
1274 return; \
1275 __orig_dynamic_len_offset = this_cpu_ptr(&lttng_dynamic_len_stack)->offset; \
1276 __dynamic_len_idx = __orig_dynamic_len_offset; \
1277 _code_pre \
1278 if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \
1279 struct lttng_bytecode_runtime *bc_runtime; \
1280 int __filter_record = __event->has_enablers_without_bytecode; \
1281 \
1282 __event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \
1283 tp_locvar); \
1284 list_for_each_entry_rcu_notrace(bc_runtime, &__event->bytecode_runtime_head, node) { \
1285 if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \
1286 __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) { \
1287 __filter_record = 1; \
1288 break; \
1289 } \
1290 } \
1291 if (likely(!__filter_record)) \
1292 goto __post; \
1293 } \
1294 __event_len = __event_get_size__##_name(tp_locvar); \
1295 if (unlikely(__event_len < 0)) { \
1296 lib_ring_buffer_lost_event_too_big(__chan->chan); \
1297 goto __post; \
1298 } \
1299 __event_align = __event_get_align__##_name(tp_locvar); \
1300 lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \
1301 __event_align, -1); \
1302 __ret = __chan->ops->event_reserve(&__ctx, __event->id); \
1303 if (__ret < 0) \
1304 goto __post; \
1305 _fields \
1306 __chan->ops->event_commit(&__ctx); \
1307 __post: \
1308 _code_post \
1309 barrier(); /* use before un-reserve. */ \
1310 this_cpu_ptr(&lttng_dynamic_len_stack)->offset = __orig_dynamic_len_offset; \
1311 return; \
1312 }
1313
1314 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1315
1316 #undef __get_dynamic_len
1317
1318 /*
1319 * Stage 7 of the trace events.
1320 *
1321 * Create event descriptions.
1322 */
1323
1324 /* Named field types must be defined in lttng-types.h */
1325
1326 #include <probes/lttng-events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1327
1328 #ifndef TP_PROBE_CB
1329 #define TP_PROBE_CB(_template) &__event_probe__##_template
1330 #endif
1331
1332 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
1333 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
1334 static const struct lttng_event_desc __event_desc___##_map = { \
1335 .fields = __event_fields___##_template, \
1336 .name = #_map, \
1337 .kname = #_name, \
1338 .probe_callback = (void *) TP_PROBE_CB(_template), \
1339 .nr_fields = ARRAY_SIZE(__event_fields___##_template), \
1340 .owner = THIS_MODULE, \
1341 };
1342
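/*
 * For a remapped event, e.g. a hypothetical
 * LTTNG_TRACEPOINT_EVENT_MAP(kernel_evt, my_ns_kernel_evt, ...), this yields
 * .name = "my_ns_kernel_evt" (the LTTng-facing event name) and
 * .kname = "kernel_evt" (the kernel tracepoint the probe attaches to).
 */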
1343 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
1344 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
1345 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map)
1346
1347 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1348
1349 /*
1350 * Stage 8 of the trace events.
1351 *
1352 * Create an array of event description pointers.
1353 */
1354
1355 #include <probes/lttng-events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1356
1357 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
1358 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
1359 &__event_desc___##_map,
1360
1361 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
1362 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
1363 LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map)
1364
1365 #define TP_ID1(_token, _system) _token##_system
1366 #define TP_ID(_token, _system) TP_ID1(_token, _system)
1367
1368 static const struct lttng_event_desc *TP_ID(__event_desc___, TRACE_SYSTEM)[] = {
1369 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1370 };
1371
1372 #undef TP_ID1
1373 #undef TP_ID
1374
1375 /*
1376 * Stage 9 of the trace events.
1377 *
1378 * Create a toplevel descriptor for the whole probe.
1379 */
1380
1381 #define TP_ID1(_token, _system) _token##_system
1382 #define TP_ID(_token, _system) TP_ID1(_token, _system)
1383
1384 /* non-const because list head will be modified when registered. */
1385 static __used struct lttng_probe_desc TP_ID(__probe_desc___, TRACE_SYSTEM) = {
1386 .provider = __stringify(TRACE_SYSTEM),
1387 .event_desc = TP_ID(__event_desc___, TRACE_SYSTEM),
1388 .nr_events = ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)),
1389 .head = { NULL, NULL },
1390 .lazy_init_head = { NULL, NULL },
1391 .lazy = 0,
1392 };
1393
1394 #undef TP_ID1
1395 #undef TP_ID
1396
1397 /*
1398 * Stage 10 of the trace events.
1399 *
1400 * Register/unregister probes at module load/unload.
1401 */
1402
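/*
 * Rough sketch of how a probe module pulls this file in (paths and the
 * "sched" system are illustrative):
 *
 *   #define LTTNG_PACKAGE_BUILD
 *   #define CREATE_TRACE_POINTS
 *   #define TRACE_INCLUDE_PATH instrumentation/events/lttng-module
 *
 *   #include <instrumentation/events/lttng-module/sched.h>
 *
 * module_init()/module_exit() are then generated below, unless the probe
 * defines TP_MODULE_NOINIT, or TP_MODULE_NOAUTOLOAD to register manually.
 */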
1403 #include <probes/lttng-events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1404
1405 #define TP_ID1(_token, _system) _token##_system
1406 #define TP_ID(_token, _system) TP_ID1(_token, _system)
1407 #define module_init_eval1(_token, _system) module_init(_token##_system)
1408 #define module_init_eval(_token, _system) module_init_eval1(_token, _system)
1409 #define module_exit_eval1(_token, _system) module_exit(_token##_system)
1410 #define module_exit_eval(_token, _system) module_exit_eval1(_token, _system)
1411
1412 #ifndef TP_MODULE_NOINIT
1413 static int TP_ID(__lttng_events_init__, TRACE_SYSTEM)(void)
1414 {
1415 return lttng_probe_register(&TP_ID(__probe_desc___, TRACE_SYSTEM));
1416 }
1417
1418 static void TP_ID(__lttng_events_exit__, TRACE_SYSTEM)(void)
1419 {
1420 lttng_probe_unregister(&TP_ID(__probe_desc___, TRACE_SYSTEM));
1421 }
1422
1423 #ifndef TP_MODULE_NOAUTOLOAD
1424 module_init_eval(__lttng_events_init__, TRACE_SYSTEM);
1425 module_exit_eval(__lttng_events_exit__, TRACE_SYSTEM);
1426 #endif
1427
1428 #endif
1429
1430 #undef module_init_eval
1431 #undef module_exit_eval
1432 #undef TP_ID1
1433 #undef TP_ID
1434
1435 #undef TP_PROTO
1436 #undef TP_ARGS