Refactoring: event structures
[lttng-modules.git] / include / lttng / tracepoint-event-impl.h
1 /* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
2 *
3 * lttng/tracepoint-event-impl.h
4 *
5 * Copyright (C) 2009 Steven Rostedt <rostedt@goodmis.org>
6 * Copyright (C) 2009-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 */
8
9 #include <linux/uaccess.h>
10 #include <linux/debugfs.h>
11 #include <linux/rculist.h>
12 #include <asm/byteorder.h>
13 #include <linux/swab.h>
14
15 #include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_mappings() */
16 #include <ringbuffer/frontend_types.h>
17 #include <ringbuffer/backend.h>
18 #include <wrapper/rcu.h>
19 #include <wrapper/user_namespace.h>
20 #include <lttng/types.h>
21 #include <lttng/probe-user.h>
22 #include <lttng/events.h>
23 #include <lttng/events-internal.h> /* TODO: remove this include after refactoring is done. */
24 #include <lttng/tracer-core.h>
25 #include <lttng/tp-mempool.h>
26
/*
 * Placeholder emitted for NULL string sources so the serializer never
 * dereferences a NULL pointer (see _ctf_string stages below).
 */
#define __LTTNG_NULL_STRING	"(null)"

/* Identity macro: lets callers pass comma-containing argument lists. */
#undef PARAMS
#define PARAMS(args...)		args
31
32 /*
33 * Macro declarations used for all stages.
34 */
35
36 /*
37 * LTTng name mapping macros. LTTng remaps some of the kernel events to
38 * enforce name-spacing.
39 */
/*
 * An EVENT_MAP expands to a class definition plus one instance of that
 * class, with the event renamed from the kernel name to the LTTng name.
 */
#undef LTTNG_TRACEPOINT_EVENT_MAP
#define LTTNG_TRACEPOINT_EVENT_MAP(name, map, proto, args, fields)	\
	LTTNG_TRACEPOINT_EVENT_CLASS(map,				\
			     PARAMS(proto),				\
			     PARAMS(args),				\
			     PARAMS(fields))				\
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args))

#undef LTTNG_TRACEPOINT_EVENT_MAP_NOARGS
#define LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, map, fields)		\
	LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(map,			\
			     PARAMS(fields))				\
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(map, name, map)

/* CODE variants carry local variables and pre/post code around field capture. */
#undef LTTNG_TRACEPOINT_EVENT_CODE_MAP
#define LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, map, proto, args, _locvar, _code_pre, fields, _code_post) \
	LTTNG_TRACEPOINT_EVENT_CLASS_CODE(map,				\
			     PARAMS(proto),				\
			     PARAMS(args),				\
			     PARAMS(_locvar),				\
			     PARAMS(_code_pre),				\
			     PARAMS(fields),				\
			     PARAMS(_code_post))			\
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args))

#undef LTTNG_TRACEPOINT_EVENT_CODE
#define LTTNG_TRACEPOINT_EVENT_CODE(name, proto, args, _locvar, _code_pre, fields, _code_post) \
	LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, name,			\
			     PARAMS(proto),				\
			     PARAMS(args),				\
			     PARAMS(_locvar),				\
			     PARAMS(_code_pre),				\
			     PARAMS(fields),				\
			     PARAMS(_code_post))

/*
 * LTTNG_TRACEPOINT_EVENT_CLASS can be used to add a generic function
 * handlers for events. That is, if all events have the same parameters
 * and just have distinct trace points. Each tracepoint can be defined
 * with LTTNG_TRACEPOINT_EVENT_INSTANCE and that will map the
 * LTTNG_TRACEPOINT_EVENT_CLASS to the tracepoint.
 *
 * LTTNG_TRACEPOINT_EVENT is a one to one mapping between tracepoint and
 * template.
 */

#undef LTTNG_TRACEPOINT_EVENT
#define LTTNG_TRACEPOINT_EVENT(name, proto, args, fields)		\
	LTTNG_TRACEPOINT_EVENT_MAP(name, name,				\
			     PARAMS(proto),				\
			     PARAMS(args),				\
			     PARAMS(fields))

#undef LTTNG_TRACEPOINT_EVENT_NOARGS
#define LTTNG_TRACEPOINT_EVENT_NOARGS(name, fields)			\
	LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, name, PARAMS(fields))

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE
#define LTTNG_TRACEPOINT_EVENT_INSTANCE(template, name, proto, args)	\
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(template, name, name, PARAMS(proto), PARAMS(args))

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(template, name)		\
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(template, name, name)

/* A plain class is a CODE class with empty locvar and pre/post code. */
#undef LTTNG_TRACEPOINT_EVENT_CLASS
#define LTTNG_TRACEPOINT_EVENT_CLASS(_name, _proto, _args, _fields)	\
	LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, PARAMS(_proto), PARAMS(_args), , , \
		PARAMS(_fields), )

#undef LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields)		\
	LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, , , PARAMS(_fields), )
113
114
115 /*
116 * Stage 1 of the trace events.
117 *
118 * Create dummy trace calls for each events, verifying that the LTTng module
119 * instrumentation headers match the kernel arguments. Will be optimized
120 * out by the compiler.
121 */
122
123 /* Reset all macros within TRACEPOINT_EVENT */
124 #include <lttng/events-reset.h>
125
126 #undef TP_PROTO
127 #define TP_PROTO(...) __VA_ARGS__
128
129 #undef TP_ARGS
130 #define TP_ARGS(...) __VA_ARGS__
131
132 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
133 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
134 void trace_##_name(_proto);
135
136 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
137 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
138 void trace_##_name(void);
139
140 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
141
142 /*
143 * Stage 1.1 of the trace events.
144 *
145 * Create dummy trace prototypes for each event class, and for each used
146 * template. This will allow checking whether the prototypes from the
147 * class and the instance using the class actually match.
148 */
149
150 #include <lttng/events-reset.h> /* Reset all macros within TRACE_EVENT */
151
152 #undef TP_PROTO
153 #define TP_PROTO(...) __VA_ARGS__
154
155 #undef TP_ARGS
156 #define TP_ARGS(...) __VA_ARGS__
157
158 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
159 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
160 void __event_template_proto___##_template(_proto);
161
162 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
163 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
164 void __event_template_proto___##_template(void);
165
166 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
167 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
168 void __event_template_proto___##_name(_proto);
169
170 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
171 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
172 void __event_template_proto___##_name(void);
173
174 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
175
176 /*
177 * Stage 1.2 of the trace event_notifier.
178 *
179 * Create dummy trace prototypes for each event class, and for each used
180 * template. This will allow checking whether the prototypes from the
181 * class and the instance using the class actually match.
182 */
183
184 #include <lttng/events-reset.h> /* Reset all macros within TRACE_EVENT */
185
186 #undef TP_PROTO
187 #define TP_PROTO(...) __VA_ARGS__
188
189 #undef TP_ARGS
190 #define TP_ARGS(...) __VA_ARGS__
191
192 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
193 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
194 void __event_notifier_template_proto___##_template(_proto);
195
196 #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
197 #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
198 void __event_notifier_template_proto___##_template(void);
199
200 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
201 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
202 void __event_notifier_template_proto___##_name(_proto);
203
204 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
205 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
206 void __event_notifier_template_proto___##_name(void);
207
208 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
209
210
211 /*
212 * Stage 1.2 of tracepoint event generation
213 *
214 * Unfolding the enums
215 */
216 #include <lttng/events-reset.h> /* Reset all macros within TRACE_EVENT */
217
218 /* Enumeration entry (single value) */
219 #undef ctf_enum_value
220 #define ctf_enum_value(_string, _value) \
221 lttng_kernel_static_enum_entry_value(_string, _value)
222
223 /* Enumeration entry (range) */
224 #undef ctf_enum_range
225 #define ctf_enum_range(_string, _range_start, _range_end) \
226 lttng_kernel_static_enum_entry_range(_string, _range_start, _range_end)
227
228 /* Enumeration entry (automatic value; follows the rules of CTF) */
229 #undef ctf_enum_auto
230 #define ctf_enum_auto(_string) \
231 lttng_kernel_static_enum_entry_auto(_string)
232
233 #undef TP_ENUM_VALUES
234 #define TP_ENUM_VALUES(...) \
235 __VA_ARGS__
236
237 #undef LTTNG_TRACEPOINT_ENUM
238 #define LTTNG_TRACEPOINT_ENUM(_name, _values) \
239 static const struct lttng_kernel_enum_entry *__enum_values__##_name[] = { \
240 _values \
241 };
242
243 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
244
245 /*
246 * Stage 2 of the trace events.
247 *
248 * Create event field type metadata section.
249 * Each event produce an array of fields.
250 */
251
252 /* Reset all macros within TRACEPOINT_EVENT */
253 #include <lttng/events-reset.h>
254 #include <lttng/events-write.h>
255 #include <lttng/events-nowrite.h>
256
257 #undef _ctf_integer_ext
258 #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
259 lttng_kernel_static_event_field(#_item, \
260 lttng_kernel_static_type_integer_from_type(_type, _byte_order, _base), \
261 _nowrite, _user, 0),
262
263 #undef _ctf_array_encoded
264 #define _ctf_array_encoded(_type, _item, _src, _length, \
265 _encoding, _byte_order, _elem_type_base, _user, _nowrite) \
266 lttng_kernel_static_event_field(#_item, \
267 lttng_kernel_static_type_array(_length, \
268 lttng_kernel_static_type_integer_from_type(_type, _byte_order, _elem_type_base), \
269 0, \
270 _encoding), \
271 _nowrite, _user, 0),
272
273 #undef _ctf_array_bitfield
274 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
275 lttng_kernel_static_event_field(#_item, \
276 lttng_kernel_static_type_array((_length) * sizeof(_type) * CHAR_BIT, \
277 lttng_kernel_static_type_integer(1, 1, 0, __LITTLE_ENDIAN, 10), \
278 lttng_alignof(_type), \
279 none), \
280 _nowrite, _user, 0),
281
282 #undef _ctf_sequence_encoded
283 #define _ctf_sequence_encoded(_type, _item, _src, \
284 _length_type, _src_length, _encoding, \
285 _byte_order, _elem_type_base, _user, _nowrite) \
286 lttng_kernel_static_event_field("_" #_item "_length", \
287 lttng_kernel_static_type_integer_from_type(_length_type, __BYTE_ORDER, 10), \
288 _nowrite, 0, 1), \
289 lttng_kernel_static_event_field(#_item, \
290 lttng_kernel_static_type_sequence("_" #_item "_length", \
291 lttng_kernel_static_type_integer_from_type(_type, _byte_order, _elem_type_base), \
292 0, \
293 _encoding), \
294 _nowrite, _user, 0),
295
296 #undef _ctf_sequence_bitfield
297 #define _ctf_sequence_bitfield(_type, _item, _src, \
298 _length_type, _src_length, \
299 _user, _nowrite) \
300 lttng_kernel_static_event_field("_" #_item "_length", \
301 lttng_kernel_static_type_integer_from_type(_length_type, __BYTE_ORDER, 10), \
302 _nowrite, 0, 1), \
303 lttng_kernel_static_event_field(#_item, \
304 lttng_kernel_static_type_sequence("_" #_item "_length", \
305 lttng_kernel_static_type_integer(1, 1, 0, __LITTLE_ENDIAN, 10), \
306 lttng_alignof(_type), \
307 none), \
308 _nowrite, _user, 0),
309
310 #undef _ctf_string
311 #define _ctf_string(_item, _src, _user, _nowrite) \
312 lttng_kernel_static_event_field(#_item, \
313 lttng_kernel_static_type_string(UTF8), \
314 _nowrite, _user, 0),
315
316 #undef _ctf_unused
317 #define _ctf_unused(_src)
318
319 #undef _ctf_enum
320 #define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
321 lttng_kernel_static_event_field(#_item, \
322 lttng_kernel_static_type_enum(&__enum_##_name, \
323 lttng_kernel_static_type_integer_from_type(_type, __BYTE_ORDER, 10)), \
324 _nowrite, _user, 0),
325
326 #undef ctf_custom_field
327 #define ctf_custom_field(_type, _item, _code) \
328 lttng_kernel_static_event_field(#_item, PARAMS(_type), 0, 0, 1),
329
330 #undef ctf_custom_type
331 #define ctf_custom_type(...) __VA_ARGS__
332
333 #undef TP_FIELDS
334 #define TP_FIELDS(...) __VA_ARGS__ /* Only one used in this phase */
335
336 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
337 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
338 static const struct lttng_kernel_event_field *__event_fields___##_name[] = { \
339 _fields \
340 };
341
342 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
343 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
344 LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, PARAMS(_fields), _code_post)
345
346 #undef LTTNG_TRACEPOINT_ENUM
347 #define LTTNG_TRACEPOINT_ENUM(_name, _values) \
348 static const struct lttng_kernel_enum_desc __enum_##_name = { \
349 .name = #_name, \
350 .entries = __enum_values__##_name, \
351 .nr_entries = ARRAY_SIZE(__enum_values__##_name), \
352 };
353
354 #define LTTNG_CREATE_FIELD_METADATA
355 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
356 #undef LTTNG_CREATE_FIELD_METADATA
357
358 /*
359 * Stage 3 of the trace events.
360 *
361 * Create probe callback prototypes.
362 */
363
364 /* Reset all macros within TRACEPOINT_EVENT */
365 #include <lttng/events-reset.h>
366
367 #undef TP_PROTO
368 #define TP_PROTO(...) __VA_ARGS__
369
370 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
371 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
372 static void __event_probe__##_name(void *__data, _proto);
373
374 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
375 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
376 static void __event_probe__##_name(void *__data);
377
378 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
379
380 /*
381 * Stage 3.1 of the trace event_notifiers.
382 *
383 * Create event_notifier probe callback prototypes.
384 */
385
386 /* Reset all macros within TRACEPOINT_EVENT */
387 #include <lttng/events-reset.h>
388
389 #undef TP_PROTO
390 #define TP_PROTO(...) __VA_ARGS__
391
392 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
393 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
394 static void __event_notifier_probe__##_name(void *__data, _proto);
395
396 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
397 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
398 static void __event_notifier_probe__##_name(void *__data);
399
400 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
401
402 /*
403 * Stage 4 of the trace events.
404 *
405 * Create static inline function that calculates event size.
406 */
407
408 /* Reset all macros within TRACEPOINT_EVENT */
409 #include <lttng/events-reset.h>
410 #include <lttng/events-write.h>
411
412 #undef _ctf_integer_ext
413 #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
414 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
415 __event_len += sizeof(_type);
416
417 #undef _ctf_array_encoded
418 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
419 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
420 __event_len += sizeof(_type) * (_length);
421
422 #undef _ctf_array_bitfield
423 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
424 _ctf_array_encoded(_type, _item, _src, _length, none, __LITTLE_ENDIAN, 0, _user, _nowrite)
425
426 #undef _ctf_sequence_encoded
427 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
428 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
429 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_length_type)); \
430 __event_len += sizeof(_length_type); \
431 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \
432 { \
433 size_t __seqlen = (_src_length); \
434 \
435 if (unlikely(++this_cpu_ptr(&lttng_dynamic_len_stack)->offset >= LTTNG_DYNAMIC_LEN_STACK_SIZE)) \
436 goto error; \
437 barrier(); /* reserve before use. */ \
438 this_cpu_ptr(&lttng_dynamic_len_stack)->stack[this_cpu_ptr(&lttng_dynamic_len_stack)->offset - 1] = __seqlen; \
439 __event_len += sizeof(_type) * __seqlen; \
440 }
441
442 #undef _ctf_sequence_bitfield
443 #define _ctf_sequence_bitfield(_type, _item, _src, \
444 _length_type, _src_length, \
445 _user, _nowrite) \
446 _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
447 none, __LITTLE_ENDIAN, 10, _user, _nowrite)
448
449 /*
450 * ctf_user_string includes \0. If returns 0, it faulted, so we set size to
451 * 1 (\0 only).
452 */
453 #undef _ctf_string
454 #define _ctf_string(_item, _src, _user, _nowrite) \
455 if (unlikely(++this_cpu_ptr(&lttng_dynamic_len_stack)->offset >= LTTNG_DYNAMIC_LEN_STACK_SIZE)) \
456 goto error; \
457 barrier(); /* reserve before use. */ \
458 if (_user) { \
459 __event_len += this_cpu_ptr(&lttng_dynamic_len_stack)->stack[this_cpu_ptr(&lttng_dynamic_len_stack)->offset - 1] = \
460 max_t(size_t, lttng_strlen_user_inatomic(_src), 1); \
461 } else { \
462 __event_len += this_cpu_ptr(&lttng_dynamic_len_stack)->stack[this_cpu_ptr(&lttng_dynamic_len_stack)->offset - 1] = \
463 strlen((_src) ? (_src) : __LTTNG_NULL_STRING) + 1; \
464 }
465
466 #undef _ctf_enum
467 #define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
468 _ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)
469
470 #undef ctf_align
471 #define ctf_align(_type) \
472 __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type));
473
474 #undef ctf_custom_field
475 #define ctf_custom_field(_type, _item, _code) \
476 { \
477 _code \
478 }
479
480 #undef ctf_custom_code
481 #define ctf_custom_code(...) __VA_ARGS__
482
483 #undef TP_PROTO
484 #define TP_PROTO(...) __VA_ARGS__
485
486 #undef TP_FIELDS
487 #define TP_FIELDS(...) __VA_ARGS__
488
489 #undef TP_locvar
490 #define TP_locvar(...) __VA_ARGS__
491
492 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
493 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
494 static inline ssize_t __event_get_size__##_name(void *__tp_locvar, _proto) \
495 { \
496 size_t __event_len = 0; \
497 unsigned int __dynamic_len_idx __attribute__((unused)) = 0; \
498 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
499 \
500 _fields \
501 return __event_len; \
502 \
503 error: \
504 __attribute__((unused)); \
505 return -1; \
506 }
507
508 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
509 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
510 static inline ssize_t __event_get_size__##_name(void *__tp_locvar) \
511 { \
512 size_t __event_len = 0; \
513 unsigned int __dynamic_len_idx __attribute__((unused)) = 0; \
514 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
515 \
516 _fields \
517 return __event_len; \
518 \
519 error: \
520 __attribute__((unused)); \
521 return -1; \
522 }
523
524 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
525
526
527 /*
528 * Stage 4.1 of tracepoint event generation.
529 *
530 * Create static inline function that layout the filter stack data.
531 * We make both write and nowrite data available to the filter.
532 */
533
534 /* Reset all macros within TRACEPOINT_EVENT */
535 #include <lttng/events-reset.h>
536 #include <lttng/events-write.h>
537 #include <lttng/events-nowrite.h>
538
539 #undef _ctf_integer_ext_fetched
540 #define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
541 if (lttng_is_signed_type(_type)) { \
542 int64_t __ctf_tmp_int64; \
543 switch (sizeof(_type)) { \
544 case 1: \
545 { \
546 union { _type t; int8_t v; } __tmp = { (_type) (_src) }; \
547 __ctf_tmp_int64 = (int64_t) __tmp.v; \
548 break; \
549 } \
550 case 2: \
551 { \
552 union { _type t; int16_t v; } __tmp = { (_type) (_src) }; \
553 if (_byte_order != __BYTE_ORDER) \
554 __swab16s(&__tmp.v); \
555 __ctf_tmp_int64 = (int64_t) __tmp.v; \
556 break; \
557 } \
558 case 4: \
559 { \
560 union { _type t; int32_t v; } __tmp = { (_type) (_src) }; \
561 if (_byte_order != __BYTE_ORDER) \
562 __swab32s(&__tmp.v); \
563 __ctf_tmp_int64 = (int64_t) __tmp.v; \
564 break; \
565 } \
566 case 8: \
567 { \
568 union { _type t; int64_t v; } __tmp = { (_type) (_src) }; \
569 if (_byte_order != __BYTE_ORDER) \
570 __swab64s(&__tmp.v); \
571 __ctf_tmp_int64 = (int64_t) __tmp.v; \
572 break; \
573 } \
574 default: \
575 BUG_ON(1); \
576 }; \
577 memcpy(__stack_data, &__ctf_tmp_int64, sizeof(int64_t)); \
578 } else { \
579 uint64_t __ctf_tmp_uint64; \
580 switch (sizeof(_type)) { \
581 case 1: \
582 { \
583 union { _type t; uint8_t v; } __tmp = { (_type) (_src) }; \
584 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
585 break; \
586 } \
587 case 2: \
588 { \
589 union { _type t; uint16_t v; } __tmp = { (_type) (_src) }; \
590 if (_byte_order != __BYTE_ORDER) \
591 __swab16s(&__tmp.v); \
592 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
593 break; \
594 } \
595 case 4: \
596 { \
597 union { _type t; uint32_t v; } __tmp = { (_type) (_src) }; \
598 if (_byte_order != __BYTE_ORDER) \
599 __swab32s(&__tmp.v); \
600 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
601 break; \
602 } \
603 case 8: \
604 { \
605 union { _type t; uint64_t v; } __tmp = { (_type) (_src) }; \
606 if (_byte_order != __BYTE_ORDER) \
607 __swab64s(&__tmp.v); \
608 __ctf_tmp_uint64 = (uint64_t) __tmp.v; \
609 break; \
610 } \
611 default: \
612 BUG_ON(1); \
613 }; \
614 memcpy(__stack_data, &__ctf_tmp_uint64, sizeof(uint64_t)); \
615 } \
616 __stack_data += sizeof(int64_t);
617
618 #undef _ctf_integer_ext_isuser0
619 #define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \
620 _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite)
621
622 #undef _ctf_integer_ext_isuser1
623 #define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \
624 { \
625 union { \
626 char __array[sizeof(_user_src)]; \
627 __typeof__(_user_src) __v; \
628 } __tmp_fetch; \
629 if (lib_ring_buffer_copy_from_user_check_nofault(__tmp_fetch.__array, \
630 &(_user_src), sizeof(_user_src))) \
631 memset(__tmp_fetch.__array, 0, sizeof(__tmp_fetch.__array)); \
632 _ctf_integer_ext_fetched(_type, _item, __tmp_fetch.__v, _byte_order, _base, _nowrite) \
633 }
634
635 #undef _ctf_integer_ext
636 #define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \
637 _ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite)
638
639 #undef _ctf_array_encoded
640 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
641 { \
642 unsigned long __ctf_tmp_ulong = (unsigned long) (_length); \
643 const void *__ctf_tmp_ptr = (_src); \
644 memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \
645 __stack_data += sizeof(unsigned long); \
646 memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
647 __stack_data += sizeof(void *); \
648 }
649
650 #undef _ctf_array_bitfield
651 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
652 _ctf_array_encoded(_type, _item, _src, _length, none, __LITTLE_ENDIAN, 0, _user, _nowrite)
653
654 #undef _ctf_sequence_encoded
655 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
656 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
657 { \
658 unsigned long __ctf_tmp_ulong = (unsigned long) (_src_length); \
659 const void *__ctf_tmp_ptr = (_src); \
660 memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \
661 __stack_data += sizeof(unsigned long); \
662 memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
663 __stack_data += sizeof(void *); \
664 }
665
666 #undef _ctf_sequence_bitfield
667 #define _ctf_sequence_bitfield(_type, _item, _src, \
668 _length_type, _src_length, \
669 _user, _nowrite) \
670 _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
671 none, __LITTLE_ENDIAN, 10, _user, _nowrite)
672
673 #undef _ctf_string
674 #define _ctf_string(_item, _src, _user, _nowrite) \
675 { \
676 const void *__ctf_tmp_ptr = \
677 ((_src) ? (_src) : __LTTNG_NULL_STRING); \
678 memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \
679 __stack_data += sizeof(void *); \
680 }
681
682 #undef _ctf_enum
683 #define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
684 _ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)
685
686 #undef TP_PROTO
687 #define TP_PROTO(...) __VA_ARGS__
688
689 #undef TP_FIELDS
690 #define TP_FIELDS(...) __VA_ARGS__
691
692 #undef TP_locvar
693 #define TP_locvar(...) __VA_ARGS__
694
695 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
696 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
697 static inline \
698 void __event_prepare_interpreter_stack__##_name(char *__stack_data, \
699 void *__tp_locvar) \
700 { \
701 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
702 \
703 _fields \
704 }
705
706 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
707 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
708 static inline \
709 void __event_prepare_interpreter_stack__##_name(char *__stack_data, \
710 void *__tp_locvar, _proto) \
711 { \
712 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
713 \
714 _fields \
715 }
716
717 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
718
719 /*
720 * Stage 5 of the trace events.
721 *
722 * Create static inline function that calculates event payload alignment.
723 */
724
725 /* Reset all macros within TRACEPOINT_EVENT */
726 #include <lttng/events-reset.h>
727 #include <lttng/events-write.h>
728
729 #undef _ctf_integer_ext
730 #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \
731 __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
732
733 #undef _ctf_array_encoded
734 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
735 __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
736
737 #undef _ctf_array_bitfield
738 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
739 _ctf_array_encoded(_type, _item, _src, _length, none, __LITTLE_ENDIAN, 0, _user, _nowrite)
740
741 #undef _ctf_sequence_encoded
742 #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
743 _src_length, _encoding, _byte_order, _base, _user, _nowrite) \
744 __event_align = max_t(size_t, __event_align, lttng_alignof(_length_type)); \
745 __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
746
747 #undef _ctf_sequence_bitfield
748 #define _ctf_sequence_bitfield(_type, _item, _src, \
749 _length_type, _src_length, \
750 _user, _nowrite) \
751 _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \
752 none, __LITTLE_ENDIAN, 10, _user, _nowrite)
753
754 #undef _ctf_string
755 #define _ctf_string(_item, _src, _user, _nowrite)
756
757 #undef _ctf_enum
758 #define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
759 _ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)
760
761 #undef ctf_align
762 #define ctf_align(_type) \
763 __event_align = max_t(size_t, __event_align, lttng_alignof(_type));
764
765 #undef TP_PROTO
766 #define TP_PROTO(...) __VA_ARGS__
767
768 #undef TP_FIELDS
769 #define TP_FIELDS(...) __VA_ARGS__
770
771 #undef TP_locvar
772 #define TP_locvar(...) __VA_ARGS__
773
774 #undef ctf_custom_field
775 #define ctf_custom_field(_type, _item, _code) _code
776
777 #undef ctf_custom_code
778 #define ctf_custom_code(...) \
779 { \
780 __VA_ARGS__ \
781 }
782
783 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
784 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
785 static inline size_t __event_get_align__##_name(void *__tp_locvar, _proto) \
786 { \
787 size_t __event_align = 1; \
788 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
789 \
790 _fields \
791 return __event_align; \
792 }
793
794 #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
795 #define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
796 static inline size_t __event_get_align__##_name(void *__tp_locvar) \
797 { \
798 size_t __event_align = 1; \
799 struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \
800 \
801 _fields \
802 return __event_align; \
803 }
804
805 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
806
807 /*
808 * Stage 6 of tracepoint event generation.
809 *
810 * Create the probe function. This function calls event size calculation
811 * and writes event data into the buffer.
812 */
813
814 /* Reset all macros within TRACEPOINT_EVENT */
815 #include <lttng/events-reset.h>
816 #include <lttng/events-write.h>
817
818 #undef _ctf_integer_ext_fetched
819 #define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \
820 { \
821 _type __tmp = _src; \
822 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__tmp));\
823 __chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp));\
824 }
825
826 #undef _ctf_integer_ext_isuser0
827 #define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \
828 _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite)
829
830 #undef _ctf_integer_ext_isuser1
831 #define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \
832 { \
833 union { \
834 char __array[sizeof(_user_src)]; \
835 __typeof__(_user_src) __v; \
836 } __tmp_fetch; \
837 if (lib_ring_buffer_copy_from_user_check_nofault(__tmp_fetch.__array, \
838 &(_user_src), sizeof(_user_src))) \
839 memset(__tmp_fetch.__array, 0, sizeof(__tmp_fetch.__array)); \
840 _ctf_integer_ext_fetched(_type, _item, __tmp_fetch.__v, _byte_order, _base, _nowrite) \
841 }
842
843 #undef _ctf_integer_ext
844 #define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \
845 _ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite)
846
847 #undef _ctf_array_encoded
848 #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _byte_order, _base, _user, _nowrite) \
849 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
850 if (_user) { \
851 __chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \
852 } else { \
853 __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \
854 }
855
856 #if (__BYTE_ORDER == __LITTLE_ENDIAN)
857 #undef _ctf_array_bitfield
858 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
859 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
860 if (_user) { \
861 __chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \
862 } else { \
863 __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \
864 }
865 #else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
866 /*
867 * For big endian, we need to byteswap into little endian.
868 */
869 #undef _ctf_array_bitfield
870 #define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \
871 lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
872 { \
873 size_t _i; \
874 \
875 for (_i = 0; _i < (_length); _i++) { \
876 _type _tmp; \
877 \
878 if (_user) { \
879 if (get_user(_tmp, (_type *) _src + _i)) \
880 _tmp = 0; \
881 } else { \
882 _tmp = ((_type *) _src)[_i]; \
883 } \
884 switch (sizeof(_type)) { \
885 case 1: \
886 break; \
887 case 2: \
888 _tmp = cpu_to_le16(_tmp); \
889 break; \
890 case 4: \
891 _tmp = cpu_to_le32(_tmp); \
892 break; \
893 case 8: \
894 _tmp = cpu_to_le64(_tmp); \
895 break; \
896 default: \
897 BUG_ON(1); \
898 } \
899 __chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \
900 } \
901 }
902 #endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
903
/*
 * Serialize a dynamically-sized ("sequence") field: a length header of
 * type _length_type followed by the elements. The element count was
 * computed during the size pass and saved on the per-cpu dynamic length
 * stack. The header write below peeks at the entry without consuming
 * it; the payload writes then consume it via __get_dynamic_len(), which
 * post-increments __dynamic_len_idx.
 */
#undef _ctf_sequence_encoded
#define _ctf_sequence_encoded(_type, _item, _src, _length_type, \
			_src_length, _encoding, _byte_order, _base, _user, _nowrite) \
	{ \
		_length_type __tmpl = this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx]; \
		lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
		__chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
	} \
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
	if (_user) { \
		__chan->ops->event_write_from_user(&__ctx, _src, \
			sizeof(_type) * __get_dynamic_len(dest)); \
	} else { \
		__chan->ops->event_write(&__ctx, _src, \
			sizeof(_type) * __get_dynamic_len(dest)); \
	}
920
/*
 * Serialize a dynamically-sized sequence recorded as a bitfield: a
 * length header expressed in bits (element count * element size *
 * CHAR_BIT) followed by the elements. Bitfields are always recorded in
 * little-endian byte order, so on little-endian hosts the payload is a
 * straight bulk copy.
 */
#if (__BYTE_ORDER == __LITTLE_ENDIAN)
#undef _ctf_sequence_bitfield
#define _ctf_sequence_bitfield(_type, _item, _src, \
			_length_type, _src_length, \
			_user, _nowrite) \
	{ \
		_length_type __tmpl = this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \
		lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
		__chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
	} \
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
	if (_user) { \
		__chan->ops->event_write_from_user(&__ctx, _src, \
			sizeof(_type) * __get_dynamic_len(dest)); \
	} else { \
		__chan->ops->event_write(&__ctx, _src, \
			sizeof(_type) * __get_dynamic_len(dest)); \
	}
#else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
/*
 * For big endian, we need to byteswap into little endian.
 * Each element is fetched individually (get_user() for user-space
 * sources, with 0 substituted on fault), byteswapped according to its
 * compile-time size, and written out one by one.
 */
#undef _ctf_sequence_bitfield
#define _ctf_sequence_bitfield(_type, _item, _src, \
			_length_type, _src_length, \
			_user, _nowrite) \
	{ \
		_length_type __tmpl = this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \
		lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\
		__chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\
	} \
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \
	{ \
		size_t _i, _length; \
		\
		_length = __get_dynamic_len(dest); \
		for (_i = 0; _i < _length; _i++) { \
			_type _tmp; \
			\
			if (_user) { \
				if (get_user(_tmp, (_type *) _src + _i)) \
					_tmp = 0; \
			} else { \
				_tmp = ((_type *) _src)[_i]; \
			} \
			switch (sizeof(_type)) { \
			case 1: \
				break; \
			case 2: \
				_tmp = cpu_to_le16(_tmp); \
				break; \
			case 4: \
				_tmp = cpu_to_le32(_tmp); \
				break; \
			case 8: \
				_tmp = cpu_to_le64(_tmp); \
				break; \
			default: \
				BUG_ON(1); \
			} \
			__chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \
		} \
	}
#endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */
985
/*
 * Serialize a null-terminated string field. User-space strings are
 * copied with the faultable strcpy variant; NULL kernel-space pointers
 * are recorded as the literal "(null)" placeholder. The copy length
 * was computed in the size pass and is consumed from the dynamic
 * length stack.
 */
#undef _ctf_string
#define _ctf_string(_item, _src, _user, _nowrite) \
	if (_user) { \
		lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(*(_src))); \
		__chan->ops->event_strcpy_from_user(&__ctx, _src, \
			__get_dynamic_len(dest)); \
	} else { \
		const char *__ctf_tmp_string = \
			((_src) ? (_src) : __LTTNG_NULL_STRING); \
		lib_ring_buffer_align_ctx(&__ctx, \
			lttng_alignof(*__ctf_tmp_string)); \
		__chan->ops->event_strcpy(&__ctx, __ctf_tmp_string, \
			__get_dynamic_len(dest)); \
	}
1000
/* Enumerations are serialized as their integral container type, native byte order, base 10. */
#undef _ctf_enum
#define _ctf_enum(_name, _type, _item, _src, _user, _nowrite) \
	_ctf_integer_ext(_type, _item, _src, __BYTE_ORDER, 10, _user, _nowrite)

/* Align the ring buffer write position on the natural alignment of _type. */
#undef ctf_align
#define ctf_align(_type) \
	lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type));

/* A custom field expands to its user-provided serialization code. */
#undef ctf_custom_field
#define ctf_custom_field(_type, _item, _code) _code

/* Brace-scope the custom code so its locals do not leak into the probe body. */
#undef ctf_custom_code
#define ctf_custom_code(...) \
	{ \
		__VA_ARGS__ \
	}

/* Beware: this get len actually consumes the len value */
#undef __get_dynamic_len
#define __get_dynamic_len(field) this_cpu_ptr(&lttng_dynamic_len_stack)->stack[__dynamic_len_idx++]
1021
/*
 * Pass-through expansions of the tracepoint parameter wrappers for this
 * generation stage.
 */
#undef TP_PROTO
#define TP_PROTO(...) __VA_ARGS__

#undef TP_ARGS
#define TP_ARGS(...) __VA_ARGS__

#undef TP_FIELDS
#define TP_FIELDS(...) __VA_ARGS__

#undef TP_locvar
#define TP_locvar(...) __VA_ARGS__

#undef TP_code_pre
#define TP_code_pre(...) __VA_ARGS__

#undef TP_code_post
#define TP_code_post(...) __VA_ARGS__

/*
 * For state dump, check that "session" argument (mandatory) matches the
 * session this event belongs to. Ensures that we write state dump data only
 * into the started session, not into all sessions.
 * When TP_SESSION_CHECK is not defined, the check compiles to a constant
 * true and is optimized away.
 */
#ifdef TP_SESSION_CHECK
#define _TP_SESSION_CHECK(session, csession) (session == csession)
#else /* TP_SESSION_CHECK */
#define _TP_SESSION_CHECK(session, csession) 1
#endif /* TP_SESSION_CHECK */
1050
1051 /*
1052 * Using twice size for filter stack data to hold size and pointer for
1053 * each field (worse case). For integers, max size required is 64-bit.
1054 * Same for double-precision floats. Those fit within
1055 * 2*sizeof(unsigned long) for all supported architectures.
1056 * Perform UNION (||) of filter runtime list.
1057 */
/*
 * Generate the tracing (recorder) probe for an event class taking
 * arguments. The probe:
 *  1) bails out unless the session is active, the channel and event are
 *     enabled, and current passes every id tracker
 *     (pid/vpid/uid/vuid/gid/vgid),
 *  2) runs the attached filter bytecode (if any) over an interpreter
 *     stack built from the event fields,
 *  3) computes payload size and alignment, reserves ring buffer space,
 *     serializes the fields, and commits.
 * After _code_pre has run, every bail-out path goes through the __post
 * label so _code_post always executes and the per-cpu dynamic length
 * stack offset is restored.
 */
#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
static void __event_probe__##_name(void *__data, _proto) \
{ \
	struct probe_local_vars { _locvar }; \
	struct lttng_kernel_event_recorder *__event_recorder = __data; \
	struct lttng_probe_ctx __lttng_probe_ctx = { \
		.event = __event_recorder, \
		.event_notifier = NULL, \
		.interruptible = !irqs_disabled(), \
	}; \
	struct lttng_channel *__chan = __event_recorder->chan; \
	struct lttng_session *__session = __chan->session; \
	struct lib_ring_buffer_ctx __ctx; \
	ssize_t __event_len; \
	size_t __event_align; \
	size_t __orig_dynamic_len_offset, __dynamic_len_idx __attribute__((unused)); \
	union { \
		size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
		char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
	} __stackvar; \
	int __ret; \
	struct probe_local_vars __tp_locvar; \
	struct probe_local_vars *tp_locvar __attribute__((unused)) = \
			&__tp_locvar; \
	struct lttng_id_tracker_rcu *__lf; \
	\
	/* Enablement and id-tracker checks: cheapest rejections first. */ \
	if (!_TP_SESSION_CHECK(session, __session)) \
		return; \
	if (unlikely(!LTTNG_READ_ONCE(__session->active))) \
		return; \
	if (unlikely(!LTTNG_READ_ONCE(__chan->enabled))) \
		return; \
	if (unlikely(!LTTNG_READ_ONCE(__event_recorder->parent.enabled))) \
		return; \
	__lf = lttng_rcu_dereference(__session->pid_tracker.p); \
	if (__lf && likely(!lttng_id_tracker_lookup(__lf, current->tgid))) \
		return; \
	__lf = lttng_rcu_dereference(__session->vpid_tracker.p); \
	if (__lf && likely(!lttng_id_tracker_lookup(__lf, task_tgid_vnr(current)))) \
		return; \
	__lf = lttng_rcu_dereference(__session->uid_tracker.p); \
	if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
			lttng_current_uid()))) \
		return; \
	__lf = lttng_rcu_dereference(__session->vuid_tracker.p); \
	if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
			lttng_current_vuid()))) \
		return; \
	__lf = lttng_rcu_dereference(__session->gid_tracker.p); \
	if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
			lttng_current_gid()))) \
		return; \
	__lf = lttng_rcu_dereference(__session->vgid_tracker.p); \
	if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
			lttng_current_vgid()))) \
		return; \
	__orig_dynamic_len_offset = this_cpu_ptr(&lttng_dynamic_len_stack)->offset; \
	__dynamic_len_idx = __orig_dynamic_len_offset; \
	_code_pre \
	/* Filter evaluation: record only if some bytecode matches or an enabler has no filter. */ \
	if (unlikely(!list_empty(&__event_recorder->priv->parent.filter_bytecode_runtime_head))) { \
		struct lttng_bytecode_runtime *bc_runtime; \
		int __filter_record = __event_recorder->priv->parent.has_enablers_without_filter_bytecode; \
		\
		__event_prepare_interpreter_stack__##_name(__stackvar.__filter_stack_data, \
				tp_locvar, _args); \
		lttng_list_for_each_entry_rcu(bc_runtime, &__event_recorder->priv->parent.filter_bytecode_runtime_head, node) { \
			if (unlikely(bc_runtime->interpreter_funcs.filter(bc_runtime, &__lttng_probe_ctx, \
					__stackvar.__filter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) { \
				__filter_record = 1; \
				break; \
			} \
		} \
		if (likely(!__filter_record)) \
			goto __post; \
	} \
	/* Size pass (pushes dynamic lengths), then reserve/serialize/commit. */ \
	__event_len = __event_get_size__##_name(tp_locvar, _args); \
	if (unlikely(__event_len < 0)) { \
		lib_ring_buffer_lost_event_too_big(__chan->chan); \
		goto __post; \
	} \
	__event_align = __event_get_align__##_name(tp_locvar, _args); \
	lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \
				 __event_align, -1); \
	__ret = __chan->ops->event_reserve(&__ctx, __event_recorder->priv->id); \
	if (__ret < 0) \
		goto __post; \
	_fields \
	__chan->ops->event_commit(&__ctx); \
__post: \
	_code_post \
	barrier(); /* use before un-reserve. */ \
	this_cpu_ptr(&lttng_dynamic_len_stack)->offset = __orig_dynamic_len_offset; \
	return; \
}
1153
/*
 * Same as LTTNG_TRACEPOINT_EVENT_CLASS_CODE above, but generates the
 * recorder probe for an event class taking no tracepoint arguments:
 * the size/align/stack-preparation helpers receive tp_locvar only.
 */
#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
static void __event_probe__##_name(void *__data) \
{ \
	struct probe_local_vars { _locvar }; \
	struct lttng_kernel_event_recorder *__event_recorder = __data; \
	struct lttng_probe_ctx __lttng_probe_ctx = { \
		.event = __event_recorder, \
		.event_notifier = NULL, \
		.interruptible = !irqs_disabled(), \
	}; \
	struct lttng_channel *__chan = __event_recorder->chan; \
	struct lttng_session *__session = __chan->session; \
	struct lib_ring_buffer_ctx __ctx; \
	ssize_t __event_len; \
	size_t __event_align; \
	size_t __orig_dynamic_len_offset, __dynamic_len_idx __attribute__((unused)); \
	union { \
		size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
		char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
	} __stackvar; \
	int __ret; \
	struct probe_local_vars __tp_locvar; \
	struct probe_local_vars *tp_locvar __attribute__((unused)) = \
			&__tp_locvar; \
	struct lttng_id_tracker_rcu *__lf; \
	\
	/* Enablement and id-tracker checks: cheapest rejections first. */ \
	if (!_TP_SESSION_CHECK(session, __session)) \
		return; \
	if (unlikely(!LTTNG_READ_ONCE(__session->active))) \
		return; \
	if (unlikely(!LTTNG_READ_ONCE(__chan->enabled))) \
		return; \
	if (unlikely(!LTTNG_READ_ONCE(__event_recorder->parent.enabled))) \
		return; \
	__lf = lttng_rcu_dereference(__session->pid_tracker.p); \
	if (__lf && likely(!lttng_id_tracker_lookup(__lf, current->tgid))) \
		return; \
	__lf = lttng_rcu_dereference(__session->vpid_tracker.p); \
	if (__lf && likely(!lttng_id_tracker_lookup(__lf, task_tgid_vnr(current)))) \
		return; \
	__lf = lttng_rcu_dereference(__session->uid_tracker.p); \
	if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
			lttng_current_uid()))) \
		return; \
	__lf = lttng_rcu_dereference(__session->vuid_tracker.p); \
	if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
			lttng_current_vuid()))) \
		return; \
	__lf = lttng_rcu_dereference(__session->gid_tracker.p); \
	if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
			lttng_current_gid()))) \
		return; \
	__lf = lttng_rcu_dereference(__session->vgid_tracker.p); \
	if (__lf && likely(!lttng_id_tracker_lookup(__lf, \
			lttng_current_vgid()))) \
		return; \
	__orig_dynamic_len_offset = this_cpu_ptr(&lttng_dynamic_len_stack)->offset; \
	__dynamic_len_idx = __orig_dynamic_len_offset; \
	_code_pre \
	/* Filter evaluation: record only if some bytecode matches or an enabler has no filter. */ \
	if (unlikely(!list_empty(&__event_recorder->priv->parent.filter_bytecode_runtime_head))) { \
		struct lttng_bytecode_runtime *bc_runtime; \
		int __filter_record = __event_recorder->priv->parent.has_enablers_without_filter_bytecode; \
		\
		__event_prepare_interpreter_stack__##_name(__stackvar.__filter_stack_data, \
				tp_locvar); \
		lttng_list_for_each_entry_rcu(bc_runtime, &__event_recorder->priv->parent.filter_bytecode_runtime_head, node) { \
			if (unlikely(bc_runtime->interpreter_funcs.filter(bc_runtime, &__lttng_probe_ctx, \
					__stackvar.__filter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) { \
				__filter_record = 1; \
				break; \
			} \
		} \
		if (likely(!__filter_record)) \
			goto __post; \
	} \
	/* Size pass (pushes dynamic lengths), then reserve/serialize/commit. */ \
	__event_len = __event_get_size__##_name(tp_locvar); \
	if (unlikely(__event_len < 0)) { \
		lib_ring_buffer_lost_event_too_big(__chan->chan); \
		goto __post; \
	} \
	__event_align = __event_get_align__##_name(tp_locvar); \
	lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \
				 __event_align, -1); \
	__ret = __chan->ops->event_reserve(&__ctx, __event_recorder->priv->id); \
	if (__ret < 0) \
		goto __post; \
	_fields \
	__chan->ops->event_commit(&__ctx); \
__post: \
	_code_post \
	barrier(); /* use before un-reserve. */ \
	this_cpu_ptr(&lttng_dynamic_len_stack)->offset = __orig_dynamic_len_offset; \
	return; \
}
1249
1250 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1251
1252 #undef __get_dynamic_len
1253
1254 /*
1255 * Stage 6.1 of tracepoint generation: generate event notifier probes
1256 *
1257 * Create the probe function. This function evaluates the filter bytecode and
1258 * queue a notification to be sent to userspace.
1259 */
1260
1261 #include <lttng/events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1262
/*
 * Pass-through expansions of the tracepoint parameter wrappers for the
 * event notifier probe generation stage.
 */
#undef TP_PROTO
#define TP_PROTO(...) __VA_ARGS__

#undef TP_ARGS
#define TP_ARGS(...) __VA_ARGS__

#undef TP_FIELDS
#define TP_FIELDS(...) __VA_ARGS__

#undef TP_locvar
#define TP_locvar(...) __VA_ARGS__

#undef TP_code_pre
#define TP_code_pre(...) __VA_ARGS__

#undef TP_code_post
#define TP_code_post(...) __VA_ARGS__
1280
1281 /*
1282 * Using twice size for filter stack data to hold size and pointer for
1283 * each field (worse case). For integers, max size required is 64-bit.
1284 * Same for double-precision floats. Those fit within
1285 * 2*sizeof(unsigned long) for all supported architectures.
1286 * Perform UNION (||) of filter runtime list.
1287 */
/*
 * Generate the event-notifier probe for an event class taking
 * arguments. Instead of recording into a ring buffer, the probe
 * evaluates the filter bytecode and, on match, sends a notification to
 * userspace (optionally with captured fields). The interpreter stack
 * is prepared lazily: once for filtering, and rebuilt for capture
 * evaluation only if filtering did not already prepare it.
 * All bail-out paths after _code_pre go through __post so _code_post
 * always runs.
 */
#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \
static void __event_notifier_probe__##_name(void *__data, _proto) \
{ \
	struct probe_local_vars { _locvar }; \
	struct lttng_kernel_event_notifier *__event_notifier = __data; \
	struct lttng_probe_ctx __lttng_probe_ctx = { \
		.event = NULL, \
		.event_notifier = __event_notifier, \
		.interruptible = !irqs_disabled(), \
	}; \
	union { \
		size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
		char __interpreter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
	} __stackvar; \
	struct probe_local_vars __tp_locvar; \
	struct probe_local_vars *tp_locvar __attribute__((unused)) = \
			&__tp_locvar; \
	struct lttng_kernel_notification_ctx __notif_ctx; \
	bool __interpreter_stack_prepared = false; \
	\
	/* LTTNG_READ_ONCE wrapper, for consistency with the recorder probes. */ \
	if (unlikely(!LTTNG_READ_ONCE(__event_notifier->parent.enabled))) \
		return; \
	_code_pre \
	if (unlikely(!list_empty(&__event_notifier->priv->parent.filter_bytecode_runtime_head))) { \
		struct lttng_bytecode_runtime *bc_runtime; \
		int __filter_record = __event_notifier->priv->parent.has_enablers_without_filter_bytecode; \
		\
		__event_prepare_interpreter_stack__##_name(__stackvar.__interpreter_stack_data, \
				tp_locvar, _args); \
		__interpreter_stack_prepared = true; \
		lttng_list_for_each_entry_rcu(bc_runtime, &__event_notifier->priv->parent.filter_bytecode_runtime_head, node) { \
			if (unlikely(bc_runtime->interpreter_funcs.filter(bc_runtime, &__lttng_probe_ctx, \
					__stackvar.__interpreter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) \
				__filter_record = 1; \
		} \
		if (likely(!__filter_record)) \
			goto __post; \
	} \
	\
	/* Rebuild the interpreter stack only if captures need it and filtering skipped it. */ \
	__notif_ctx.eval_capture = LTTNG_READ_ONCE(__event_notifier->eval_capture); \
	if (unlikely(!__interpreter_stack_prepared && __notif_ctx.eval_capture)) \
		__event_prepare_interpreter_stack__##_name( \
				__stackvar.__interpreter_stack_data, \
				tp_locvar, _args); \
	\
	__event_notifier->notification_send(__event_notifier, \
			&__lttng_probe_ctx, \
			__stackvar.__interpreter_stack_data, \
			&__notif_ctx); \
	\
__post: \
	_code_post \
	return; \
}
1343
/*
 * Same as the notifier LTTNG_TRACEPOINT_EVENT_CLASS_CODE above, but for
 * event classes taking no tracepoint arguments: the interpreter-stack
 * preparation helper receives tp_locvar only.
 */
#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS
#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \
static void __event_notifier_probe__##_name(void *__data) \
{ \
	struct probe_local_vars { _locvar }; \
	struct lttng_kernel_event_notifier *__event_notifier = __data; \
	struct lttng_probe_ctx __lttng_probe_ctx = { \
		.event = NULL, \
		.event_notifier = __event_notifier, \
		.interruptible = !irqs_disabled(), \
	}; \
	union { \
		size_t __dynamic_len_removed[ARRAY_SIZE(__event_fields___##_name)]; \
		char __interpreter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \
	} __stackvar; \
	struct probe_local_vars __tp_locvar; \
	struct probe_local_vars *tp_locvar __attribute__((unused)) = \
			&__tp_locvar; \
	struct lttng_kernel_notification_ctx __notif_ctx; \
	bool __interpreter_stack_prepared = false; \
	\
	/* LTTNG_READ_ONCE wrapper, for consistency with the recorder probes. */ \
	if (unlikely(!LTTNG_READ_ONCE(__event_notifier->parent.enabled))) \
		return; \
	_code_pre \
	if (unlikely(!list_empty(&__event_notifier->priv->parent.filter_bytecode_runtime_head))) { \
		struct lttng_bytecode_runtime *bc_runtime; \
		int __filter_record = __event_notifier->priv->parent.has_enablers_without_filter_bytecode; \
		\
		__event_prepare_interpreter_stack__##_name(__stackvar.__interpreter_stack_data, \
				tp_locvar); \
		__interpreter_stack_prepared = true; \
		lttng_list_for_each_entry_rcu(bc_runtime, &__event_notifier->priv->parent.filter_bytecode_runtime_head, node) { \
			if (unlikely(bc_runtime->interpreter_funcs.filter(bc_runtime, &__lttng_probe_ctx, \
					__stackvar.__interpreter_stack_data) & LTTNG_INTERPRETER_RECORD_FLAG)) \
				__filter_record = 1; \
		} \
		if (likely(!__filter_record)) \
			goto __post; \
	} \
	\
	/* Rebuild the interpreter stack only if captures need it and filtering skipped it. */ \
	__notif_ctx.eval_capture = LTTNG_READ_ONCE(__event_notifier->eval_capture); \
	if (unlikely(!__interpreter_stack_prepared && __notif_ctx.eval_capture)) \
		__event_prepare_interpreter_stack__##_name( \
				__stackvar.__interpreter_stack_data, \
				tp_locvar); \
	\
	__event_notifier->notification_send(__event_notifier, \
			&__lttng_probe_ctx, \
			__stackvar.__interpreter_stack_data, \
			&__notif_ctx); \
__post: \
	_code_post \
	return; \
}
1398
1399 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1400 /*
1401 * Stage 7 of the trace events.
1402 *
1403 * Create event descriptions.
1404 */
1405
1406 /* Named field types must be defined in lttng-types.h */
1407
1408 #include <lttng/events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1409
/*
 * Default probe callbacks point at the tracing and event-notifier
 * probes generated in the previous stages. Probe modules may override
 * these before including this file.
 */
#ifndef TP_PROBE_CB
#define TP_PROBE_CB(_template) &__event_probe__##_template
#endif

#ifndef TP_EVENT_NOTIFIER_PROBE_CB
#define TP_EVENT_NOTIFIER_PROBE_CB(_template) &__event_notifier_probe__##_template
#endif

/*
 * Emit one event descriptor per event instance, carrying both the
 * mapped (LTTng) event name and the kernel tracepoint name, along with
 * the field table and probe callbacks of its template class.
 */
#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
static const struct lttng_kernel_event_desc __event_desc___##_map = { \
	.event_name = #_map, \
	.event_kname = #_name, \
	.probe_callback = (void *) TP_PROBE_CB(_template), \
	.fields = __event_fields___##_template, \
	.nr_fields = ARRAY_SIZE(__event_fields___##_template), \
	.owner = THIS_MODULE, \
	.event_notifier_callback = (void *) TP_EVENT_NOTIFIER_PROBE_CB(_template), \
};

/* Events with arguments share the same descriptor layout. */
#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map)
1433
1434 #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
1435
1436 /*
1437 * Stage 8 of the trace events.
1438 *
1439 * Create an array of event description pointers.
1440 */
1441
1442 #include <lttng/events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1443
/* Each event instance contributes a pointer to its descriptor. */
#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \
	&__event_desc___##_map,

#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP
#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \
	LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map)

/* Two-level expansion so TRACE_SYSTEM is expanded before token pasting. */
#define TP_ID1(_token, _system) _token##_system
#define TP_ID(_token, _system) TP_ID1(_token, _system)

/* Array of all event descriptors belonging to this TRACE_SYSTEM. */
static const struct lttng_kernel_event_desc *TP_ID(__event_desc___, TRACE_SYSTEM)[] = {
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
};

#undef TP_ID1
#undef TP_ID
1461
1462 /*
1463 * Stage 9 of the trace events.
1464 *
1465 * Create a toplevel descriptor for the whole probe.
1466 */
1467
/* Two-level expansion so TRACE_SYSTEM is expanded before token pasting. */
#define TP_ID1(_token, _system) _token##_system
#define TP_ID(_token, _system) TP_ID1(_token, _system)

/*
 * Toplevel descriptor covering every event of this probe provider.
 * non-const because list head will be modified when registered.
 */
static __used struct lttng_kernel_probe_desc TP_ID(__probe_desc___, TRACE_SYSTEM) = {
	.provider_name = __stringify(TRACE_SYSTEM),
	.event_desc = TP_ID(__event_desc___, TRACE_SYSTEM),
	.nr_events = ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)),
	.head = { NULL, NULL },
	.lazy_init_head = { NULL, NULL },
	.lazy = 0,
};

#undef TP_ID1
#undef TP_ID
1483
1484 /*
1485 * Stage 10 of the trace events.
1486 *
1487 * Register/unregister probes at module load/unload.
1488 */
1489
1490 #include <lttng/events-reset.h> /* Reset all macros within LTTNG_TRACEPOINT_EVENT */
1491
/* Two-level expansion so TRACE_SYSTEM is expanded before being pasted
 * into module_init()/module_exit(). */
#define TP_ID1(_token, _system) _token##_system
#define TP_ID(_token, _system) TP_ID1(_token, _system)
#define module_init_eval1(_token, _system) module_init(_token##_system)
#define module_init_eval(_token, _system) module_init_eval1(_token, _system)
#define module_exit_eval1(_token, _system) module_exit(_token##_system)
#define module_exit_eval(_token, _system) module_exit_eval1(_token, _system)

#ifndef TP_MODULE_NOINIT
/*
 * Register this probe provider with LTTng at module load. vmalloc
 * mappings are synchronized first (see wrapper/vmalloc.h) so tracing
 * code can safely touch module memory.
 */
static int TP_ID(__lttng_events_init__, TRACE_SYSTEM)(void)
{
	wrapper_vmalloc_sync_mappings();
	return lttng_probe_register(&TP_ID(__probe_desc___, TRACE_SYSTEM));
}

static void TP_ID(__lttng_events_exit__, TRACE_SYSTEM)(void)
{
	lttng_probe_unregister(&TP_ID(__probe_desc___, TRACE_SYSTEM));
}

/* Auto-register unless the probe module performs its own init. */
#ifndef TP_MODULE_NOAUTOLOAD
module_init_eval(__lttng_events_init__, TRACE_SYSTEM);
module_exit_eval(__lttng_events_exit__, TRACE_SYSTEM);
#endif

#endif

#undef module_init_eval
#undef module_exit_eval
#undef TP_ID1
#undef TP_ID
1522
1523 #undef TP_PROTO
1524 #undef TP_ARGS