X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=probes%2Flttng-events.h;h=35d6f790a7df7dbdb6188f87acd01d84a3408166;hb=43803cf2bdfe205fe0e426f7e93507d722ba1feb;hp=1651fda6a128b86738fe62be5b5c7f6b4693642b;hpb=67e5e60c3a685dbab8ab7dbefb1fbadf16cdde03;p=lttng-modules.git diff --git a/probes/lttng-events.h b/probes/lttng-events.h index 1651fda6..35d6f790 100644 --- a/probes/lttng-events.h +++ b/probes/lttng-events.h @@ -1,1196 +1,1135 @@ -#include -#include -#include -#include -#include "../ltt-events.h" -#include "../ltt-tracer-core.h" - -struct lttng_event_field { - const char *name; - const struct lttng_type type; -}; +/* + * lttng-events.h + * + * Copyright (C) 2009 Steven Rostedt + * Copyright (C) 2009-2014 Mathieu Desnoyers + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; only + * version 2.1 of the License. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ -struct lttng_event_desc { - const struct lttng_event_field *fields; - const char *name; - unsigned int nr_fields; -}; +#include +#include +#include +#include +#include "lttng.h" +#include "lttng-types.h" +#include "lttng-probe-user.h" +#include "../wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */ +#include "../wrapper/ringbuffer/frontend_types.h" +#include "../wrapper/rcu.h" +#include "../lttng-events.h" +#include "../lttng-tracer-core.h" /* * Macro declarations used for all stages. */ /* - * DECLARE_EVENT_CLASS can be used to add a generic function - * handlers for events. That is, if all events have the same - * parameters and just have distinct trace points. - * Each tracepoint can be defined with DEFINE_EVENT and that - * will map the DECLARE_EVENT_CLASS to the tracepoint. + * LTTng name mapping macros. LTTng remaps some of the kernel events to + * enforce name-spacing. 
+ */ +#undef LTTNG_TRACEPOINT_EVENT_MAP +#define LTTNG_TRACEPOINT_EVENT_MAP(name, map, proto, args, fields) \ + LTTNG_TRACEPOINT_EVENT_CLASS(map, \ + PARAMS(proto), \ + PARAMS(args), \ + PARAMS(fields)) \ + LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args)) + +#undef LTTNG_TRACEPOINT_EVENT_MAP_NOARGS +#define LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, map, fields) \ + LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(map, \ + PARAMS(fields)) \ + LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(map, name, map) + +#undef LTTNG_TRACEPOINT_EVENT_CODE_MAP +#define LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, map, proto, args, _locvar, _code, fields) \ + LTTNG_TRACEPOINT_EVENT_CLASS_CODE(map, \ + PARAMS(proto), \ + PARAMS(args), \ + PARAMS(_locvar), \ + PARAMS(_code), \ + PARAMS(fields)) \ + LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args)) + +#undef LTTNG_TRACEPOINT_EVENT_CODE +#define LTTNG_TRACEPOINT_EVENT_CODE(name, proto, args, _locvar, _code, fields) \ + LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, name, \ + PARAMS(proto), \ + PARAMS(args), \ + PARAMS(_locvar), \ + PARAMS(_code), \ + PARAMS(fields)) + +/* + * LTTNG_TRACEPOINT_EVENT_CLASS can be used to add a generic function + * handlers for events. That is, if all events have the same parameters + * and just have distinct trace points. Each tracepoint can be defined + * with LTTNG_TRACEPOINT_EVENT_INSTANCE and that will map the + * LTTNG_TRACEPOINT_EVENT_CLASS to the tracepoint. * - * TRACE_EVENT is a one to one mapping between tracepoint and template. + * LTTNG_TRACEPOINT_EVENT is a one to one mapping between tracepoint and + * template. */ -#undef TRACE_EVENT -#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ - DECLARE_EVENT_CLASS(name, \ - PARAMS(proto), \ - PARAMS(args), \ - PARAMS(tstruct), \ - PARAMS(assign), \ - PARAMS(print)) \ - DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args)) - -#undef DEFINE_EVENT_PRINT -#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ - DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) - -/* Callbacks are meaningless to LTTng. */ -#undef TRACE_EVENT_FN -#define TRACE_EVENT_FN(name, proto, args, tstruct, \ - assign, print, reg, unreg) \ - TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \ - PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \ +#undef LTTNG_TRACEPOINT_EVENT +#define LTTNG_TRACEPOINT_EVENT(name, proto, args, fields) \ + LTTNG_TRACEPOINT_EVENT_MAP(name, name, \ + PARAMS(proto), \ + PARAMS(args), \ + PARAMS(fields)) + +#undef LTTNG_TRACEPOINT_EVENT_NOARGS +#define LTTNG_TRACEPOINT_EVENT_NOARGS(name, fields) \ + LTTNG_TRACEPOINT_EVENT_MAP_NOARGS(name, name, PARAMS(fields)) + +#undef LTTNG_TRACEPOINT_EVENT_INSTANCE +#define LTTNG_TRACEPOINT_EVENT_INSTANCE(template, name, proto, args) \ + LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(template, name, name, PARAMS(proto), PARAMS(args)) + +#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS +#define LTTNG_TRACEPOINT_EVENT_INSTANCE_NOARGS(template, name) \ + LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(template, name, name) + +#undef LTTNG_TRACEPOINT_EVENT_CLASS +#define LTTNG_TRACEPOINT_EVENT_CLASS(_name, _proto, _args, _fields) \ + LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, PARAMS(_proto), PARAMS(_args), , , \ + PARAMS(_fields)) + +#undef LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS +#define LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \ + LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, , , PARAMS(_fields)) + /* * Stage 1 of the trace events. * - * Create event field type metadata section. 
- * Each event produce an array of fields. + * Create dummy trace calls for each events, verifying that the LTTng module + * instrumentation headers match the kernel arguments. Will be optimized + * out by the compiler. + */ + +/* Reset all macros within TRACEPOINT_EVENT */ +#include "lttng-events-reset.h" + +#undef TP_PROTO +#define TP_PROTO(...) __VA_ARGS__ + +#undef TP_ARGS +#define TP_ARGS(...) __VA_ARGS__ + +#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP +#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \ +void trace_##_name(_proto); + +#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS +#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \ +void trace_##_name(void); + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +/* + * Stage 1.1 of the trace events. + * + * Create dummy trace prototypes for each event class, and for each used + * template. This will allow checking whether the prototypes from the + * class and the instance using the class actually match. */ #include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */ -/* Named field types must be defined in lttng-types.h */ +#undef TP_PROTO +#define TP_PROTO(...) __VA_ARGS__ + +#undef TP_ARGS +#define TP_ARGS(...) __VA_ARGS__ + +#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP +#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \ +void __event_template_proto___##_template(_proto); + +#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS +#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \ +void __event_template_proto___##_template(void); + +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code, _fields) \ +void __event_template_proto___##_name(_proto); + +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, _fields) \ +void __event_template_proto___##_name(void); + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +/* + * Stage 2 of the trace events. + * + * Create event field type metadata section. + * Each event produce an array of fields. 
+ */ + +/* Reset all macros within TRACEPOINT_EVENT */ +#include "lttng-events-reset.h" +#include "lttng-events-write.h" +#include "lttng-events-nowrite.h" -#undef __field -#define __field(_type, _item) \ - { .name = #_item, .type = { .atype = atype_integer, .name = #_type} }, +#undef _ctf_integer_ext +#define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \ + { \ + .name = #_item, \ + .type = __type_integer(_type, 0, 0, -1, _byte_order, _base, none),\ + .nowrite = _nowrite, \ + .user = _user, \ + }, -#undef __field_ext -#define __field_ext(_type, _item, _filter_type) __field(_type, _item) +#undef _ctf_array_encoded +#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \ + { \ + .name = #_item, \ + .type = \ + { \ + .atype = atype_array, \ + .u = \ + { \ + .array = \ + { \ + .elem_type = __type_integer(_type, 0, 0, 0, __BYTE_ORDER, 10, _encoding), \ + .length = _length, \ + } \ + } \ + }, \ + .nowrite = _nowrite, \ + .user = _user, \ + }, -#undef __array -#define __array(_type, _item, _length) \ +#undef _ctf_array_bitfield +#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \ { \ - .name = #_item, \ - .type = { \ + .name = #_item, \ + .type = \ + { \ .atype = atype_array, \ - .name = NULL, \ - .u.array.elem_type = #_type, \ - .u.array.length = _length, \ + .u = \ + { \ + .array = \ + { \ + .elem_type = __type_integer(_type, 1, 1, 0, __LITTLE_ENDIAN, 10, none), \ + .length = (_length) * sizeof(_type) * CHAR_BIT, \ + .elem_alignment = lttng_alignof(_type), \ + } \ + } \ + }, \ + .nowrite = _nowrite, \ + .user = _user, \ + }, + + +#undef _ctf_sequence_encoded +#define _ctf_sequence_encoded(_type, _item, _src, \ + _length_type, _src_length, _encoding, \ + _byte_order, _base, _user, _nowrite) \ + { \ + .name = #_item, \ + .type = \ + { \ + .atype = atype_sequence, \ + .u = \ + { \ + .sequence = \ + { \ + .length_type = __type_integer(_length_type, 0, 0, 0, __BYTE_ORDER, 10, none), \ + .elem_type = __type_integer(_type, 0, 0, -1, _byte_order, _base, _encoding), \ + }, \ + }, \ }, \ + .nowrite = _nowrite, \ + .user = _user, \ }, -#undef __dynamic_array -#define __dynamic_array(_type, _item, _length) \ +#undef _ctf_sequence_bitfield +#define _ctf_sequence_bitfield(_type, _item, _src, \ + _length_type, _src_length, \ + _user, _nowrite) \ { \ - .name = #_item, \ - .type = { \ + .name = #_item, \ + .type = \ + { \ .atype = atype_sequence, \ - .name = NULL, \ - .u.sequence.elem_type = #_type, \ - .u.sequence.length_type = "u32", \ + .u = \ + { \ + .sequence = \ + { \ + .length_type = __type_integer(_length_type, 0, 0, 0, __BYTE_ORDER, 10, none), \ + .elem_type = __type_integer(_type, 1, 1, 0, __LITTLE_ENDIAN, 10, none), \ + .elem_alignment = lttng_alignof(_type), \ + }, \ + }, \ }, \ + .nowrite = _nowrite, \ + .user = _user, \ }, -#undef __string -#define __string(_item, _src) \ +#undef _ctf_string +#define _ctf_string(_item, _src, _user, _nowrite) \ { \ - .name = #_item, \ - .type = { \ + .name = #_item, \ + .type = \ + { \ .atype = atype_string, \ - .name = NULL, \ - .u.string.encoding = lttng_encode_UTF8, \ + .u = \ + { \ + .basic = { .string = { .encoding = lttng_encode_UTF8 } } \ + }, \ }, \ + .nowrite = _nowrite, \ + .user = _user, \ }, -#undef TP_STRUCT__entry -#define TP_STRUCT__entry(args...) args /* Only one used in this phase */ +#undef TP_FIELDS +#define TP_FIELDS(...) 
__VA_ARGS__ /* Only one used in this phase */ -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \ +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, _fields) \ static const struct lttng_event_field __event_fields___##_name[] = { \ - _tstruct \ + _fields \ }; +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code, _fields) \ + LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, PARAMS(_fields)) + #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* - * Stage 2 of the trace events. + * Stage 3 of the trace events. * - * Create an array of events. + * Create probe callback prototypes. */ -/* Named field types must be defined in lttng-types.h */ +/* Reset all macros within TRACEPOINT_EVENT */ +#include "lttng-events-reset.h" -#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */ +#undef TP_PROTO +#define TP_PROTO(...) __VA_ARGS__ -#undef DEFINE_EVENT -#define DEFINE_EVENT(_template, _name, _proto, _args) \ - { \ - .fields = __event_fields___##_template, \ - .name = #_name, \ - .nr_fields = ARRAY_SIZE(__event_fields___##_template), \ - }, +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code, _fields) \ +static void __event_probe__##_name(void *__data, _proto); -#define TP_ID1(_token, _system) _token##_system -#define TP_ID(_token, _system) TP_ID1(_token, _system) +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, _fields) \ +static void __event_probe__##_name(void *__data); -static const struct lttng_event_desc TP_ID(__event_desc___, TRACE_SYSTEM)[] = { #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -}; - -#undef TP_ID1 -#undef TP_ID /* - * Stage 3 of the trace events. + * Stage 4 of the trace events. * - * Create seq file metadata output. + * Create static inline function that calculates event size. 
*/ -#define TP_ID1(_token, _system) _token##_system -#define TP_ID(_token, _system) TP_ID1(_token, _system) -#define module_init_eval1(_token, _system) module_init(_token##_system) -#define module_init_eval(_token, _system) module_init_eval1(_token, _system) -#define module_exit_eval1(_token, _system) module_exit(_token##_system) -#define module_exit_eval(_token, _system) module_exit_eval1(_token, _system) - -static void *TP_ID(__lttng_seq_start__, TRACE_SYSTEM)(struct seq_file *m, - loff_t *pos) -{ - const struct lttng_event_desc *desc = - &TP_ID(__event_desc___, TRACE_SYSTEM)[*pos]; - - if (desc > &TP_ID(__event_desc___, TRACE_SYSTEM) - [ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)) - 1]) - return NULL; - return (void *) desc; -} - -static void *TP_ID(__lttng_seq_next__, TRACE_SYSTEM)(struct seq_file *m, - void *p, loff_t *ppos) -{ - const struct lttng_event_desc *desc = - &TP_ID(__event_desc___, TRACE_SYSTEM)[++(*ppos)]; +/* Reset all macros within TRACEPOINT_EVENT */ +#include "lttng-events-reset.h" +#include "lttng-events-write.h" - if (desc > &TP_ID(__event_desc___, TRACE_SYSTEM) - [ARRAY_SIZE(TP_ID(__event_desc___, TRACE_SYSTEM)) - 1]) - return NULL; - return (void *) desc; -} +#undef _ctf_integer_ext +#define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \ + __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \ + __event_len += sizeof(_type); -static void TP_ID(__lttng_seq_stop__, TRACE_SYSTEM)(struct seq_file *m, - void *p) -{ -} +#undef _ctf_array_encoded +#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \ + __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \ + __event_len += sizeof(_type) * (_length); -static int TP_ID(__lttng_seq_show__, TRACE_SYSTEM)(struct seq_file *m, - void *p) -{ - const struct lttng_event_desc *desc = p; - int i; - - seq_printf(m, "event {\n" - "\tname = %s;\n" - "\tid = UNKNOWN;\n" - "\tstream = UNKNOWN;\n" - "\tfields = {\n", - desc->name); - for (i = 0; i < desc->nr_fields; i++) { - if (desc->fields[i].type.name) /* Named type */ - seq_printf(m, "\t\t%s", - desc->fields[i].type.name); - else /* Nameless type */ - lttng_print_event_type(m, 2, &desc->fields[i].type); - seq_printf(m, " %s;\n", desc->fields[i].name); - } - seq_printf(m, "\t};\n"); - seq_printf(m, "};\n"); - return 0; -} +#undef _ctf_array_bitfield +#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \ + _ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite) + +#undef _ctf_sequence_encoded +#define _ctf_sequence_encoded(_type, _item, _src, _length_type, \ + _src_length, _encoding, _byte_order, _base, _user, _nowrite) \ + __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_length_type)); \ + __event_len += sizeof(_length_type); \ + __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \ + __dynamic_len[__dynamic_len_idx] = (_src_length); \ + __event_len += sizeof(_type) * __dynamic_len[__dynamic_len_idx]; \ + __dynamic_len_idx++; + +#undef _ctf_sequence_bitfield +#define _ctf_sequence_bitfield(_type, _item, _src, \ + _length_type, _src_length, \ + _user, _nowrite) \ + _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \ + none, __LITTLE_ENDIAN, 10, _user, _nowrite) -static const -struct seq_operations TP_ID(__lttng_types_seq_ops__, TRACE_SYSTEM) = { - .start = TP_ID(__lttng_seq_start__, TRACE_SYSTEM), - .next = TP_ID(__lttng_seq_next__, TRACE_SYSTEM), - .stop = TP_ID(__lttng_seq_stop__, 
TRACE_SYSTEM), - .show = TP_ID(__lttng_seq_show__, TRACE_SYSTEM), -}; +/* + * ctf_user_string includes \0. If returns 0, it faulted, so we set size to + * 1 (\0 only). + */ +#undef _ctf_string +#define _ctf_string(_item, _src, _user, _nowrite) \ + if (_user) \ + __event_len += __dynamic_len[__dynamic_len_idx++] = \ + max_t(size_t, lttng_strlen_user_inatomic(_src), 1); \ + else \ + __event_len += __dynamic_len[__dynamic_len_idx++] = \ + strlen(_src) + 1; -static int -TP_ID(__lttng_types_open__, TRACE_SYSTEM)(struct inode *inode, struct file *file) -{ - return seq_open(file, &TP_ID(__lttng_types_seq_ops__, TRACE_SYSTEM)); -} +#undef TP_PROTO +#define TP_PROTO(...) __VA_ARGS__ -static const -struct file_operations TP_ID(__lttng_types_fops__, TRACE_SYSTEM) = { - .open = TP_ID(__lttng_types_open__, TRACE_SYSTEM), - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release_private, -}; +#undef TP_FIELDS +#define TP_FIELDS(...) __VA_ARGS__ -static struct dentry *TP_ID(__lttng_types_dentry__, TRACE_SYSTEM); +#undef TP_locvar +#define TP_locvar(...) __VA_ARGS__ -static int TP_ID(__lttng_types_init__, TRACE_SYSTEM)(void) -{ - int ret = 0; - - TP_ID(__lttng_types_dentry__, TRACE_SYSTEM) = - debugfs_create_file("lttng-events-" __stringify(TRACE_SYSTEM), - S_IWUSR, NULL, NULL, - &TP_ID(__lttng_types_fops__, TRACE_SYSTEM)); - if (IS_ERR(TP_ID(__lttng_types_dentry__, TRACE_SYSTEM)) - || !TP_ID(__lttng_types_dentry__, TRACE_SYSTEM)) { - printk(KERN_ERR "Error creating LTTng type export file\n"); - ret = -ENOMEM; - goto error; - } -error: - return ret; +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code, _fields) \ +static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \ + void *__tp_locvar, _proto) \ +{ \ + size_t __event_len = 0; \ + unsigned int __dynamic_len_idx __attribute__((unused)) = 0; \ + struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \ + \ + _fields \ + return __event_len; \ } -module_init_eval(__lttng_types_init__, TRACE_SYSTEM); - -static void TP_ID(__lttng_types_exit__, TRACE_SYSTEM)(void) -{ - debugfs_remove(TP_ID(__lttng_types_dentry__, TRACE_SYSTEM)); +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, _fields) \ +static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \ + void *__tp_locvar) \ +{ \ + size_t __event_len = 0; \ + unsigned int __dynamic_len_idx __attribute__((unused)) = 0; \ + struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \ + \ + _fields \ + return __event_len; \ } -module_exit_eval(__lttng_types_exit__, TRACE_SYSTEM); +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) -#undef module_init_eval -#undef module_exit_eval -#undef TP_ID1 -#undef TP_ID /* - * Stage 4 of the trace events. + * Stage 4.1 of tracepoint event generation. * - * Create static inline function that calculates event size. + * Create static inline function that layout the filter stack data. + * We make both write and nowrite data available to the filter. 
*/ -#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */ - -/* Named field types must be defined in lttng-types.h */ +/* Reset all macros within TRACEPOINT_EVENT */ +#include "lttng-events-reset.h" +#include "lttng-events-write.h" +#include "lttng-events-nowrite.h" + +#undef _ctf_integer_ext_fetched +#define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \ + if (lttng_is_signed_type(_type)) { \ + int64_t __ctf_tmp_int64; \ + switch (sizeof(_type)) { \ + case 1: \ + { \ + union { _type t; int8_t v; } __tmp = { (_type) (_src) }; \ + __ctf_tmp_int64 = (int64_t) __tmp.v; \ + break; \ + } \ + case 2: \ + { \ + union { _type t; int16_t v; } __tmp = { (_type) (_src) }; \ + __ctf_tmp_int64 = (int64_t) __tmp.v; \ + break; \ + } \ + case 4: \ + { \ + union { _type t; int32_t v; } __tmp = { (_type) (_src) }; \ + __ctf_tmp_int64 = (int64_t) __tmp.v; \ + break; \ + } \ + case 8: \ + { \ + union { _type t; int64_t v; } __tmp = { (_type) (_src) }; \ + __ctf_tmp_int64 = (int64_t) __tmp.v; \ + break; \ + } \ + default: \ + BUG_ON(1); \ + }; \ + memcpy(__stack_data, &__ctf_tmp_int64, sizeof(int64_t)); \ + } else { \ + uint64_t __ctf_tmp_uint64; \ + switch (sizeof(_type)) { \ + case 1: \ + { \ + union { _type t; uint8_t v; } __tmp = { (_type) (_src) }; \ + __ctf_tmp_uint64 = (uint64_t) __tmp.v; \ + break; \ + } \ + case 2: \ + { \ + union { _type t; uint16_t v; } __tmp = { (_type) (_src) }; \ + __ctf_tmp_uint64 = (uint64_t) __tmp.v; \ + break; \ + } \ + case 4: \ + { \ + union { _type t; uint32_t v; } __tmp = { (_type) (_src) }; \ + __ctf_tmp_uint64 = (uint64_t) __tmp.v; \ + break; \ + } \ + case 8: \ + { \ + union { _type t; uint64_t v; } __tmp = { (_type) (_src) }; \ + __ctf_tmp_uint64 = (uint64_t) __tmp.v; \ + break; \ + } \ + default: \ + BUG_ON(1); \ + }; \ + memcpy(__stack_data, &__ctf_tmp_uint64, sizeof(uint64_t)); \ + } \ + __stack_data += sizeof(int64_t); + +#undef _ctf_integer_ext_isuser0 +#define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \ + _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) + +#undef _ctf_integer_ext_isuser1 +#define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \ +{ \ + __typeof__(_user_src) _src; \ + if (get_user(_src, &(_user_src))) \ + _src = 0; \ + _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \ +} -#undef __field -#define __field(_type, _item) \ - __event_len += lib_ring_buffer_align(__event_len, sizeof(_type)); \ - __event_len += sizeof(_type); +#undef _ctf_integer_ext +#define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \ + _ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite) + +#undef _ctf_array_encoded +#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \ + { \ + unsigned long __ctf_tmp_ulong = (unsigned long) (_length); \ + const void *__ctf_tmp_ptr = (_src); \ + memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \ + __stack_data += sizeof(unsigned long); \ + memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \ + __stack_data += sizeof(void *); \ + } -#undef __field_ext -#define __field_ext(_type, _item, _filter_type) __field(_type, _item) +#undef _ctf_array_bitfield +#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \ + _ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite) + +#undef _ctf_sequence_encoded +#define _ctf_sequence_encoded(_type, _item, 
_src, _length_type, \ + _src_length, _encoding, _byte_order, _base, _user, _nowrite) \ + { \ + unsigned long __ctf_tmp_ulong = (unsigned long) (_src_length); \ + const void *__ctf_tmp_ptr = (_src); \ + memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \ + __stack_data += sizeof(unsigned long); \ + memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \ + __stack_data += sizeof(void *); \ + } -#undef __array -#define __array(_type, _item, _length) \ - __event_len += lib_ring_buffer_align(__event_len, sizeof(_type)); \ - __event_len += sizeof(_type) * (_length); +#undef _ctf_sequence_bitfield +#define _ctf_sequence_bitfield(_type, _item, _src, \ + _length_type, _src_length, \ + _user, _nowrite) \ + _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \ + none, __LITTLE_ENDIAN, 10, _user, _nowrite) + +#undef _ctf_string +#define _ctf_string(_item, _src, _user, _nowrite) \ + { \ + const void *__ctf_tmp_ptr = (_src); \ + memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \ + __stack_data += sizeof(void *); \ + } -#undef __dynamic_array -#define __dynamic_array(_type, _item, _length) \ - __event_len += lib_ring_buffer_align(__event_len, sizeof(u32)); \ - __event_len += sizeof(u32); \ - __event_len += lib_ring_buffer_align(__event_len, sizeof(_type)); \ - __event_len += sizeof(_type) * (_length); +#undef TP_PROTO +#define TP_PROTO(...) __VA_ARGS__ -#undef __string -#define __string(_item, _src) \ - __event_len += __dynamic_len[__dynamic_len_idx++] = strlen(_src) + 1; +#undef TP_FIELDS +#define TP_FIELDS(...) __VA_ARGS__ -#undef TP_PROTO -#define TP_PROTO(args...) args +#undef TP_locvar +#define TP_locvar(...) __VA_ARGS__ -#undef TP_STRUCT__entry -#define TP_STRUCT__entry(args...) args +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, _fields) \ +static inline \ +void __event_prepare_filter_stack__##_name(char *__stack_data, \ + void *__tp_locvar) \ +{ \ + struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \ + \ + _fields \ +} -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \ -static inline size_t __event_get_size__##_name(size_t *__dynamic_len, _proto) \ +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code, _fields) \ +static inline \ +void __event_prepare_filter_stack__##_name(char *__stack_data, \ + void *__tp_locvar, _proto) \ { \ - size_t __event_len = 0; \ - unsigned int __dynamic_len_idx = 0; \ + struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \ \ - if (0) \ - (void) __dynamic_len_idx; /* don't warn if unused */ \ - _tstruct \ - return __event_len; \ + _fields \ } #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) - - /* * Stage 5 of the trace events. * * Create static inline function that calculates event payload alignment. 
*/ -#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */ +/* Reset all macros within TRACEPOINT_EVENT */ +#include "lttng-events-reset.h" +#include "lttng-events-write.h" -/* Named field types must be defined in lttng-types.h */ +#undef _ctf_integer_ext +#define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \ + __event_align = max_t(size_t, __event_align, lttng_alignof(_type)); -#undef __field -#define __field(_type, _item) \ - __event_align = max_t(size_t, __event_align, __alignof__(_type)); +#undef _ctf_array_encoded +#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \ + __event_align = max_t(size_t, __event_align, lttng_alignof(_type)); -#undef __field_ext -#define __field_ext(_type, _item, _filter_type) __field(_type, _item) +#undef _ctf_array_bitfield +#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \ + _ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite) -#undef __array -#define __array(_type, _item, _length) \ - __event_align = max_t(size_t, __event_align, __alignof__(_type)); +#undef _ctf_sequence_encoded +#define _ctf_sequence_encoded(_type, _item, _src, _length_type, \ + _src_length, _encoding, _byte_order, _base, _user, _nowrite) \ + __event_align = max_t(size_t, __event_align, lttng_alignof(_length_type)); \ + __event_align = max_t(size_t, __event_align, lttng_alignof(_type)); -#undef __dynamic_array -#define __dynamic_array(_type, _item, _length) \ - __event_align = max_t(size_t, __event_align, __alignof__(u32)); \ - __event_align = max_t(size_t, __event_align, __alignof__(_type)); +#undef _ctf_sequence_bitfield +#define _ctf_sequence_bitfield(_type, _item, _src, \ + _length_type, _src_length, \ + _user, _nowrite) \ + _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \ + none, __LITTLE_ENDIAN, 10, _user, _nowrite) -#undef __string -#define __string(_item, _src) +#undef _ctf_string +#define _ctf_string(_item, _src, _user, _nowrite) #undef TP_PROTO -#define TP_PROTO(args...) args +#define TP_PROTO(...) __VA_ARGS__ -#undef TP_STRUCT__entry -#define TP_STRUCT__entry(args...) args +#undef TP_FIELDS +#define TP_FIELDS(...) __VA_ARGS__ -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \ -static inline size_t __event_get_align__##_name(_proto) \ +#undef TP_locvar +#define TP_locvar(...) __VA_ARGS__ + +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code, _fields) \ +static inline size_t __event_get_align__##_name(void *__tp_locvar, _proto) \ { \ size_t __event_align = 1; \ - _tstruct \ + struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \ + \ + _fields \ return __event_align; \ } -#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) - +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, _fields) \ +static inline size_t __event_get_align__##_name(void *__tp_locvar) \ +{ \ + size_t __event_align = 1; \ + struct { _locvar } *tp_locvar __attribute__((unused)) = __tp_locvar; \ + \ + _fields \ + return __event_align; \ +} +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* - * Stage 6 of the trace events. - * - * Create the probe function : call even size calculation and write event data - * into the buffer. + * Stage 6 of tracepoint event generation. 
* - * We use both the field and assignment macros to write the fields in the order - * defined in the field declaration. The field declarations control the - * execution order, jumping to the appropriate assignment block. + * Create the probe function. This function calls event size calculation + * and writes event data into the buffer. */ -#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */ +/* Reset all macros within TRACEPOINT_EVENT */ +#include "lttng-events-reset.h" +#include "lttng-events-write.h" -#undef __field -#define __field(_type, _item) \ - lib_ring_buffer_align_ctx(&ctx, sizeof(_type)); \ - goto __assign_##_item; \ -__end_field_##_item: - -#undef __field_ext -#define __field_ext(_type, _item, _filter_type) __field(_type, _item) - -#undef __array -#define __array(_type, _item, _length) \ - lib_ring_buffer_align_ctx(&ctx, sizeof(_type)); \ - goto __assign_##_item; \ -__end_field_##_item: - -#undef __dynamic_array -#define __dynamic_array(_type, _item, _length) \ - lib_ring_buffer_align_ctx(&ctx, sizeof(u32)); \ - goto __assign_##_item##_1; \ -__end_field_##_item##_1: \ - lib_ring_buffer_align_ctx(&ctx, sizeof(_type)); \ - goto __assign_##_item##_2; \ -__end_field_##_item##_2: - -#undef __string -#define __string(_item, _src) \ - goto __assign_##_item; \ -__end_field_##_item: +#undef _ctf_integer_ext_fetched +#define _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \ + { \ + _type __tmp = _src; \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(__tmp));\ + __chan->ops->event_write(&__ctx, &__tmp, sizeof(__tmp));\ + } + +#undef _ctf_integer_ext_isuser0 +#define _ctf_integer_ext_isuser0(_type, _item, _src, _byte_order, _base, _nowrite) \ + _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) + +#undef _ctf_integer_ext_isuser1 +#define _ctf_integer_ext_isuser1(_type, _item, _user_src, _byte_order, _base, _nowrite) \ +{ \ + __typeof__(_user_src) _src; \ + if (get_user(_src, &(_user_src))) \ + _src = 0; \ + _ctf_integer_ext_fetched(_type, _item, _src, _byte_order, _base, _nowrite) \ +} + +#undef _ctf_integer_ext +#define _ctf_integer_ext(_type, _item, _user_src, _byte_order, _base, _user, _nowrite) \ + _ctf_integer_ext_isuser##_user(_type, _item, _user_src, _byte_order, _base, _nowrite) + +#undef _ctf_array_encoded +#define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \ + if (_user) { \ + __chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \ + } else { \ + __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \ + } +#if (__BYTE_ORDER == __LITTLE_ENDIAN) +#undef _ctf_array_bitfield +#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \ + if (_user) { \ + __chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \ + } else { \ + __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \ + } +#else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */ /* - * Macros mapping tp_assign() to "=", tp_memcpy() to memcpy() and tp_strcpy() to - * strcpy(). + * For big endian, we need to byteswap into little endian. 
*/ -#undef tp_assign -#define tp_assign(dest, src) \ -__assign_##dest: \ +#undef _ctf_array_bitfield +#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \ { \ - __typeof__(src) __tmp = (src); \ - __chan->ops->event_write(&ctx, &__tmp, sizeof(src)); \ - } \ - goto __end_field_##dest; - -#undef tp_memcpy -#define tp_memcpy(dest, src, len) \ -__assign_##dest: \ - __chan->ops->event_write(&ctx, src, len); \ - goto __end_field_##dest; + size_t _i; \ + \ + for (_i = 0; _i < (_length); _i++) { \ + _type _tmp; \ + \ + if (_user) { \ + if (get_user(_tmp, (_type *) _src + _i)) \ + _tmp = 0; \ + } else { \ + _tmp = ((_type *) _src)[_i]; \ + } \ + switch (sizeof(_type)) { \ + case 1: \ + break; \ + case 2: \ + _tmp = cpu_to_le16(_tmp); \ + break; \ + case 4: \ + _tmp = cpu_to_le32(_tmp); \ + break; \ + case 8: \ + _tmp = cpu_to_le64(_tmp); \ + break; \ + default: \ + BUG_ON(1); \ + } \ + __chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \ + } \ + } +#endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */ -#undef tp_memcpy_dyn -#define tp_memcpy_dyn(dest, src, len) \ -__assign_##dest##_1: \ +#undef _ctf_sequence_encoded +#define _ctf_sequence_encoded(_type, _item, _src, _length_type, \ + _src_length, _encoding, _byte_order, _base, _user, _nowrite) \ { \ - __typeof__(len) __tmpl = (len); \ - __chan->ops->event_write(&ctx, &__tmpl, sizeof(u32)); \ + _length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx]; \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\ + __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\ } \ - goto __end_field_##dest##_1; \ -__assign_##dest##_2: \ - __chan->ops->event_write(&ctx, src, len); \ - goto __end_field_##dest##_2; - -#undef tp_strcpy -#define tp_strcpy(dest, src) \ - tp_memcpy(dest, src, __get_dynamic_array_len(dest)); \ - -/* Named field types must be defined in lttng-types.h */ - -#undef __get_str -#define __get_str(field) field + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \ + if (_user) { \ + __chan->ops->event_write_from_user(&__ctx, _src, \ + sizeof(_type) * __get_dynamic_len(dest)); \ + } else { \ + __chan->ops->event_write(&__ctx, _src, \ + sizeof(_type) * __get_dynamic_len(dest)); \ + } -#undef __get_dynamic_array -#define __get_dynamic_array(field) field +#if (__BYTE_ORDER == __LITTLE_ENDIAN) +#undef _ctf_sequence_bitfield +#define _ctf_sequence_bitfield(_type, _item, _src, \ + _length_type, _src_length, \ + _user, _nowrite) \ + { \ + _length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\ + __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\ + } \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \ + if (_user) { \ + __chan->ops->event_write_from_user(&__ctx, _src, \ + sizeof(_type) * __get_dynamic_len(dest)); \ + } else { \ + __chan->ops->event_write(&__ctx, _src, \ + sizeof(_type) * __get_dynamic_len(dest)); \ + } +#else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */ +/* + * For big endian, we need to byteswap into little endian. 
+ */ +#undef _ctf_sequence_bitfield +#define _ctf_sequence_bitfield(_type, _item, _src, \ + _length_type, _src_length, \ + _user, _nowrite) \ + { \ + _length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\ + __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\ + } \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \ + { \ + size_t _i, _length; \ + \ + _length = __get_dynamic_len(dest); \ + for (_i = 0; _i < _length; _i++) { \ + _type _tmp; \ + \ + if (_user) { \ + if (get_user(_tmp, (_type *) _src + _i)) \ + _tmp = 0; \ + } else { \ + _tmp = ((_type *) _src)[_i]; \ + } \ + switch (sizeof(_type)) { \ + case 1: \ + break; \ + case 2: \ + _tmp = cpu_to_le16(_tmp); \ + break; \ + case 4: \ + _tmp = cpu_to_le32(_tmp); \ + break; \ + case 8: \ + _tmp = cpu_to_le64(_tmp); \ + break; \ + default: \ + BUG_ON(1); \ + } \ + __chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \ + } \ + } +#endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */ + +#undef _ctf_string +#define _ctf_string(_item, _src, _user, _nowrite) \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(*(_src))); \ + if (_user) { \ + __chan->ops->event_strcpy_from_user(&__ctx, _src, \ + __get_dynamic_len(dest)); \ + } else { \ + __chan->ops->event_strcpy(&__ctx, _src, \ + __get_dynamic_len(dest)); \ + } /* Beware: this get len actually consumes the len value */ -#undef __get_dynamic_array_len -#define __get_dynamic_array_len(field) __dynamic_len[__dynamic_len_idx++] +#undef __get_dynamic_len +#define __get_dynamic_len(field) __stackvar.__dynamic_len[__dynamic_len_idx++] #undef TP_PROTO -#define TP_PROTO(args...) args +#define TP_PROTO(...) __VA_ARGS__ #undef TP_ARGS -#define TP_ARGS(args...) args +#define TP_ARGS(...) __VA_ARGS__ + +#undef TP_FIELDS +#define TP_FIELDS(...) __VA_ARGS__ -#undef TP_STRUCT__entry -#define TP_STRUCT__entry(args...) args +#undef TP_locvar +#define TP_locvar(...) __VA_ARGS__ -#undef TP_fast_assign -#define TP_fast_assign(args...) args +#undef TP_code +#define TP_code(...) __VA_ARGS__ -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(_name, _proto, _args, _tstruct, _assign, _print) \ +/* + * For state dump, check that "session" argument (mandatory) matches the + * session this event belongs to. Ensures that we write state dump data only + * into the started session, not into all sessions. + */ +#ifdef TP_SESSION_CHECK +#define _TP_SESSION_CHECK(session, csession) (session == csession) +#else /* TP_SESSION_CHECK */ +#define _TP_SESSION_CHECK(session, csession) 1 +#endif /* TP_SESSION_CHECK */ + +/* + * Using twice size for filter stack data to hold size and pointer for + * each field (worse case). For integers, max size required is 64-bit. + * Same for double-precision floats. Those fit within + * 2*sizeof(unsigned long) for all supported architectures. + * Perform UNION (||) of filter runtime list. 
+ */ +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code, _fields) \ static void __event_probe__##_name(void *__data, _proto) \ { \ - struct ltt_event *__event = __data; \ - struct ltt_channel *__chan = __event->chan; \ - struct lib_ring_buffer_ctx ctx; \ + struct probe_local_vars { _locvar }; \ + struct lttng_event *__event = __data; \ + struct lttng_channel *__chan = __event->chan; \ + struct lttng_session *__session = __chan->session; \ + struct lib_ring_buffer_ctx __ctx; \ size_t __event_len, __event_align; \ - size_t __dynamic_len_idx = 0; \ - size_t __dynamic_len[ARRAY_SIZE(__event_fields___##_name)]; \ + size_t __dynamic_len_idx __attribute__((unused)) = 0; \ + union { \ + size_t __dynamic_len[ARRAY_SIZE(__event_fields___##_name)]; \ + char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \ + } __stackvar; \ int __ret; \ + struct probe_local_vars __tp_locvar; \ + struct probe_local_vars *tp_locvar __attribute__((unused)) = \ + &__tp_locvar; \ + struct lttng_pid_tracker *__lpf; \ + \ + if (!_TP_SESSION_CHECK(session, __session)) \ + return; \ + if (unlikely(!ACCESS_ONCE(__session->active))) \ + return; \ + if (unlikely(!ACCESS_ONCE(__chan->enabled))) \ + return; \ + if (unlikely(!ACCESS_ONCE(__event->enabled))) \ + return; \ + __lpf = lttng_rcu_dereference(__session->pid_tracker); \ + if (__lpf && likely(!lttng_pid_tracker_lookup(__lpf, current->pid))) \ + return; \ + _code \ + if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \ + struct lttng_bytecode_runtime *bc_runtime; \ + int __filter_record = __event->has_enablers_without_bytecode; \ \ - if (0) \ - (void) __dynamic_len_idx; /* don't warn if unused */ \ - __event_len = __event_get_size__##_name(__dynamic_len, _args); \ - __event_align = __event_get_align__##_name(_args); \ - lib_ring_buffer_ctx_init(&ctx, __chan->chan, NULL, __event_len, \ + __event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \ + tp_locvar, _args); \ + lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \ + if (unlikely(bc_runtime->filter(bc_runtime, \ + __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \ + __filter_record = 1; \ + } \ + if (likely(!__filter_record)) \ + return; \ + } \ + __event_len = __event_get_size__##_name(__stackvar.__dynamic_len, \ + tp_locvar, _args); \ + __event_align = __event_get_align__##_name(tp_locvar, _args); \ + lib_ring_buffer_ctx_init(&__ctx, __chan->chan, __event, __event_len, \ __event_align, -1); \ - __ret = __chan->ops->event_reserve(&ctx); \ + __ret = __chan->ops->event_reserve(&__ctx, __event->id); \ if (__ret < 0) \ return; \ - /* Control code (field ordering) */ \ - _tstruct \ - __chan->ops->event_commit(&ctx); \ - return; \ - /* Copy code, steered by control code */ \ - _assign \ + _fields \ + __chan->ops->event_commit(&__ctx); \ } -#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) - - -#if 0 - -#include - -/* - * DECLARE_EVENT_CLASS can be used to add a generic function - * handlers for events. That is, if all events have the same - * parameters and just have distinct trace points. - * Each tracepoint can be defined with DEFINE_EVENT and that - * will map the DECLARE_EVENT_CLASS to the tracepoint. - * - * TRACE_EVENT is a one to one mapping between tracepoint and template. 
- */ -#undef TRACE_EVENT -#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \ - DECLARE_EVENT_CLASS(name, \ - PARAMS(proto), \ - PARAMS(args), \ - PARAMS(tstruct), \ - PARAMS(assign), \ - PARAMS(print)); \ - DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args)); - - -#undef __field -#define __field(type, item) type item; - -#undef __field_ext -#define __field_ext(type, item, filter_type) type item; - -#undef __array -#define __array(type, item, len) type item[len]; - -#undef __dynamic_array -#define __dynamic_array(type, item, len) u32 __data_loc_##item; - -#undef __string -#define __string(item, src) __dynamic_array(char, item, -1) - -#undef TP_STRUCT__entry -#define TP_STRUCT__entry(args...) args - -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \ - struct ftrace_raw_##name { \ - struct trace_entry ent; \ - tstruct \ - char __data[0]; \ - }; \ - \ - static struct ftrace_event_class event_class_##name; - -#undef DEFINE_EVENT -#define DEFINE_EVENT(template, name, proto, args) \ - static struct ftrace_event_call __used \ - __attribute__((__aligned__(4))) event_##name - -#undef DEFINE_EVENT_PRINT -#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ - DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) - -/* Callbacks are meaningless to ftrace. */ -#undef TRACE_EVENT_FN -#define TRACE_EVENT_FN(name, proto, args, tstruct, \ - assign, print, reg, unreg) \ - TRACE_EVENT(name, PARAMS(proto), PARAMS(args), \ - PARAMS(tstruct), PARAMS(assign), PARAMS(print)) \ +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, _fields) \ +static void __event_probe__##_name(void *__data) \ +{ \ + struct probe_local_vars { _locvar }; \ + struct lttng_event *__event = __data; \ + struct lttng_channel *__chan = __event->chan; \ + struct lttng_session *__session = __chan->session; \ + struct lib_ring_buffer_ctx __ctx; \ + size_t __event_len, __event_align; \ + size_t __dynamic_len_idx __attribute__((unused)) = 0; \ + union { \ + size_t __dynamic_len[ARRAY_SIZE(__event_fields___##_name)]; \ + char __filter_stack_data[2 * sizeof(unsigned long) * ARRAY_SIZE(__event_fields___##_name)]; \ + } __stackvar; \ + int __ret; \ + struct probe_local_vars __tp_locvar; \ + struct probe_local_vars *tp_locvar __attribute__((unused)) = \ + &__tp_locvar; \ + struct lttng_pid_tracker *__lpf; \ + \ + if (!_TP_SESSION_CHECK(session, __session)) \ + return; \ + if (unlikely(!ACCESS_ONCE(__session->active))) \ + return; \ + if (unlikely(!ACCESS_ONCE(__chan->enabled))) \ + return; \ + if (unlikely(!ACCESS_ONCE(__event->enabled))) \ + return; \ + __lpf = lttng_rcu_dereference(__session->pid_tracker); \ + if (__lpf && likely(!lttng_pid_tracker_lookup(__lpf, current->pid))) \ + return; \ + _code \ + if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \ + struct lttng_bytecode_runtime *bc_runtime; \ + int __filter_record = __event->has_enablers_without_bytecode; \ + \ + __event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \ + tp_locvar); \ + lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \ + if (unlikely(bc_runtime->filter(bc_runtime, \ + __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \ + __filter_record = 1; \ + } \ + if (likely(!__filter_record)) \ + return; \ + } \ + __event_len = __event_get_size__##_name(__stackvar.__dynamic_len, tp_locvar); \ + __event_align = __event_get_align__##_name(tp_locvar); \ + 
lib_ring_buffer_ctx_init(&__ctx, __chan->chan, __event, __event_len, \ + __event_align, -1); \ + __ret = __chan->ops->event_reserve(&__ctx, __event->id); \ + if (__ret < 0) \ + return; \ + _fields \ + __chan->ops->event_commit(&__ctx); \ +} #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) +#undef __get_dynamic_len /* - * Stage 2 of the trace events. + * Stage 7 of the trace events. * - * Create static inline function that calculates event size. + * Create event descriptions. */ -#undef __field -#define __field(type, item) - -#undef __field_ext -#define __field_ext(type, item, filter_type) - -#undef __array -#define __array(type, item, len) - -#undef __dynamic_array -#define __dynamic_array(type, item, len) u32 item; - -#undef __string -#define __string(item, src) __dynamic_array(char, item, -1) - -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ - struct ftrace_data_offsets_##call { \ - tstruct; \ - }; +/* Named field types must be defined in lttng-types.h */ -#undef DEFINE_EVENT -#define DEFINE_EVENT(template, name, proto, args) +#include "lttng-events-reset.h" /* Reset all macros within LTTNG_TRACEPOINT_EVENT */ + +#ifndef TP_PROBE_CB +#define TP_PROBE_CB(_template) &__event_probe__##_template +#endif + +#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS +#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \ +static const struct lttng_event_desc __event_desc___##_map = { \ + .fields = __event_fields___##_template, \ + .name = #_map, \ + .kname = #_name, \ + .probe_callback = (void *) TP_PROBE_CB(_template), \ + .nr_fields = ARRAY_SIZE(__event_fields___##_template), \ + .owner = THIS_MODULE, \ +}; -#undef DEFINE_EVENT_PRINT -#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ - DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) +#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP +#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \ + LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) /* - * Stage 3 of the trace events. + * Stage 8 of the trace events. * - * Create the probe function : call even size calculation and write event data - * into the buffer. + * Create an array of event description pointers. */ -#undef __entry -#define __entry field - -#undef TP_printk -#define TP_printk(fmt, args...) fmt "\n", args - -#undef __get_dynamic_array -#define __get_dynamic_array(field) \ - ((void *)__entry + (__entry->__data_loc_##field & 0xffff)) - -#undef __get_str -#define __get_str(field) (char *)__get_dynamic_array(field) - -#undef __print_flags -#define __print_flags(flag, delim, flag_array...) \ - ({ \ - static const struct trace_print_flags __flags[] = \ - { flag_array, { -1, NULL }}; \ - ftrace_print_flags_seq(p, delim, flag, __flags); \ - }) - -#undef __print_symbolic -#define __print_symbolic(value, symbol_array...) 
\ - ({ \ - static const struct trace_print_flags symbols[] = \ - { symbol_array, { -1, NULL }}; \ - ftrace_print_symbols_seq(p, value, symbols); \ - }) - -#undef __print_hex -#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len) - -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ -static notrace enum print_line_t \ -ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ - struct trace_event *trace_event) \ -{ \ - struct ftrace_event_call *event; \ - struct trace_seq *s = &iter->seq; \ - struct ftrace_raw_##call *field; \ - struct trace_entry *entry; \ - struct trace_seq *p = &iter->tmp_seq; \ - int ret; \ - \ - event = container_of(trace_event, struct ftrace_event_call, \ - event); \ - \ - entry = iter->ent; \ - \ - if (entry->type != event->event.type) { \ - WARN_ON_ONCE(1); \ - return TRACE_TYPE_UNHANDLED; \ - } \ - \ - field = (typeof(field))entry; \ - \ - trace_seq_init(p); \ - ret = trace_seq_printf(s, "%s: ", event->name); \ - if (ret) \ - ret = trace_seq_printf(s, print); \ - if (!ret) \ - return TRACE_TYPE_PARTIAL_LINE; \ - \ - return TRACE_TYPE_HANDLED; \ -} \ -static struct trace_event_functions ftrace_event_type_funcs_##call = { \ - .trace = ftrace_raw_output_##call, \ -}; - -#undef DEFINE_EVENT_PRINT -#define DEFINE_EVENT_PRINT(template, call, proto, args, print) \ -static notrace enum print_line_t \ -ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ - struct trace_event *event) \ -{ \ - struct trace_seq *s = &iter->seq; \ - struct ftrace_raw_##template *field; \ - struct trace_entry *entry; \ - struct trace_seq *p = &iter->tmp_seq; \ - int ret; \ - \ - entry = iter->ent; \ - \ - if (entry->type != event_##call.event.type) { \ - WARN_ON_ONCE(1); \ - return TRACE_TYPE_UNHANDLED; \ - } \ - \ - field = (typeof(field))entry; \ - \ - trace_seq_init(p); \ - ret = trace_seq_printf(s, "%s: ", #call); \ - if (ret) \ - ret = trace_seq_printf(s, print); \ - if (!ret) \ - return TRACE_TYPE_PARTIAL_LINE; \ - \ - return TRACE_TYPE_HANDLED; \ -} \ -static struct trace_event_functions ftrace_event_type_funcs_##call = { \ - .trace = ftrace_raw_output_##call, \ -}; - -#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) +#include "lttng-events-reset.h" /* Reset all macros within LTTNG_TRACEPOINT_EVENT */ -#undef __field_ext -#define __field_ext(type, item, filter_type) \ - ret = trace_define_field(event_call, #type, #item, \ - offsetof(typeof(field), item), \ - sizeof(field.item), \ - is_signed_type(type), filter_type); \ - if (ret) \ - return ret; - -#undef __field -#define __field(type, item) __field_ext(type, item, FILTER_OTHER) - -#undef __array -#define __array(type, item, len) \ - BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \ - ret = trace_define_field(event_call, #type "[" #len "]", #item, \ - offsetof(typeof(field), item), \ - sizeof(field.item), \ - is_signed_type(type), FILTER_OTHER); \ - if (ret) \ - return ret; - -#undef __dynamic_array -#define __dynamic_array(type, item, len) \ - ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \ - offsetof(typeof(field), __data_loc_##item), \ - sizeof(field.__data_loc_##item), \ - is_signed_type(type), FILTER_OTHER); - -#undef __string -#define __string(item, src) __dynamic_array(char, item, -1) - -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \ -static int notrace \ -ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ -{ \ - struct ftrace_raw_##call field; \ - int ret; \ - \ - 
tstruct; \ - \ - return ret; \ -} +#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS +#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \ + &__event_desc___##_map, -#undef DEFINE_EVENT -#define DEFINE_EVENT(template, name, proto, args) +#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP +#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \ + LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) -#undef DEFINE_EVENT_PRINT -#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ - DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) +#define TP_ID1(_token, _system) _token##_system +#define TP_ID(_token, _system) TP_ID1(_token, _system) +static const struct lttng_event_desc *TP_ID(__event_desc___, TRACE_SYSTEM)[] = { #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) +}; -/* - * remember the offset of each array from the beginning of the event. - */ - -#undef __entry -#define __entry entry - -#undef __field -#define __field(type, item) - -#undef __field_ext -#define __field_ext(type, item, filter_type) - -#undef __array -#define __array(type, item, len) - -#undef __dynamic_array -#define __dynamic_array(type, item, len) \ - __data_offsets->item = __data_size + \ - offsetof(typeof(*entry), __data); \ - __data_offsets->item |= (len * sizeof(type)) << 16; \ - __data_size += (len) * sizeof(type); - -#undef __string -#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) - -#undef DECLARE_EVENT_CLASS -#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ -static inline notrace int ftrace_get_offsets_##call( \ - struct ftrace_data_offsets_##call *__data_offsets, proto) \ -{ \ - int __data_size = 0; \ - struct ftrace_raw_##call __maybe_unused *entry; \ - \ - tstruct; \ - \ - return __data_size; \ -} - -#undef DEFINE_EVENT -#define DEFINE_EVENT(template, name, proto, args) - -#undef DEFINE_EVENT_PRINT -#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \ - DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args)) - -#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) +#undef TP_ID1 +#undef TP_ID /* - * Stage 4 of the trace events. - * - * Override the macros in to include the following: - * - * For those macros defined with TRACE_EVENT: - * - * static struct ftrace_event_call event_; - * - * static void ftrace_raw_event_(void *__data, proto) - * { - * struct ftrace_event_call *event_call = __data; - * struct ftrace_data_offsets_ __maybe_unused __data_offsets; - * struct ring_buffer_event *event; - * struct ftrace_raw_ *entry; <-- defined in stage 1 - * struct ring_buffer *buffer; - * unsigned long irq_flags; - * int __data_size; - * int pc; - * - * local_save_flags(irq_flags); - * pc = preempt_count(); - * - * __data_size = ftrace_get_offsets_(&__data_offsets, args); - * - * event = trace_current_buffer_lock_reserve(&buffer, - * event_->event.type, - * sizeof(*entry) + __data_size, - * irq_flags, pc); - * if (!event) - * return; - * entry = ring_buffer_event_data(event); - * - * { ; } <-- Here we assign the entries by the __field and - * __array macros. - * - * if (!filter_current_check_discard(buffer, event_call, entry, event)) - * trace_current_buffer_unlock_commit(buffer, - * event, irq_flags, pc); - * } - * - * static struct trace_event ftrace_event_type_ = { - * .trace = ftrace_raw_output_, <-- stage 2 - * }; - * - * static const char print_fmt_[] = ; + * Stage 9 of the trace events. * - * static struct ftrace_event_class __used event_class_
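
For reference, a minimal sketch (not part of this diff) of how an instrumentation header pulled in through TRACE_INCLUDE() would use the LTTNG_TRACEPOINT_EVENT macros that this patch introduces. The event name, arguments and the ctf_integer()/ctf_string() field helpers below are illustrative assumptions; the ctf_* helpers are expected to map onto the _ctf_*_ext writers defined above via lttng-events-write.h, but their exact spelling is not shown in this diff.

/*
 * Hypothetical instrumentation header, included via TRACE_INCLUDE().
 * Probes the (assumed) kernel tracepoint my_subsys_my_event.
 */
LTTNG_TRACEPOINT_EVENT(my_subsys_my_event,

	/* Prototype and arguments of the kernel tracepoint being probed. */
	TP_PROTO(int cpu_id, const char *comm),

	TP_ARGS(cpu_id, comm),

	/* CTF payload layout: one field helper per emitted field. */
	TP_FIELDS(
		ctf_integer(int, cpu_id, cpu_id)
		ctf_string(comm, comm)
	)
)

/*
 * When the name recorded in the trace must differ from the kernel
 * tracepoint name, the _MAP variant takes both names, as set up by
 * LTTNG_TRACEPOINT_EVENT_MAP at the top of this header.
 */
LTTNG_TRACEPOINT_EVENT_MAP(my_kernel_event,	/* kernel tracepoint name */
	my_subsys_my_kernel_event,		/* event name emitted in the trace */
	TP_PROTO(unsigned long addr),
	TP_ARGS(addr),
	TP_FIELDS(
		ctf_integer(unsigned long, addr, addr)
	)
)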