X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=probes%2Flttng-events.h;h=6d8c71134dccbfc9842f9d9259baccb5856b596e;hb=79150a4903b5f31695fcd1d9655555ba6dc4bfa4;hp=ce4c3f125a43814251f9a3c26434973898e497a6;hpb=57ede728166a5a18a8cd9e70cb51e09d948f84a4;p=lttng-modules.git diff --git a/probes/lttng-events.h b/probes/lttng-events.h index ce4c3f12..6d8c7113 100644 --- a/probes/lttng-events.h +++ b/probes/lttng-events.h @@ -22,11 +22,13 @@ #include #include #include +#include #include "lttng.h" #include "lttng-types.h" #include "lttng-probe-user.h" #include "../wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */ #include "../wrapper/ringbuffer/frontend_types.h" +#include "../wrapper/rcu.h" #include "../lttng-events.h" #include "../lttng-tracer-core.h" @@ -53,23 +55,25 @@ LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(map, name, map) #undef LTTNG_TRACEPOINT_EVENT_CODE_MAP -#define LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, map, proto, args, _locvar, _code, fields) \ +#define LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, map, proto, args, _locvar, _code_pre, fields, _code_post) \ LTTNG_TRACEPOINT_EVENT_CLASS_CODE(map, \ PARAMS(proto), \ PARAMS(args), \ PARAMS(_locvar), \ - PARAMS(_code), \ - PARAMS(fields)) \ + PARAMS(_code_pre), \ + PARAMS(fields), \ + PARAMS(_code_post)) \ LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(map, name, map, PARAMS(proto), PARAMS(args)) #undef LTTNG_TRACEPOINT_EVENT_CODE -#define LTTNG_TRACEPOINT_EVENT_CODE(name, proto, args, _locvar, _code, fields) \ +#define LTTNG_TRACEPOINT_EVENT_CODE(name, proto, args, _locvar, _code_pre, fields, _code_post) \ LTTNG_TRACEPOINT_EVENT_CODE_MAP(name, name, \ PARAMS(proto), \ PARAMS(args), \ PARAMS(_locvar), \ - PARAMS(_code), \ - PARAMS(fields)) + PARAMS(_code_pre), \ + PARAMS(fields), \ + PARAMS(_code_post)) /* * LTTNG_TRACEPOINT_EVENT_CLASS can be used to add a generic function @@ -104,11 +108,11 @@ #undef LTTNG_TRACEPOINT_EVENT_CLASS #define LTTNG_TRACEPOINT_EVENT_CLASS(_name, _proto, _args, _fields) \ LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, PARAMS(_proto), PARAMS(_args), , , \ - PARAMS(_fields)) + PARAMS(_fields), ) #undef LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS #define LTTNG_TRACEPOINT_EVENT_CLASS_NOARGS(_name, _fields) \ - LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, , , PARAMS(_fields)) + LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, , , PARAMS(_fields), ) /* @@ -134,7 +138,41 @@ void trace_##_name(_proto); #undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS #define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \ -void trace_##_name(void *__data); +void trace_##_name(void); + +#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) + +/* + * Stage 1.1 of the trace events. + * + * Create dummy trace prototypes for each event class, and for each used + * template. This will allow checking whether the prototypes from the + * class and the instance using the class actually match. + */ + +#include "lttng-events-reset.h" /* Reset all macros within TRACE_EVENT */ + +#undef TP_PROTO +#define TP_PROTO(...) __VA_ARGS__ + +#undef TP_ARGS +#define TP_ARGS(...) 
__VA_ARGS__ + +#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP +#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP(_template, _name, _map, _proto, _args) \ +void __event_template_proto___##_template(_proto); + +#undef LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS +#define LTTNG_TRACEPOINT_EVENT_INSTANCE_MAP_NOARGS(_template, _name, _map) \ +void __event_template_proto___##_template(void); + +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \ +void __event_template_proto___##_name(_proto); + +#undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \ +void __event_template_proto___##_name(void); #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) @@ -154,7 +192,7 @@ void trace_##_name(void *__data); #define _ctf_integer_ext(_type, _item, _src, _byte_order, _base, _user, _nowrite) \ { \ .name = #_item, \ - .type = __type_integer(_type, _byte_order, _base, none),\ + .type = __type_integer(_type, 0, 0, -1, _byte_order, _base, none),\ .nowrite = _nowrite, \ .user = _user, \ }, @@ -170,7 +208,7 @@ void trace_##_name(void *__data); { \ .array = \ { \ - .elem_type = __type_integer(_type, __BYTE_ORDER, 10, _encoding), \ + .elem_type = __type_integer(_type, 0, 0, 0, __BYTE_ORDER, 10, _encoding), \ .length = _length, \ } \ } \ @@ -179,6 +217,28 @@ void trace_##_name(void *__data); .user = _user, \ }, +#undef _ctf_array_bitfield +#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \ + { \ + .name = #_item, \ + .type = \ + { \ + .atype = atype_array, \ + .u = \ + { \ + .array = \ + { \ + .elem_type = __type_integer(_type, 1, 1, 0, __LITTLE_ENDIAN, 10, none), \ + .length = (_length) * sizeof(_type) * CHAR_BIT, \ + .elem_alignment = lttng_alignof(_type), \ + } \ + } \ + }, \ + .nowrite = _nowrite, \ + .user = _user, \ + }, + + #undef _ctf_sequence_encoded #define _ctf_sequence_encoded(_type, _item, _src, \ _length_type, _src_length, _encoding, \ @@ -192,8 +252,31 @@ void trace_##_name(void *__data); { \ .sequence = \ { \ - .length_type = __type_integer(_length_type, __BYTE_ORDER, 10, none), \ - .elem_type = __type_integer(_type, _byte_order, _base, _encoding), \ + .length_type = __type_integer(_length_type, 0, 0, 0, __BYTE_ORDER, 10, none), \ + .elem_type = __type_integer(_type, 0, 0, -1, _byte_order, _base, _encoding), \ + }, \ + }, \ + }, \ + .nowrite = _nowrite, \ + .user = _user, \ + }, + +#undef _ctf_sequence_bitfield +#define _ctf_sequence_bitfield(_type, _item, _src, \ + _length_type, _src_length, \ + _user, _nowrite) \ + { \ + .name = #_item, \ + .type = \ + { \ + .atype = atype_sequence, \ + .u = \ + { \ + .sequence = \ + { \ + .length_type = __type_integer(_length_type, 0, 0, 0, __BYTE_ORDER, 10, none), \ + .elem_type = __type_integer(_type, 1, 1, 0, __LITTLE_ENDIAN, 10, none), \ + .elem_alignment = lttng_alignof(_type), \ }, \ }, \ }, \ @@ -221,14 +304,14 @@ void trace_##_name(void *__data); #define TP_FIELDS(...) 
__VA_ARGS__ /* Only one used in this phase */ #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS -#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, _fields) \ +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \ static const struct lttng_event_field __event_fields___##_name[] = { \ _fields \ }; #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE -#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code, _fields) \ - LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, PARAMS(_fields)) +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \ + LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, PARAMS(_fields), _code_post) #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) @@ -245,11 +328,11 @@ void trace_##_name(void *__data); #define TP_PROTO(...) __VA_ARGS__ #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE -#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code, _fields) \ +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \ static void __event_probe__##_name(void *__data, _proto); #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS -#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, _fields) \ +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \ static void __event_probe__##_name(void *__data); #include TRACE_INCLUDE(TRACE_INCLUDE_FILE) @@ -274,6 +357,10 @@ static void __event_probe__##_name(void *__data); __event_len += lib_ring_buffer_align(__event_len, lttng_alignof(_type)); \ __event_len += sizeof(_type) * (_length); +#undef _ctf_array_bitfield +#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \ + _ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite) + #undef _ctf_sequence_encoded #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \ _src_length, _encoding, _byte_order, _base, _user, _nowrite) \ @@ -284,6 +371,13 @@ static void __event_probe__##_name(void *__data); __event_len += sizeof(_type) * __dynamic_len[__dynamic_len_idx]; \ __dynamic_len_idx++; +#undef _ctf_sequence_bitfield +#define _ctf_sequence_bitfield(_type, _item, _src, \ + _length_type, _src_length, \ + _user, _nowrite) \ + _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \ + none, __LITTLE_ENDIAN, 10, _user, _nowrite) + /* * ctf_user_string includes \0. If returns 0, it faulted, so we set size to * 1 (\0 only). @@ -292,10 +386,10 @@ static void __event_probe__##_name(void *__data); #define _ctf_string(_item, _src, _user, _nowrite) \ if (_user) \ __event_len += __dynamic_len[__dynamic_len_idx++] = \ - strlen(_src) + 1; \ + max_t(size_t, lttng_strlen_user_inatomic(_src), 1); \ else \ __event_len += __dynamic_len[__dynamic_len_idx++] = \ - max_t(size_t, lttng_strlen_user_inatomic(_src), 1); + strlen(_src) + 1; #undef TP_PROTO #define TP_PROTO(...) __VA_ARGS__ @@ -307,7 +401,7 @@ static void __event_probe__##_name(void *__data); #define TP_locvar(...) 
__VA_ARGS__ #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE -#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code, _fields) \ +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \ static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \ void *__tp_locvar, _proto) \ { \ @@ -320,7 +414,7 @@ static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \ } #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS -#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, _fields) \ +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \ static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \ void *__tp_locvar) \ { \ @@ -438,10 +532,14 @@ static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \ const void *__ctf_tmp_ptr = (_src); \ memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \ __stack_data += sizeof(unsigned long); \ - memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void **)); \ - __stack_data += sizeof(void **); \ + memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \ + __stack_data += sizeof(void *); \ } +#undef _ctf_array_bitfield +#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \ + _ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite) + #undef _ctf_sequence_encoded #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \ _src_length, _encoding, _byte_order, _base, _user, _nowrite) \ @@ -450,16 +548,23 @@ static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \ const void *__ctf_tmp_ptr = (_src); \ memcpy(__stack_data, &__ctf_tmp_ulong, sizeof(unsigned long)); \ __stack_data += sizeof(unsigned long); \ - memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void **)); \ - __stack_data += sizeof(void **); \ + memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \ + __stack_data += sizeof(void *); \ } +#undef _ctf_sequence_bitfield +#define _ctf_sequence_bitfield(_type, _item, _src, \ + _length_type, _src_length, \ + _user, _nowrite) \ + _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \ + none, __LITTLE_ENDIAN, 10, _user, _nowrite) + #undef _ctf_string #define _ctf_string(_item, _src, _user, _nowrite) \ { \ const void *__ctf_tmp_ptr = (_src); \ - memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void **)); \ - __stack_data += sizeof(void **); \ + memcpy(__stack_data, &__ctf_tmp_ptr, sizeof(void *)); \ + __stack_data += sizeof(void *); \ } #undef TP_PROTO @@ -472,7 +577,7 @@ static inline size_t __event_get_size__##_name(size_t *__dynamic_len, \ #define TP_locvar(...) 
__VA_ARGS__ #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS -#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, _fields) \ +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \ static inline \ void __event_prepare_filter_stack__##_name(char *__stack_data, \ void *__tp_locvar) \ @@ -483,7 +588,7 @@ void __event_prepare_filter_stack__##_name(char *__stack_data, \ } #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE -#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code, _fields) \ +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \ static inline \ void __event_prepare_filter_stack__##_name(char *__stack_data, \ void *__tp_locvar, _proto) \ @@ -513,12 +618,23 @@ void __event_prepare_filter_stack__##_name(char *__stack_data, \ #define _ctf_array_encoded(_type, _item, _src, _length, _encoding, _user, _nowrite) \ __event_align = max_t(size_t, __event_align, lttng_alignof(_type)); +#undef _ctf_array_bitfield +#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \ + _ctf_array_encoded(_type, _item, _src, _length, none, _user, _nowrite) + #undef _ctf_sequence_encoded #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \ _src_length, _encoding, _byte_order, _base, _user, _nowrite) \ __event_align = max_t(size_t, __event_align, lttng_alignof(_length_type)); \ __event_align = max_t(size_t, __event_align, lttng_alignof(_type)); +#undef _ctf_sequence_bitfield +#define _ctf_sequence_bitfield(_type, _item, _src, \ + _length_type, _src_length, \ + _user, _nowrite) \ + _ctf_sequence_encoded(_type, _item, _src, _length_type, _src_length, \ + none, __LITTLE_ENDIAN, 10, _user, _nowrite) + #undef _ctf_string #define _ctf_string(_item, _src, _user, _nowrite) @@ -532,7 +648,7 @@ void __event_prepare_filter_stack__##_name(char *__stack_data, \ #define TP_locvar(...) __VA_ARGS__ #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE -#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code, _fields) \ +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \ static inline size_t __event_get_align__##_name(void *__tp_locvar, _proto) \ { \ size_t __event_align = 1; \ @@ -543,7 +659,7 @@ static inline size_t __event_get_align__##_name(void *__tp_locvar, _proto) \ } #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS -#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, _fields) \ +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \ static inline size_t __event_get_align__##_name(void *__tp_locvar) \ { \ size_t __event_align = 1; \ @@ -600,6 +716,54 @@ static inline size_t __event_get_align__##_name(void *__tp_locvar) \ __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \ } +#if (__BYTE_ORDER == __LITTLE_ENDIAN) +#undef _ctf_array_bitfield +#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \ + if (_user) { \ + __chan->ops->event_write_from_user(&__ctx, _src, sizeof(_type) * (_length)); \ + } else { \ + __chan->ops->event_write(&__ctx, _src, sizeof(_type) * (_length)); \ + } +#else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */ +/* + * For big endian, we need to byteswap into little endian. 
+ */ +#undef _ctf_array_bitfield +#define _ctf_array_bitfield(_type, _item, _src, _length, _user, _nowrite) \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \ + { \ + size_t _i; \ + \ + for (_i = 0; _i < (_length); _i++) { \ + _type _tmp; \ + \ + if (_user) { \ + if (get_user(_tmp, (_type *) _src + _i)) \ + _tmp = 0; \ + } else { \ + _tmp = ((_type *) _src)[_i]; \ + } \ + switch (sizeof(_type)) { \ + case 1: \ + break; \ + case 2: \ + _tmp = cpu_to_le16(_tmp); \ + break; \ + case 4: \ + _tmp = cpu_to_le32(_tmp); \ + break; \ + case 8: \ + _tmp = cpu_to_le64(_tmp); \ + break; \ + default: \ + BUG_ON(1); \ + } \ + __chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \ + } \ + } +#endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */ + #undef _ctf_sequence_encoded #define _ctf_sequence_encoded(_type, _item, _src, _length_type, \ _src_length, _encoding, _byte_order, _base, _user, _nowrite) \ @@ -617,6 +781,71 @@ static inline size_t __event_get_align__##_name(void *__tp_locvar) \ sizeof(_type) * __get_dynamic_len(dest)); \ } +#if (__BYTE_ORDER == __LITTLE_ENDIAN) +#undef _ctf_sequence_bitfield +#define _ctf_sequence_bitfield(_type, _item, _src, \ + _length_type, _src_length, \ + _user, _nowrite) \ + { \ + _length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\ + __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\ + } \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \ + if (_user) { \ + __chan->ops->event_write_from_user(&__ctx, _src, \ + sizeof(_type) * __get_dynamic_len(dest)); \ + } else { \ + __chan->ops->event_write(&__ctx, _src, \ + sizeof(_type) * __get_dynamic_len(dest)); \ + } +#else /* #if (__BYTE_ORDER == __LITTLE_ENDIAN) */ +/* + * For big endian, we need to byteswap into little endian. + */ +#undef _ctf_sequence_bitfield +#define _ctf_sequence_bitfield(_type, _item, _src, \ + _length_type, _src_length, \ + _user, _nowrite) \ + { \ + _length_type __tmpl = __stackvar.__dynamic_len[__dynamic_len_idx] * sizeof(_type) * CHAR_BIT; \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_length_type));\ + __chan->ops->event_write(&__ctx, &__tmpl, sizeof(_length_type));\ + } \ + lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(_type)); \ + { \ + size_t _i, _length; \ + \ + _length = __get_dynamic_len(dest); \ + for (_i = 0; _i < _length; _i++) { \ + _type _tmp; \ + \ + if (_user) { \ + if (get_user(_tmp, (_type *) _src + _i)) \ + _tmp = 0; \ + } else { \ + _tmp = ((_type *) _src)[_i]; \ + } \ + switch (sizeof(_type)) { \ + case 1: \ + break; \ + case 2: \ + _tmp = cpu_to_le16(_tmp); \ + break; \ + case 4: \ + _tmp = cpu_to_le32(_tmp); \ + break; \ + case 8: \ + _tmp = cpu_to_le64(_tmp); \ + break; \ + default: \ + BUG_ON(1); \ + } \ + __chan->ops->event_write(&__ctx, &_tmp, sizeof(_type)); \ + } \ + } +#endif /* #else #if (__BYTE_ORDER == __LITTLE_ENDIAN) */ + #undef _ctf_string #define _ctf_string(_item, _src, _user, _nowrite) \ lib_ring_buffer_align_ctx(&__ctx, lttng_alignof(*(_src))); \ @@ -644,8 +873,11 @@ static inline size_t __event_get_align__##_name(void *__tp_locvar) \ #undef TP_locvar #define TP_locvar(...) __VA_ARGS__ -#undef TP_code -#define TP_code(...) __VA_ARGS__ +#undef TP_code_pre +#define TP_code_pre(...) __VA_ARGS__ + +#undef TP_code_post +#define TP_code_post(...) 
__VA_ARGS__ /* * For state dump, check that "session" argument (mandatory) matches the @@ -666,11 +898,15 @@ static inline size_t __event_get_align__##_name(void *__tp_locvar) \ * Perform UNION (||) of filter runtime list. */ #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE -#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code, _fields) \ +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE(_name, _proto, _args, _locvar, _code_pre, _fields, _code_post) \ static void __event_probe__##_name(void *__data, _proto) \ { \ struct probe_local_vars { _locvar }; \ struct lttng_event *__event = __data; \ + struct lttng_probe_ctx __lttng_probe_ctx = { \ + .event = __event, \ + .interruptible = irqs_disabled(), \ + }; \ struct lttng_channel *__chan = __event->chan; \ struct lttng_session *__session = __chan->session; \ struct lib_ring_buffer_ctx __ctx; \ @@ -694,42 +930,49 @@ static void __event_probe__##_name(void *__data, _proto) \ return; \ if (unlikely(!ACCESS_ONCE(__event->enabled))) \ return; \ - __lpf = rcu_dereference(__session->pid_tracker); \ + __lpf = lttng_rcu_dereference(__session->pid_tracker); \ if (__lpf && likely(!lttng_pid_tracker_lookup(__lpf, current->pid))) \ return; \ - _code \ + _code_pre \ if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \ struct lttng_bytecode_runtime *bc_runtime; \ int __filter_record = __event->has_enablers_without_bytecode; \ \ __event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \ tp_locvar, _args); \ - list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \ - if (unlikely(bc_runtime->filter(bc_runtime, \ + lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \ + if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \ __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \ __filter_record = 1; \ } \ if (likely(!__filter_record)) \ - return; \ + goto __post; \ } \ __event_len = __event_get_size__##_name(__stackvar.__dynamic_len, \ tp_locvar, _args); \ __event_align = __event_get_align__##_name(tp_locvar, _args); \ - lib_ring_buffer_ctx_init(&__ctx, __chan->chan, __event, __event_len, \ + lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \ __event_align, -1); \ __ret = __chan->ops->event_reserve(&__ctx, __event->id); \ if (__ret < 0) \ - return; \ + goto __post; \ _fields \ __chan->ops->event_commit(&__ctx); \ +__post: \ + _code_post \ + return; \ } #undef LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS -#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code, _fields) \ +#define LTTNG_TRACEPOINT_EVENT_CLASS_CODE_NOARGS(_name, _locvar, _code_pre, _fields, _code_post) \ static void __event_probe__##_name(void *__data) \ { \ struct probe_local_vars { _locvar }; \ struct lttng_event *__event = __data; \ + struct lttng_probe_ctx __lttng_probe_ctx = { \ + .event = __event, \ + .interruptible = irqs_disabled(), \ + }; \ struct lttng_channel *__chan = __event->chan; \ struct lttng_session *__session = __chan->session; \ struct lib_ring_buffer_ctx __ctx; \ @@ -753,33 +996,36 @@ static void __event_probe__##_name(void *__data) \ return; \ if (unlikely(!ACCESS_ONCE(__event->enabled))) \ return; \ - __lpf = rcu_dereference(__session->pid_tracker); \ + __lpf = lttng_rcu_dereference(__session->pid_tracker); \ if (__lpf && likely(!lttng_pid_tracker_lookup(__lpf, current->pid))) \ return; \ - _code \ + _code_pre \ if (unlikely(!list_empty(&__event->bytecode_runtime_head))) { \ struct lttng_bytecode_runtime *bc_runtime; \ int 
__filter_record = __event->has_enablers_without_bytecode; \ \ __event_prepare_filter_stack__##_name(__stackvar.__filter_stack_data, \ tp_locvar); \ - list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \ - if (unlikely(bc_runtime->filter(bc_runtime, \ + lttng_list_for_each_entry_rcu(bc_runtime, &__event->bytecode_runtime_head, node) { \ + if (unlikely(bc_runtime->filter(bc_runtime, &__lttng_probe_ctx, \ __stackvar.__filter_stack_data) & LTTNG_FILTER_RECORD_FLAG)) \ __filter_record = 1; \ } \ if (likely(!__filter_record)) \ - return; \ + goto __post; \ } \ __event_len = __event_get_size__##_name(__stackvar.__dynamic_len, tp_locvar); \ __event_align = __event_get_align__##_name(tp_locvar); \ - lib_ring_buffer_ctx_init(&__ctx, __chan->chan, __event, __event_len, \ + lib_ring_buffer_ctx_init(&__ctx, __chan->chan, &__lttng_probe_ctx, __event_len, \ __event_align, -1); \ __ret = __chan->ops->event_reserve(&__ctx, __event->id); \ if (__ret < 0) \ - return; \ + goto __post; \ _fields \ __chan->ops->event_commit(&__ctx); \ +__post: \ + _code_post \ + return; \ } #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
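/*
 * Usage sketch (hypothetical, not taken from this patch): an instrumentation
 * header built on the reworked macro signature passes the new TP_code_pre()
 * and TP_code_post() blocks around TP_FIELDS(), in the parameter order given
 * by the LTTNG_TRACEPOINT_EVENT_CODE definition above.  The event name,
 * prototype, local variable and field below are invented for illustration.
 */
LTTNG_TRACEPOINT_EVENT_CODE(example_event,

	TP_PROTO(struct task_struct *p),

	TP_ARGS(p),

	/* Local variables shared by code_pre, the fields and code_post. */
	TP_locvar(
		pid_t example_pid;
	),

	/*
	 * Runs after the session/channel/event enabled checks and the PID
	 * tracker lookup, before filter evaluation, size/alignment
	 * computation and space reservation.
	 */
	TP_code_pre(
		tp_locvar->example_pid = p->pid;
	),

	TP_FIELDS(
		ctf_integer(pid_t, example_pid, tp_locvar->example_pid)
	),

	/*
	 * Runs on the __post path introduced above: after the event is
	 * committed, and also when the filter rejects the record or the
	 * ring-buffer reservation fails.
	 */
	TP_code_post()
)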