X-Git-Url: https://git.lttng.org/?a=blobdiff_plain;f=src%2Flttng-bytecode.c;h=49ecd77a118d619e5298aff9eddeb8ffcfa51dd5;hb=606828e401c405619a0c7249e8c7e3291cc1cb45;hp=79c581c812c54541d24a38909f4d941e8ba9fa86;hpb=80c2a69a8298768c9010052e6c3668914191b9e7;p=lttng-modules.git

diff --git a/src/lttng-bytecode.c b/src/lttng-bytecode.c
index 79c581c8..49ecd77a 100644
--- a/src/lttng-bytecode.c
+++ b/src/lttng-bytecode.c
@@ -11,6 +11,7 @@
 #include
 #include
+#include
 
 static const char *opnames[] = {
 	[ BYTECODE_OP_UNKNOWN ] = "UNKNOWN",
@@ -167,14 +168,14 @@ const char *lttng_bytecode_print_op(enum bytecode_op op)
 }
 
 static
-int apply_field_reloc(const struct lttng_event_desc *event_desc,
+int apply_field_reloc(const struct lttng_kernel_event_desc *event_desc,
 		struct bytecode_runtime *runtime,
 		uint32_t runtime_len,
 		uint32_t reloc_offset,
 		const char *field_name,
 		enum bytecode_op bytecode_op)
 {
-	const struct lttng_event_field *fields, *field = NULL;
+	const struct lttng_kernel_event_field **fields, *field = NULL;
 	unsigned int nr_fields, i;
 	struct load_op *op;
 	uint32_t field_offset = 0;
@@ -189,35 +190,35 @@ int apply_field_reloc(const struct lttng_event_desc *event_desc,
 		return -EINVAL;
 	nr_fields = event_desc->nr_fields;
 	for (i = 0; i < nr_fields; i++) {
-		if (fields[i].nofilter)
+		if (fields[i]->nofilter)
 			continue;
-		if (!strcmp(fields[i].name, field_name)) {
-			field = &fields[i];
+		if (!strcmp(fields[i]->name, field_name)) {
+			field = fields[i];
 			break;
 		}
 		/* compute field offset */
-		switch (fields[i].type.atype) {
-		case atype_integer:
-		case atype_enum_nestable:
+		switch (fields[i]->type->type) {
+		case lttng_kernel_type_integer:
+		case lttng_kernel_type_enum:
 			field_offset += sizeof(int64_t);
 			break;
-		case atype_array_nestable:
-			if (!lttng_is_bytewise_integer(fields[i].type.u.array_nestable.elem_type))
+		case lttng_kernel_type_array:
+			if (!lttng_kernel_type_is_bytewise_integer(lttng_kernel_get_type_array(fields[i]->type)->elem_type))
 				return -EINVAL;
 			field_offset += sizeof(unsigned long);
 			field_offset += sizeof(void *);
 			break;
-		case atype_sequence_nestable:
-			if (!lttng_is_bytewise_integer(fields[i].type.u.sequence_nestable.elem_type))
+		case lttng_kernel_type_sequence:
+			if (!lttng_kernel_type_is_bytewise_integer(lttng_kernel_get_type_sequence(fields[i]->type)->elem_type))
 				return -EINVAL;
 			field_offset += sizeof(unsigned long);
 			field_offset += sizeof(void *);
 			break;
-		case atype_string:
+		case lttng_kernel_type_string:
 			field_offset += sizeof(void *);
 			break;
-		case atype_struct_nestable:	/* Unsupported. */
-		case atype_variant_nestable:	/* Unsupported. */
+		case lttng_kernel_type_struct:	/* Unsupported. */
+		case lttng_kernel_type_variant:	/* Unsupported. */
 		default:
 			return -EINVAL;
 		}
@@ -226,7 +227,7 @@ int apply_field_reloc(const struct lttng_event_desc *event_desc,
 		return -EINVAL;
 
 	/* Check if field offset is too large for 16-bit offset */
-	if (field_offset > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
+	if (field_offset > LTTNG_KERNEL_ABI_FILTER_BYTECODE_MAX_LEN - 1)
 		return -EINVAL;
 
 	/* set type */
@@ -238,26 +239,45 @@ int apply_field_reloc(const struct lttng_event_desc *event_desc,
 		struct field_ref *field_ref;
 
 		field_ref = (struct field_ref *) op->data;
-		switch (field->type.atype) {
-		case atype_integer:
-		case atype_enum_nestable:
+		switch (field->type->type) {
+		case lttng_kernel_type_integer:
+		case lttng_kernel_type_enum:
 			op->op = BYTECODE_OP_LOAD_FIELD_REF_S64;
 			break;
-		case atype_array_nestable:
-		case atype_sequence_nestable:
+		case lttng_kernel_type_array:
+		{
+			const struct lttng_kernel_type_array *array_type = lttng_kernel_get_type_array(field->type);
+			const struct lttng_kernel_type_common *elem_type = array_type->elem_type;
+
+			if (!lttng_kernel_type_is_bytewise_integer(elem_type) || array_type->encoding == lttng_kernel_string_encoding_none)
+				return -EINVAL;
 			if (field->user)
 				op->op = BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE;
 			else
 				op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
 			break;
-		case atype_string:
+		}
+		case lttng_kernel_type_sequence:
+		{
+			const struct lttng_kernel_type_sequence *sequence_type = lttng_kernel_get_type_sequence(field->type);
+			const struct lttng_kernel_type_common *elem_type = sequence_type->elem_type;
+
+			if (!lttng_kernel_type_is_bytewise_integer(elem_type) || sequence_type->encoding == lttng_kernel_string_encoding_none)
+				return -EINVAL;
+			if (field->user)
+				op->op = BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE;
+			else
+				op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
+			break;
+		}
+		case lttng_kernel_type_string:
 			if (field->user)
 				op->op = BYTECODE_OP_LOAD_FIELD_REF_USER_STRING;
 			else
 				op->op = BYTECODE_OP_LOAD_FIELD_REF_STRING;
 			break;
-		case atype_struct_nestable:	/* Unsupported. */
-		case atype_variant_nestable:	/* Unsupported. */
+		case lttng_kernel_type_struct:	/* Unsupported. */
+		case lttng_kernel_type_variant:	/* Unsupported. */
 		default:
 			return -EINVAL;
 		}
@@ -279,18 +299,18 @@ int apply_context_reloc(struct bytecode_runtime *runtime,
 		enum bytecode_op bytecode_op)
 {
 	struct load_op *op;
-	struct lttng_ctx_field *ctx_field;
+	struct lttng_kernel_ctx_field *ctx_field;
 	int idx;
 
 	dbg_printk("Apply context reloc: %u %s\n", reloc_offset, context_name);
 
 	/* Get context index */
-	idx = lttng_get_context_index(lttng_static_ctx, context_name);
+	idx = lttng_kernel_get_context_index(lttng_static_ctx, context_name);
 	if (idx < 0)
 		return -ENOENT;
 
 	/* Check if idx is too large for 16-bit offset */
-	if (idx > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
+	if (idx > LTTNG_KERNEL_ABI_FILTER_BYTECODE_MAX_LEN - 1)
 		return -EINVAL;
 
 	/* Get context return type */
@@ -303,30 +323,40 @@ int apply_context_reloc(struct bytecode_runtime *runtime,
 		struct field_ref *field_ref;
 
 		field_ref = (struct field_ref *) op->data;
-		switch (ctx_field->event_field.type.atype) {
-		case atype_integer:
-		case atype_enum_nestable:
+		switch (ctx_field->event_field->type->type) {
+		case lttng_kernel_type_integer:
+		case lttng_kernel_type_enum:
 			op->op = BYTECODE_OP_GET_CONTEXT_REF_S64;
 			break;
 		/* Sequence and array supported as string */
-		case atype_string:
-			BUG_ON(ctx_field->event_field.user);
+		case lttng_kernel_type_string:
+			BUG_ON(ctx_field->event_field->user);
 			op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
 			break;
-		case atype_array_nestable:
-			if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.array_nestable.elem_type))
+		case lttng_kernel_type_array:
+		{
+			const struct lttng_kernel_type_array *array_type = lttng_kernel_get_type_array(ctx_field->event_field->type);
+			const struct lttng_kernel_type_common *elem_type = array_type->elem_type;
+
+			if (!lttng_kernel_type_is_bytewise_integer(elem_type) || array_type->encoding == lttng_kernel_string_encoding_none)
 				return -EINVAL;
-			BUG_ON(ctx_field->event_field.user);
+			BUG_ON(ctx_field->event_field->user);
 			op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
 			break;
-		case atype_sequence_nestable:
-			if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.sequence_nestable.elem_type))
+		}
+		case lttng_kernel_type_sequence:
+		{
+			const struct lttng_kernel_type_sequence *sequence_type = lttng_kernel_get_type_sequence(ctx_field->event_field->type);
+			const struct lttng_kernel_type_common *elem_type = sequence_type->elem_type;
+
+			if (!lttng_kernel_type_is_bytewise_integer(elem_type) || sequence_type->encoding == lttng_kernel_string_encoding_none)
 				return -EINVAL;
-			BUG_ON(ctx_field->event_field.user);
+			BUG_ON(ctx_field->event_field->user);
 			op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
 			break;
-		case atype_struct_nestable:	/* Unsupported. */
-		case atype_variant_nestable:	/* Unsupported. */
+		}
+		case lttng_kernel_type_struct:	/* Unsupported. */
+		case lttng_kernel_type_variant:	/* Unsupported. */
 		default:
 			return -EINVAL;
 		}
@@ -341,7 +371,7 @@ int apply_context_reloc(struct bytecode_runtime *runtime,
 }
 
 static
-int apply_reloc(const struct lttng_event_desc *event_desc,
+int apply_reloc(const struct lttng_kernel_event_desc *event_desc,
 		struct bytecode_runtime *runtime,
 		uint32_t runtime_len,
 		uint32_t reloc_offset,
@@ -395,9 +425,10 @@ int bytecode_is_linked(struct lttng_bytecode_node *bytecode,
  * bytecode runtime.
  */
 static
-int _lttng_filter_link_bytecode(const struct lttng_event_desc *event_desc,
-		struct lttng_ctx *ctx,
+int link_bytecode(const struct lttng_kernel_event_desc *event_desc,
+		struct lttng_kernel_ctx *ctx,
 		struct lttng_bytecode_node *bytecode,
+		struct list_head *bytecode_runtime_head,
 		struct list_head *insert_loc)
 {
 	int ret, offset, next_offset;
@@ -407,7 +438,7 @@ int _lttng_filter_link_bytecode(const struct lttng_event_desc *event_desc,
 	if (!bytecode)
 		return 0;
 	/* Bytecode already linked */
-	if (bytecode_is_linked(bytecode, insert_loc))
+	if (bytecode_is_linked(bytecode, bytecode_runtime_head))
 		return 0;
 
 	dbg_printk("Linking...\n");
@@ -452,14 +483,35 @@ int _lttng_filter_link_bytecode(const struct lttng_event_desc *event_desc,
 	if (ret) {
 		goto link_error;
 	}
-	runtime->p.filter = lttng_bytecode_filter_interpret;
+
+	switch (bytecode->type) {
+	case LTTNG_BYTECODE_NODE_TYPE_FILTER:
+		runtime->p.interpreter_funcs.filter = lttng_bytecode_filter_interpret;
+		break;
+	case LTTNG_BYTECODE_NODE_TYPE_CAPTURE:
+		runtime->p.interpreter_funcs.capture = lttng_bytecode_capture_interpret_false;
+		break;
+	default:
+		WARN_ON(1);
+	}
+
 	runtime->p.link_failed = 0;
 	list_add_rcu(&runtime->p.node, insert_loc);
 	dbg_printk("Linking successful.\n");
 	return 0;
 
 link_error:
-	runtime->p.filter = lttng_bytecode_filter_interpret_false;
+
+	switch (bytecode->type) {
+	case LTTNG_BYTECODE_NODE_TYPE_FILTER:
+		runtime->p.interpreter_funcs.filter = lttng_bytecode_filter_interpret_false;
+		break;
+	case LTTNG_BYTECODE_NODE_TYPE_CAPTURE:
+		runtime->p.interpreter_funcs.capture = lttng_bytecode_capture_interpret_false;
+		break;
+	default:
+		WARN_ON(1);
+	}
 	runtime->p.link_failed = 1;
 	list_add_rcu(&runtime->p.node, insert_loc);
 alloc_error:
@@ -467,43 +519,64 @@ alloc_error:
 	return ret;
 }
 
-void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime)
+void lttng_bytecode_filter_sync_state(struct lttng_bytecode_runtime *runtime)
 {
 	struct lttng_bytecode_node *bc = runtime->bc;
 
 	if (!bc->enabler->enabled || runtime->link_failed)
-		runtime->filter = lttng_bytecode_filter_interpret_false;
+		runtime->interpreter_funcs.filter = lttng_bytecode_filter_interpret_false;
 	else
-		runtime->filter = lttng_bytecode_filter_interpret;
+		runtime->interpreter_funcs.filter = lttng_bytecode_filter_interpret;
+}
+
+void lttng_bytecode_capture_sync_state(struct lttng_bytecode_runtime *runtime)
+{
+	struct lttng_bytecode_node *bc = runtime->bc;
+
+	if (!bc->enabler->enabled || runtime->link_failed)
+		runtime->interpreter_funcs.capture = lttng_bytecode_capture_interpret_false;
+	else
+		runtime->interpreter_funcs.capture = lttng_bytecode_capture_interpret;
 }
 
 /*
- * Link bytecode for all enablers referenced by an event.
+ * Given the lists of bytecode programs of an instance (event or event
+ * notifier) and of a matching enabler, try to link all the enabler's bytecode
+ * programs with the instance.
+ *
+ * This function is called after we confirmed that the enabler and the
+ * instance have matching names (or glob pattern matching).
  */
-void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc,
-		struct lttng_ctx *ctx,
-		struct list_head *bytecode_runtime_head,
-		struct lttng_enabler *enabler)
+void lttng_enabler_link_bytecode(const struct lttng_kernel_event_desc *event_desc,
+		struct lttng_kernel_ctx *ctx,
+		struct list_head *instance_bytecode_head,
+		struct list_head *enabler_bytecode_head)
 {
-	struct lttng_bytecode_node *bc;
+	struct lttng_bytecode_node *enabler_bc;
 	struct lttng_bytecode_runtime *runtime;
 
-	/* Can only be called for events with desc attached */
 	WARN_ON_ONCE(!event_desc);
-	/* Link each bytecode. */
-	list_for_each_entry(bc, &enabler->filter_bytecode_head, node) {
+
+	/* Go over all the bytecode programs of the enabler. */
+	list_for_each_entry(enabler_bc, enabler_bytecode_head, node) {
 		int found = 0, ret;
 		struct list_head *insert_loc;
 
-		list_for_each_entry(runtime,
-				bytecode_runtime_head, node) {
-			if (runtime->bc == bc) {
+		/*
+		 * Check if the current enabler bytecode program is already
+		 * linked with the instance.
+		 */
+		list_for_each_entry(runtime, instance_bytecode_head, node) {
+			if (runtime->bc == enabler_bc) {
 				found = 1;
 				break;
 			}
 		}
-		/* Skip bytecode already linked */
+
+		/*
+		 * Skip bytecode already linked, go to the next enabler
+		 * bytecode program.
+		 */
 		if (found)
 			continue;
 
@@ -513,19 +586,18 @@ void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc,
 		 * insert the new bytecode right after it.
 		 */
 		list_for_each_entry_reverse(runtime,
-				bytecode_runtime_head, node) {
-			if (runtime->bc->bc.seqnum <= bc->bc.seqnum) {
+				instance_bytecode_head, node) {
+			if (runtime->bc->bc.seqnum <= enabler_bc->bc.seqnum) {
 				/* insert here */
 				insert_loc = &runtime->node;
 				goto add_within;
 			}
 		}
 		/* Add to head to list */
-		insert_loc = bytecode_runtime_head;
+		insert_loc = instance_bytecode_head;
 add_within:
 		dbg_printk("linking bytecode\n");
-		ret = _lttng_filter_link_bytecode(event_desc, ctx, bc,
-				insert_loc);
+		ret = link_bytecode(event_desc, ctx, enabler_bc, instance_bytecode_head, insert_loc);
 		if (ret) {
 			dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
 		}
@@ -562,3 +634,14 @@ void lttng_free_event_filter_runtime(struct lttng_event *event)
 		kfree(runtime);
 	}
 }
+
+void lttng_free_event_notifier_filter_runtime(struct lttng_event_notifier *event_notifier)
+{
+	struct bytecode_runtime *runtime, *tmp;
+
+	list_for_each_entry_safe(runtime, tmp,
+			&event_notifier->filter_bytecode_runtime_head, p.node) {
+		kfree(runtime->data);
+		kfree(runtime);
+	}
+}
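
The central change in the hunks above is that the single runtime->p.filter callback is replaced by an interpreter_funcs member whose filter or capture entry is picked from the bytecode node type, both at link time and in the *_sync_state() helpers. The standalone C sketch below illustrates that selection pattern only; every name in it (bc_runtime, bc_node_type, sync_state, the stub interpreters) is a simplified stand-in for illustration, not an lttng-modules definition.

/*
 * Illustrative sketch: dispatch-by-bytecode-type selection of the
 * interpreter function, modeled loosely on the patch above. All types
 * and functions here are simplified stand-ins.
 */
#include <stdio.h>
#include <stdint.h>

enum bc_node_type { BC_NODE_FILTER, BC_NODE_CAPTURE };

struct bc_runtime;

typedef uint64_t (*filter_fn)(struct bc_runtime *rt, const char *rec);
typedef int (*capture_fn)(struct bc_runtime *rt, const char *rec, void *out);

struct bc_runtime {
	enum bc_node_type type;
	int link_failed;
	union {
		filter_fn filter;	/* returns a record/discard verdict */
		capture_fn capture;	/* fills an output value */
	} interpreter_funcs;
};

/* "False" fallbacks installed when linking failed or the enabler is off. */
static uint64_t filter_false(struct bc_runtime *rt, const char *rec) { return 0; }
static int capture_false(struct bc_runtime *rt, const char *rec, void *out) { return -1; }

/* Real interpreters would run the bytecode; these stubs only report success. */
static uint64_t filter_interpret(struct bc_runtime *rt, const char *rec) { return 1; }
static int capture_interpret(struct bc_runtime *rt, const char *rec, void *out) { return 0; }

/* Mirror of the sync-state idea: pick the interpreter per bytecode type. */
static void sync_state(struct bc_runtime *rt, int enabled)
{
	int use_false = !enabled || rt->link_failed;

	switch (rt->type) {
	case BC_NODE_FILTER:
		rt->interpreter_funcs.filter = use_false ? filter_false : filter_interpret;
		break;
	case BC_NODE_CAPTURE:
		rt->interpreter_funcs.capture = use_false ? capture_false : capture_interpret;
		break;
	}
}

int main(void)
{
	struct bc_runtime rt = { .type = BC_NODE_FILTER, .link_failed = 0 };

	sync_state(&rt, 1);	/* enabler enabled, link succeeded */
	printf("filter verdict: %llu\n",
	       (unsigned long long)rt.interpreter_funcs.filter(&rt, "record"));
	return 0;
}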