#include <linux/slab.h>
#include <lttng/lttng-bytecode.h>
+#include <lttng/events-internal.h>
static const char *opnames[] = {
[ BYTECODE_OP_UNKNOWN ] = "UNKNOWN",
}
static
-int apply_field_reloc(const struct lttng_event_desc *event_desc,
+int apply_field_reloc(const struct lttng_kernel_event_desc *event_desc,
struct bytecode_runtime *runtime,
uint32_t runtime_len,
uint32_t reloc_offset,
const char *field_name,
enum bytecode_op bytecode_op)
{
- const struct lttng_event_field *fields, *field = NULL;
+ const struct lttng_kernel_event_field * const *fields, *field = NULL;
unsigned int nr_fields, i;
struct load_op *op;
uint32_t field_offset = 0;
/* Lookup event by name */
if (!event_desc)
return -EINVAL;
- fields = event_desc->fields;
+ fields = event_desc->tp_class->fields;
if (!fields)
return -EINVAL;
- nr_fields = event_desc->nr_fields;
+ nr_fields = event_desc->tp_class->nr_fields;
for (i = 0; i < nr_fields; i++) {
- if (fields[i].nofilter)
+ if (fields[i]->nofilter)
continue;
- if (!strcmp(fields[i].name, field_name)) {
- field = &fields[i];
+ if (!strcmp(fields[i]->name, field_name)) {
+ field = fields[i];
break;
}
/* compute field offset */
- switch (fields[i].type.atype) {
- case atype_integer:
- case atype_enum_nestable:
+ switch (fields[i]->type->type) {
+ case lttng_kernel_type_integer:
+ case lttng_kernel_type_enum:
field_offset += sizeof(int64_t);
break;
- case atype_array_nestable:
- if (!lttng_is_bytewise_integer(fields[i].type.u.array_nestable.elem_type))
+ case lttng_kernel_type_array:
+ if (!lttng_kernel_type_is_bytewise_integer(lttng_kernel_get_type_array(fields[i]->type)->elem_type))
return -EINVAL;
field_offset += sizeof(unsigned long);
field_offset += sizeof(void *);
break;
- case atype_sequence_nestable:
- if (!lttng_is_bytewise_integer(fields[i].type.u.sequence_nestable.elem_type))
+ case lttng_kernel_type_sequence:
+ if (!lttng_kernel_type_is_bytewise_integer(lttng_kernel_get_type_sequence(fields[i]->type)->elem_type))
return -EINVAL;
field_offset += sizeof(unsigned long);
field_offset += sizeof(void *);
break;
- case atype_string:
+ case lttng_kernel_type_string:
field_offset += sizeof(void *);
break;
- case atype_struct_nestable: /* Unsupported. */
- case atype_variant_nestable: /* Unsupported. */
+ case lttng_kernel_type_struct: /* Unsupported. */
+ case lttng_kernel_type_variant: /* Unsupported. */
default:
return -EINVAL;
}
return -EINVAL;
/* Check if field offset is too large for 16-bit offset */
- if (field_offset > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
+ if (field_offset > LTTNG_KERNEL_ABI_FILTER_BYTECODE_MAX_LEN - 1)
return -EINVAL;
/* set type */
struct field_ref *field_ref;
field_ref = (struct field_ref *) op->data;
- switch (field->type.atype) {
- case atype_integer:
- case atype_enum_nestable:
+ switch (field->type->type) {
+ case lttng_kernel_type_integer:
+ case lttng_kernel_type_enum:
op->op = BYTECODE_OP_LOAD_FIELD_REF_S64;
break;
- case atype_array_nestable:
- case atype_sequence_nestable:
+ case lttng_kernel_type_array:
+ {
+ const struct lttng_kernel_type_array *array_type = lttng_kernel_get_type_array(field->type);
+ const struct lttng_kernel_type_common *elem_type = array_type->elem_type;
+
+ if (!lttng_kernel_type_is_bytewise_integer(elem_type) || array_type->encoding == lttng_kernel_string_encoding_none)
+ return -EINVAL;
+ if (field->user)
+ op->op = BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE;
+ else
+ op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
+ break;
+ }
+ case lttng_kernel_type_sequence:
+ {
+ const struct lttng_kernel_type_sequence *sequence_type = lttng_kernel_get_type_sequence(field->type);
+ const struct lttng_kernel_type_common *elem_type = sequence_type->elem_type;
+
+ if (!lttng_kernel_type_is_bytewise_integer(elem_type) || sequence_type->encoding == lttng_kernel_string_encoding_none)
+ return -EINVAL;
if (field->user)
op->op = BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE;
else
op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
break;
- case atype_string:
+ }
+ case lttng_kernel_type_string:
if (field->user)
op->op = BYTECODE_OP_LOAD_FIELD_REF_USER_STRING;
else
op->op = BYTECODE_OP_LOAD_FIELD_REF_STRING;
break;
- case atype_struct_nestable: /* Unsupported. */
- case atype_variant_nestable: /* Unsupported. */
+ case lttng_kernel_type_struct: /* Unsupported. */
+ case lttng_kernel_type_variant: /* Unsupported. */
default:
return -EINVAL;
}
enum bytecode_op bytecode_op)
{
struct load_op *op;
- struct lttng_ctx_field *ctx_field;
+ struct lttng_kernel_ctx_field *ctx_field;
int idx;
dbg_printk("Apply context reloc: %u %s\n", reloc_offset, context_name);
/* Get context index */
- idx = lttng_get_context_index(lttng_static_ctx, context_name);
+ idx = lttng_kernel_get_context_index(lttng_static_ctx, context_name);
if (idx < 0)
return -ENOENT;
/* Check if idx is too large for 16-bit offset */
- if (idx > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
+ if (idx > LTTNG_KERNEL_ABI_FILTER_BYTECODE_MAX_LEN - 1)
return -EINVAL;
/* Get context return type */
struct field_ref *field_ref;
field_ref = (struct field_ref *) op->data;
- switch (ctx_field->event_field.type.atype) {
- case atype_integer:
- case atype_enum_nestable:
+ switch (ctx_field->event_field->type->type) {
+ case lttng_kernel_type_integer:
+ case lttng_kernel_type_enum:
op->op = BYTECODE_OP_GET_CONTEXT_REF_S64;
break;
/* Sequence and array supported as string */
- case atype_string:
- BUG_ON(ctx_field->event_field.user);
+ case lttng_kernel_type_string:
+ BUG_ON(ctx_field->event_field->user);
op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
break;
- case atype_array_nestable:
- if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.array_nestable.elem_type))
+ case lttng_kernel_type_array:
+ {
+ const struct lttng_kernel_type_array *array_type = lttng_kernel_get_type_array(ctx_field->event_field->type);
+ const struct lttng_kernel_type_common *elem_type = array_type->elem_type;
+
+ if (!lttng_kernel_type_is_bytewise_integer(elem_type) || array_type->encoding == lttng_kernel_string_encoding_none)
return -EINVAL;
- BUG_ON(ctx_field->event_field.user);
+ BUG_ON(ctx_field->event_field->user);
op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
break;
- case atype_sequence_nestable:
- if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.sequence_nestable.elem_type))
+ }
+ case lttng_kernel_type_sequence:
+ {
+ const struct lttng_kernel_type_sequence *sequence_type = lttng_kernel_get_type_sequence(ctx_field->event_field->type);
+ const struct lttng_kernel_type_common *elem_type = sequence_type->elem_type;
+
+ if (!lttng_kernel_type_is_bytewise_integer(elem_type) || sequence_type->encoding == lttng_kernel_string_encoding_none)
return -EINVAL;
- BUG_ON(ctx_field->event_field.user);
+ BUG_ON(ctx_field->event_field->user);
op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
break;
- case atype_struct_nestable: /* Unsupported. */
- case atype_variant_nestable: /* Unsupported. */
+ }
+ case lttng_kernel_type_struct: /* Unsupported. */
+ case lttng_kernel_type_variant: /* Unsupported. */
default:
return -EINVAL;
}
}
static
-int apply_reloc(const struct lttng_event_desc *event_desc,
+int apply_reloc(const struct lttng_kernel_event_desc *event_desc,
struct bytecode_runtime *runtime,
uint32_t runtime_len,
uint32_t reloc_offset,
}
static
-int bytecode_is_linked(struct lttng_bytecode_node *bytecode,
+int bytecode_is_linked(struct lttng_kernel_bytecode_node *bytecode,
struct list_head *bytecode_runtime_head)
{
- struct lttng_bytecode_runtime *bc_runtime;
+ struct lttng_kernel_bytecode_runtime *bc_runtime;
list_for_each_entry(bc_runtime, bytecode_runtime_head, node) {
if (bc_runtime->bc == bytecode)
* bytecode runtime.
*/
static
-int _lttng_filter_link_bytecode(const struct lttng_event_desc *event_desc,
- struct lttng_ctx *ctx,
- struct lttng_bytecode_node *bytecode,
+int link_bytecode(const struct lttng_kernel_event_desc *event_desc,
+ struct lttng_kernel_ctx *ctx,
+ struct lttng_kernel_bytecode_node *bytecode,
+ struct list_head *bytecode_runtime_head,
struct list_head *insert_loc)
{
int ret, offset, next_offset;
if (!bytecode)
return 0;
/* Bytecode already linked */
- if (bytecode_is_linked(bytecode, insert_loc))
+ if (bytecode_is_linked(bytecode, bytecode_runtime_head))
return 0;
dbg_printk("Linking...\n");
ret = -ENOMEM;
goto alloc_error;
}
+ runtime->p.type = bytecode->type;
runtime->p.bc = bytecode;
runtime->p.ctx = ctx;
runtime->len = bytecode->bc.reloc_offset;
if (ret) {
goto link_error;
}
- runtime->p.filter = lttng_bytecode_filter_interpret;
+ runtime->p.interpreter_func = lttng_bytecode_interpret;
runtime->p.link_failed = 0;
list_add_rcu(&runtime->p.node, insert_loc);
dbg_printk("Linking successful.\n");
return 0;
link_error:
- runtime->p.filter = lttng_bytecode_filter_interpret_false;
+
+ runtime->p.interpreter_func = lttng_bytecode_interpret_error;
runtime->p.link_failed = 1;
list_add_rcu(&runtime->p.node, insert_loc);
alloc_error:
return ret;
}
-void lttng_bytecode_filter_sync_state(struct lttng_bytecode_runtime *runtime)
+void lttng_bytecode_sync_state(struct lttng_kernel_bytecode_runtime *runtime)
{
- struct lttng_bytecode_node *bc = runtime->bc;
+ struct lttng_kernel_bytecode_node *bc = runtime->bc;
if (!bc->enabler->enabled || runtime->link_failed)
- runtime->filter = lttng_bytecode_filter_interpret_false;
+ runtime->interpreter_func = lttng_bytecode_interpret_error;
else
- runtime->filter = lttng_bytecode_filter_interpret;
+ runtime->interpreter_func = lttng_bytecode_interpret;
}
/*
- * Link bytecode for all enablers referenced by an event.
+ * Given the lists of bytecode programs of an instance (event or event
+ * notifier) and of a matching enabler, try to link all the enabler's bytecode
+ * programs with the instance.
+ *
+ * This function is called after we have confirmed that the enabler and the
+ * instance have matching names (or glob pattern matching).
*/
-void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc,
- struct lttng_ctx *ctx,
- struct list_head *bytecode_runtime_head,
- struct lttng_enabler *enabler)
+void lttng_enabler_link_bytecode(const struct lttng_kernel_event_desc *event_desc,
+ struct lttng_kernel_ctx *ctx,
+ struct list_head *instance_bytecode_head,
+ struct list_head *enabler_bytecode_head)
{
- struct lttng_bytecode_node *bc;
- struct lttng_bytecode_runtime *runtime;
+ struct lttng_kernel_bytecode_node *enabler_bc;
+ struct lttng_kernel_bytecode_runtime *runtime;
- /* Can only be called for events with desc attached */
WARN_ON_ONCE(!event_desc);
- /* Link each bytecode. */
- list_for_each_entry(bc, &enabler->filter_bytecode_head, node) {
+ /* Go over all the bytecode programs of the enabler. */
+ list_for_each_entry(enabler_bc, enabler_bytecode_head, node) {
int found = 0, ret;
struct list_head *insert_loc;
- list_for_each_entry(runtime,
- bytecode_runtime_head, node) {
- if (runtime->bc == bc) {
+ /*
+ * Check if the current enabler bytecode program is already
+ * linked with the instance.
+ */
+ list_for_each_entry(runtime, instance_bytecode_head, node) {
+ if (runtime->bc == enabler_bc) {
found = 1;
break;
}
}
- /* Skip bytecode already linked */
+
+ /*
+ * Skip bytecode already linked, go to the next enabler
+ * bytecode program.
+ */
if (found)
continue;
* insert the new bytecode right after it.
*/
list_for_each_entry_reverse(runtime,
- bytecode_runtime_head, node) {
- if (runtime->bc->bc.seqnum <= bc->bc.seqnum) {
+ instance_bytecode_head, node) {
+ if (runtime->bc->bc.seqnum <= enabler_bc->bc.seqnum) {
/* insert here */
insert_loc = &runtime->node;
goto add_within;
}
}
/* Add to head to list */
- insert_loc = bytecode_runtime_head;
+ insert_loc = instance_bytecode_head;
add_within:
dbg_printk("linking bytecode\n");
- ret = _lttng_filter_link_bytecode(event_desc, ctx, bc,
- insert_loc);
+ ret = link_bytecode(event_desc, ctx, enabler_bc, instance_bytecode_head, insert_loc);
if (ret) {
dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
}
/*
* We own the filter_bytecode if we return success.
*/
-int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
- struct lttng_bytecode_node *filter_bytecode)
+int lttng_filter_enabler_attach_bytecode(struct lttng_event_enabler_common *enabler,
+ struct lttng_kernel_bytecode_node *filter_bytecode)
{
list_add(&filter_bytecode->node, &enabler->filter_bytecode_head);
return 0;
}
-void lttng_free_enabler_filter_bytecode(struct lttng_enabler *enabler)
+void lttng_free_enabler_filter_bytecode(struct lttng_event_enabler_common *enabler)
{
- struct lttng_bytecode_node *filter_bytecode, *tmp;
+ struct lttng_kernel_bytecode_node *filter_bytecode, *tmp;
list_for_each_entry_safe(filter_bytecode, tmp,
&enabler->filter_bytecode_head, node) {
}
}
-void lttng_free_event_filter_runtime(struct lttng_event *event)
+void lttng_free_event_filter_runtime(struct lttng_kernel_event_common *event)
{
struct bytecode_runtime *runtime, *tmp;
list_for_each_entry_safe(runtime, tmp,
- &event->filter_bytecode_runtime_head, p.node) {
+ &event->priv->filter_bytecode_runtime_head, p.node) {
kfree(runtime->data);
kfree(runtime);
}