#include "context-internal.h"
#include "lttng-bytecode.h"
#include "ust-events-internal.h"
+#include "ust-helper.h"
/*
 * Human-readable names for bytecode opcodes, indexed by enum bytecode_op.
 * Designated initializers leave unnamed entries NULL.
 */
static const char *opnames[] = {
	[ BYTECODE_OP_UNKNOWN ] = "UNKNOWN",
};
/*
 * apply_field_reloc: patch a load instruction in the copied bytecode so it
 * references the payload offset of @field_name within @event_desc's fields.
 * Returns 0 on success, a negative errno-style value on error.
 *
 * NOTE(review): this is a diff fragment; several interior lines of the
 * function body are elided, so the control flow shown here is incomplete.
 */
static
-int apply_field_reloc(const struct lttng_event_desc *event_desc,
+int apply_field_reloc(const struct lttng_ust_event_desc *event_desc,
struct bytecode_runtime *runtime,
uint32_t runtime_len,
uint32_t reloc_offset,
const char *field_name,
enum bytecode_op bytecode_op)
{
/* API change in this patch: fields becomes an array of pointers. */
- const struct lttng_event_field *fields, *field = NULL;
+ const struct lttng_ust_event_field **fields, *field = NULL;
unsigned int nr_fields, i;
struct load_op *op;
uint32_t field_offset = 0;
return -EINVAL;
nr_fields = event_desc->nr_fields;
/* Walk the event fields, accumulating the byte offset of each. */
for (i = 0; i < nr_fields; i++) {
- if (fields[i].u.ext.nofilter) {
+ if (fields[i]->nofilter) {
continue;
}
- if (!strcmp(fields[i].name, field_name)) {
- field = &fields[i];
+ if (!strcmp(fields[i]->name, field_name)) {
+ field = fields[i];
break;
}
/* compute field offset */
- switch (fields[i].type.atype) {
+ switch (fields[i]->type.atype) {
case atype_integer:
- case atype_enum:
case atype_enum_nestable:
field_offset += sizeof(int64_t);
break;
- case atype_array:
case atype_array_nestable:
- case atype_sequence:
case atype_sequence_nestable:
/* sequences/arrays occupy a length word plus a data pointer */
field_offset += sizeof(unsigned long);
field_offset += sizeof(void *);
return -EINVAL;
/* Check if field offset is too large for 16-bit offset */
- if (field_offset > FILTER_BYTECODE_MAX_LEN - 1)
+ if (field_offset > LTTNG_UST_ABI_FILTER_BYTECODE_MAX_LEN - 1)
return -EINVAL;
/* set type */
field_ref = (struct field_ref *) op->data;
/* Select the load opcode matching the resolved field's type. */
switch (field->type.atype) {
case atype_integer:
- case atype_enum:
case atype_enum_nestable:
op->op = BYTECODE_OP_LOAD_FIELD_REF_S64;
break;
- case atype_array:
case atype_array_nestable:
- case atype_sequence:
case atype_sequence_nestable:
op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
break;
/*
 * NOTE(review): fragment without its signature — presumably the interior of
 * apply_context_reloc (per the dbg_printf below); confirm against full file.
 */
struct load_op *op;
struct lttng_ctx_field *ctx_field;
int idx;
/* Context pointer now lives behind the runtime's private data. */
- struct lttng_ctx **pctx = runtime->p.pctx;
+ struct lttng_ctx **pctx = runtime->p.priv->pctx;
dbg_printf("Apply context reloc: %u %s\n", reloc_offset, context_name);
}
}
/* Check if idx is too large for 16-bit offset */
- if (idx > FILTER_BYTECODE_MAX_LEN - 1)
+ if (idx > LTTNG_UST_ABI_FILTER_BYTECODE_MAX_LEN - 1)
return -EINVAL;
/* Get context return type */
field_ref = (struct field_ref *) op->data;
/* Select the context-ref opcode matching the context field's type. */
switch (ctx_field->event_field.type.atype) {
case atype_integer:
- case atype_enum:
case atype_enum_nestable:
op->op = BYTECODE_OP_GET_CONTEXT_REF_S64;
break;
/* Sequence and array supported as string */
case atype_string:
- case atype_array:
case atype_array_nestable:
- case atype_sequence:
case atype_sequence_nestable:
op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
break;
}
/*
 * apply_reloc: dispatch a single relocation entry to the field/context
 * relocation handlers above.
 * NOTE(review): body elided in this diff fragment.
 */
static
-int apply_reloc(const struct lttng_event_desc *event_desc,
+int apply_reloc(const struct lttng_ust_event_desc *event_desc,
struct bytecode_runtime *runtime,
uint32_t runtime_len,
uint32_t reloc_offset,
/*
 * Return 1 if @bytecode already has a runtime on @bytecode_runtime_head,
 * 0 otherwise.
 */
int bytecode_is_linked(struct lttng_ust_bytecode_node *bytecode,
struct cds_list_head *bytecode_runtime_head)
{
- struct lttng_bytecode_runtime *bc_runtime;
+ struct lttng_ust_bytecode_runtime *bc_runtime;
cds_list_for_each_entry(bc_runtime, bytecode_runtime_head, node) {
/* The bytecode node is now reached through the private data. */
- if (bc_runtime->bc == bytecode)
+ if (bc_runtime->priv->bc == bytecode)
return 1;
}
return 0;
* bytecode runtime.
*/
static
-int link_bytecode(const struct lttng_event_desc *event_desc,
+int link_bytecode(const struct lttng_ust_event_desc *event_desc,
struct lttng_ctx **ctx,
struct lttng_ust_bytecode_node *bytecode,
+ struct cds_list_head *bytecode_runtime_head,
struct cds_list_head *insert_loc)
{
int ret, offset, next_offset;
struct bytecode_runtime *runtime = NULL;
+ struct lttng_ust_bytecode_runtime_private *runtime_priv = NULL;
size_t runtime_alloc_len;
if (!bytecode)
return 0;
/* Bytecode already linked */
/*
 * Fix in this patch: duplicate-link detection must scan the whole
 * runtime list, not just the insertion point, hence the new
 * bytecode_runtime_head parameter.
 */
- if (bytecode_is_linked(bytecode, insert_loc))
+ if (bytecode_is_linked(bytecode, bytecode_runtime_head))
return 0;
dbg_printf("Linking...\n");
ret = -ENOMEM;
goto alloc_error;
}
/*
 * Split the runtime into a public struct and a private data object,
 * cross-linked in both directions; free the public struct again if the
 * private allocation fails.
 */
- runtime->p.bc = bytecode;
- runtime->p.pctx = ctx;
+ runtime_priv = zmalloc(sizeof(struct lttng_ust_bytecode_runtime_private));
+ if (!runtime_priv) {
+ free(runtime);
+ runtime = NULL;
+ ret = -ENOMEM;
+ goto alloc_error;
+ }
+ runtime->p.priv = runtime_priv;
+ runtime->p.struct_size = sizeof(struct lttng_ust_bytecode_runtime);
+ runtime_priv->pub = runtime;
+ runtime_priv->bc = bytecode;
+ runtime_priv->pctx = ctx;
runtime->len = bytecode->bc.reloc_offset;
/* copy original bytecode */
memcpy(runtime->code, bytecode->bc.data, runtime->len);
abort();
}
/*
 * NOTE(review): success path writes runtime->p.priv->link_failed while
 * the failure path below uses runtime_priv->link_failed — same object;
 * consider using runtime_priv in both places for consistency.
 */
- runtime->p.link_failed = 0;
+ runtime->p.priv->link_failed = 0;
cds_list_add_rcu(&runtime->p.node, insert_loc);
dbg_printf("Linking successful.\n");
return 0;
abort();
}
/* Link failed: keep the runtime on the list, but mark it failed. */
- runtime->p.link_failed = 1;
+ runtime_priv->link_failed = 1;
cds_list_add_rcu(&runtime->p.node, insert_loc);
alloc_error:
dbg_printf("Linking failed.\n");
return ret;
}
/*
 * Point the filter interpreter function at the real interpreter, or at the
 * always-false stub when the enabler is disabled or linking failed.
 */
-void lttng_bytecode_filter_sync_state(struct lttng_bytecode_runtime *runtime)
+void lttng_bytecode_filter_sync_state(struct lttng_ust_bytecode_runtime *runtime)
{
- struct lttng_ust_bytecode_node *bc = runtime->bc;
+ struct lttng_ust_bytecode_node *bc = runtime->priv->bc;
- if (!bc->enabler->enabled || runtime->link_failed)
+ if (!bc->enabler->enabled || runtime->priv->link_failed)
runtime->interpreter_funcs.filter = lttng_bytecode_filter_interpret_false;
else
runtime->interpreter_funcs.filter = lttng_bytecode_filter_interpret;
}
/*
 * Capture counterpart of the filter sync above: select the real capture
 * interpreter, or the always-false stub when disabled or link failed.
 */
-void lttng_bytecode_capture_sync_state(struct lttng_bytecode_runtime *runtime)
+void lttng_bytecode_capture_sync_state(struct lttng_ust_bytecode_runtime *runtime)
{
- struct lttng_ust_bytecode_node *bc = runtime->bc;
+ struct lttng_ust_bytecode_node *bc = runtime->priv->bc;
- if (!bc->enabler->enabled || runtime->link_failed)
+ if (!bc->enabler->enabled || runtime->priv->link_failed)
runtime->interpreter_funcs.capture = lttng_bytecode_capture_interpret_false;
else
runtime->interpreter_funcs.capture = lttng_bytecode_capture_interpret;
* This function is called after we confirmed that name enabler and the
* instance are name matching (or glob pattern matching).
*/
-void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc,
+void lttng_enabler_link_bytecode(const struct lttng_ust_event_desc *event_desc,
struct lttng_ctx **ctx,
struct cds_list_head *instance_bytecode_head,
struct cds_list_head *enabler_bytecode_head)
{
struct lttng_ust_bytecode_node *enabler_bc;
- struct lttng_bytecode_runtime *runtime;
+ struct lttng_ust_bytecode_runtime *runtime;
assert(event_desc);
* linked with the instance.
*/
/* Skip enabler bytecodes whose runtime is already on the instance. */
cds_list_for_each_entry(runtime, instance_bytecode_head, node) {
- if (runtime->bc == enabler_bc) {
+ if (runtime->priv->bc == enabler_bc) {
found = 1;
break;
}
*/
/* Keep the runtime list sorted by bytecode sequence number. */
cds_list_for_each_entry_reverse(runtime,
instance_bytecode_head, node) {
- if (runtime->bc->bc.seqnum <= enabler_bc->bc.seqnum) {
+ if (runtime->priv->bc->bc.seqnum <= enabler_bc->bc.seqnum) {
/* insert here */
insert_loc = &runtime->node;
goto add_within;
insert_loc = instance_bytecode_head;
add_within:
dbg_printf("linking bytecode\n");
/* Pass the full list head so duplicate detection scans everything. */
- ret = link_bytecode(event_desc, ctx, enabler_bc, insert_loc);
+ ret = link_bytecode(event_desc, ctx, enabler_bc, instance_bytecode_head, insert_loc);
if (ret) {
dbg_printf("[lttng filter] warning: cannot link event bytecode\n");
}
/*
 * NOTE(review): fragment — interior of free_filter_runtime; its signature
 * is elided from this diff.
 */
cds_list_for_each_entry_safe(runtime, tmp, bytecode_runtime_head,
p.node) {
free(runtime->data);
/* Also free the private data allocated in link_bytecode (plugs a leak). */
+ free(runtime->p.priv);
free(runtime);
}
}
/* Free all filter bytecode runtimes attached to @event. */
-void lttng_free_event_filter_runtime(struct lttng_event *event)
+void lttng_free_event_filter_runtime(struct lttng_ust_event_common *event)
{
free_filter_runtime(&event->filter_bytecode_runtime_head);
}
/*
 * Removed by this patch — presumably because event notifiers now go through
 * the common lttng_ust_event_common path above; confirm against callers.
 */
-
-void lttng_free_event_notifier_filter_runtime(
- struct lttng_event_notifier *event_notifier)
-{
- free_filter_runtime(&event_notifier->filter_bytecode_runtime_head);
-}