X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=src%2Flttng-filter.c;h=a0ad3085611f76d841313c161bcfddd30ee54734;hb=89ec2b91ca249747d87b59aea76e6e67cdf7f0bf;hp=0b5e872fdb969ce2071d4e4a16f8e08df4cfb7a5;hpb=b2bc0bc8dfef0380624d7d13764661a446cc809d;p=lttng-modules.git diff --git a/src/lttng-filter.c b/src/lttng-filter.c index 0b5e872f..a0ad3085 100644 --- a/src/lttng-filter.c +++ b/src/lttng-filter.c @@ -167,14 +167,13 @@ const char *lttng_filter_print_op(enum filter_op op) } static -int apply_field_reloc(struct lttng_event *event, +int apply_field_reloc(const struct lttng_event_desc *event_desc, struct bytecode_runtime *runtime, uint32_t runtime_len, uint32_t reloc_offset, const char *field_name, enum filter_op filter_op) { - const struct lttng_event_desc *desc; const struct lttng_event_field *fields, *field = NULL; unsigned int nr_fields, i; struct load_op *op; @@ -183,13 +182,12 @@ int apply_field_reloc(struct lttng_event *event, dbg_printk("Apply field reloc: %u %s\n", reloc_offset, field_name); /* Lookup event by name */ - desc = event->desc; - if (!desc) + if (!event_desc) return -EINVAL; - fields = desc->fields; + fields = event_desc->fields; if (!fields) return -EINVAL; - nr_fields = desc->nr_fields; + nr_fields = event_desc->nr_fields; for (i = 0; i < nr_fields; i++) { if (fields[i].nofilter) continue; @@ -274,8 +272,7 @@ int apply_field_reloc(struct lttng_event *event, } static -int apply_context_reloc(struct lttng_event *event, - struct bytecode_runtime *runtime, +int apply_context_reloc(struct bytecode_runtime *runtime, uint32_t runtime_len, uint32_t reloc_offset, const char *context_name, @@ -344,7 +341,7 @@ int apply_context_reloc(struct lttng_event *event, } static -int apply_reloc(struct lttng_event *event, +int apply_reloc(const struct lttng_event_desc *event_desc, struct bytecode_runtime *runtime, uint32_t runtime_len, uint32_t reloc_offset, @@ -361,10 +358,10 @@ int apply_reloc(struct lttng_event *event, op = (struct load_op *) &runtime->code[reloc_offset]; switch (op->op) { case FILTER_OP_LOAD_FIELD_REF: - return apply_field_reloc(event, runtime, runtime_len, + return apply_field_reloc(event_desc, runtime, runtime_len, reloc_offset, name, op->op); case FILTER_OP_GET_CONTEXT_REF: - return apply_context_reloc(event, runtime, runtime_len, + return apply_context_reloc(runtime, runtime_len, reloc_offset, name, op->op); case FILTER_OP_GET_SYMBOL: case FILTER_OP_GET_SYMBOL_FIELD: @@ -381,14 +378,13 @@ int apply_reloc(struct lttng_event *event, } static -int bytecode_is_linked(struct lttng_filter_bytecode_node *filter_bytecode, - struct lttng_event *event) +int bytecode_is_linked(struct lttng_bytecode_node *bytecode, + struct list_head *bytecode_runtime_head) { struct lttng_bytecode_runtime *bc_runtime; - list_for_each_entry(bc_runtime, - &event->bytecode_runtime_head, node) { - if (bc_runtime->bc == filter_bytecode) + list_for_each_entry(bc_runtime, bytecode_runtime_head, node) { + if (bc_runtime->bc == bytecode) return 1; } return 0; @@ -399,47 +395,48 @@ int bytecode_is_linked(struct lttng_filter_bytecode_node *filter_bytecode, * bytecode runtime. 
*/ static -int _lttng_filter_event_link_bytecode(struct lttng_event *event, - struct lttng_filter_bytecode_node *filter_bytecode, +int _lttng_filter_link_bytecode(const struct lttng_event_desc *event_desc, + struct lttng_ctx *ctx, + struct lttng_bytecode_node *bytecode, struct list_head *insert_loc) { int ret, offset, next_offset; struct bytecode_runtime *runtime = NULL; size_t runtime_alloc_len; - if (!filter_bytecode) + if (!bytecode) return 0; /* Bytecode already linked */ - if (bytecode_is_linked(filter_bytecode, event)) + if (bytecode_is_linked(bytecode, insert_loc)) return 0; dbg_printk("Linking...\n"); /* We don't need the reloc table in the runtime */ - runtime_alloc_len = sizeof(*runtime) + filter_bytecode->bc.reloc_offset; + runtime_alloc_len = sizeof(*runtime) + bytecode->bc.reloc_offset; runtime = kzalloc(runtime_alloc_len, GFP_KERNEL); if (!runtime) { ret = -ENOMEM; goto alloc_error; } - runtime->p.bc = filter_bytecode; - runtime->p.event = event; - runtime->len = filter_bytecode->bc.reloc_offset; + runtime->p.bc = bytecode; + runtime->p.ctx = ctx; + runtime->len = bytecode->bc.reloc_offset; /* copy original bytecode */ - memcpy(runtime->code, filter_bytecode->bc.data, runtime->len); + memcpy(runtime->code, bytecode->bc.data, runtime->len); /* * apply relocs. Those are a uint16_t (offset in bytecode) * followed by a string (field name). */ - for (offset = filter_bytecode->bc.reloc_offset; - offset < filter_bytecode->bc.len; + for (offset = bytecode->bc.reloc_offset; + offset < bytecode->bc.len; offset = next_offset) { uint16_t reloc_offset = - *(uint16_t *) &filter_bytecode->bc.data[offset]; + *(uint16_t *) &bytecode->bc.data[offset]; const char *name = - (const char *) &filter_bytecode->bc.data[offset + sizeof(uint16_t)]; + (const char *) &bytecode->bc.data[offset + sizeof(uint16_t)]; - ret = apply_reloc(event, runtime, runtime->len, reloc_offset, name); + ret = apply_reloc(event_desc, runtime, runtime->len, reloc_offset, name); if (ret) { goto link_error; } @@ -451,7 +448,7 @@ int _lttng_filter_event_link_bytecode(struct lttng_event *event, goto link_error; } /* Specialize bytecode */ - ret = lttng_filter_specialize_bytecode(event, runtime); + ret = lttng_filter_specialize_bytecode(event_desc, runtime); if (ret) { goto link_error; } @@ -462,7 +459,7 @@ int _lttng_filter_event_link_bytecode(struct lttng_event *event, return 0; link_error: - runtime->p.filter = lttng_filter_false; + runtime->p.filter = lttng_filter_interpret_bytecode_false; runtime->p.link_failed = 1; list_add_rcu(&runtime->p.node, insert_loc); alloc_error: @@ -472,10 +469,10 @@ alloc_error: void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime) { - struct lttng_filter_bytecode_node *bc = runtime->bc; + struct lttng_bytecode_node *bc = runtime->bc; if (!bc->enabler->enabled || runtime->link_failed) - runtime->filter = lttng_filter_false; + runtime->filter = lttng_filter_interpret_bytecode_false; else runtime->filter = lttng_filter_interpret_bytecode; } @@ -483,23 +480,24 @@ void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime) /* * Link bytecode for all enablers referenced by an event. 
*/ -void lttng_event_enabler_link_bytecode(struct lttng_event *event, - struct lttng_event_enabler *event_enabler) +void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc, + struct lttng_ctx *ctx, + struct list_head *bytecode_runtime_head, + struct lttng_enabler *enabler) { - struct lttng_filter_bytecode_node *bc; + struct lttng_bytecode_node *bc; struct lttng_bytecode_runtime *runtime; - struct lttng_enabler *base_enabler = lttng_event_enabler_as_enabler(event_enabler); /* Can only be called for events with desc attached */ - WARN_ON_ONCE(!event->desc); + WARN_ON_ONCE(!event_desc); /* Link each bytecode. */ - list_for_each_entry(bc, &base_enabler->filter_bytecode_head, node) { + list_for_each_entry(bc, &enabler->filter_bytecode_head, node) { int found = 0, ret; struct list_head *insert_loc; list_for_each_entry(runtime, - &event->bytecode_runtime_head, node) { + bytecode_runtime_head, node) { if (runtime->bc == bc) { found = 1; break; @@ -515,7 +513,7 @@ void lttng_event_enabler_link_bytecode(struct lttng_event *event, * insert the new bytecode right after it. */ list_for_each_entry_reverse(runtime, - &event->bytecode_runtime_head, node) { + bytecode_runtime_head, node) { if (runtime->bc->bc.seqnum <= bc->bc.seqnum) { /* insert here */ insert_loc = &runtime->node; @@ -523,11 +521,11 @@ void lttng_event_enabler_link_bytecode(struct lttng_event *event, } } /* Add to head to list */ - insert_loc = &event->bytecode_runtime_head; + insert_loc = bytecode_runtime_head; add_within: dbg_printk("linking bytecode\n"); - ret = _lttng_filter_event_link_bytecode(event, bc, - insert_loc); + ret = _lttng_filter_link_bytecode(event_desc, ctx, bc, + insert_loc); if (ret) { dbg_printk("[lttng filter] warning: cannot link event bytecode\n"); } @@ -538,7 +536,7 @@ void lttng_event_enabler_link_bytecode(struct lttng_event *event, * We own the filter_bytecode if we return success. */ int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler, - struct lttng_filter_bytecode_node *filter_bytecode) + struct lttng_bytecode_node *filter_bytecode) { list_add(&filter_bytecode->node, &enabler->filter_bytecode_head); return 0; @@ -546,7 +544,7 @@ int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler, void lttng_free_enabler_filter_bytecode(struct lttng_enabler *enabler) { - struct lttng_filter_bytecode_node *filter_bytecode, *tmp; + struct lttng_bytecode_node *filter_bytecode, *tmp; list_for_each_entry_safe(filter_bytecode, tmp, &enabler->filter_bytecode_head, node) { @@ -559,7 +557,7 @@ void lttng_free_event_filter_runtime(struct lttng_event *event) struct bytecode_runtime *runtime, *tmp; list_for_each_entry_safe(runtime, tmp, - &event->bytecode_runtime_head, p.node) { + &event->filter_bytecode_runtime_head, p.node) { kfree(runtime->data); kfree(runtime); }
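
For reference, a minimal caller-side sketch of the generalized linking entry point introduced by this patch. This is illustrative only: the wrapper function name and the source of the `ctx` argument are assumptions, not part of the change; the field and helper names (`event->desc`, `filter_bytecode_runtime_head`, `lttng_event_enabler_as_enabler()`) are taken from the hunks above.

```c
/*
 * Illustrative sketch only: how a caller might invoke the generalized
 * lttng_enabler_link_bytecode() after this refactoring. The wrapper name
 * and the origin of 'ctx' are assumptions, not part of this patch.
 */
static
void example_link_event_filter(struct lttng_event *event,
		struct lttng_event_enabler *event_enabler,
		struct lttng_ctx *ctx)	/* context to resolve context refs against (assumed source) */
{
	/* The event descriptor replaces the former 'struct lttng_event *' argument. */
	const struct lttng_event_desc *event_desc = event->desc;

	/*
	 * The caller now performs the event-enabler to base-enabler conversion
	 * itself and passes the per-event runtime list explicitly.
	 */
	lttng_enabler_link_bytecode(event_desc,
			ctx,
			&event->filter_bytecode_runtime_head,
			lttng_event_enabler_as_enabler(event_enabler));
}
```

Passing the descriptor, context and bytecode runtime list as explicit parameters decouples the linker from `struct lttng_event`, so the same linking path can presumably be reused by other bytecode users that only carry an event descriptor and a runtime list.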