/*
 * LTTng modules filter code.
 *
 * Copyright (C) 2010-2014 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <lttng-filter.h>
28 static const char *opnames
[] = {
29 [ FILTER_OP_UNKNOWN
] = "UNKNOWN",
31 [ FILTER_OP_RETURN
] = "RETURN",
34 [ FILTER_OP_MUL
] = "MUL",
35 [ FILTER_OP_DIV
] = "DIV",
36 [ FILTER_OP_MOD
] = "MOD",
37 [ FILTER_OP_PLUS
] = "PLUS",
38 [ FILTER_OP_MINUS
] = "MINUS",
39 [ FILTER_OP_RSHIFT
] = "RSHIFT",
40 [ FILTER_OP_LSHIFT
] = "LSHIFT",
41 [ FILTER_OP_BIN_AND
] = "BIN_AND",
42 [ FILTER_OP_BIN_OR
] = "BIN_OR",
43 [ FILTER_OP_BIN_XOR
] = "BIN_XOR",
45 /* binary comparators */
46 [ FILTER_OP_EQ
] = "EQ",
47 [ FILTER_OP_NE
] = "NE",
48 [ FILTER_OP_GT
] = "GT",
49 [ FILTER_OP_LT
] = "LT",
50 [ FILTER_OP_GE
] = "GE",
51 [ FILTER_OP_LE
] = "LE",
53 /* string binary comparators */
54 [ FILTER_OP_EQ_STRING
] = "EQ_STRING",
55 [ FILTER_OP_NE_STRING
] = "NE_STRING",
56 [ FILTER_OP_GT_STRING
] = "GT_STRING",
57 [ FILTER_OP_LT_STRING
] = "LT_STRING",
58 [ FILTER_OP_GE_STRING
] = "GE_STRING",
59 [ FILTER_OP_LE_STRING
] = "LE_STRING",
61 /* s64 binary comparators */
62 [ FILTER_OP_EQ_S64
] = "EQ_S64",
63 [ FILTER_OP_NE_S64
] = "NE_S64",
64 [ FILTER_OP_GT_S64
] = "GT_S64",
65 [ FILTER_OP_LT_S64
] = "LT_S64",
66 [ FILTER_OP_GE_S64
] = "GE_S64",
67 [ FILTER_OP_LE_S64
] = "LE_S64",
69 /* double binary comparators */
70 [ FILTER_OP_EQ_DOUBLE
] = "EQ_DOUBLE",
71 [ FILTER_OP_NE_DOUBLE
] = "NE_DOUBLE",
72 [ FILTER_OP_GT_DOUBLE
] = "GT_DOUBLE",
73 [ FILTER_OP_LT_DOUBLE
] = "LT_DOUBLE",
74 [ FILTER_OP_GE_DOUBLE
] = "GE_DOUBLE",
75 [ FILTER_OP_LE_DOUBLE
] = "LE_DOUBLE",
77 /* Mixed S64-double binary comparators */
78 [ FILTER_OP_EQ_DOUBLE_S64
] = "EQ_DOUBLE_S64",
79 [ FILTER_OP_NE_DOUBLE_S64
] = "NE_DOUBLE_S64",
80 [ FILTER_OP_GT_DOUBLE_S64
] = "GT_DOUBLE_S64",
81 [ FILTER_OP_LT_DOUBLE_S64
] = "LT_DOUBLE_S64",
82 [ FILTER_OP_GE_DOUBLE_S64
] = "GE_DOUBLE_S64",
83 [ FILTER_OP_LE_DOUBLE_S64
] = "LE_DOUBLE_S64",
85 [ FILTER_OP_EQ_S64_DOUBLE
] = "EQ_S64_DOUBLE",
86 [ FILTER_OP_NE_S64_DOUBLE
] = "NE_S64_DOUBLE",
87 [ FILTER_OP_GT_S64_DOUBLE
] = "GT_S64_DOUBLE",
88 [ FILTER_OP_LT_S64_DOUBLE
] = "LT_S64_DOUBLE",
89 [ FILTER_OP_GE_S64_DOUBLE
] = "GE_S64_DOUBLE",
90 [ FILTER_OP_LE_S64_DOUBLE
] = "LE_S64_DOUBLE",
93 [ FILTER_OP_UNARY_PLUS
] = "UNARY_PLUS",
94 [ FILTER_OP_UNARY_MINUS
] = "UNARY_MINUS",
95 [ FILTER_OP_UNARY_NOT
] = "UNARY_NOT",
96 [ FILTER_OP_UNARY_PLUS_S64
] = "UNARY_PLUS_S64",
97 [ FILTER_OP_UNARY_MINUS_S64
] = "UNARY_MINUS_S64",
98 [ FILTER_OP_UNARY_NOT_S64
] = "UNARY_NOT_S64",
99 [ FILTER_OP_UNARY_PLUS_DOUBLE
] = "UNARY_PLUS_DOUBLE",
100 [ FILTER_OP_UNARY_MINUS_DOUBLE
] = "UNARY_MINUS_DOUBLE",
101 [ FILTER_OP_UNARY_NOT_DOUBLE
] = "UNARY_NOT_DOUBLE",
104 [ FILTER_OP_AND
] = "AND",
105 [ FILTER_OP_OR
] = "OR",
108 [ FILTER_OP_LOAD_FIELD_REF
] = "LOAD_FIELD_REF",
109 [ FILTER_OP_LOAD_FIELD_REF_STRING
] = "LOAD_FIELD_REF_STRING",
110 [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE
] = "LOAD_FIELD_REF_SEQUENCE",
111 [ FILTER_OP_LOAD_FIELD_REF_S64
] = "LOAD_FIELD_REF_S64",
112 [ FILTER_OP_LOAD_FIELD_REF_DOUBLE
] = "LOAD_FIELD_REF_DOUBLE",
114 /* load from immediate operand */
115 [ FILTER_OP_LOAD_STRING
] = "LOAD_STRING",
116 [ FILTER_OP_LOAD_S64
] = "LOAD_S64",
117 [ FILTER_OP_LOAD_DOUBLE
] = "LOAD_DOUBLE",
120 [ FILTER_OP_CAST_TO_S64
] = "CAST_TO_S64",
121 [ FILTER_OP_CAST_DOUBLE_TO_S64
] = "CAST_DOUBLE_TO_S64",
122 [ FILTER_OP_CAST_NOP
] = "CAST_NOP",
124 /* get context ref */
125 [ FILTER_OP_GET_CONTEXT_REF
] = "GET_CONTEXT_REF",
126 [ FILTER_OP_GET_CONTEXT_REF_STRING
] = "GET_CONTEXT_REF_STRING",
127 [ FILTER_OP_GET_CONTEXT_REF_S64
] = "GET_CONTEXT_REF_S64",
128 [ FILTER_OP_GET_CONTEXT_REF_DOUBLE
] = "GET_CONTEXT_REF_DOUBLE",
130 /* load userspace field ref */
131 [ FILTER_OP_LOAD_FIELD_REF_USER_STRING
] = "LOAD_FIELD_REF_USER_STRING",
132 [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
] = "LOAD_FIELD_REF_USER_SEQUENCE",
135 const char *lttng_filter_print_op(enum filter_op op
)
137 if (op
>= NR_FILTER_OPS
)
144 int apply_field_reloc(struct lttng_event
*event
,
145 struct bytecode_runtime
*runtime
,
146 uint32_t runtime_len
,
147 uint32_t reloc_offset
,
148 const char *field_name
)
150 const struct lttng_event_desc
*desc
;
151 const struct lttng_event_field
*fields
, *field
= NULL
;
152 unsigned int nr_fields
, i
;
153 struct field_ref
*field_ref
;
155 uint32_t field_offset
= 0;
157 dbg_printk("Apply field reloc: %u %s\n", reloc_offset
, field_name
);
159 /* Lookup event by name */
163 fields
= desc
->fields
;
166 nr_fields
= desc
->nr_fields
;
167 for (i
= 0; i
< nr_fields
; i
++) {
168 if (!strcmp(fields
[i
].name
, field_name
)) {
172 /* compute field offset */
173 switch (fields
[i
].type
.atype
) {
176 field_offset
+= sizeof(int64_t);
180 field_offset
+= sizeof(unsigned long);
181 field_offset
+= sizeof(void *);
184 field_offset
+= sizeof(void *);
186 case atype_struct
: /* Unsupported. */
187 case atype_array_compound
: /* Unsupported. */
188 case atype_sequence_compound
: /* Unsupported. */
189 case atype_variant
: /* Unsupported. */
197 /* Check if field offset is too large for 16-bit offset */
198 if (field_offset
> LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN
- 1)
202 op
= (struct load_op
*) &runtime
->data
[reloc_offset
];
203 field_ref
= (struct field_ref
*) op
->data
;
204 switch (field
->type
.atype
) {
207 op
->op
= FILTER_OP_LOAD_FIELD_REF_S64
;
212 op
->op
= FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
;
214 op
->op
= FILTER_OP_LOAD_FIELD_REF_SEQUENCE
;
218 op
->op
= FILTER_OP_LOAD_FIELD_REF_USER_STRING
;
220 op
->op
= FILTER_OP_LOAD_FIELD_REF_STRING
;
222 case atype_struct
: /* Unsupported. */
223 case atype_array_compound
: /* Unsupported. */
224 case atype_sequence_compound
: /* Unsupported. */
225 case atype_variant
: /* Unsupported. */
230 field_ref
->offset
= (uint16_t) field_offset
;
235 int apply_context_reloc(struct lttng_event
*event
,
236 struct bytecode_runtime
*runtime
,
237 uint32_t runtime_len
,
238 uint32_t reloc_offset
,
239 const char *context_name
)
241 struct field_ref
*field_ref
;
243 struct lttng_ctx_field
*ctx_field
;
246 dbg_printk("Apply context reloc: %u %s\n", reloc_offset
, context_name
);
248 /* Get context index */
249 idx
= lttng_get_context_index(lttng_static_ctx
, context_name
);
253 /* Check if idx is too large for 16-bit offset */
254 if (idx
> LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN
- 1)
257 /* Get context return type */
258 ctx_field
= <tng_static_ctx
->fields
[idx
];
259 op
= (struct load_op
*) &runtime
->data
[reloc_offset
];
260 field_ref
= (struct field_ref
*) op
->data
;
261 switch (ctx_field
->event_field
.type
.atype
) {
264 op
->op
= FILTER_OP_GET_CONTEXT_REF_S64
;
266 /* Sequence and array supported as string */
270 BUG_ON(ctx_field
->event_field
.user
);
271 op
->op
= FILTER_OP_GET_CONTEXT_REF_STRING
;
273 case atype_struct
: /* Unsupported. */
274 case atype_array_compound
: /* Unsupported. */
275 case atype_sequence_compound
: /* Unsupported. */
276 case atype_variant
: /* Unsupported. */
280 /* set offset to context index within channel contexts */
281 field_ref
->offset
= (uint16_t) idx
;
286 int apply_reloc(struct lttng_event
*event
,
287 struct bytecode_runtime
*runtime
,
288 uint32_t runtime_len
,
289 uint32_t reloc_offset
,
294 dbg_printk("Apply reloc: %u %s\n", reloc_offset
, name
);
296 /* Ensure that the reloc is within the code */
297 if (runtime_len
- reloc_offset
< sizeof(uint16_t))
300 op
= (struct load_op
*) &runtime
->data
[reloc_offset
];
302 case FILTER_OP_LOAD_FIELD_REF
:
303 return apply_field_reloc(event
, runtime
, runtime_len
,
305 case FILTER_OP_GET_CONTEXT_REF
:
306 return apply_context_reloc(event
, runtime
, runtime_len
,
309 printk(KERN_WARNING
"Unknown reloc op type %u\n", op
->op
);
316 int bytecode_is_linked(struct lttng_filter_bytecode_node
*filter_bytecode
,
317 struct lttng_event
*event
)
319 struct lttng_bytecode_runtime
*bc_runtime
;
321 list_for_each_entry(bc_runtime
,
322 &event
->bytecode_runtime_head
, node
) {
323 if (bc_runtime
->bc
== filter_bytecode
)
330 * Take a bytecode with reloc table and link it to an event to create a
334 int _lttng_filter_event_link_bytecode(struct lttng_event
*event
,
335 struct lttng_filter_bytecode_node
*filter_bytecode
,
336 struct list_head
*insert_loc
)
338 int ret
, offset
, next_offset
;
339 struct bytecode_runtime
*runtime
= NULL
;
340 size_t runtime_alloc_len
;
342 if (!filter_bytecode
)
344 /* Bytecode already linked */
345 if (bytecode_is_linked(filter_bytecode
, event
))
348 dbg_printk("Linking...\n");
350 /* We don't need the reloc table in the runtime */
351 runtime_alloc_len
= sizeof(*runtime
) + filter_bytecode
->bc
.reloc_offset
;
352 runtime
= kzalloc(runtime_alloc_len
, GFP_KERNEL
);
357 runtime
->p
.bc
= filter_bytecode
;
358 runtime
->len
= filter_bytecode
->bc
.reloc_offset
;
359 /* copy original bytecode */
360 memcpy(runtime
->data
, filter_bytecode
->bc
.data
, runtime
->len
);
362 * apply relocs. Those are a uint16_t (offset in bytecode)
363 * followed by a string (field name).
365 for (offset
= filter_bytecode
->bc
.reloc_offset
;
366 offset
< filter_bytecode
->bc
.len
;
367 offset
= next_offset
) {
368 uint16_t reloc_offset
=
369 *(uint16_t *) &filter_bytecode
->bc
.data
[offset
];
371 (const char *) &filter_bytecode
->bc
.data
[offset
+ sizeof(uint16_t)];
373 ret
= apply_reloc(event
, runtime
, runtime
->len
, reloc_offset
, name
);
377 next_offset
= offset
+ sizeof(uint16_t) + strlen(name
) + 1;
379 /* Validate bytecode */
380 ret
= lttng_filter_validate_bytecode(runtime
);
384 /* Specialize bytecode */
385 ret
= lttng_filter_specialize_bytecode(runtime
);
389 runtime
->p
.filter
= lttng_filter_interpret_bytecode
;
390 runtime
->p
.link_failed
= 0;
391 list_add_rcu(&runtime
->p
.node
, insert_loc
);
392 dbg_printk("Linking successful.\n");
396 runtime
->p
.filter
= lttng_filter_false
;
397 runtime
->p
.link_failed
= 1;
398 list_add_rcu(&runtime
->p
.node
, insert_loc
);
400 dbg_printk("Linking failed.\n");
404 void lttng_filter_sync_state(struct lttng_bytecode_runtime
*runtime
)
406 struct lttng_filter_bytecode_node
*bc
= runtime
->bc
;
408 if (!bc
->enabler
->enabled
|| runtime
->link_failed
)
409 runtime
->filter
= lttng_filter_false
;
411 runtime
->filter
= lttng_filter_interpret_bytecode
;
415 * Link bytecode for all enablers referenced by an event.
417 void lttng_enabler_event_link_bytecode(struct lttng_event
*event
,
418 struct lttng_enabler
*enabler
)
420 struct lttng_filter_bytecode_node
*bc
;
421 struct lttng_bytecode_runtime
*runtime
;
423 /* Can only be called for events with desc attached */
424 WARN_ON_ONCE(!event
->desc
);
426 /* Link each bytecode. */
427 list_for_each_entry(bc
, &enabler
->filter_bytecode_head
, node
) {
429 struct list_head
*insert_loc
;
431 list_for_each_entry(runtime
,
432 &event
->bytecode_runtime_head
, node
) {
433 if (runtime
->bc
== bc
) {
438 /* Skip bytecode already linked */
443 * Insert at specified priority (seqnum) in increasing
446 list_for_each_entry_reverse(runtime
,
447 &event
->bytecode_runtime_head
, node
) {
448 if (runtime
->bc
->bc
.seqnum
< bc
->bc
.seqnum
) {
450 insert_loc
= &runtime
->node
;
454 /* Add to head to list */
455 insert_loc
= &event
->bytecode_runtime_head
;
457 dbg_printk("linking bytecode\n");
458 ret
= _lttng_filter_event_link_bytecode(event
, bc
,
461 dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
467 * We own the filter_bytecode if we return success.
469 int lttng_filter_enabler_attach_bytecode(struct lttng_enabler
*enabler
,
470 struct lttng_filter_bytecode_node
*filter_bytecode
)
472 list_add(&filter_bytecode
->node
, &enabler
->filter_bytecode_head
);
476 void lttng_free_enabler_filter_bytecode(struct lttng_enabler
*enabler
)
478 struct lttng_filter_bytecode_node
*filter_bytecode
, *tmp
;
480 list_for_each_entry_safe(filter_bytecode
, tmp
,
481 &enabler
->filter_bytecode_head
, node
) {
482 kfree(filter_bytecode
);
486 void lttng_free_event_filter_runtime(struct lttng_event
*event
)
488 struct bytecode_runtime
*runtime
, *tmp
;
490 list_for_each_entry_safe(runtime
, tmp
,
491 &event
->bytecode_runtime_head
, p
.node
) {