bytecode: generalize `struct lttng_filter_bytecode_node`
/* SPDX-License-Identifier: MIT
 *
 * lttng-filter.c
 *
 * LTTng modules filter code.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/list.h>
#include <linux/slab.h>

#include <lttng/filter.h>

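/*
 * Human-readable opcode names, indexed by enum filter_op. Used by
 * lttng_filter_print_op() for debugging output.
 */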
static const char *opnames[] = {
	[ FILTER_OP_UNKNOWN ] = "UNKNOWN",

	[ FILTER_OP_RETURN ] = "RETURN",

	/* binary */
	[ FILTER_OP_MUL ] = "MUL",
	[ FILTER_OP_DIV ] = "DIV",
	[ FILTER_OP_MOD ] = "MOD",
	[ FILTER_OP_PLUS ] = "PLUS",
	[ FILTER_OP_MINUS ] = "MINUS",
	[ FILTER_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
	[ FILTER_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
	[ FILTER_OP_BIT_AND ] = "BIT_AND",
	[ FILTER_OP_BIT_OR ] = "BIT_OR",
	[ FILTER_OP_BIT_XOR ] = "BIT_XOR",

	/* binary comparators */
	[ FILTER_OP_EQ ] = "EQ",
	[ FILTER_OP_NE ] = "NE",
	[ FILTER_OP_GT ] = "GT",
	[ FILTER_OP_LT ] = "LT",
	[ FILTER_OP_GE ] = "GE",
	[ FILTER_OP_LE ] = "LE",

	/* string binary comparators */
	[ FILTER_OP_EQ_STRING ] = "EQ_STRING",
	[ FILTER_OP_NE_STRING ] = "NE_STRING",
	[ FILTER_OP_GT_STRING ] = "GT_STRING",
	[ FILTER_OP_LT_STRING ] = "LT_STRING",
	[ FILTER_OP_GE_STRING ] = "GE_STRING",
	[ FILTER_OP_LE_STRING ] = "LE_STRING",

	/* s64 binary comparators */
	[ FILTER_OP_EQ_S64 ] = "EQ_S64",
	[ FILTER_OP_NE_S64 ] = "NE_S64",
	[ FILTER_OP_GT_S64 ] = "GT_S64",
	[ FILTER_OP_LT_S64 ] = "LT_S64",
	[ FILTER_OP_GE_S64 ] = "GE_S64",
	[ FILTER_OP_LE_S64 ] = "LE_S64",

	/* double binary comparators */
	[ FILTER_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
	[ FILTER_OP_NE_DOUBLE ] = "NE_DOUBLE",
	[ FILTER_OP_GT_DOUBLE ] = "GT_DOUBLE",
	[ FILTER_OP_LT_DOUBLE ] = "LT_DOUBLE",
	[ FILTER_OP_GE_DOUBLE ] = "GE_DOUBLE",
	[ FILTER_OP_LE_DOUBLE ] = "LE_DOUBLE",

	/* Mixed S64-double binary comparators */
	[ FILTER_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
	[ FILTER_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
	[ FILTER_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
	[ FILTER_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
	[ FILTER_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
	[ FILTER_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",

	[ FILTER_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
	[ FILTER_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
	[ FILTER_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
	[ FILTER_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
	[ FILTER_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
	[ FILTER_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",

	/* unary */
	[ FILTER_OP_UNARY_PLUS ] = "UNARY_PLUS",
	[ FILTER_OP_UNARY_MINUS ] = "UNARY_MINUS",
	[ FILTER_OP_UNARY_NOT ] = "UNARY_NOT",
	[ FILTER_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
	[ FILTER_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
	[ FILTER_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
	[ FILTER_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
	[ FILTER_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
	[ FILTER_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",

	/* logical */
	[ FILTER_OP_AND ] = "AND",
	[ FILTER_OP_OR ] = "OR",

	/* load field ref */
	[ FILTER_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
	[ FILTER_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
	[ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
	[ FILTER_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
	[ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",

	/* load from immediate operand */
	[ FILTER_OP_LOAD_STRING ] = "LOAD_STRING",
	[ FILTER_OP_LOAD_S64 ] = "LOAD_S64",
	[ FILTER_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",

	/* cast */
	[ FILTER_OP_CAST_TO_S64 ] = "CAST_TO_S64",
	[ FILTER_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
	[ FILTER_OP_CAST_NOP ] = "CAST_NOP",

	/* get context ref */
	[ FILTER_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
	[ FILTER_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
	[ FILTER_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
	[ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",

	/* load userspace field ref */
	[ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
	[ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",

	/*
	 * load star globbing pattern (literal string) from immediate
	 * operand.
	 */
	[ FILTER_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",

	/* globbing pattern binary comparators: match a string against a star-glob pattern */
	[ FILTER_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
	[ FILTER_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",

	/*
	 * Instructions for recursive traversal through composed types.
	 */
	[ FILTER_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
	[ FILTER_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
	[ FILTER_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",

	[ FILTER_OP_GET_SYMBOL ] = "GET_SYMBOL",
	[ FILTER_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
	[ FILTER_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
	[ FILTER_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",

	[ FILTER_OP_LOAD_FIELD ] = "LOAD_FIELD",
	[ FILTER_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
	[ FILTER_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
	[ FILTER_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
	[ FILTER_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
	[ FILTER_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
	[ FILTER_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
	[ FILTER_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
	[ FILTER_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
	[ FILTER_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
	[ FILTER_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
	[ FILTER_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",

	[ FILTER_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",

	[ FILTER_OP_RETURN_S64 ] = "RETURN_S64",
};

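/*
 * Return the printable name of a bytecode operation, or "UNKNOWN" for
 * out-of-range opcodes.
 */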
const char *lttng_filter_print_op(enum filter_op op)
{
	if (op >= NR_FILTER_OPS)
		return "UNKNOWN";
	else
		return opnames[op];
}

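/*
 * Patch a load instruction referring to an event payload field: compute
 * the field's offset within the event payload, select the specialized
 * load opcode for the field type, and write both into the runtime
 * bytecode.
 */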
static
int apply_field_reloc(const struct lttng_event_desc *event_desc,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *field_name,
		enum filter_op filter_op)
{
	const struct lttng_event_field *fields, *field = NULL;
	unsigned int nr_fields, i;
	struct load_op *op;
	uint32_t field_offset = 0;

	dbg_printk("Apply field reloc: %u %s\n", reloc_offset, field_name);

	/* Lookup event payload field by name */
	if (!event_desc)
		return -EINVAL;
	fields = event_desc->fields;
	if (!fields)
		return -EINVAL;
	nr_fields = event_desc->nr_fields;
	for (i = 0; i < nr_fields; i++) {
		if (fields[i].nofilter)
			continue;
		if (!strcmp(fields[i].name, field_name)) {
			field = &fields[i];
			break;
		}
		/* compute field offset */
		switch (fields[i].type.atype) {
		case atype_integer:
		case atype_enum_nestable:
			field_offset += sizeof(int64_t);
			break;
		case atype_array_nestable:
			if (!lttng_is_bytewise_integer(fields[i].type.u.array_nestable.elem_type))
				return -EINVAL;
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_sequence_nestable:
			if (!lttng_is_bytewise_integer(fields[i].type.u.sequence_nestable.elem_type))
				return -EINVAL;
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		case atype_struct_nestable:	/* Unsupported. */
		case atype_variant_nestable:	/* Unsupported. */
		default:
			return -EINVAL;
		}
	}
	if (!field)
		return -EINVAL;

	/* Check if field offset is too large for 16-bit offset */
	if (field_offset > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* set type */
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (filter_op) {
	case FILTER_OP_LOAD_FIELD_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum_nestable:
			op->op = FILTER_OP_LOAD_FIELD_REF_S64;
			break;
		case atype_array_nestable:
		case atype_sequence_nestable:
			if (field->user)
				op->op = FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE;
			else
				op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
			break;
		case atype_string:
			if (field->user)
				op->op = FILTER_OP_LOAD_FIELD_REF_USER_STRING;
			else
				op->op = FILTER_OP_LOAD_FIELD_REF_STRING;
			break;
		case atype_struct_nestable:	/* Unsupported. */
		case atype_variant_nestable:	/* Unsupported. */
		default:
			return -EINVAL;
		}
		/* set offset */
		field_ref->offset = (uint16_t) field_offset;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

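/*
 * Patch a load instruction referring to a context field: resolve the
 * context index by name, select the specialized opcode for the context
 * field type, and store the index as the reference offset.
 */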
static
int apply_context_reloc(struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *context_name,
		enum filter_op filter_op)
{
	struct load_op *op;
	struct lttng_ctx_field *ctx_field;
	int idx;

	dbg_printk("Apply context reloc: %u %s\n", reloc_offset, context_name);

	/* Get context index */
	idx = lttng_get_context_index(lttng_static_ctx, context_name);
	if (idx < 0)
		return -ENOENT;

	/* Check if idx is too large for 16-bit offset */
	if (idx > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* Get context return type */
	ctx_field = &lttng_static_ctx->fields[idx];
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (filter_op) {
	case FILTER_OP_GET_CONTEXT_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (ctx_field->event_field.type.atype) {
		case atype_integer:
		case atype_enum_nestable:
			op->op = FILTER_OP_GET_CONTEXT_REF_S64;
			break;
		/* Sequence and array supported as string */
		case atype_string:
			BUG_ON(ctx_field->event_field.user);
			op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
			break;
		case atype_array_nestable:
			if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.array_nestable.elem_type))
				return -EINVAL;
			BUG_ON(ctx_field->event_field.user);
			op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
			break;
		case atype_sequence_nestable:
			if (!lttng_is_bytewise_integer(ctx_field->event_field.type.u.sequence_nestable.elem_type))
				return -EINVAL;
			BUG_ON(ctx_field->event_field.user);
			op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
			break;
		case atype_struct_nestable:	/* Unsupported. */
		case atype_variant_nestable:	/* Unsupported. */
		default:
			return -EINVAL;
		}
		/* set offset to context index within channel contexts */
		field_ref->offset = (uint16_t) idx;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

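/*
 * Apply a single relocation: dispatch on the opcode found at the
 * relocation offset. Symbol-based lookups are left as-is and resolved
 * later by the specialize phase or the interpreter.
 */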
static
int apply_reloc(const struct lttng_event_desc *event_desc,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *name)
{
	struct load_op *op;

	dbg_printk("Apply reloc: %u %s\n", reloc_offset, name);

	/* Ensure that the reloc is within the code */
	if (runtime_len - reloc_offset < sizeof(uint16_t))
		return -EINVAL;

	op = (struct load_op *) &runtime->code[reloc_offset];
	switch (op->op) {
	case FILTER_OP_LOAD_FIELD_REF:
		return apply_field_reloc(event_desc, runtime, runtime_len,
				reloc_offset, name, op->op);
	case FILTER_OP_GET_CONTEXT_REF:
		return apply_context_reloc(runtime, runtime_len,
				reloc_offset, name, op->op);
	case FILTER_OP_GET_SYMBOL:
	case FILTER_OP_GET_SYMBOL_FIELD:
		/*
		 * Will be handled by load specialize phase or
		 * dynamically by interpreter.
		 */
		return 0;
	default:
		printk(KERN_WARNING "LTTng: filter: Unknown reloc op type %u\n", op->op);
		return -EINVAL;
	}
	return 0;
}

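/*
 * Return 1 if the bytecode node is already linked into the given
 * runtime list, 0 otherwise.
 */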
static
int bytecode_is_linked(struct lttng_bytecode_node *bytecode,
		struct list_head *bytecode_runtime_head)
{
	struct lttng_bytecode_runtime *bc_runtime;

	list_for_each_entry(bc_runtime, bytecode_runtime_head, node) {
		if (bc_runtime->bc == bytecode)
			return 1;
	}
	return 0;
}

/*
 * Take a bytecode with reloc table and link it to an event to create a
 * bytecode runtime.
 */
static
int _lttng_filter_link_bytecode(const struct lttng_event_desc *event_desc,
		struct lttng_ctx *ctx,
		struct lttng_bytecode_node *bytecode,
		struct list_head *insert_loc)
{
	int ret, offset, next_offset;
	struct bytecode_runtime *runtime = NULL;
	size_t runtime_alloc_len;

	if (!bytecode)
		return 0;
	/* Bytecode already linked */
	if (bytecode_is_linked(bytecode, insert_loc))
		return 0;

	dbg_printk("Linking...\n");

	/* We don't need the reloc table in the runtime */
	runtime_alloc_len = sizeof(*runtime) + bytecode->bc.reloc_offset;
	runtime = kzalloc(runtime_alloc_len, GFP_KERNEL);
	if (!runtime) {
		ret = -ENOMEM;
		goto alloc_error;
	}
	runtime->p.bc = bytecode;
	runtime->p.ctx = ctx;
	runtime->len = bytecode->bc.reloc_offset;
	/* copy original bytecode */
	memcpy(runtime->code, bytecode->bc.data, runtime->len);
	/*
	 * apply relocs. Those are a uint16_t (offset in bytecode)
	 * followed by a string (field name).
	 */
	for (offset = bytecode->bc.reloc_offset;
			offset < bytecode->bc.len;
			offset = next_offset) {
		uint16_t reloc_offset =
			*(uint16_t *) &bytecode->bc.data[offset];
		const char *name =
			(const char *) &bytecode->bc.data[offset + sizeof(uint16_t)];

		ret = apply_reloc(event_desc, runtime, runtime->len, reloc_offset, name);
		if (ret) {
			goto link_error;
		}
		next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
	}
	/* Validate bytecode */
	ret = lttng_filter_validate_bytecode(runtime);
	if (ret) {
		goto link_error;
	}
	/* Specialize bytecode */
	ret = lttng_filter_specialize_bytecode(event_desc, runtime);
	if (ret) {
		goto link_error;
	}
	runtime->p.filter = lttng_filter_interpret_bytecode;
	runtime->p.link_failed = 0;
	list_add_rcu(&runtime->p.node, insert_loc);
	dbg_printk("Linking successful.\n");
	return 0;

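/*
 * On linking error, the runtime is still inserted, but its filter
 * function is set to the "false" interpreter and link_failed is set,
 * so the filter never matches (see lttng_filter_sync_state()).
 */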
link_error:
	runtime->p.filter = lttng_filter_interpret_bytecode_false;
	runtime->p.link_failed = 1;
	list_add_rcu(&runtime->p.node, insert_loc);
alloc_error:
	dbg_printk("Linking failed.\n");
	return ret;
}

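/*
 * Sync the runtime filter function with the enabler state: use the real
 * interpreter only when the enabler is enabled and linking succeeded,
 * otherwise fall back to the "false" interpreter.
 */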
void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime)
{
	struct lttng_bytecode_node *bc = runtime->bc;

	if (!bc->enabler->enabled || runtime->link_failed)
		runtime->filter = lttng_filter_interpret_bytecode_false;
	else
		runtime->filter = lttng_filter_interpret_bytecode;
}

/*
 * Link bytecode for all enablers referenced by an event.
 */
void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc,
		struct lttng_ctx *ctx,
		struct list_head *bytecode_runtime_head,
		struct lttng_enabler *enabler)
{
	struct lttng_bytecode_node *bc;
	struct lttng_bytecode_runtime *runtime;

	/* Can only be called for events with desc attached */
	WARN_ON_ONCE(!event_desc);

	/* Link each bytecode. */
	list_for_each_entry(bc, &enabler->filter_bytecode_head, node) {
		int found = 0, ret;
		struct list_head *insert_loc;

		list_for_each_entry(runtime,
				bytecode_runtime_head, node) {
			if (runtime->bc == bc) {
				found = 1;
				break;
			}
		}
		/* Skip bytecode already linked */
		if (found)
			continue;

		/*
		 * Insert at specified priority (seqnum) in increasing
		 * order. If there already is a bytecode of the same priority,
		 * insert the new bytecode right after it.
		 */
		list_for_each_entry_reverse(runtime,
				bytecode_runtime_head, node) {
			if (runtime->bc->bc.seqnum <= bc->bc.seqnum) {
				/* insert here */
				insert_loc = &runtime->node;
				goto add_within;
			}
		}
		/* Add to head of list */
		insert_loc = bytecode_runtime_head;
	add_within:
		dbg_printk("linking bytecode\n");
		ret = _lttng_filter_link_bytecode(event_desc, ctx, bc,
				insert_loc);
		if (ret) {
			dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
		}
	}
}

/*
 * We own the filter_bytecode if we return success.
 */
int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
		struct lttng_bytecode_node *filter_bytecode)
{
	list_add(&filter_bytecode->node, &enabler->filter_bytecode_head);
	return 0;
}

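/*
 * Free all filter bytecode nodes attached to an enabler.
 */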
void lttng_free_enabler_filter_bytecode(struct lttng_enabler *enabler)
{
	struct lttng_bytecode_node *filter_bytecode, *tmp;

	list_for_each_entry_safe(filter_bytecode, tmp,
			&enabler->filter_bytecode_head, node) {
		kfree(filter_bytecode);
	}
}

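/*
 * Free the filter bytecode runtimes linked to an event, along with
 * their runtime data.
 */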
void lttng_free_event_filter_runtime(struct lttng_event *event)
{
	struct bytecode_runtime *runtime, *tmp;

	list_for_each_entry_safe(runtime, tmp,
			&event->filter_bytecode_runtime_head, p.node) {
		kfree(runtime->data);
		kfree(runtime);
	}
}