/*
 * lttng-filter.c
 *
 * LTTng modules filter code.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/slab.h>

#include <lttng-filter.h>

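/*
 * Table mapping each filter bytecode opcode to a printable name, used by
 * lttng_filter_print_op() for debugging output. Entries are indexed by
 * enum filter_op.
 */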
static const char *opnames[] = {
	[ FILTER_OP_UNKNOWN ] = "UNKNOWN",

	[ FILTER_OP_RETURN ] = "RETURN",

	/* binary */
	[ FILTER_OP_MUL ] = "MUL",
	[ FILTER_OP_DIV ] = "DIV",
	[ FILTER_OP_MOD ] = "MOD",
	[ FILTER_OP_PLUS ] = "PLUS",
	[ FILTER_OP_MINUS ] = "MINUS",
	[ FILTER_OP_RSHIFT ] = "RSHIFT",
	[ FILTER_OP_LSHIFT ] = "LSHIFT",
	[ FILTER_OP_BIT_AND ] = "BIT_AND",
	[ FILTER_OP_BIT_OR ] = "BIT_OR",
	[ FILTER_OP_BIT_XOR ] = "BIT_XOR",

	/* binary comparators */
	[ FILTER_OP_EQ ] = "EQ",
	[ FILTER_OP_NE ] = "NE",
	[ FILTER_OP_GT ] = "GT",
	[ FILTER_OP_LT ] = "LT",
	[ FILTER_OP_GE ] = "GE",
	[ FILTER_OP_LE ] = "LE",

	/* string binary comparators */
	[ FILTER_OP_EQ_STRING ] = "EQ_STRING",
	[ FILTER_OP_NE_STRING ] = "NE_STRING",
	[ FILTER_OP_GT_STRING ] = "GT_STRING",
	[ FILTER_OP_LT_STRING ] = "LT_STRING",
	[ FILTER_OP_GE_STRING ] = "GE_STRING",
	[ FILTER_OP_LE_STRING ] = "LE_STRING",

	/* s64 binary comparators */
	[ FILTER_OP_EQ_S64 ] = "EQ_S64",
	[ FILTER_OP_NE_S64 ] = "NE_S64",
	[ FILTER_OP_GT_S64 ] = "GT_S64",
	[ FILTER_OP_LT_S64 ] = "LT_S64",
	[ FILTER_OP_GE_S64 ] = "GE_S64",
	[ FILTER_OP_LE_S64 ] = "LE_S64",

	/* double binary comparators */
	[ FILTER_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
	[ FILTER_OP_NE_DOUBLE ] = "NE_DOUBLE",
	[ FILTER_OP_GT_DOUBLE ] = "GT_DOUBLE",
	[ FILTER_OP_LT_DOUBLE ] = "LT_DOUBLE",
	[ FILTER_OP_GE_DOUBLE ] = "GE_DOUBLE",
	[ FILTER_OP_LE_DOUBLE ] = "LE_DOUBLE",

	/* Mixed S64-double binary comparators */
	[ FILTER_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
	[ FILTER_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
	[ FILTER_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
	[ FILTER_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
	[ FILTER_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
	[ FILTER_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",

	[ FILTER_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
	[ FILTER_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
	[ FILTER_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
	[ FILTER_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
	[ FILTER_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
	[ FILTER_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",

	/* unary */
	[ FILTER_OP_UNARY_PLUS ] = "UNARY_PLUS",
	[ FILTER_OP_UNARY_MINUS ] = "UNARY_MINUS",
	[ FILTER_OP_UNARY_NOT ] = "UNARY_NOT",
	[ FILTER_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
	[ FILTER_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
	[ FILTER_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
	[ FILTER_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
	[ FILTER_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
	[ FILTER_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",

	/* logical */
	[ FILTER_OP_AND ] = "AND",
	[ FILTER_OP_OR ] = "OR",

	/* load field ref */
	[ FILTER_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
	[ FILTER_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
	[ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
	[ FILTER_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
	[ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",

	/* load from immediate operand */
	[ FILTER_OP_LOAD_STRING ] = "LOAD_STRING",
	[ FILTER_OP_LOAD_S64 ] = "LOAD_S64",
	[ FILTER_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",

	/* cast */
	[ FILTER_OP_CAST_TO_S64 ] = "CAST_TO_S64",
	[ FILTER_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
	[ FILTER_OP_CAST_NOP ] = "CAST_NOP",

	/* get context ref */
	[ FILTER_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
	[ FILTER_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
	[ FILTER_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
	[ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",

	/* load userspace field ref */
	[ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
	[ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",

	/* load star globbing pattern (literal string) from immediate operand */
	[ FILTER_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",

	/* star globbing pattern binary comparators, applied to string operands */
	[ FILTER_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
	[ FILTER_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",

	/*
	 * Instructions for recursive traversal through composed types.
	 */
	[ FILTER_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
	[ FILTER_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
	[ FILTER_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",

	[ FILTER_OP_GET_SYMBOL ] = "GET_SYMBOL",
	[ FILTER_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
	[ FILTER_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
	[ FILTER_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",

	[ FILTER_OP_LOAD_FIELD ] = "LOAD_FIELD",
	[ FILTER_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
	[ FILTER_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
	[ FILTER_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
	[ FILTER_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
	[ FILTER_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
	[ FILTER_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
	[ FILTER_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
	[ FILTER_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
	[ FILTER_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
	[ FILTER_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
	[ FILTER_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",
};

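/*
 * Return the printable name of a filter opcode, or "UNKNOWN" for values
 * outside the known opcode range. E.g. lttng_filter_print_op(FILTER_OP_EQ_S64)
 * returns "EQ_S64".
 */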
const char *lttng_filter_print_op(enum filter_op op)
{
	if (op >= NR_FILTER_OPS)
		return "UNKNOWN";
	else
		return opnames[op];
}

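/*
 * Relocate a FILTER_OP_LOAD_FIELD_REF instruction against an event payload
 * field: walk the event descriptor's field list to compute the offset of
 * @field_name within the filter stack data passed to the interpreter,
 * specialize the opcode according to the field type (s64, string, sequence,
 * kernel or user-space variant), and store the 16-bit offset in the
 * instruction's field_ref operand.
 */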
static
int apply_field_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *field_name,
		enum filter_op filter_op)
{
	const struct lttng_event_desc *desc;
	const struct lttng_event_field *fields, *field = NULL;
	unsigned int nr_fields, i;
	struct load_op *op;
	uint32_t field_offset = 0;

	dbg_printk("Apply field reloc: %u %s\n", reloc_offset, field_name);

	/* Look up the field by name in the event descriptor. */
	desc = event->desc;
	if (!desc)
		return -EINVAL;
	fields = desc->fields;
	if (!fields)
		return -EINVAL;
	nr_fields = desc->nr_fields;
	for (i = 0; i < nr_fields; i++) {
		if (!strcmp(fields[i].name, field_name)) {
			field = &fields[i];
			break;
		}
		/* compute field offset */
		switch (fields[i].type.atype) {
		case atype_integer:
		case atype_enum:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_sequence:
		case atype_array_bitfield:
		case atype_sequence_bitfield:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		case atype_struct:		/* Unsupported. */
		case atype_array_compound:	/* Unsupported. */
		case atype_sequence_compound:	/* Unsupported. */
		case atype_variant:		/* Unsupported. */
		default:
			return -EINVAL;
		}
	}
	if (!field)
		return -EINVAL;

	/* Check if field offset is too large for 16-bit offset */
	if (field_offset > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* set type */
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (filter_op) {
	case FILTER_OP_LOAD_FIELD_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum:
			op->op = FILTER_OP_LOAD_FIELD_REF_S64;
			break;
		case atype_array:
		case atype_sequence:
			if (field->user)
				op->op = FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE;
			else
				op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
			break;
		case atype_string:
			if (field->user)
				op->op = FILTER_OP_LOAD_FIELD_REF_USER_STRING;
			else
				op->op = FILTER_OP_LOAD_FIELD_REF_STRING;
			break;
		case atype_struct:		/* Unsupported. */
		case atype_array_compound:	/* Unsupported. */
		case atype_sequence_compound:	/* Unsupported. */
		case atype_variant:		/* Unsupported. */
		case atype_array_bitfield:	/* Unsupported. */
		case atype_sequence_bitfield:	/* Unsupported. */
		default:
			return -EINVAL;
		}
		/* set offset */
		field_ref->offset = (uint16_t) field_offset;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

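/*
 * Relocate a FILTER_OP_GET_CONTEXT_REF instruction: look up the context
 * field by name in the global lttng_static_ctx array, specialize the opcode
 * according to the context field type, and store the context index as the
 * instruction's 16-bit offset so the interpreter can fetch the context
 * value at runtime.
 */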
static
int apply_context_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *context_name,
		enum filter_op filter_op)
{
	struct load_op *op;
	struct lttng_ctx_field *ctx_field;
	int idx;

	dbg_printk("Apply context reloc: %u %s\n", reloc_offset, context_name);

	/* Get context index */
	idx = lttng_get_context_index(lttng_static_ctx, context_name);
	if (idx < 0)
		return -ENOENT;

	/* Check if idx is too large for 16-bit offset */
	if (idx > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* Get context return type */
	ctx_field = &lttng_static_ctx->fields[idx];
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (filter_op) {
	case FILTER_OP_GET_CONTEXT_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (ctx_field->event_field.type.atype) {
		case atype_integer:
		case atype_enum:
			op->op = FILTER_OP_GET_CONTEXT_REF_S64;
			break;
		/* Sequence and array supported as string */
		case atype_string:
		case atype_array:
		case atype_sequence:
			BUG_ON(ctx_field->event_field.user);
			op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
			break;
		case atype_struct:		/* Unsupported. */
		case atype_array_compound:	/* Unsupported. */
		case atype_sequence_compound:	/* Unsupported. */
		case atype_variant:		/* Unsupported. */
		case atype_array_bitfield:	/* Unsupported. */
		case atype_sequence_bitfield:	/* Unsupported. */
		default:
			return -EINVAL;
		}
		/* set offset to context index within channel contexts */
		field_ref->offset = (uint16_t) idx;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

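/*
 * Apply a single relocation entry: dispatch on the opcode found at
 * @reloc_offset in the copied bytecode. Field and context references are
 * patched here; symbol lookups are left untouched since they are resolved
 * by the specialization phase or dynamically by the interpreter.
 */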
static
int apply_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *name)
{
	struct load_op *op;

	dbg_printk("Apply reloc: %u %s\n", reloc_offset, name);

	/* Ensure that the reloc is within the code */
	if (runtime_len - reloc_offset < sizeof(uint16_t))
		return -EINVAL;

	op = (struct load_op *) &runtime->code[reloc_offset];
	switch (op->op) {
	case FILTER_OP_LOAD_FIELD_REF:
		return apply_field_reloc(event, runtime, runtime_len,
				reloc_offset, name, op->op);
	case FILTER_OP_GET_CONTEXT_REF:
		return apply_context_reloc(event, runtime, runtime_len,
				reloc_offset, name, op->op);
	case FILTER_OP_GET_SYMBOL:
	case FILTER_OP_GET_SYMBOL_FIELD:
		/*
		 * Will be handled by load specialize phase or
		 * dynamically by interpreter.
		 */
		return 0;
	default:
		printk(KERN_WARNING "Unknown reloc op type %u\n", op->op);
		return -EINVAL;
	}
	return 0;
}

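/*
 * Return 1 if this filter bytecode node is already linked to the event
 * (i.e. a runtime referencing it is present in the event's runtime list),
 * 0 otherwise.
 */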
static
int bytecode_is_linked(struct lttng_filter_bytecode_node *filter_bytecode,
		struct lttng_event *event)
{
	struct lttng_bytecode_runtime *bc_runtime;

	list_for_each_entry(bc_runtime,
			&event->bytecode_runtime_head, node) {
		if (bc_runtime->bc == filter_bytecode)
			return 1;
	}
	return 0;
}

/*
 * Take a bytecode with reloc table and link it to an event to create a
 * bytecode runtime.
 */
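/*
 * The reloc table follows the code at bc.reloc_offset and is a sequence of
 * entries, each laid out as consumed by the loop below:
 *
 *   uint16_t reloc_offset;  offset of the instruction to patch, within code
 *   char name[];            null-terminated field or context name
 */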
static
int _lttng_filter_event_link_bytecode(struct lttng_event *event,
		struct lttng_filter_bytecode_node *filter_bytecode,
		struct list_head *insert_loc)
{
	int ret, offset, next_offset;
	struct bytecode_runtime *runtime = NULL;
	size_t runtime_alloc_len;

	if (!filter_bytecode)
		return 0;
	/* Bytecode already linked */
	if (bytecode_is_linked(filter_bytecode, event))
		return 0;

	dbg_printk("Linking...\n");

	/* We don't need the reloc table in the runtime */
	runtime_alloc_len = sizeof(*runtime) + filter_bytecode->bc.reloc_offset;
	runtime = kzalloc(runtime_alloc_len, GFP_KERNEL);
	if (!runtime) {
		ret = -ENOMEM;
		goto alloc_error;
	}
	runtime->p.bc = filter_bytecode;
	runtime->p.event = event;
	runtime->len = filter_bytecode->bc.reloc_offset;
	/* copy original bytecode */
	memcpy(runtime->code, filter_bytecode->bc.data, runtime->len);
	/*
	 * apply relocs. Those are a uint16_t (offset in bytecode)
	 * followed by a string (field name).
	 */
	for (offset = filter_bytecode->bc.reloc_offset;
			offset < filter_bytecode->bc.len;
			offset = next_offset) {
		uint16_t reloc_offset =
			*(uint16_t *) &filter_bytecode->bc.data[offset];
		const char *name =
			(const char *) &filter_bytecode->bc.data[offset + sizeof(uint16_t)];

		ret = apply_reloc(event, runtime, runtime->len, reloc_offset, name);
		if (ret) {
			goto link_error;
		}
		next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
	}
	/* Validate bytecode */
	ret = lttng_filter_validate_bytecode(runtime);
	if (ret) {
		goto link_error;
	}
	/* Specialize bytecode */
	ret = lttng_filter_specialize_bytecode(event, runtime);
	if (ret) {
		goto link_error;
	}
	runtime->p.filter = lttng_filter_interpret_bytecode;
	runtime->p.link_failed = 0;
	list_add_rcu(&runtime->p.node, insert_loc);
	dbg_printk("Linking successful.\n");
	return 0;

link_error:
	runtime->p.filter = lttng_filter_false;
	runtime->p.link_failed = 1;
	list_add_rcu(&runtime->p.node, insert_loc);
alloc_error:
	dbg_printk("Linking failed.\n");
	return ret;
}

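/*
 * Synchronize the runtime filter callback with the enabler state: use
 * lttng_filter_false (always-false filter) when the enabler is disabled or
 * the bytecode failed to link, and the bytecode interpreter otherwise.
 */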
void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime)
{
	struct lttng_filter_bytecode_node *bc = runtime->bc;

	if (!bc->enabler->enabled || runtime->link_failed)
		runtime->filter = lttng_filter_false;
	else
		runtime->filter = lttng_filter_interpret_bytecode;
}

/*
 * Link bytecode for all enablers referenced by an event.
 */
void lttng_enabler_event_link_bytecode(struct lttng_event *event,
		struct lttng_enabler *enabler)
{
	struct lttng_filter_bytecode_node *bc;
	struct lttng_bytecode_runtime *runtime;

	/* Can only be called for events with desc attached */
	WARN_ON_ONCE(!event->desc);

	/* Link each bytecode. */
	list_for_each_entry(bc, &enabler->filter_bytecode_head, node) {
		int found = 0, ret;
		struct list_head *insert_loc;

		list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node) {
			if (runtime->bc == bc) {
				found = 1;
				break;
			}
		}
		/* Skip bytecode already linked */
		if (found)
			continue;

		/*
		 * Insert at specified priority (seqnum) in increasing
		 * order.
		 */
		list_for_each_entry_reverse(runtime,
				&event->bytecode_runtime_head, node) {
			if (runtime->bc->bc.seqnum < bc->bc.seqnum) {
				/* insert here */
				insert_loc = &runtime->node;
				goto add_within;
			}
		}
		/* Add to head of list */
		insert_loc = &event->bytecode_runtime_head;
	add_within:
		dbg_printk("linking bytecode\n");
		ret = _lttng_filter_event_link_bytecode(event, bc,
				insert_loc);
		if (ret) {
			dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
		}
	}
}

/*
 * We own the filter_bytecode if we return success.
 */
int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
		struct lttng_filter_bytecode_node *filter_bytecode)
{
	list_add(&filter_bytecode->node, &enabler->filter_bytecode_head);
	return 0;
}

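/*
 * Free all filter bytecode nodes attached to an enabler. Entries are not
 * removed from the list first, so this is only meant to be called when the
 * enabler itself is being torn down.
 */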
void lttng_free_enabler_filter_bytecode(struct lttng_enabler *enabler)
{
	struct lttng_filter_bytecode_node *filter_bytecode, *tmp;

	list_for_each_entry_safe(filter_bytecode, tmp,
			&enabler->filter_bytecode_head, node) {
		kfree(filter_bytecode);
	}
}

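/*
 * Free the bytecode runtimes linked to an event, including each runtime's
 * auxiliary data buffer (runtime->data). The filter bytecode nodes pointed
 * to by the runtimes belong to their enablers and are freed separately by
 * lttng_free_enabler_filter_bytecode().
 */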
void lttng_free_event_filter_runtime(struct lttng_event *event)
{
	struct bytecode_runtime *runtime, *tmp;

	list_for_each_entry_safe(runtime, tmp,
			&event->bytecode_runtime_head, p.node) {
		kfree(runtime->data);
		kfree(runtime);
	}
}