lttng-modules.git: lttng-filter-interpreter.c
/* SPDX-License-Identifier: MIT
 *
 * lttng-filter-interpreter.c
 *
 * LTTng modules filter interpreter.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/uaccess.h>
#include <linux/frame.h>
#include <linux/limits.h>
#include <linux/swab.h>

#include <lttng/lttng-filter.h>
#include <lttng/lttng-string-utils.h>

STACK_FRAME_NON_STANDARD(lttng_filter_interpret_bytecode);

/*
 * get_char should be called with the page fault handler disabled if it
 * is expected to handle user-space reads.
 */
static
char get_char(struct estack_entry *reg, size_t offset)
{
	if (unlikely(offset >= reg->u.s.seq_len))
		return '\0';
	if (reg->u.s.user) {
		char c;

		/* Handle invalid access as end of string. */
		if (unlikely(!access_ok(reg->u.s.user_str + offset,
				sizeof(c))))
			return '\0';
		/* Handle fault (nonzero return value) as end of string. */
		if (unlikely(__copy_from_user_inatomic(&c,
				reg->u.s.user_str + offset,
				sizeof(c))))
			return '\0';
		return c;
	} else {
		return reg->u.s.str[offset];
	}
}
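
/*
 * Sketch of the expected calling convention (see stack_strcmp() below
 * for a real call site): potential user-space reads must be bracketed
 * by pagefault_disable()/pagefault_enable(), e.g.
 *
 *	pagefault_disable();
 *	c = get_char(reg, offset);
 *	pagefault_enable();
 */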

/*
 * -1: wildcard found.
 * -2: unknown escape char.
 * 0: normal char.
 */
static
int parse_char(struct estack_entry *reg, char *c, size_t *offset)
{
	switch (*c) {
	case '\\':
		(*offset)++;
		*c = get_char(reg, *offset);
		switch (*c) {
		case '\\':
		case '*':
			return 0;
		default:
			return -2;
		}
	case '*':
		return -1;
	default:
		return 0;
	}
}
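
/*
 * Example (sketch): within a plain string literal, "\*" is consumed as
 * a literal '*' (parse_char returns 0), a bare '*' is reported as a
 * wildcard (returns -1), and any other escaped character is an unknown
 * escape (returns -2).
 */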

static
char get_char_at_cb(size_t at, void *data)
{
	return get_char(data, at);
}

static
int stack_star_glob_match(struct estack *stack, int top, const char *cmp_type)
{
	bool has_user = false;
	mm_segment_t old_fs;
	int result;
	struct estack_entry *pattern_reg;
	struct estack_entry *candidate_reg;

	if (estack_bx(stack, top)->u.s.user
			|| estack_ax(stack, top)->u.s.user) {
		has_user = true;
		old_fs = get_fs();
		set_fs(KERNEL_DS);
		pagefault_disable();
	}

	/* Find out which side is the pattern vs. the candidate. */
	if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) {
		pattern_reg = estack_ax(stack, top);
		candidate_reg = estack_bx(stack, top);
	} else {
		pattern_reg = estack_bx(stack, top);
		candidate_reg = estack_ax(stack, top);
	}

	/* Perform the match operation. */
	result = !strutils_star_glob_match_char_cb(get_char_at_cb,
		pattern_reg, get_char_at_cb, candidate_reg);
	if (has_user) {
		pagefault_enable();
		set_fs(old_fs);
	}

	return result;
}

static
int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
{
	size_t offset_bx = 0, offset_ax = 0;
	int diff, has_user = 0;
	mm_segment_t old_fs;

	if (estack_bx(stack, top)->u.s.user
			|| estack_ax(stack, top)->u.s.user) {
		has_user = 1;
		old_fs = get_fs();
		set_fs(KERNEL_DS);
		pagefault_disable();
	}

	for (;;) {
		int ret;
		int escaped_r0 = 0;
		char char_bx, char_ax;

		char_bx = get_char(estack_bx(stack, top), offset_bx);
		char_ax = get_char(estack_ax(stack, top), offset_ax);

		if (unlikely(char_bx == '\0')) {
			if (char_ax == '\0') {
				diff = 0;
				break;
			} else {
				if (estack_ax(stack, top)->u.s.literal_type ==
						ESTACK_STRING_LITERAL_TYPE_PLAIN) {
					ret = parse_char(estack_ax(stack, top),
						&char_ax, &offset_ax);
					if (ret == -1) {
						diff = 0;
						break;
					}
				}
				diff = -1;
				break;
			}
		}
		if (unlikely(char_ax == '\0')) {
			if (estack_bx(stack, top)->u.s.literal_type ==
					ESTACK_STRING_LITERAL_TYPE_PLAIN) {
				ret = parse_char(estack_bx(stack, top),
					&char_bx, &offset_bx);
				if (ret == -1) {
					diff = 0;
					break;
				}
			}
			diff = 1;
			break;
		}
		if (estack_bx(stack, top)->u.s.literal_type ==
				ESTACK_STRING_LITERAL_TYPE_PLAIN) {
			ret = parse_char(estack_bx(stack, top),
				&char_bx, &offset_bx);
			if (ret == -1) {
				diff = 0;
				break;
			} else if (ret == -2) {
				escaped_r0 = 1;
			}
			/* else compare both chars */
		}
		if (estack_ax(stack, top)->u.s.literal_type ==
				ESTACK_STRING_LITERAL_TYPE_PLAIN) {
			ret = parse_char(estack_ax(stack, top),
				&char_ax, &offset_ax);
			if (ret == -1) {
				diff = 0;
				break;
			} else if (ret == -2) {
				if (!escaped_r0) {
					diff = -1;
					break;
				}
			} else {
				if (escaped_r0) {
					diff = 1;
					break;
				}
			}
		} else {
			if (escaped_r0) {
				diff = 1;
				break;
			}
		}
		diff = char_bx - char_ax;
		if (diff != 0)
			break;
		offset_bx++;
		offset_ax++;
	}
	if (has_user) {
		pagefault_enable();
		set_fs(old_fs);
	}
	return diff;
}
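
/*
 * Examples (sketch) of stack_strcmp() results, with bx as the left-hand
 * operand:
 *	"abc" vs "abc"       -> 0
 *	"abc" vs "abd"       -> negative
 *	"ab"  vs "abc"       -> negative (bx is a strict prefix)
 *	"abc" vs plain "ab*" -> 0 (wildcard ends the comparison early)
 */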

uint64_t lttng_filter_false(void *filter_data,
		struct lttng_probe_ctx *lttng_probe_ctx,
		const char *filter_stack_data)
{
	return 0;
}

#ifdef INTERPRETER_USE_SWITCH

/*
 * Fallback for compilers that do not support taking the address of labels.
 */

#define START_OP							\
	start_pc = &bytecode->code[0];					\
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;	\
			pc = next_pc) {					\
		dbg_printk("Executing op %s (%u)\n",			\
			lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
			(unsigned int) *(filter_opcode_t *) pc);	\
		switch (*(filter_opcode_t *) pc) {

#define OP(name)	case name

#define PO		break

#define END_OP		}						\
	}
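
/*
 * Sketch: under INTERPRETER_USE_SWITCH, an opcode handler written as
 *
 *	OP(FILTER_OP_EQ_S64):
 *	{
 *		...
 *		PO;
 *	}
 *
 * expands to a plain "case FILTER_OP_EQ_S64:" body terminated by
 * "break", inside the for/switch loop emitted by START_OP.
 */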

#else

/*
 * Dispatch-table based interpreter.
 */

#define START_OP						\
	start_pc = &bytecode->code[0];				\
	pc = next_pc = start_pc;				\
	if (unlikely(pc - start_pc >= bytecode->len))		\
		goto end;					\
	goto *dispatch[*(filter_opcode_t *) pc];

#define OP(name)						\
LABEL_##name

#define PO							\
	pc = next_pc;						\
	goto *dispatch[*(filter_opcode_t *) pc];

#define END_OP
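
/*
 * Sketch: with computed gotos, the same handler becomes a label, and PO
 * threads execution directly to the next opcode's handler:
 *
 *	LABEL_FILTER_OP_EQ_S64:
 *	{
 *		...
 *		pc = next_pc;
 *		goto *dispatch[*(filter_opcode_t *) pc];
 *	}
 */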

#endif

static int context_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
		struct load_ptr *ptr,
		uint32_t idx)
{
	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	union lttng_ctx_value v;

	ctx_field = &lttng_static_ctx->fields[idx];
	field = &ctx_field->event_field;
	ptr->type = LOAD_OBJECT;
	/* field is only used for types nested within variants. */
	ptr->field = NULL;

	switch (field->type.atype) {
	case atype_integer:
		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		if (field->type.u.integer.signedness) {
			ptr->object_type = OBJECT_TYPE_S64;
			ptr->u.s64 = v.s64;
			ptr->ptr = &ptr->u.s64;
		} else {
			ptr->object_type = OBJECT_TYPE_U64;
			ptr->u.u64 = v.s64;	/* Cast. */
			ptr->ptr = &ptr->u.u64;
		}
		break;
	case atype_enum_nestable:
	{
		const struct lttng_integer_type *itype =
			&field->type.u.enum_nestable.container_type->u.integer;

		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		if (itype->signedness) {
			ptr->object_type = OBJECT_TYPE_S64;
			ptr->u.s64 = v.s64;
			ptr->ptr = &ptr->u.s64;
		} else {
			ptr->object_type = OBJECT_TYPE_U64;
			ptr->u.u64 = v.s64;	/* Cast. */
			ptr->ptr = &ptr->u.u64;
		}
		break;
	}
	case atype_array_nestable:
		if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
			printk(KERN_WARNING "Array nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
			printk(KERN_WARNING "Only string arrays are supported for contexts.\n");
			return -EINVAL;
		}
		ptr->object_type = OBJECT_TYPE_STRING;
		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		ptr->ptr = v.str;
		break;
	case atype_sequence_nestable:
		if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
			printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
			printk(KERN_WARNING "Only string sequences are supported for contexts.\n");
			return -EINVAL;
		}
		ptr->object_type = OBJECT_TYPE_STRING;
		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		ptr->ptr = v.str;
		break;
	case atype_string:
		ptr->object_type = OBJECT_TYPE_STRING;
		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		ptr->ptr = v.str;
		break;
	case atype_struct_nestable:
		printk(KERN_WARNING "Structure type cannot be loaded.\n");
		return -EINVAL;
	case atype_variant_nestable:
		printk(KERN_WARNING "Variant type cannot be loaded.\n");
		return -EINVAL;
	default:
		printk(KERN_WARNING "Unknown type: %d\n", (int) field->type.atype);
		return -EINVAL;
	}
	return 0;
}

static int dynamic_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
		struct bytecode_runtime *runtime,
		uint64_t index, struct estack_entry *stack_top)
{
	int ret;
	const struct filter_get_index_data *gid;

	/*
	 * Types nested within variants need to perform dynamic lookup
	 * based on the field descriptions. Variants are not implemented
	 * for now.
	 */
	if (stack_top->u.ptr.field)
		return -EINVAL;
	gid = (const struct filter_get_index_data *) &runtime->data[index];
	switch (stack_top->u.ptr.type) {
	case LOAD_OBJECT:
		switch (stack_top->u.ptr.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const char *ptr;

			WARN_ON_ONCE(gid->offset >= gid->array_len);
			/* Skip count (unsigned long) */
			ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
			ptr = ptr + gid->offset;
			stack_top->u.ptr.ptr = ptr;
			stack_top->u.ptr.object_type = gid->elem.type;
			stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
			/* field is only used for types nested within variants. */
			stack_top->u.ptr.field = NULL;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const char *ptr;
			size_t ptr_seq_len;

			ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
			ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
			if (gid->offset >= gid->elem.len * ptr_seq_len) {
				ret = -EINVAL;
				goto end;
			}
			ptr = ptr + gid->offset;
			stack_top->u.ptr.ptr = ptr;
			stack_top->u.ptr.object_type = gid->elem.type;
			stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
			/* field is only used for types nested within variants. */
			stack_top->u.ptr.field = NULL;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			printk(KERN_WARNING "Nested structures are not supported yet.\n");
			ret = -EINVAL;
			goto end;
		case OBJECT_TYPE_VARIANT:
		default:
			printk(KERN_WARNING "Unexpected get index type %d\n",
				(int) stack_top->u.ptr.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:	/* Fall-through */
	{
		ret = context_get_index(lttng_probe_ctx,
				&stack_top->u.ptr,
				gid->ctx_index);
		if (ret) {
			goto end;
		}
		break;
	}
	case LOAD_ROOT_PAYLOAD:
		stack_top->u.ptr.ptr += gid->offset;
		if (gid->elem.type == OBJECT_TYPE_STRING)
			stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
		stack_top->u.ptr.object_type = gid->elem.type;
		stack_top->u.ptr.type = LOAD_OBJECT;
		/* field is only used for types nested within variants. */
		stack_top->u.ptr.field = NULL;
		break;
	}
	return 0;

end:
	return ret;
}

static int dynamic_load_field(struct estack_entry *stack_top)
{
	int ret;

	switch (stack_top->u.ptr.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printk("Filter warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	switch (stack_top->u.ptr.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printk("op load field s8\n");
		stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
		break;
	case OBJECT_TYPE_S16:
	{
		int16_t tmp;

		dbg_printk("op load field s16\n");
		tmp = *(int16_t *) stack_top->u.ptr.ptr;
		if (stack_top->u.ptr.rev_bo)
			__swab16s(&tmp);
		stack_top->u.v = tmp;
		break;
	}
	case OBJECT_TYPE_S32:
	{
		int32_t tmp;

		dbg_printk("op load field s32\n");
		tmp = *(int32_t *) stack_top->u.ptr.ptr;
		if (stack_top->u.ptr.rev_bo)
			__swab32s(&tmp);
		stack_top->u.v = tmp;
		break;
	}
	case OBJECT_TYPE_S64:
	{
		int64_t tmp;

		dbg_printk("op load field s64\n");
		tmp = *(int64_t *) stack_top->u.ptr.ptr;
		if (stack_top->u.ptr.rev_bo)
			__swab64s(&tmp);
		stack_top->u.v = tmp;
		break;
	}
	case OBJECT_TYPE_U8:
		dbg_printk("op load field u8\n");
		stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
		break;
	case OBJECT_TYPE_U16:
	{
		uint16_t tmp;

		dbg_printk("op load field u16\n");
		tmp = *(uint16_t *) stack_top->u.ptr.ptr;
		if (stack_top->u.ptr.rev_bo)
			__swab16s(&tmp);
		stack_top->u.v = tmp;
		break;
	}
	case OBJECT_TYPE_U32:
	{
		uint32_t tmp;

		dbg_printk("op load field u32\n");
		tmp = *(uint32_t *) stack_top->u.ptr.ptr;
		if (stack_top->u.ptr.rev_bo)
			__swab32s(&tmp);
		stack_top->u.v = tmp;
		break;
	}
	case OBJECT_TYPE_U64:
	{
		uint64_t tmp;

		dbg_printk("op load field u64\n");
		tmp = *(uint64_t *) stack_top->u.ptr.ptr;
		if (stack_top->u.ptr.rev_bo)
			__swab64s(&tmp);
		stack_top->u.v = tmp;
		break;
	}
	case OBJECT_TYPE_STRING:
	{
		const char *str;

		dbg_printk("op load field string\n");
		str = (const char *) stack_top->u.ptr.ptr;
		stack_top->u.s.str = str;
		if (unlikely(!stack_top->u.s.str)) {
			dbg_printk("Filter warning: loading a NULL string.\n");
			ret = -EINVAL;
			goto end;
		}
		stack_top->u.s.seq_len = SIZE_MAX;
		stack_top->u.s.literal_type =
			ESTACK_STRING_LITERAL_TYPE_NONE;
		break;
	}
	case OBJECT_TYPE_STRING_SEQUENCE:
	{
		const char *ptr;

		dbg_printk("op load field string sequence\n");
		ptr = stack_top->u.ptr.ptr;
		stack_top->u.s.seq_len = *(unsigned long *) ptr;
		stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
		if (unlikely(!stack_top->u.s.str)) {
			dbg_printk("Filter warning: loading a NULL sequence.\n");
			ret = -EINVAL;
			goto end;
		}
		stack_top->u.s.literal_type =
			ESTACK_STRING_LITERAL_TYPE_NONE;
		break;
	}
	case OBJECT_TYPE_DYNAMIC:
		/*
		 * Dynamic types in context are looked up
		 * by context get index.
		 */
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_DOUBLE:
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		printk(KERN_WARNING "Sequences, arrays, structs and variants cannot be loaded (nested types).\n");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}

/*
 * Return 0 (discard), or raise the 0x1 flag (log event).
 * Currently, other flags are kept for future extensions and have no
 * effect.
 */
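
/*
 * Example caller (sketch, hypothetical names): a probe evaluates the
 * attached filter and only records the event when the 0x1 (record)
 * flag is raised:
 *
 *	uint64_t v;
 *
 *	v = lttng_filter_interpret_bytecode(runtime, &probe_ctx,
 *			stack_data);
 *	if (v & 0x1)
 *		record_event();
 */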
uint64_t lttng_filter_interpret_bytecode(void *filter_data,
		struct lttng_probe_ctx *lttng_probe_ctx,
		const char *filter_stack_data)
{
	struct bytecode_runtime *bytecode = filter_data;
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	uint64_t retval = 0;
	struct estack _stack;
	struct estack *stack = &_stack;
	register int64_t ax = 0, bx = 0;
	register int top = FILTER_STACK_EMPTY;
#ifndef INTERPRETER_USE_SWITCH
	static void *dispatch[NR_FILTER_OPS] = {
		[ FILTER_OP_UNKNOWN ] = &&LABEL_FILTER_OP_UNKNOWN,

		[ FILTER_OP_RETURN ] = &&LABEL_FILTER_OP_RETURN,

		/* binary */
		[ FILTER_OP_MUL ] = &&LABEL_FILTER_OP_MUL,
		[ FILTER_OP_DIV ] = &&LABEL_FILTER_OP_DIV,
		[ FILTER_OP_MOD ] = &&LABEL_FILTER_OP_MOD,
		[ FILTER_OP_PLUS ] = &&LABEL_FILTER_OP_PLUS,
		[ FILTER_OP_MINUS ] = &&LABEL_FILTER_OP_MINUS,
		[ FILTER_OP_BIT_RSHIFT ] = &&LABEL_FILTER_OP_BIT_RSHIFT,
		[ FILTER_OP_BIT_LSHIFT ] = &&LABEL_FILTER_OP_BIT_LSHIFT,
		[ FILTER_OP_BIT_AND ] = &&LABEL_FILTER_OP_BIT_AND,
		[ FILTER_OP_BIT_OR ] = &&LABEL_FILTER_OP_BIT_OR,
		[ FILTER_OP_BIT_XOR ] = &&LABEL_FILTER_OP_BIT_XOR,

		/* binary comparators */
		[ FILTER_OP_EQ ] = &&LABEL_FILTER_OP_EQ,
		[ FILTER_OP_NE ] = &&LABEL_FILTER_OP_NE,
		[ FILTER_OP_GT ] = &&LABEL_FILTER_OP_GT,
		[ FILTER_OP_LT ] = &&LABEL_FILTER_OP_LT,
		[ FILTER_OP_GE ] = &&LABEL_FILTER_OP_GE,
		[ FILTER_OP_LE ] = &&LABEL_FILTER_OP_LE,

		/* string binary comparators */
		[ FILTER_OP_EQ_STRING ] = &&LABEL_FILTER_OP_EQ_STRING,
		[ FILTER_OP_NE_STRING ] = &&LABEL_FILTER_OP_NE_STRING,
		[ FILTER_OP_GT_STRING ] = &&LABEL_FILTER_OP_GT_STRING,
		[ FILTER_OP_LT_STRING ] = &&LABEL_FILTER_OP_LT_STRING,
		[ FILTER_OP_GE_STRING ] = &&LABEL_FILTER_OP_GE_STRING,
		[ FILTER_OP_LE_STRING ] = &&LABEL_FILTER_OP_LE_STRING,

		/* globbing pattern binary comparators */
		[ FILTER_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING,
		[ FILTER_OP_NE_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING,

		/* s64 binary comparators */
		[ FILTER_OP_EQ_S64 ] = &&LABEL_FILTER_OP_EQ_S64,
		[ FILTER_OP_NE_S64 ] = &&LABEL_FILTER_OP_NE_S64,
		[ FILTER_OP_GT_S64 ] = &&LABEL_FILTER_OP_GT_S64,
		[ FILTER_OP_LT_S64 ] = &&LABEL_FILTER_OP_LT_S64,
		[ FILTER_OP_GE_S64 ] = &&LABEL_FILTER_OP_GE_S64,
		[ FILTER_OP_LE_S64 ] = &&LABEL_FILTER_OP_LE_S64,

		/* double binary comparators */
		[ FILTER_OP_EQ_DOUBLE ] = &&LABEL_FILTER_OP_EQ_DOUBLE,
		[ FILTER_OP_NE_DOUBLE ] = &&LABEL_FILTER_OP_NE_DOUBLE,
		[ FILTER_OP_GT_DOUBLE ] = &&LABEL_FILTER_OP_GT_DOUBLE,
		[ FILTER_OP_LT_DOUBLE ] = &&LABEL_FILTER_OP_LT_DOUBLE,
		[ FILTER_OP_GE_DOUBLE ] = &&LABEL_FILTER_OP_GE_DOUBLE,
		[ FILTER_OP_LE_DOUBLE ] = &&LABEL_FILTER_OP_LE_DOUBLE,

		/* Mixed S64-double binary comparators */
		[ FILTER_OP_EQ_DOUBLE_S64 ] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64,
		[ FILTER_OP_NE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_NE_DOUBLE_S64,
		[ FILTER_OP_GT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GT_DOUBLE_S64,
		[ FILTER_OP_LT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LT_DOUBLE_S64,
		[ FILTER_OP_GE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GE_DOUBLE_S64,
		[ FILTER_OP_LE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LE_DOUBLE_S64,

		[ FILTER_OP_EQ_S64_DOUBLE ] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE,
		[ FILTER_OP_NE_S64_DOUBLE ] = &&LABEL_FILTER_OP_NE_S64_DOUBLE,
		[ FILTER_OP_GT_S64_DOUBLE ] = &&LABEL_FILTER_OP_GT_S64_DOUBLE,
		[ FILTER_OP_LT_S64_DOUBLE ] = &&LABEL_FILTER_OP_LT_S64_DOUBLE,
		[ FILTER_OP_GE_S64_DOUBLE ] = &&LABEL_FILTER_OP_GE_S64_DOUBLE,
		[ FILTER_OP_LE_S64_DOUBLE ] = &&LABEL_FILTER_OP_LE_S64_DOUBLE,

		/* unary */
		[ FILTER_OP_UNARY_PLUS ] = &&LABEL_FILTER_OP_UNARY_PLUS,
		[ FILTER_OP_UNARY_MINUS ] = &&LABEL_FILTER_OP_UNARY_MINUS,
		[ FILTER_OP_UNARY_NOT ] = &&LABEL_FILTER_OP_UNARY_NOT,
		[ FILTER_OP_UNARY_PLUS_S64 ] = &&LABEL_FILTER_OP_UNARY_PLUS_S64,
		[ FILTER_OP_UNARY_MINUS_S64 ] = &&LABEL_FILTER_OP_UNARY_MINUS_S64,
		[ FILTER_OP_UNARY_NOT_S64 ] = &&LABEL_FILTER_OP_UNARY_NOT_S64,
		[ FILTER_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE,
		[ FILTER_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE,
		[ FILTER_OP_UNARY_NOT_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE,

		/* logical */
		[ FILTER_OP_AND ] = &&LABEL_FILTER_OP_AND,
		[ FILTER_OP_OR ] = &&LABEL_FILTER_OP_OR,

		/* load field ref */
		[ FILTER_OP_LOAD_FIELD_REF ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF,
		[ FILTER_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING,
		[ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE,
		[ FILTER_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64,
		[ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE,

		/* load from immediate operand */
		[ FILTER_OP_LOAD_STRING ] = &&LABEL_FILTER_OP_LOAD_STRING,
		[ FILTER_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING,
		[ FILTER_OP_LOAD_S64 ] = &&LABEL_FILTER_OP_LOAD_S64,
		[ FILTER_OP_LOAD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_DOUBLE,

		/* cast */
		[ FILTER_OP_CAST_TO_S64 ] = &&LABEL_FILTER_OP_CAST_TO_S64,
		[ FILTER_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64,
		[ FILTER_OP_CAST_NOP ] = &&LABEL_FILTER_OP_CAST_NOP,

		/* get context ref */
		[ FILTER_OP_GET_CONTEXT_REF ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF,
		[ FILTER_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING,
		[ FILTER_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64,
		[ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE,

		/* load userspace field ref */
		[ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING,
		[ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE,

		/* Instructions for recursive traversal through composed types. */
		[ FILTER_OP_GET_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT,
		[ FILTER_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT,
		[ FILTER_OP_GET_PAYLOAD_ROOT ] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT,

		[ FILTER_OP_GET_SYMBOL ] = &&LABEL_FILTER_OP_GET_SYMBOL,
		[ FILTER_OP_GET_SYMBOL_FIELD ] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD,
		[ FILTER_OP_GET_INDEX_U16 ] = &&LABEL_FILTER_OP_GET_INDEX_U16,
		[ FILTER_OP_GET_INDEX_U64 ] = &&LABEL_FILTER_OP_GET_INDEX_U64,

		[ FILTER_OP_LOAD_FIELD ] = &&LABEL_FILTER_OP_LOAD_FIELD,
		[ FILTER_OP_LOAD_FIELD_S8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S8,
		[ FILTER_OP_LOAD_FIELD_S16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S16,
		[ FILTER_OP_LOAD_FIELD_S32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S32,
		[ FILTER_OP_LOAD_FIELD_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S64,
		[ FILTER_OP_LOAD_FIELD_U8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U8,
		[ FILTER_OP_LOAD_FIELD_U16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U16,
		[ FILTER_OP_LOAD_FIELD_U32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U32,
		[ FILTER_OP_LOAD_FIELD_U64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U64,
		[ FILTER_OP_LOAD_FIELD_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING,
		[ FILTER_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE,
		[ FILTER_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE,

		[ FILTER_OP_UNARY_BIT_NOT ] = &&LABEL_FILTER_OP_UNARY_BIT_NOT,

		[ FILTER_OP_RETURN_S64 ] = &&LABEL_FILTER_OP_RETURN_S64,
	};
#endif /* #ifndef INTERPRETER_USE_SWITCH */

	START_OP

		OP(FILTER_OP_UNKNOWN):
		OP(FILTER_OP_LOAD_FIELD_REF):
		OP(FILTER_OP_GET_CONTEXT_REF):
#ifdef INTERPRETER_USE_SWITCH
		default:
#endif /* INTERPRETER_USE_SWITCH */
			printk(KERN_WARNING "unknown bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		OP(FILTER_OP_RETURN):
		OP(FILTER_OP_RETURN_S64):
			/* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
			retval = !!estack_ax_v;
			ret = 0;
			goto end;

		/* binary */
		OP(FILTER_OP_MUL):
		OP(FILTER_OP_DIV):
		OP(FILTER_OP_MOD):
		OP(FILTER_OP_PLUS):
		OP(FILTER_OP_MINUS):
			printk(KERN_WARNING "unsupported bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		OP(FILTER_OP_EQ):
		OP(FILTER_OP_NE):
		OP(FILTER_OP_GT):
		OP(FILTER_OP_LT):
		OP(FILTER_OP_GE):
		OP(FILTER_OP_LE):
			printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		OP(FILTER_OP_EQ_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, "==") == 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_NE_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, "!=") != 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_GT_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, ">") > 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_LT_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, "<") < 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_GE_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, ">=") >= 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_LE_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, "<=") <= 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}

		OP(FILTER_OP_EQ_STAR_GLOB_STRING):
		{
			int res;

			res = (stack_star_glob_match(stack, top, "==") == 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_NE_STAR_GLOB_STRING):
		{
			int res;

			res = (stack_star_glob_match(stack, top, "!=") != 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}

		OP(FILTER_OP_EQ_S64):
		{
			int res;

			res = (estack_bx_v == estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_NE_S64):
		{
			int res;

			res = (estack_bx_v != estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_GT_S64):
		{
			int res;

			res = (estack_bx_v > estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_LT_S64):
		{
			int res;

			res = (estack_bx_v < estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_GE_S64):
		{
			int res;

			res = (estack_bx_v >= estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_LE_S64):
		{
			int res;

			res = (estack_bx_v <= estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}

		OP(FILTER_OP_EQ_DOUBLE):
		OP(FILTER_OP_NE_DOUBLE):
		OP(FILTER_OP_GT_DOUBLE):
		OP(FILTER_OP_LT_DOUBLE):
		OP(FILTER_OP_GE_DOUBLE):
		OP(FILTER_OP_LE_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* Mixed S64-double binary comparators */
		OP(FILTER_OP_EQ_DOUBLE_S64):
		OP(FILTER_OP_NE_DOUBLE_S64):
		OP(FILTER_OP_GT_DOUBLE_S64):
		OP(FILTER_OP_LT_DOUBLE_S64):
		OP(FILTER_OP_GE_DOUBLE_S64):
		OP(FILTER_OP_LE_DOUBLE_S64):
		OP(FILTER_OP_EQ_S64_DOUBLE):
		OP(FILTER_OP_NE_S64_DOUBLE):
		OP(FILTER_OP_GT_S64_DOUBLE):
		OP(FILTER_OP_LT_S64_DOUBLE):
		OP(FILTER_OP_GE_S64_DOUBLE):
		OP(FILTER_OP_LE_S64_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}
		OP(FILTER_OP_BIT_RSHIFT):
		{
			int64_t res;

			/* Catch undefined behavior. */
			if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
				ret = -EINVAL;
				goto end;
			}
			res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_BIT_LSHIFT):
		{
			int64_t res;

			/* Catch undefined behavior. */
			if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
				ret = -EINVAL;
				goto end;
			}
			res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_BIT_AND):
		{
			int64_t res;

			res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_BIT_OR):
		{
			int64_t res;

			res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_BIT_XOR):
		{
			int64_t res;

			res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}

		/* unary */
		OP(FILTER_OP_UNARY_PLUS):
		OP(FILTER_OP_UNARY_MINUS):
		OP(FILTER_OP_UNARY_NOT):
			printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		OP(FILTER_OP_UNARY_BIT_NOT):
		{
			estack_ax_v = ~(uint64_t) estack_ax_v;
			next_pc += sizeof(struct unary_op);
			PO;
		}

		OP(FILTER_OP_UNARY_PLUS_S64):
		{
			next_pc += sizeof(struct unary_op);
			PO;
		}
		OP(FILTER_OP_UNARY_MINUS_S64):
		{
			estack_ax_v = -estack_ax_v;
			next_pc += sizeof(struct unary_op);
			PO;
		}
		OP(FILTER_OP_UNARY_PLUS_DOUBLE):
		OP(FILTER_OP_UNARY_MINUS_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}
		OP(FILTER_OP_UNARY_NOT_S64):
		{
			estack_ax_v = !estack_ax_v;
			next_pc += sizeof(struct unary_op);
			PO;
		}
		OP(FILTER_OP_UNARY_NOT_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* logical */
		OP(FILTER_OP_AND):
		{
			struct logical_op *insn = (struct logical_op *) pc;

			/* If AX is 0, skip and evaluate to 0 */
			if (unlikely(estack_ax_v == 0)) {
				dbg_printk("Jumping to bytecode offset %u\n",
					(unsigned int) insn->skip_offset);
				next_pc = start_pc + insn->skip_offset;
			} else {
				/* Pop 1 when jump not taken */
				estack_pop(stack, top, ax, bx);
				next_pc += sizeof(struct logical_op);
			}
			PO;
		}
		OP(FILTER_OP_OR):
		{
			struct logical_op *insn = (struct logical_op *) pc;

			/* If AX is nonzero, skip and evaluate to 1 */
			if (unlikely(estack_ax_v != 0)) {
				estack_ax_v = 1;
				dbg_printk("Jumping to bytecode offset %u\n",
					(unsigned int) insn->skip_offset);
				next_pc = start_pc + insn->skip_offset;
			} else {
				/* Pop 1 when jump not taken */
				estack_pop(stack, top, ax, bx);
				next_pc += sizeof(struct logical_op);
			}
			PO;
		}
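
		/*
		 * Sketch: for "A && B", the bytecode evaluates A, then
		 * executes FILTER_OP_AND whose skip_offset points past
		 * the code for B. If A is 0, the 0 already in AX is the
		 * whole expression's value and B is skipped entirely;
		 * otherwise AX is popped and B's value becomes the
		 * result. FILTER_OP_OR mirrors this with 1.
		 */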

		/* load field ref */
		OP(FILTER_OP_LOAD_FIELD_REF_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type string\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.str =
				*(const char * const *) &filter_stack_data[ref->offset];
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL string.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
			estack_ax(stack, top)->u.s.literal_type =
				ESTACK_STRING_LITERAL_TYPE_NONE;
			estack_ax(stack, top)->u.s.user = 0;
			dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type sequence\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.seq_len =
				*(unsigned long *) &filter_stack_data[ref->offset];
			estack_ax(stack, top)->u.s.str =
				*(const char **) (&filter_stack_data[ref->offset
								+ sizeof(unsigned long)]);
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL sequence.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.literal_type =
				ESTACK_STRING_LITERAL_TYPE_NONE;
			estack_ax(stack, top)->u.s.user = 0;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_REF_S64):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type s64\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax_v =
				((struct literal_numeric *) &filter_stack_data[ref->offset])->v;
			dbg_printk("ref load s64 %lld\n",
				(long long) estack_ax_v);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* load from immediate operand */
		OP(FILTER_OP_LOAD_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printk("load string %s\n", insn->data);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.str = insn->data;
			estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
			estack_ax(stack, top)->u.s.literal_type =
				ESTACK_STRING_LITERAL_TYPE_PLAIN;
			estack_ax(stack, top)->u.s.user = 0;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			PO;
		}

		OP(FILTER_OP_LOAD_STAR_GLOB_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printk("load globbing pattern %s\n", insn->data);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.str = insn->data;
			estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
			estack_ax(stack, top)->u.s.literal_type =
				ESTACK_STRING_LITERAL_TYPE_STAR_GLOB;
			estack_ax(stack, top)->u.s.user = 0;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			PO;
		}

		OP(FILTER_OP_LOAD_S64):
		{
			struct load_op *insn = (struct load_op *) pc;

			estack_push(stack, top, ax, bx);
			estack_ax_v = ((struct literal_numeric *) insn->data)->v;
			dbg_printk("load s64 %lld\n",
				(long long) estack_ax_v);
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_numeric);
			PO;
		}

		OP(FILTER_OP_LOAD_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* cast */
		OP(FILTER_OP_CAST_TO_S64):
			printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		OP(FILTER_OP_CAST_DOUBLE_TO_S64):
		{
			BUG_ON(1);
			PO;
		}

		OP(FILTER_OP_CAST_NOP):
		{
			next_pc += sizeof(struct cast_op);
			PO;
		}

		/* get context ref */
		OP(FILTER_OP_GET_CONTEXT_REF_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;
			struct lttng_ctx_field *ctx_field;
			union lttng_ctx_value v;

			dbg_printk("get context ref offset %u type string\n",
				ref->offset);
			ctx_field = &lttng_static_ctx->fields[ref->offset];
			ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.str = v.str;
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL string.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
			estack_ax(stack, top)->u.s.literal_type =
				ESTACK_STRING_LITERAL_TYPE_NONE;
			estack_ax(stack, top)->u.s.user = 0;
			dbg_printk("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_GET_CONTEXT_REF_S64):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;
			struct lttng_ctx_field *ctx_field;
			union lttng_ctx_value v;

			dbg_printk("get context ref offset %u type s64\n",
				ref->offset);
			ctx_field = &lttng_static_ctx->fields[ref->offset];
			ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
			estack_push(stack, top, ax, bx);
			estack_ax_v = v.s64;
			dbg_printk("ref get context s64 %lld\n",
				(long long) estack_ax_v);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* load userspace field ref */
		OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type user string\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.user_str =
				*(const char * const *) &filter_stack_data[ref->offset];
			if (unlikely(!estack_ax(stack, top)->u.s.user_str)) {
				dbg_printk("Filter warning: loading a NULL string.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
			estack_ax(stack, top)->u.s.literal_type =
				ESTACK_STRING_LITERAL_TYPE_NONE;
			estack_ax(stack, top)->u.s.user = 1;
			dbg_printk("ref load user string %p\n",
				estack_ax(stack, top)->u.s.user_str);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type user sequence\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.seq_len =
				*(unsigned long *) &filter_stack_data[ref->offset];
			estack_ax(stack, top)->u.s.user_str =
				*(const char **) (&filter_stack_data[ref->offset
								+ sizeof(unsigned long)]);
			if (unlikely(!estack_ax(stack, top)->u.s.user_str)) {
				dbg_printk("Filter warning: loading a NULL sequence.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.literal_type =
				ESTACK_STRING_LITERAL_TYPE_NONE;
			estack_ax(stack, top)->u.s.user = 1;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_GET_CONTEXT_ROOT):
		{
			dbg_printk("op get context root\n");
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
			/* "field" only needed for variants. */
			estack_ax(stack, top)->u.ptr.field = NULL;
			next_pc += sizeof(struct load_op);
			PO;
		}

		OP(FILTER_OP_GET_APP_CONTEXT_ROOT):
		{
			BUG_ON(1);
			PO;
		}

		OP(FILTER_OP_GET_PAYLOAD_ROOT):
		{
			dbg_printk("op get app payload root\n");
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
			estack_ax(stack, top)->u.ptr.ptr = filter_stack_data;
			/* "field" only needed for variants. */
			estack_ax(stack, top)->u.ptr.field = NULL;
			next_pc += sizeof(struct load_op);
			PO;
		}

		OP(FILTER_OP_GET_SYMBOL):
		{
			dbg_printk("op get symbol\n");
			switch (estack_ax(stack, top)->u.ptr.type) {
			case LOAD_OBJECT:
				printk(KERN_WARNING "Nested fields not implemented yet.\n");
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_CONTEXT:
			case LOAD_ROOT_APP_CONTEXT:
			case LOAD_ROOT_PAYLOAD:
				/*
				 * Symbol lookup is performed by
				 * specialization.
				 */
				ret = -EINVAL;
				goto end;
			}
			next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
			PO;
		}

		OP(FILTER_OP_GET_SYMBOL_FIELD):
		{
			/*
			 * Used for the first variant encountered in a
			 * traversal. Variants are not implemented yet.
			 */
			ret = -EINVAL;
			goto end;
		}

		OP(FILTER_OP_GET_INDEX_U16):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u16 *index = (struct get_index_u16 *) insn->data;

			dbg_printk("op get index u16\n");
			ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
			if (ret)
				goto end;
			estack_ax_v = estack_ax(stack, top)->u.v;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
			PO;
		}

		OP(FILTER_OP_GET_INDEX_U64):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u64 *index = (struct get_index_u64 *) insn->data;

			dbg_printk("op get index u64\n");
			ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
			if (ret)
				goto end;
			estack_ax_v = estack_ax(stack, top)->u.v;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD):
		{
			dbg_printk("op load field\n");
			ret = dynamic_load_field(estack_ax(stack, top));
			if (ret)
				goto end;
			estack_ax_v = estack_ax(stack, top)->u.v;
			next_pc += sizeof(struct load_op);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_S8):
		{
			dbg_printk("op load field s8\n");

			estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
			next_pc += sizeof(struct load_op);
			PO;
		}
		OP(FILTER_OP_LOAD_FIELD_S16):
		{
			dbg_printk("op load field s16\n");

			estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
			next_pc += sizeof(struct load_op);
			PO;
		}
		OP(FILTER_OP_LOAD_FIELD_S32):
		{
			dbg_printk("op load field s32\n");

			estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
			next_pc += sizeof(struct load_op);
			PO;
		}
		OP(FILTER_OP_LOAD_FIELD_S64):
		{
			dbg_printk("op load field s64\n");

			estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
			next_pc += sizeof(struct load_op);
			PO;
		}
		OP(FILTER_OP_LOAD_FIELD_U8):
		{
			dbg_printk("op load field u8\n");

			estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
			next_pc += sizeof(struct load_op);
			PO;
		}
		OP(FILTER_OP_LOAD_FIELD_U16):
		{
			dbg_printk("op load field u16\n");

			estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
			next_pc += sizeof(struct load_op);
			PO;
		}
		OP(FILTER_OP_LOAD_FIELD_U32):
		{
			dbg_printk("op load field u32\n");

			estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
			next_pc += sizeof(struct load_op);
			PO;
		}
		OP(FILTER_OP_LOAD_FIELD_U64):
		{
			dbg_printk("op load field u64\n");

			estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
			next_pc += sizeof(struct load_op);
			PO;
		}
		OP(FILTER_OP_LOAD_FIELD_DOUBLE):
		{
			ret = -EINVAL;
			goto end;
		}

		OP(FILTER_OP_LOAD_FIELD_STRING):
		{
			const char *str;

			dbg_printk("op load field string\n");
			str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
			estack_ax(stack, top)->u.s.str = str;
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL string.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
			estack_ax(stack, top)->u.s.literal_type =
				ESTACK_STRING_LITERAL_TYPE_NONE;
			next_pc += sizeof(struct load_op);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_SEQUENCE):
		{
			const char *ptr;

			dbg_printk("op load field string sequence\n");
			ptr = estack_ax(stack, top)->u.ptr.ptr;
			estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
			estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL sequence.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.literal_type =
				ESTACK_STRING_LITERAL_TYPE_NONE;
			next_pc += sizeof(struct load_op);
			PO;
		}

	END_OP
end:
	/* return 0 (discard) on error */
	if (ret)
		return 0;
	return retval;
}

#undef START_OP
#undef OP
#undef PO
#undef END_OP