1 /* SPDX-License-Identifier: MIT
3 * lttng-filter-interpreter.c
5 * LTTng modules filter interpreter.
7 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
10 #include <wrapper/uaccess.h>
11 #include <wrapper/objtool.h>
12 #include <wrapper/types.h>
13 #include <linux/swab.h>
15 #include <lttng/filter.h>
16 #include <lttng/string-utils.h>
/*
 * The bytecode interpreter uses computed gotos and a large on-stack dispatch
 * table; presumably this trips objtool's stack-frame validation, so the
 * function is marked non-standard (wrapper/objtool.h) — NOTE(review): confirm
 * against the wrapper's definition.
 */
LTTNG_STACK_FRAME_NON_STANDARD(lttng_filter_interpret_bytecode);
21 * get_char should be called with page fault handler disabled if it is expected
22 * to handle user-space read.
25 char get_char(struct estack_entry
*reg
, size_t offset
)
27 if (unlikely(offset
>= reg
->u
.s
.seq_len
))
32 /* Handle invalid access as end of string. */
33 if (unlikely(!lttng_access_ok(VERIFY_READ
,
34 reg
->u
.s
.user_str
+ offset
,
37 /* Handle fault (nonzero return value) as end of string. */
38 if (unlikely(__copy_from_user_inatomic(&c
,
39 reg
->u
.s
.user_str
+ offset
,
44 return reg
->u
.s
.str
[offset
];
50 * -2: unknown escape char.
54 int parse_char(struct estack_entry
*reg
, char *c
, size_t *offset
)
59 *c
= get_char(reg
, *offset
);
75 char get_char_at_cb(size_t at
, void *data
)
77 return get_char(data
, at
);
81 int stack_star_glob_match(struct estack
*stack
, int top
, const char *cmp_type
)
83 bool has_user
= false;
85 struct estack_entry
*pattern_reg
;
86 struct estack_entry
*candidate_reg
;
88 /* Disable the page fault handler when reading from userspace. */
89 if (estack_bx(stack
, top
)->u
.s
.user
90 || estack_ax(stack
, top
)->u
.s
.user
) {
95 /* Find out which side is the pattern vs. the candidate. */
96 if (estack_ax(stack
, top
)->u
.s
.literal_type
== ESTACK_STRING_LITERAL_TYPE_STAR_GLOB
) {
97 pattern_reg
= estack_ax(stack
, top
);
98 candidate_reg
= estack_bx(stack
, top
);
100 pattern_reg
= estack_bx(stack
, top
);
101 candidate_reg
= estack_ax(stack
, top
);
104 /* Perform the match operation. */
105 result
= !strutils_star_glob_match_char_cb(get_char_at_cb
,
106 pattern_reg
, get_char_at_cb
, candidate_reg
);
114 int stack_strcmp(struct estack
*stack
, int top
, const char *cmp_type
)
116 size_t offset_bx
= 0, offset_ax
= 0;
117 int diff
, has_user
= 0;
119 if (estack_bx(stack
, top
)->u
.s
.user
120 || estack_ax(stack
, top
)->u
.s
.user
) {
128 char char_bx
, char_ax
;
130 char_bx
= get_char(estack_bx(stack
, top
), offset_bx
);
131 char_ax
= get_char(estack_ax(stack
, top
), offset_ax
);
133 if (unlikely(char_bx
== '\0')) {
134 if (char_ax
== '\0') {
138 if (estack_ax(stack
, top
)->u
.s
.literal_type
==
139 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
140 ret
= parse_char(estack_ax(stack
, top
),
141 &char_ax
, &offset_ax
);
151 if (unlikely(char_ax
== '\0')) {
152 if (estack_bx(stack
, top
)->u
.s
.literal_type
==
153 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
154 ret
= parse_char(estack_bx(stack
, top
),
155 &char_bx
, &offset_bx
);
164 if (estack_bx(stack
, top
)->u
.s
.literal_type
==
165 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
166 ret
= parse_char(estack_bx(stack
, top
),
167 &char_bx
, &offset_bx
);
171 } else if (ret
== -2) {
174 /* else compare both char */
176 if (estack_ax(stack
, top
)->u
.s
.literal_type
==
177 ESTACK_STRING_LITERAL_TYPE_PLAIN
) {
178 ret
= parse_char(estack_ax(stack
, top
),
179 &char_ax
, &offset_ax
);
183 } else if (ret
== -2) {
200 diff
= char_bx
- char_ax
;
212 uint64_t lttng_filter_false(void *filter_data
,
213 struct lttng_probe_ctx
*lttng_probe_ctx
,
214 const char *filter_stack_data
)
216 return LTTNG_FILTER_DISCARD
;
#ifdef INTERPRETER_USE_SWITCH

/*
 * Fallback for compilers that do not support taking address of labels.
 */

#define START_OP							\
	start_pc = &bytecode->data[0];					\
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;	\
			pc = next_pc) {					\
		dbg_printk("LTTng: Executing op %s (%u)\n",		\
			lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
			(unsigned int) *(filter_opcode_t *) pc);	\
		switch (*(filter_opcode_t *) pc)	{

#define OP(name)	case name

#define PO		break

#define END_OP		}						\
	}

#else

/*
 * Dispatch-table based interpreter.
 */

#define START_OP							\
	start_pc = &bytecode->code[0];					\
	pc = next_pc = start_pc;					\
	if (unlikely(pc - start_pc >= bytecode->len))			\
		goto end;						\
	goto *dispatch[*(filter_opcode_t *) pc];

#define OP(name)							\
LABEL_##name

#define PO							\
		goto *dispatch[*(filter_opcode_t *) pc];

#define END_OP

#endif

/* Only integer-typed registers may feed bitwise/shift operations. */
#define IS_INTEGER_REGISTER(reg_type) (reg_type == REG_S64)
267 static int context_get_index(struct lttng_probe_ctx
*lttng_probe_ctx
,
268 struct load_ptr
*ptr
,
272 struct lttng_ctx_field
*ctx_field
;
273 struct lttng_event_field
*field
;
274 union lttng_ctx_value v
;
276 ctx_field
= <tng_static_ctx
->fields
[idx
];
277 field
= &ctx_field
->event_field
;
278 ptr
->type
= LOAD_OBJECT
;
279 /* field is only used for types nested within variants. */
282 switch (field
->type
.atype
) {
284 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
285 if (field
->type
.u
.integer
.signedness
) {
286 ptr
->object_type
= OBJECT_TYPE_S64
;
288 ptr
->ptr
= &ptr
->u
.s64
;
290 ptr
->object_type
= OBJECT_TYPE_U64
;
291 ptr
->u
.u64
= v
.s64
; /* Cast. */
292 ptr
->ptr
= &ptr
->u
.u64
;
295 case atype_enum_nestable
:
297 const struct lttng_integer_type
*itype
=
298 &field
->type
.u
.enum_nestable
.container_type
->u
.integer
;
300 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
301 if (itype
->signedness
) {
302 ptr
->object_type
= OBJECT_TYPE_S64
;
304 ptr
->ptr
= &ptr
->u
.s64
;
306 ptr
->object_type
= OBJECT_TYPE_U64
;
307 ptr
->u
.u64
= v
.s64
; /* Cast. */
308 ptr
->ptr
= &ptr
->u
.u64
;
312 case atype_array_nestable
:
313 if (!lttng_is_bytewise_integer(field
->type
.u
.array_nestable
.elem_type
)) {
314 printk(KERN_WARNING
"LTTng: filter: Array nesting only supports integer types.\n");
317 if (field
->type
.u
.array_nestable
.elem_type
->u
.integer
.encoding
== lttng_encode_none
) {
318 printk(KERN_WARNING
"LTTng: filter: Only string arrays are supported for contexts.\n");
321 ptr
->object_type
= OBJECT_TYPE_STRING
;
322 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
325 case atype_sequence_nestable
:
326 if (!lttng_is_bytewise_integer(field
->type
.u
.sequence_nestable
.elem_type
)) {
327 printk(KERN_WARNING
"LTTng: filter: Sequence nesting only supports integer types.\n");
330 if (field
->type
.u
.sequence_nestable
.elem_type
->u
.integer
.encoding
== lttng_encode_none
) {
331 printk(KERN_WARNING
"LTTng: filter: Only string sequences are supported for contexts.\n");
334 ptr
->object_type
= OBJECT_TYPE_STRING
;
335 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
339 ptr
->object_type
= OBJECT_TYPE_STRING
;
340 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
343 case atype_struct_nestable
:
344 printk(KERN_WARNING
"LTTng: filter: Structure type cannot be loaded.\n");
346 case atype_variant_nestable
:
347 printk(KERN_WARNING
"LTTng: filter: Variant type cannot be loaded.\n");
350 printk(KERN_WARNING
"LTTng: filter: Unknown type: %d", (int) field
->type
.atype
);
356 static int dynamic_get_index(struct lttng_probe_ctx
*lttng_probe_ctx
,
357 struct bytecode_runtime
*runtime
,
358 uint64_t index
, struct estack_entry
*stack_top
)
361 const struct filter_get_index_data
*gid
;
364 * Types nested within variants need to perform dynamic lookup
365 * based on the field descriptions. LTTng-UST does not implement
368 if (stack_top
->u
.ptr
.field
)
370 gid
= (const struct filter_get_index_data
*) &runtime
->data
[index
];
371 switch (stack_top
->u
.ptr
.type
) {
373 switch (stack_top
->u
.ptr
.object_type
) {
374 case OBJECT_TYPE_ARRAY
:
378 WARN_ON_ONCE(gid
->offset
>= gid
->array_len
);
379 /* Skip count (unsigned long) */
380 ptr
= *(const char **) (stack_top
->u
.ptr
.ptr
+ sizeof(unsigned long));
381 ptr
= ptr
+ gid
->offset
;
382 stack_top
->u
.ptr
.ptr
= ptr
;
383 stack_top
->u
.ptr
.object_type
= gid
->elem
.type
;
384 stack_top
->u
.ptr
.rev_bo
= gid
->elem
.rev_bo
;
385 /* field is only used for types nested within variants. */
386 stack_top
->u
.ptr
.field
= NULL
;
389 case OBJECT_TYPE_SEQUENCE
:
394 ptr
= *(const char **) (stack_top
->u
.ptr
.ptr
+ sizeof(unsigned long));
395 ptr_seq_len
= *(unsigned long *) stack_top
->u
.ptr
.ptr
;
396 if (gid
->offset
>= gid
->elem
.len
* ptr_seq_len
) {
400 ptr
= ptr
+ gid
->offset
;
401 stack_top
->u
.ptr
.ptr
= ptr
;
402 stack_top
->u
.ptr
.object_type
= gid
->elem
.type
;
403 stack_top
->u
.ptr
.rev_bo
= gid
->elem
.rev_bo
;
404 /* field is only used for types nested within variants. */
405 stack_top
->u
.ptr
.field
= NULL
;
408 case OBJECT_TYPE_STRUCT
:
409 printk(KERN_WARNING
"LTTng: filter: Nested structures are not supported yet.\n");
412 case OBJECT_TYPE_VARIANT
:
414 printk(KERN_WARNING
"LTTng: filter: Unexpected get index type %d",
415 (int) stack_top
->u
.ptr
.object_type
);
420 case LOAD_ROOT_CONTEXT
:
421 case LOAD_ROOT_APP_CONTEXT
: /* Fall-through */
423 ret
= context_get_index(lttng_probe_ctx
,
431 case LOAD_ROOT_PAYLOAD
:
432 stack_top
->u
.ptr
.ptr
+= gid
->offset
;
433 if (gid
->elem
.type
== OBJECT_TYPE_STRING
)
434 stack_top
->u
.ptr
.ptr
= *(const char * const *) stack_top
->u
.ptr
.ptr
;
435 stack_top
->u
.ptr
.object_type
= gid
->elem
.type
;
436 stack_top
->u
.ptr
.type
= LOAD_OBJECT
;
437 /* field is only used for types nested within variants. */
438 stack_top
->u
.ptr
.field
= NULL
;
447 static int dynamic_load_field(struct estack_entry
*stack_top
)
451 switch (stack_top
->u
.ptr
.type
) {
454 case LOAD_ROOT_CONTEXT
:
455 case LOAD_ROOT_APP_CONTEXT
:
456 case LOAD_ROOT_PAYLOAD
:
458 dbg_printk("Filter warning: cannot load root, missing field name.\n");
462 switch (stack_top
->u
.ptr
.object_type
) {
464 dbg_printk("op load field s8\n");
465 stack_top
->u
.v
= *(int8_t *) stack_top
->u
.ptr
.ptr
;
466 stack_top
->type
= REG_S64
;
468 case OBJECT_TYPE_S16
:
472 dbg_printk("op load field s16\n");
473 tmp
= *(int16_t *) stack_top
->u
.ptr
.ptr
;
474 if (stack_top
->u
.ptr
.rev_bo
)
476 stack_top
->u
.v
= tmp
;
477 stack_top
->type
= REG_S64
;
480 case OBJECT_TYPE_S32
:
484 dbg_printk("op load field s32\n");
485 tmp
= *(int32_t *) stack_top
->u
.ptr
.ptr
;
486 if (stack_top
->u
.ptr
.rev_bo
)
488 stack_top
->u
.v
= tmp
;
489 stack_top
->type
= REG_S64
;
492 case OBJECT_TYPE_S64
:
496 dbg_printk("op load field s64\n");
497 tmp
= *(int64_t *) stack_top
->u
.ptr
.ptr
;
498 if (stack_top
->u
.ptr
.rev_bo
)
500 stack_top
->u
.v
= tmp
;
501 stack_top
->type
= REG_S64
;
505 dbg_printk("op load field u8\n");
506 stack_top
->u
.v
= *(uint8_t *) stack_top
->u
.ptr
.ptr
;
507 stack_top
->type
= REG_S64
;
509 case OBJECT_TYPE_U16
:
513 dbg_printk("op load field u16\n");
514 tmp
= *(uint16_t *) stack_top
->u
.ptr
.ptr
;
515 if (stack_top
->u
.ptr
.rev_bo
)
517 stack_top
->u
.v
= tmp
;
518 stack_top
->type
= REG_S64
;
521 case OBJECT_TYPE_U32
:
525 dbg_printk("op load field u32\n");
526 tmp
= *(uint32_t *) stack_top
->u
.ptr
.ptr
;
527 if (stack_top
->u
.ptr
.rev_bo
)
529 stack_top
->u
.v
= tmp
;
530 stack_top
->type
= REG_S64
;
533 case OBJECT_TYPE_U64
:
537 dbg_printk("op load field u64\n");
538 tmp
= *(uint64_t *) stack_top
->u
.ptr
.ptr
;
539 if (stack_top
->u
.ptr
.rev_bo
)
541 stack_top
->u
.v
= tmp
;
542 stack_top
->type
= REG_S64
;
545 case OBJECT_TYPE_STRING
:
549 dbg_printk("op load field string\n");
550 str
= (const char *) stack_top
->u
.ptr
.ptr
;
551 stack_top
->u
.s
.str
= str
;
552 if (unlikely(!stack_top
->u
.s
.str
)) {
553 dbg_printk("Filter warning: loading a NULL string.\n");
557 stack_top
->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
558 stack_top
->u
.s
.literal_type
=
559 ESTACK_STRING_LITERAL_TYPE_NONE
;
560 stack_top
->type
= REG_STRING
;
563 case OBJECT_TYPE_STRING_SEQUENCE
:
567 dbg_printk("op load field string sequence\n");
568 ptr
= stack_top
->u
.ptr
.ptr
;
569 stack_top
->u
.s
.seq_len
= *(unsigned long *) ptr
;
570 stack_top
->u
.s
.str
= *(const char **) (ptr
+ sizeof(unsigned long));
571 if (unlikely(!stack_top
->u
.s
.str
)) {
572 dbg_printk("Filter warning: loading a NULL sequence.\n");
576 stack_top
->u
.s
.literal_type
=
577 ESTACK_STRING_LITERAL_TYPE_NONE
;
578 stack_top
->type
= REG_STRING
;
581 case OBJECT_TYPE_DYNAMIC
:
583 * Dynamic types in context are looked up
584 * by context get index.
588 case OBJECT_TYPE_DOUBLE
:
591 case OBJECT_TYPE_SEQUENCE
:
592 case OBJECT_TYPE_ARRAY
:
593 case OBJECT_TYPE_STRUCT
:
594 case OBJECT_TYPE_VARIANT
:
595 printk(KERN_WARNING
"LTTng: filter: Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
606 * Return 0 (discard), or raise the 0x1 flag (log event).
607 * Currently, other flags are kept for future extensions and have no
610 uint64_t lttng_filter_interpret_bytecode(void *filter_data
,
611 struct lttng_probe_ctx
*lttng_probe_ctx
,
612 const char *filter_stack_data
)
614 struct bytecode_runtime
*bytecode
= filter_data
;
615 void *pc
, *next_pc
, *start_pc
;
618 struct estack _stack
;
619 struct estack
*stack
= &_stack
;
620 register int64_t ax
= 0, bx
= 0;
621 register enum entry_type ax_t
= REG_TYPE_UNKNOWN
, bx_t
= REG_TYPE_UNKNOWN
;
622 register int top
= FILTER_STACK_EMPTY
;
623 #ifndef INTERPRETER_USE_SWITCH
624 static void *dispatch
[NR_FILTER_OPS
] = {
625 [ FILTER_OP_UNKNOWN
] = &&LABEL_FILTER_OP_UNKNOWN
,
627 [ FILTER_OP_RETURN
] = &&LABEL_FILTER_OP_RETURN
,
630 [ FILTER_OP_MUL
] = &&LABEL_FILTER_OP_MUL
,
631 [ FILTER_OP_DIV
] = &&LABEL_FILTER_OP_DIV
,
632 [ FILTER_OP_MOD
] = &&LABEL_FILTER_OP_MOD
,
633 [ FILTER_OP_PLUS
] = &&LABEL_FILTER_OP_PLUS
,
634 [ FILTER_OP_MINUS
] = &&LABEL_FILTER_OP_MINUS
,
635 [ FILTER_OP_BIT_RSHIFT
] = &&LABEL_FILTER_OP_BIT_RSHIFT
,
636 [ FILTER_OP_BIT_LSHIFT
] = &&LABEL_FILTER_OP_BIT_LSHIFT
,
637 [ FILTER_OP_BIT_AND
] = &&LABEL_FILTER_OP_BIT_AND
,
638 [ FILTER_OP_BIT_OR
] = &&LABEL_FILTER_OP_BIT_OR
,
639 [ FILTER_OP_BIT_XOR
] = &&LABEL_FILTER_OP_BIT_XOR
,
641 /* binary comparators */
642 [ FILTER_OP_EQ
] = &&LABEL_FILTER_OP_EQ
,
643 [ FILTER_OP_NE
] = &&LABEL_FILTER_OP_NE
,
644 [ FILTER_OP_GT
] = &&LABEL_FILTER_OP_GT
,
645 [ FILTER_OP_LT
] = &&LABEL_FILTER_OP_LT
,
646 [ FILTER_OP_GE
] = &&LABEL_FILTER_OP_GE
,
647 [ FILTER_OP_LE
] = &&LABEL_FILTER_OP_LE
,
649 /* string binary comparator */
650 [ FILTER_OP_EQ_STRING
] = &&LABEL_FILTER_OP_EQ_STRING
,
651 [ FILTER_OP_NE_STRING
] = &&LABEL_FILTER_OP_NE_STRING
,
652 [ FILTER_OP_GT_STRING
] = &&LABEL_FILTER_OP_GT_STRING
,
653 [ FILTER_OP_LT_STRING
] = &&LABEL_FILTER_OP_LT_STRING
,
654 [ FILTER_OP_GE_STRING
] = &&LABEL_FILTER_OP_GE_STRING
,
655 [ FILTER_OP_LE_STRING
] = &&LABEL_FILTER_OP_LE_STRING
,
657 /* globbing pattern binary comparator */
658 [ FILTER_OP_EQ_STAR_GLOB_STRING
] = &&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING
,
659 [ FILTER_OP_NE_STAR_GLOB_STRING
] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING
,
661 /* s64 binary comparator */
662 [ FILTER_OP_EQ_S64
] = &&LABEL_FILTER_OP_EQ_S64
,
663 [ FILTER_OP_NE_S64
] = &&LABEL_FILTER_OP_NE_S64
,
664 [ FILTER_OP_GT_S64
] = &&LABEL_FILTER_OP_GT_S64
,
665 [ FILTER_OP_LT_S64
] = &&LABEL_FILTER_OP_LT_S64
,
666 [ FILTER_OP_GE_S64
] = &&LABEL_FILTER_OP_GE_S64
,
667 [ FILTER_OP_LE_S64
] = &&LABEL_FILTER_OP_LE_S64
,
669 /* double binary comparator */
670 [ FILTER_OP_EQ_DOUBLE
] = &&LABEL_FILTER_OP_EQ_DOUBLE
,
671 [ FILTER_OP_NE_DOUBLE
] = &&LABEL_FILTER_OP_NE_DOUBLE
,
672 [ FILTER_OP_GT_DOUBLE
] = &&LABEL_FILTER_OP_GT_DOUBLE
,
673 [ FILTER_OP_LT_DOUBLE
] = &&LABEL_FILTER_OP_LT_DOUBLE
,
674 [ FILTER_OP_GE_DOUBLE
] = &&LABEL_FILTER_OP_GE_DOUBLE
,
675 [ FILTER_OP_LE_DOUBLE
] = &&LABEL_FILTER_OP_LE_DOUBLE
,
677 /* Mixed S64-double binary comparators */
678 [ FILTER_OP_EQ_DOUBLE_S64
] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64
,
679 [ FILTER_OP_NE_DOUBLE_S64
] = &&LABEL_FILTER_OP_NE_DOUBLE_S64
,
680 [ FILTER_OP_GT_DOUBLE_S64
] = &&LABEL_FILTER_OP_GT_DOUBLE_S64
,
681 [ FILTER_OP_LT_DOUBLE_S64
] = &&LABEL_FILTER_OP_LT_DOUBLE_S64
,
682 [ FILTER_OP_GE_DOUBLE_S64
] = &&LABEL_FILTER_OP_GE_DOUBLE_S64
,
683 [ FILTER_OP_LE_DOUBLE_S64
] = &&LABEL_FILTER_OP_LE_DOUBLE_S64
,
685 [ FILTER_OP_EQ_S64_DOUBLE
] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE
,
686 [ FILTER_OP_NE_S64_DOUBLE
] = &&LABEL_FILTER_OP_NE_S64_DOUBLE
,
687 [ FILTER_OP_GT_S64_DOUBLE
] = &&LABEL_FILTER_OP_GT_S64_DOUBLE
,
688 [ FILTER_OP_LT_S64_DOUBLE
] = &&LABEL_FILTER_OP_LT_S64_DOUBLE
,
689 [ FILTER_OP_GE_S64_DOUBLE
] = &&LABEL_FILTER_OP_GE_S64_DOUBLE
,
690 [ FILTER_OP_LE_S64_DOUBLE
] = &&LABEL_FILTER_OP_LE_S64_DOUBLE
,
693 [ FILTER_OP_UNARY_PLUS
] = &&LABEL_FILTER_OP_UNARY_PLUS
,
694 [ FILTER_OP_UNARY_MINUS
] = &&LABEL_FILTER_OP_UNARY_MINUS
,
695 [ FILTER_OP_UNARY_NOT
] = &&LABEL_FILTER_OP_UNARY_NOT
,
696 [ FILTER_OP_UNARY_PLUS_S64
] = &&LABEL_FILTER_OP_UNARY_PLUS_S64
,
697 [ FILTER_OP_UNARY_MINUS_S64
] = &&LABEL_FILTER_OP_UNARY_MINUS_S64
,
698 [ FILTER_OP_UNARY_NOT_S64
] = &&LABEL_FILTER_OP_UNARY_NOT_S64
,
699 [ FILTER_OP_UNARY_PLUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE
,
700 [ FILTER_OP_UNARY_MINUS_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE
,
701 [ FILTER_OP_UNARY_NOT_DOUBLE
] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE
,
704 [ FILTER_OP_AND
] = &&LABEL_FILTER_OP_AND
,
705 [ FILTER_OP_OR
] = &&LABEL_FILTER_OP_OR
,
708 [ FILTER_OP_LOAD_FIELD_REF
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF
,
709 [ FILTER_OP_LOAD_FIELD_REF_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING
,
710 [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE
,
711 [ FILTER_OP_LOAD_FIELD_REF_S64
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64
,
712 [ FILTER_OP_LOAD_FIELD_REF_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE
,
714 /* load from immediate operand */
715 [ FILTER_OP_LOAD_STRING
] = &&LABEL_FILTER_OP_LOAD_STRING
,
716 [ FILTER_OP_LOAD_STAR_GLOB_STRING
] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING
,
717 [ FILTER_OP_LOAD_S64
] = &&LABEL_FILTER_OP_LOAD_S64
,
718 [ FILTER_OP_LOAD_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_DOUBLE
,
721 [ FILTER_OP_CAST_TO_S64
] = &&LABEL_FILTER_OP_CAST_TO_S64
,
722 [ FILTER_OP_CAST_DOUBLE_TO_S64
] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64
,
723 [ FILTER_OP_CAST_NOP
] = &&LABEL_FILTER_OP_CAST_NOP
,
725 /* get context ref */
726 [ FILTER_OP_GET_CONTEXT_REF
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF
,
727 [ FILTER_OP_GET_CONTEXT_REF_STRING
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING
,
728 [ FILTER_OP_GET_CONTEXT_REF_S64
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64
,
729 [ FILTER_OP_GET_CONTEXT_REF_DOUBLE
] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE
,
731 /* load userspace field ref */
732 [ FILTER_OP_LOAD_FIELD_REF_USER_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING
,
733 [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
,
735 /* Instructions for recursive traversal through composed types. */
736 [ FILTER_OP_GET_CONTEXT_ROOT
] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT
,
737 [ FILTER_OP_GET_APP_CONTEXT_ROOT
] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT
,
738 [ FILTER_OP_GET_PAYLOAD_ROOT
] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT
,
740 [ FILTER_OP_GET_SYMBOL
] = &&LABEL_FILTER_OP_GET_SYMBOL
,
741 [ FILTER_OP_GET_SYMBOL_FIELD
] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD
,
742 [ FILTER_OP_GET_INDEX_U16
] = &&LABEL_FILTER_OP_GET_INDEX_U16
,
743 [ FILTER_OP_GET_INDEX_U64
] = &&LABEL_FILTER_OP_GET_INDEX_U64
,
745 [ FILTER_OP_LOAD_FIELD
] = &&LABEL_FILTER_OP_LOAD_FIELD
,
746 [ FILTER_OP_LOAD_FIELD_S8
] = &&LABEL_FILTER_OP_LOAD_FIELD_S8
,
747 [ FILTER_OP_LOAD_FIELD_S16
] = &&LABEL_FILTER_OP_LOAD_FIELD_S16
,
748 [ FILTER_OP_LOAD_FIELD_S32
] = &&LABEL_FILTER_OP_LOAD_FIELD_S32
,
749 [ FILTER_OP_LOAD_FIELD_S64
] = &&LABEL_FILTER_OP_LOAD_FIELD_S64
,
750 [ FILTER_OP_LOAD_FIELD_U8
] = &&LABEL_FILTER_OP_LOAD_FIELD_U8
,
751 [ FILTER_OP_LOAD_FIELD_U16
] = &&LABEL_FILTER_OP_LOAD_FIELD_U16
,
752 [ FILTER_OP_LOAD_FIELD_U32
] = &&LABEL_FILTER_OP_LOAD_FIELD_U32
,
753 [ FILTER_OP_LOAD_FIELD_U64
] = &&LABEL_FILTER_OP_LOAD_FIELD_U64
,
754 [ FILTER_OP_LOAD_FIELD_STRING
] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING
,
755 [ FILTER_OP_LOAD_FIELD_SEQUENCE
] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE
,
756 [ FILTER_OP_LOAD_FIELD_DOUBLE
] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE
,
758 [ FILTER_OP_UNARY_BIT_NOT
] = &&LABEL_FILTER_OP_UNARY_BIT_NOT
,
760 [ FILTER_OP_RETURN_S64
] = &&LABEL_FILTER_OP_RETURN_S64
,
762 #endif /* #ifndef INTERPRETER_USE_SWITCH */
766 OP(FILTER_OP_UNKNOWN
):
767 OP(FILTER_OP_LOAD_FIELD_REF
):
768 OP(FILTER_OP_GET_CONTEXT_REF
):
769 #ifdef INTERPRETER_USE_SWITCH
771 #endif /* INTERPRETER_USE_SWITCH */
772 printk(KERN_WARNING
"LTTng: filter: unknown bytecode op %u\n",
773 (unsigned int) *(filter_opcode_t
*) pc
);
777 OP(FILTER_OP_RETURN
):
778 OP(FILTER_OP_RETURN_S64
):
779 /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
780 switch (estack_ax_t
) {
782 retval
= !!estack_ax_v
;
787 case REG_STAR_GLOB_STRING
:
788 case REG_TYPE_UNKNOWN
:
801 printk(KERN_WARNING
"LTTng: filter: unsupported bytecode op %u\n",
802 (unsigned int) *(filter_opcode_t
*) pc
);
812 printk(KERN_WARNING
"LTTng: filter: unsupported non-specialized bytecode op %u\n",
813 (unsigned int) *(filter_opcode_t
*) pc
);
817 OP(FILTER_OP_EQ_STRING
):
821 res
= (stack_strcmp(stack
, top
, "==") == 0);
822 estack_pop(stack
, top
, ax
, bx
, ax_t
, bx_t
);
824 estack_ax_t
= REG_S64
;
825 next_pc
+= sizeof(struct binary_op
);
828 OP(FILTER_OP_NE_STRING
):
832 res
= (stack_strcmp(stack
, top
, "!=") != 0);
833 estack_pop(stack
, top
, ax
, bx
, ax_t
, bx_t
);
835 estack_ax_t
= REG_S64
;
836 next_pc
+= sizeof(struct binary_op
);
839 OP(FILTER_OP_GT_STRING
):
843 res
= (stack_strcmp(stack
, top
, ">") > 0);
844 estack_pop(stack
, top
, ax
, bx
, ax_t
, bx_t
);
846 estack_ax_t
= REG_S64
;
847 next_pc
+= sizeof(struct binary_op
);
850 OP(FILTER_OP_LT_STRING
):
854 res
= (stack_strcmp(stack
, top
, "<") < 0);
855 estack_pop(stack
, top
, ax
, bx
, ax_t
, bx_t
);
857 estack_ax_t
= REG_S64
;
858 next_pc
+= sizeof(struct binary_op
);
861 OP(FILTER_OP_GE_STRING
):
865 res
= (stack_strcmp(stack
, top
, ">=") >= 0);
866 estack_pop(stack
, top
, ax
, bx
, ax_t
, bx_t
);
868 estack_ax_t
= REG_S64
;
869 next_pc
+= sizeof(struct binary_op
);
872 OP(FILTER_OP_LE_STRING
):
876 res
= (stack_strcmp(stack
, top
, "<=") <= 0);
877 estack_pop(stack
, top
, ax
, bx
, ax_t
, bx_t
);
879 estack_ax_t
= REG_S64
;
880 next_pc
+= sizeof(struct binary_op
);
884 OP(FILTER_OP_EQ_STAR_GLOB_STRING
):
888 res
= (stack_star_glob_match(stack
, top
, "==") == 0);
889 estack_pop(stack
, top
, ax
, bx
, ax_t
, bx_t
);
891 estack_ax_t
= REG_S64
;
892 next_pc
+= sizeof(struct binary_op
);
895 OP(FILTER_OP_NE_STAR_GLOB_STRING
):
899 res
= (stack_star_glob_match(stack
, top
, "!=") != 0);
900 estack_pop(stack
, top
, ax
, bx
, ax_t
, bx_t
);
902 estack_ax_t
= REG_S64
;
903 next_pc
+= sizeof(struct binary_op
);
907 OP(FILTER_OP_EQ_S64
):
911 res
= (estack_bx_v
== estack_ax_v
);
912 estack_pop(stack
, top
, ax
, bx
, ax_t
, bx_t
);
914 estack_ax_t
= REG_S64
;
915 next_pc
+= sizeof(struct binary_op
);
918 OP(FILTER_OP_NE_S64
):
922 res
= (estack_bx_v
!= estack_ax_v
);
923 estack_pop(stack
, top
, ax
, bx
, ax_t
, bx_t
);
925 estack_ax_t
= REG_S64
;
926 next_pc
+= sizeof(struct binary_op
);
929 OP(FILTER_OP_GT_S64
):
933 res
= (estack_bx_v
> estack_ax_v
);
934 estack_pop(stack
, top
, ax
, bx
, ax_t
, bx_t
);
936 estack_ax_t
= REG_S64
;
937 next_pc
+= sizeof(struct binary_op
);
940 OP(FILTER_OP_LT_S64
):
944 res
= (estack_bx_v
< estack_ax_v
);
945 estack_pop(stack
, top
, ax
, bx
, ax_t
, bx_t
);
947 estack_ax_t
= REG_S64
;
948 next_pc
+= sizeof(struct binary_op
);
951 OP(FILTER_OP_GE_S64
):
955 res
= (estack_bx_v
>= estack_ax_v
);
956 estack_pop(stack
, top
, ax
, bx
, ax_t
, bx_t
);
958 estack_ax_t
= REG_S64
;
959 next_pc
+= sizeof(struct binary_op
);
962 OP(FILTER_OP_LE_S64
):
966 res
= (estack_bx_v
<= estack_ax_v
);
967 estack_pop(stack
, top
, ax
, bx
, ax_t
, bx_t
);
969 estack_ax_t
= REG_S64
;
970 next_pc
+= sizeof(struct binary_op
);
974 OP(FILTER_OP_EQ_DOUBLE
):
975 OP(FILTER_OP_NE_DOUBLE
):
976 OP(FILTER_OP_GT_DOUBLE
):
977 OP(FILTER_OP_LT_DOUBLE
):
978 OP(FILTER_OP_GE_DOUBLE
):
979 OP(FILTER_OP_LE_DOUBLE
):
985 /* Mixed S64-double binary comparators */
986 OP(FILTER_OP_EQ_DOUBLE_S64
):
987 OP(FILTER_OP_NE_DOUBLE_S64
):
988 OP(FILTER_OP_GT_DOUBLE_S64
):
989 OP(FILTER_OP_LT_DOUBLE_S64
):
990 OP(FILTER_OP_GE_DOUBLE_S64
):
991 OP(FILTER_OP_LE_DOUBLE_S64
):
992 OP(FILTER_OP_EQ_S64_DOUBLE
):
993 OP(FILTER_OP_NE_S64_DOUBLE
):
994 OP(FILTER_OP_GT_S64_DOUBLE
):
995 OP(FILTER_OP_LT_S64_DOUBLE
):
996 OP(FILTER_OP_GE_S64_DOUBLE
):
997 OP(FILTER_OP_LE_S64_DOUBLE
):
1002 OP(FILTER_OP_BIT_RSHIFT
):
1006 if (!IS_INTEGER_REGISTER(estack_ax_t
) || !IS_INTEGER_REGISTER(estack_bx_t
)) {
1011 /* Catch undefined behavior. */
1012 if (unlikely(estack_ax_v
< 0 || estack_ax_v
>= 64)) {
1016 res
= ((uint64_t) estack_bx_v
>> (uint32_t) estack_ax_v
);
1017 estack_pop(stack
, top
, ax
, bx
, ax_t
, bx_t
);
1019 estack_ax_t
= REG_S64
;
1020 next_pc
+= sizeof(struct binary_op
);
1023 OP(FILTER_OP_BIT_LSHIFT
):
1027 if (!IS_INTEGER_REGISTER(estack_ax_t
) || !IS_INTEGER_REGISTER(estack_bx_t
)) {
1032 /* Catch undefined behavior. */
1033 if (unlikely(estack_ax_v
< 0 || estack_ax_v
>= 64)) {
1037 res
= ((uint64_t) estack_bx_v
<< (uint32_t) estack_ax_v
);
1038 estack_pop(stack
, top
, ax
, bx
, ax_t
, bx_t
);
1040 estack_ax_t
= REG_S64
;
1041 next_pc
+= sizeof(struct binary_op
);
1044 OP(FILTER_OP_BIT_AND
):
1048 if (!IS_INTEGER_REGISTER(estack_ax_t
) || !IS_INTEGER_REGISTER(estack_bx_t
)) {
1053 res
= ((uint64_t) estack_bx_v
& (uint64_t) estack_ax_v
);
1054 estack_pop(stack
, top
, ax
, bx
, ax_t
, bx_t
);
1056 estack_ax_t
= REG_S64
;
1057 next_pc
+= sizeof(struct binary_op
);
1060 OP(FILTER_OP_BIT_OR
):
1064 if (!IS_INTEGER_REGISTER(estack_ax_t
) || !IS_INTEGER_REGISTER(estack_bx_t
)) {
1069 res
= ((uint64_t) estack_bx_v
| (uint64_t) estack_ax_v
);
1070 estack_pop(stack
, top
, ax
, bx
, ax_t
, bx_t
);
1072 estack_ax_t
= REG_S64
;
1073 next_pc
+= sizeof(struct binary_op
);
1076 OP(FILTER_OP_BIT_XOR
):
1080 if (!IS_INTEGER_REGISTER(estack_ax_t
) || !IS_INTEGER_REGISTER(estack_bx_t
)) {
1085 res
= ((uint64_t) estack_bx_v
^ (uint64_t) estack_ax_v
);
1086 estack_pop(stack
, top
, ax
, bx
, ax_t
, bx_t
);
1088 estack_ax_t
= REG_S64
;
1089 next_pc
+= sizeof(struct binary_op
);
1094 OP(FILTER_OP_UNARY_PLUS
):
1095 OP(FILTER_OP_UNARY_MINUS
):
1096 OP(FILTER_OP_UNARY_NOT
):
1097 printk(KERN_WARNING
"LTTng: filter: unsupported non-specialized bytecode op %u\n",
1098 (unsigned int) *(filter_opcode_t
*) pc
);
1103 OP(FILTER_OP_UNARY_BIT_NOT
):
1105 estack_ax_v
= ~(uint64_t) estack_ax_v
;
1106 estack_ax_t
= REG_S64
;
1107 next_pc
+= sizeof(struct unary_op
);
1111 OP(FILTER_OP_UNARY_PLUS_S64
):
1113 next_pc
+= sizeof(struct unary_op
);
1116 OP(FILTER_OP_UNARY_MINUS_S64
):
1118 estack_ax_v
= -estack_ax_v
;
1119 estack_ax_t
= REG_S64
;
1120 next_pc
+= sizeof(struct unary_op
);
1123 OP(FILTER_OP_UNARY_PLUS_DOUBLE
):
1124 OP(FILTER_OP_UNARY_MINUS_DOUBLE
):
1129 OP(FILTER_OP_UNARY_NOT_S64
):
1131 estack_ax_v
= !estack_ax_v
;
1132 estack_ax_t
= REG_S64
;
1133 next_pc
+= sizeof(struct unary_op
);
1136 OP(FILTER_OP_UNARY_NOT_DOUBLE
):
1145 struct logical_op
*insn
= (struct logical_op
*) pc
;
1147 /* If AX is 0, skip and evaluate to 0 */
1148 if (unlikely(estack_ax_v
== 0)) {
1149 dbg_printk("Jumping to bytecode offset %u\n",
1150 (unsigned int) insn
->skip_offset
);
1151 next_pc
= start_pc
+ insn
->skip_offset
;
1153 /* Pop 1 when jump not taken */
1154 estack_pop(stack
, top
, ax
, bx
, ax_t
, bx_t
);
1155 next_pc
+= sizeof(struct logical_op
);
1161 struct logical_op
*insn
= (struct logical_op
*) pc
;
1163 /* If AX is nonzero, skip and evaluate to 1 */
1165 if (unlikely(estack_ax_v
!= 0)) {
1167 dbg_printk("Jumping to bytecode offset %u\n",
1168 (unsigned int) insn
->skip_offset
);
1169 next_pc
= start_pc
+ insn
->skip_offset
;
1171 /* Pop 1 when jump not taken */
1172 estack_pop(stack
, top
, ax
, bx
, ax_t
, bx_t
);
1173 next_pc
+= sizeof(struct logical_op
);
1179 /* load field ref */
1180 OP(FILTER_OP_LOAD_FIELD_REF_STRING
):
1182 struct load_op
*insn
= (struct load_op
*) pc
;
1183 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1185 dbg_printk("load field ref offset %u type string\n",
1187 estack_push(stack
, top
, ax
, bx
, ax_t
, bx_t
);
1188 estack_ax(stack
, top
)->u
.s
.str
=
1189 *(const char * const *) &filter_stack_data
[ref
->offset
];
1190 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1191 dbg_printk("Filter warning: loading a NULL string.\n");
1195 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1196 estack_ax(stack
, top
)->u
.s
.literal_type
=
1197 ESTACK_STRING_LITERAL_TYPE_NONE
;
1198 estack_ax(stack
, top
)->u
.s
.user
= 0;
1199 estack_ax(stack
, top
)->type
= REG_STRING
;
1200 dbg_printk("ref load string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
1201 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1205 OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE
):
1207 struct load_op
*insn
= (struct load_op
*) pc
;
1208 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1210 dbg_printk("load field ref offset %u type sequence\n",
1212 estack_push(stack
, top
, ax
, bx
, ax_t
, bx_t
);
1213 estack_ax(stack
, top
)->u
.s
.seq_len
=
1214 *(unsigned long *) &filter_stack_data
[ref
->offset
];
1215 estack_ax(stack
, top
)->u
.s
.str
=
1216 *(const char **) (&filter_stack_data
[ref
->offset
1217 + sizeof(unsigned long)]);
1218 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1219 dbg_printk("Filter warning: loading a NULL sequence.\n");
1223 estack_ax(stack
, top
)->u
.s
.literal_type
=
1224 ESTACK_STRING_LITERAL_TYPE_NONE
;
1225 estack_ax(stack
, top
)->u
.s
.user
= 0;
1226 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1230 OP(FILTER_OP_LOAD_FIELD_REF_S64
):
1232 struct load_op
*insn
= (struct load_op
*) pc
;
1233 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1235 dbg_printk("load field ref offset %u type s64\n",
1237 estack_push(stack
, top
, ax
, bx
, ax_t
, bx_t
);
1239 ((struct literal_numeric
*) &filter_stack_data
[ref
->offset
])->v
;
1240 estack_ax_t
= REG_S64
;
1241 dbg_printk("ref load s64 %lld\n",
1242 (long long) estack_ax_v
);
1243 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1247 OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE
):
1253 /* load from immediate operand */
1254 OP(FILTER_OP_LOAD_STRING
):
1256 struct load_op
*insn
= (struct load_op
*) pc
;
1258 dbg_printk("load string %s\n", insn
->data
);
1259 estack_push(stack
, top
, ax
, bx
, ax_t
, bx_t
);
1260 estack_ax(stack
, top
)->u
.s
.str
= insn
->data
;
1261 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1262 estack_ax(stack
, top
)->u
.s
.literal_type
=
1263 ESTACK_STRING_LITERAL_TYPE_PLAIN
;
1264 estack_ax(stack
, top
)->u
.s
.user
= 0;
1265 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1269 OP(FILTER_OP_LOAD_STAR_GLOB_STRING
):
1271 struct load_op
*insn
= (struct load_op
*) pc
;
1273 dbg_printk("load globbing pattern %s\n", insn
->data
);
1274 estack_push(stack
, top
, ax
, bx
, ax_t
, bx_t
);
1275 estack_ax(stack
, top
)->u
.s
.str
= insn
->data
;
1276 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1277 estack_ax(stack
, top
)->u
.s
.literal_type
=
1278 ESTACK_STRING_LITERAL_TYPE_STAR_GLOB
;
1279 estack_ax(stack
, top
)->u
.s
.user
= 0;
1280 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1284 OP(FILTER_OP_LOAD_S64
):
1286 struct load_op
*insn
= (struct load_op
*) pc
;
1288 estack_push(stack
, top
, ax
, bx
, ax_t
, bx_t
);
1289 estack_ax_v
= ((struct literal_numeric
*) insn
->data
)->v
;
1290 estack_ax_t
= REG_S64
;
1291 dbg_printk("load s64 %lld\n",
1292 (long long) estack_ax_v
);
1293 next_pc
+= sizeof(struct load_op
)
1294 + sizeof(struct literal_numeric
);
1298 OP(FILTER_OP_LOAD_DOUBLE
):
1305 OP(FILTER_OP_CAST_TO_S64
):
1306 printk(KERN_WARNING
"LTTng: filter: unsupported non-specialized bytecode op %u\n",
1307 (unsigned int) *(filter_opcode_t
*) pc
);
1311 OP(FILTER_OP_CAST_DOUBLE_TO_S64
):
1317 OP(FILTER_OP_CAST_NOP
):
1319 next_pc
+= sizeof(struct cast_op
);
1323 /* get context ref */
1324 OP(FILTER_OP_GET_CONTEXT_REF_STRING
):
1326 struct load_op
*insn
= (struct load_op
*) pc
;
1327 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1328 struct lttng_ctx_field
*ctx_field
;
1329 union lttng_ctx_value v
;
1331 dbg_printk("get context ref offset %u type string\n",
1333 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
1334 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
1335 estack_push(stack
, top
, ax
, bx
, ax_t
, bx_t
);
1336 estack_ax(stack
, top
)->u
.s
.str
= v
.str
;
1337 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1338 dbg_printk("Filter warning: loading a NULL string.\n");
1342 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1343 estack_ax(stack
, top
)->u
.s
.literal_type
=
1344 ESTACK_STRING_LITERAL_TYPE_NONE
;
1345 estack_ax(stack
, top
)->u
.s
.user
= 0;
1346 estack_ax(stack
, top
)->type
= REG_STRING
;
1347 dbg_printk("ref get context string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
1348 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1352 OP(FILTER_OP_GET_CONTEXT_REF_S64
):
1354 struct load_op
*insn
= (struct load_op
*) pc
;
1355 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1356 struct lttng_ctx_field
*ctx_field
;
1357 union lttng_ctx_value v
;
1359 dbg_printk("get context ref offset %u type s64\n",
1361 ctx_field
= <tng_static_ctx
->fields
[ref
->offset
];
1362 ctx_field
->get_value(ctx_field
, lttng_probe_ctx
, &v
);
1363 estack_push(stack
, top
, ax
, bx
, ax_t
, bx_t
);
1364 estack_ax_v
= v
.s64
;
1365 estack_ax_t
= REG_S64
;
1366 dbg_printk("ref get context s64 %lld\n",
1367 (long long) estack_ax_v
);
1368 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1372 OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE
):
1378 /* load userspace field ref */
1379 OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING
):
1381 struct load_op
*insn
= (struct load_op
*) pc
;
1382 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1384 dbg_printk("load field ref offset %u type user string\n",
1386 estack_push(stack
, top
, ax
, bx
, ax_t
, bx_t
);
1387 estack_ax(stack
, top
)->u
.s
.user_str
=
1388 *(const char * const *) &filter_stack_data
[ref
->offset
];
1389 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1390 dbg_printk("Filter warning: loading a NULL string.\n");
1394 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1395 estack_ax(stack
, top
)->u
.s
.literal_type
=
1396 ESTACK_STRING_LITERAL_TYPE_NONE
;
1397 estack_ax(stack
, top
)->u
.s
.user
= 1;
1398 estack_ax(stack
, top
)->type
= REG_STRING
;
1399 dbg_printk("ref load string %s\n", estack_ax(stack
, top
)->u
.s
.str
);
1400 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1404 OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
):
1406 struct load_op
*insn
= (struct load_op
*) pc
;
1407 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
1409 dbg_printk("load field ref offset %u type user sequence\n",
1411 estack_push(stack
, top
, ax
, bx
, ax_t
, bx_t
);
1412 estack_ax(stack
, top
)->u
.s
.seq_len
=
1413 *(unsigned long *) &filter_stack_data
[ref
->offset
];
1414 estack_ax(stack
, top
)->u
.s
.user_str
=
1415 *(const char **) (&filter_stack_data
[ref
->offset
1416 + sizeof(unsigned long)]);
1417 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1418 dbg_printk("Filter warning: loading a NULL sequence.\n");
1422 estack_ax(stack
, top
)->u
.s
.literal_type
=
1423 ESTACK_STRING_LITERAL_TYPE_NONE
;
1424 estack_ax(stack
, top
)->u
.s
.user
= 1;
1425 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1429 OP(FILTER_OP_GET_CONTEXT_ROOT
):
1431 dbg_printk("op get context root\n");
1432 estack_push(stack
, top
, ax
, bx
, ax_t
, bx_t
);
1433 estack_ax(stack
, top
)->u
.ptr
.type
= LOAD_ROOT_CONTEXT
;
1434 /* "field" only needed for variants. */
1435 estack_ax(stack
, top
)->u
.ptr
.field
= NULL
;
1436 estack_ax(stack
, top
)->type
= REG_PTR
;
1437 next_pc
+= sizeof(struct load_op
);
1441 OP(FILTER_OP_GET_APP_CONTEXT_ROOT
):
1447 OP(FILTER_OP_GET_PAYLOAD_ROOT
):
1449 dbg_printk("op get app payload root\n");
1450 estack_push(stack
, top
, ax
, bx
, ax_t
, bx_t
);
1451 estack_ax(stack
, top
)->u
.ptr
.type
= LOAD_ROOT_PAYLOAD
;
1452 estack_ax(stack
, top
)->u
.ptr
.ptr
= filter_stack_data
;
1453 /* "field" only needed for variants. */
1454 estack_ax(stack
, top
)->u
.ptr
.field
= NULL
;
1455 estack_ax(stack
, top
)->type
= REG_PTR
;
1456 next_pc
+= sizeof(struct load_op
);
1460 OP(FILTER_OP_GET_SYMBOL
):
1462 dbg_printk("op get symbol\n");
1463 switch (estack_ax(stack
, top
)->u
.ptr
.type
) {
1465 printk(KERN_WARNING
"LTTng: filter: Nested fields not implemented yet.\n");
1468 case LOAD_ROOT_CONTEXT
:
1469 case LOAD_ROOT_APP_CONTEXT
:
1470 case LOAD_ROOT_PAYLOAD
:
1472 * symbol lookup is performed by
1478 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1482 OP(FILTER_OP_GET_SYMBOL_FIELD
):
1485 * Used for first variant encountered in a
1486 * traversal. Variants are not implemented yet.
1492 OP(FILTER_OP_GET_INDEX_U16
):
1494 struct load_op
*insn
= (struct load_op
*) pc
;
1495 struct get_index_u16
*index
= (struct get_index_u16
*) insn
->data
;
1497 dbg_printk("op get index u16\n");
1498 ret
= dynamic_get_index(lttng_probe_ctx
, bytecode
, index
->index
, estack_ax(stack
, top
));
1501 estack_ax_v
= estack_ax(stack
, top
)->u
.v
;
1502 estack_ax_t
= estack_ax(stack
, top
)->type
;
1503 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
1507 OP(FILTER_OP_GET_INDEX_U64
):
1509 struct load_op
*insn
= (struct load_op
*) pc
;
1510 struct get_index_u64
*index
= (struct get_index_u64
*) insn
->data
;
1512 dbg_printk("op get index u64\n");
1513 ret
= dynamic_get_index(lttng_probe_ctx
, bytecode
, index
->index
, estack_ax(stack
, top
));
1516 estack_ax_v
= estack_ax(stack
, top
)->u
.v
;
1517 estack_ax_t
= estack_ax(stack
, top
)->type
;
1518 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);
1522 OP(FILTER_OP_LOAD_FIELD
):
1524 dbg_printk("op load field\n");
1525 ret
= dynamic_load_field(estack_ax(stack
, top
));
1528 estack_ax_v
= estack_ax(stack
, top
)->u
.v
;
1529 estack_ax_t
= estack_ax(stack
, top
)->type
;
1530 next_pc
+= sizeof(struct load_op
);
1534 OP(FILTER_OP_LOAD_FIELD_S8
):
1536 dbg_printk("op load field s8\n");
1538 estack_ax_v
= *(int8_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1539 estack_ax_t
= REG_S64
;
1540 next_pc
+= sizeof(struct load_op
);
1543 OP(FILTER_OP_LOAD_FIELD_S16
):
1545 dbg_printk("op load field s16\n");
1547 estack_ax_v
= *(int16_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1548 estack_ax_t
= REG_S64
;
1549 next_pc
+= sizeof(struct load_op
);
1552 OP(FILTER_OP_LOAD_FIELD_S32
):
1554 dbg_printk("op load field s32\n");
1556 estack_ax_v
= *(int32_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1557 estack_ax_t
= REG_S64
;
1558 next_pc
+= sizeof(struct load_op
);
1561 OP(FILTER_OP_LOAD_FIELD_S64
):
1563 dbg_printk("op load field s64\n");
1565 estack_ax_v
= *(int64_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1566 estack_ax_t
= REG_S64
;
1567 next_pc
+= sizeof(struct load_op
);
1570 OP(FILTER_OP_LOAD_FIELD_U8
):
1572 dbg_printk("op load field u8\n");
1574 estack_ax_v
= *(uint8_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1575 estack_ax_t
= REG_S64
;
1576 next_pc
+= sizeof(struct load_op
);
1579 OP(FILTER_OP_LOAD_FIELD_U16
):
1581 dbg_printk("op load field u16\n");
1583 estack_ax_v
= *(uint16_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1584 estack_ax_t
= REG_S64
;
1585 next_pc
+= sizeof(struct load_op
);
1588 OP(FILTER_OP_LOAD_FIELD_U32
):
1590 dbg_printk("op load field u32\n");
1592 estack_ax_v
= *(uint32_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1593 estack_ax_t
= REG_S64
;
1594 next_pc
+= sizeof(struct load_op
);
1597 OP(FILTER_OP_LOAD_FIELD_U64
):
1599 dbg_printk("op load field u64\n");
1601 estack_ax_v
= *(uint64_t *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1602 estack_ax_t
= REG_S64
;
1603 next_pc
+= sizeof(struct load_op
);
1606 OP(FILTER_OP_LOAD_FIELD_DOUBLE
):
1612 OP(FILTER_OP_LOAD_FIELD_STRING
):
1616 dbg_printk("op load field string\n");
1617 str
= (const char *) estack_ax(stack
, top
)->u
.ptr
.ptr
;
1618 estack_ax(stack
, top
)->u
.s
.str
= str
;
1619 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1620 dbg_printk("Filter warning: loading a NULL string.\n");
1624 estack_ax(stack
, top
)->u
.s
.seq_len
= LTTNG_SIZE_MAX
;
1625 estack_ax(stack
, top
)->u
.s
.literal_type
=
1626 ESTACK_STRING_LITERAL_TYPE_NONE
;
1627 estack_ax(stack
, top
)->type
= REG_STRING
;
1628 next_pc
+= sizeof(struct load_op
);
1632 OP(FILTER_OP_LOAD_FIELD_SEQUENCE
):
1636 dbg_printk("op load field string sequence\n");
1637 ptr
= estack_ax(stack
, top
)->u
.ptr
.ptr
;
1638 estack_ax(stack
, top
)->u
.s
.seq_len
= *(unsigned long *) ptr
;
1639 estack_ax(stack
, top
)->u
.s
.str
= *(const char **) (ptr
+ sizeof(unsigned long));
1640 if (unlikely(!estack_ax(stack
, top
)->u
.s
.str
)) {
1641 dbg_printk("Filter warning: loading a NULL sequence.\n");
1645 estack_ax(stack
, top
)->u
.s
.literal_type
=
1646 ESTACK_STRING_LITERAL_TYPE_NONE
;
1647 estack_ax(stack
, top
)->type
= REG_STRING
;
1648 next_pc
+= sizeof(struct load_op
);
1654 /* Return _DISCARD on error. */
1656 return LTTNG_FILTER_DISCARD
;