/* SPDX-License-Identifier: MIT
 *
 * lttng-filter-validator.c
 *
 * LTTng modules filter bytecode validator.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
10 #include <linux/types.h>
11 #include <linux/jhash.h>
12 #include <linux/slab.h>
14 #include <wrapper/list.h>
15 #include <lttng/filter.h>
17 #define MERGE_POINT_TABLE_BITS 7
18 #define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)
20 /* merge point table node */
22 struct hlist_node node
;
24 /* Context at merge point */
26 unsigned long target_pc
;
30 struct hlist_head mp_head
[MERGE_POINT_TABLE_SIZE
];
34 int lttng_hash_match(struct mp_node
*mp_node
, unsigned long key_pc
)
36 if (mp_node
->target_pc
== key_pc
)
43 int merge_points_compare(const struct vstack
*stacka
,
44 const struct vstack
*stackb
)
48 if (stacka
->top
!= stackb
->top
)
50 len
= stacka
->top
+ 1;
51 WARN_ON_ONCE(len
< 0);
52 for (i
= 0; i
< len
; i
++) {
53 if (stacka
->e
[i
].type
!= stackb
->e
[i
].type
)
60 int merge_point_add_check(struct mp_table
*mp_table
, unsigned long target_pc
,
61 const struct vstack
*stack
)
63 struct mp_node
*mp_node
;
64 unsigned long hash
= jhash_1word(target_pc
, 0);
65 struct hlist_head
*head
;
66 struct mp_node
*lookup_node
;
69 dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
71 mp_node
= kzalloc(sizeof(struct mp_node
), GFP_KERNEL
);
74 mp_node
->target_pc
= target_pc
;
75 memcpy(&mp_node
->stack
, stack
, sizeof(mp_node
->stack
));
77 head
= &mp_table
->mp_head
[hash
& (MERGE_POINT_TABLE_SIZE
- 1)];
78 lttng_hlist_for_each_entry(lookup_node
, head
, node
) {
79 if (lttng_hash_match(lookup_node
, target_pc
)) {
85 /* Key already present */
86 dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
89 if (merge_points_compare(stack
, &lookup_node
->stack
)) {
90 printk(KERN_WARNING
"LTTng: filter: Merge points differ for offset %lu\n",
95 hlist_add_head(&mp_node
->node
, head
);
101 * Binary comparators use top of stack and top of stack -1.
104 int bin_op_compare_check(struct vstack
*stack
, const filter_opcode_t opcode
,
107 if (unlikely(!vstack_ax(stack
) || !vstack_bx(stack
)))
110 switch (vstack_ax(stack
)->type
) {
116 switch (vstack_bx(stack
)->type
) {
120 case REG_TYPE_UNKNOWN
:
124 case REG_STAR_GLOB_STRING
:
125 if (opcode
!= FILTER_OP_EQ
&& opcode
!= FILTER_OP_NE
) {
133 case REG_STAR_GLOB_STRING
:
134 switch (vstack_bx(stack
)->type
) {
138 case REG_TYPE_UNKNOWN
:
141 if (opcode
!= FILTER_OP_EQ
&& opcode
!= FILTER_OP_NE
) {
145 case REG_STAR_GLOB_STRING
:
151 switch (vstack_bx(stack
)->type
) {
155 case REG_TYPE_UNKNOWN
:
158 case REG_STAR_GLOB_STRING
:
164 case REG_TYPE_UNKNOWN
:
165 switch (vstack_bx(stack
)->type
) {
169 case REG_TYPE_UNKNOWN
:
171 case REG_STAR_GLOB_STRING
:
183 printk(KERN_WARNING
"LTTng: filter: empty stack for '%s' binary operator\n", str
);
187 printk(KERN_WARNING
"LTTng: filter: type mismatch for '%s' binary operator\n", str
);
191 printk(KERN_WARNING
"LTTng: filter: unknown type for '%s' binary operator\n", str
);
196 * Binary bitwise operators use top of stack and top of stack -1.
197 * Return 0 if typing is known to match, 1 if typing is dynamic
198 * (unknown), negative error value on error.
201 int bin_op_bitwise_check(struct vstack
*stack
, filter_opcode_t opcode
,
204 if (unlikely(!vstack_ax(stack
) || !vstack_bx(stack
)))
207 switch (vstack_ax(stack
)->type
) {
212 case REG_TYPE_UNKNOWN
:
213 switch (vstack_bx(stack
)->type
) {
217 case REG_TYPE_UNKNOWN
:
223 switch (vstack_bx(stack
)->type
) {
227 case REG_TYPE_UNKNOWN
:
240 printk(KERN_WARNING
"LTTng: filter: empty stack for '%s' binary operator\n", str
);
244 printk(KERN_WARNING
"LTTng: filter: unknown type for '%s' binary operator\n", str
);
249 int validate_get_symbol(struct bytecode_runtime
*bytecode
,
250 const struct get_symbol
*sym
)
252 const char *str
, *str_limit
;
255 if (sym
->offset
>= bytecode
->p
.bc
->bc
.len
- bytecode
->p
.bc
->bc
.reloc_offset
)
258 str
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.reloc_offset
+ sym
->offset
;
259 str_limit
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.len
;
260 len_limit
= str_limit
- str
;
261 if (strnlen(str
, len_limit
) == len_limit
)
267 * Validate bytecode range overflow within the validation pass.
268 * Called for each instruction encountered.
271 int bytecode_validate_overflow(struct bytecode_runtime
*bytecode
,
272 char *start_pc
, char *pc
)
276 switch (*(filter_opcode_t
*) pc
) {
277 case FILTER_OP_UNKNOWN
:
280 printk(KERN_WARNING
"LTTng: filter: unknown bytecode op %u\n",
281 (unsigned int) *(filter_opcode_t
*) pc
);
286 case FILTER_OP_RETURN
:
287 case FILTER_OP_RETURN_S64
:
289 if (unlikely(pc
+ sizeof(struct return_op
)
290 > start_pc
+ bytecode
->len
)) {
301 case FILTER_OP_MINUS
:
302 case FILTER_OP_EQ_DOUBLE
:
303 case FILTER_OP_NE_DOUBLE
:
304 case FILTER_OP_GT_DOUBLE
:
305 case FILTER_OP_LT_DOUBLE
:
306 case FILTER_OP_GE_DOUBLE
:
307 case FILTER_OP_LE_DOUBLE
:
309 case FILTER_OP_EQ_DOUBLE_S64
:
310 case FILTER_OP_NE_DOUBLE_S64
:
311 case FILTER_OP_GT_DOUBLE_S64
:
312 case FILTER_OP_LT_DOUBLE_S64
:
313 case FILTER_OP_GE_DOUBLE_S64
:
314 case FILTER_OP_LE_DOUBLE_S64
:
315 case FILTER_OP_EQ_S64_DOUBLE
:
316 case FILTER_OP_NE_S64_DOUBLE
:
317 case FILTER_OP_GT_S64_DOUBLE
:
318 case FILTER_OP_LT_S64_DOUBLE
:
319 case FILTER_OP_GE_S64_DOUBLE
:
320 case FILTER_OP_LE_S64_DOUBLE
:
321 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
322 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
323 case FILTER_OP_LOAD_DOUBLE
:
324 case FILTER_OP_CAST_DOUBLE_TO_S64
:
325 case FILTER_OP_UNARY_PLUS_DOUBLE
:
326 case FILTER_OP_UNARY_MINUS_DOUBLE
:
327 case FILTER_OP_UNARY_NOT_DOUBLE
:
329 printk(KERN_WARNING
"LTTng: filter: unsupported bytecode op %u\n",
330 (unsigned int) *(filter_opcode_t
*) pc
);
341 case FILTER_OP_EQ_STRING
:
342 case FILTER_OP_NE_STRING
:
343 case FILTER_OP_GT_STRING
:
344 case FILTER_OP_LT_STRING
:
345 case FILTER_OP_GE_STRING
:
346 case FILTER_OP_LE_STRING
:
347 case FILTER_OP_EQ_STAR_GLOB_STRING
:
348 case FILTER_OP_NE_STAR_GLOB_STRING
:
349 case FILTER_OP_EQ_S64
:
350 case FILTER_OP_NE_S64
:
351 case FILTER_OP_GT_S64
:
352 case FILTER_OP_LT_S64
:
353 case FILTER_OP_GE_S64
:
354 case FILTER_OP_LE_S64
:
355 case FILTER_OP_BIT_RSHIFT
:
356 case FILTER_OP_BIT_LSHIFT
:
357 case FILTER_OP_BIT_AND
:
358 case FILTER_OP_BIT_OR
:
359 case FILTER_OP_BIT_XOR
:
361 if (unlikely(pc
+ sizeof(struct binary_op
)
362 > start_pc
+ bytecode
->len
)) {
369 case FILTER_OP_UNARY_PLUS
:
370 case FILTER_OP_UNARY_MINUS
:
371 case FILTER_OP_UNARY_NOT
:
372 case FILTER_OP_UNARY_PLUS_S64
:
373 case FILTER_OP_UNARY_MINUS_S64
:
374 case FILTER_OP_UNARY_NOT_S64
:
375 case FILTER_OP_UNARY_BIT_NOT
:
377 if (unlikely(pc
+ sizeof(struct unary_op
)
378 > start_pc
+ bytecode
->len
)) {
388 if (unlikely(pc
+ sizeof(struct logical_op
)
389 > start_pc
+ bytecode
->len
)) {
396 case FILTER_OP_LOAD_FIELD_REF
:
398 printk(KERN_WARNING
"LTTng: filter: Unknown field ref type\n");
403 /* get context ref */
404 case FILTER_OP_GET_CONTEXT_REF
:
406 printk(KERN_WARNING
"LTTng: filter: Unknown field ref type\n");
410 case FILTER_OP_LOAD_FIELD_REF_STRING
:
411 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
412 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
413 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
414 case FILTER_OP_LOAD_FIELD_REF_S64
:
415 case FILTER_OP_GET_CONTEXT_REF_STRING
:
416 case FILTER_OP_GET_CONTEXT_REF_S64
:
418 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct field_ref
)
419 > start_pc
+ bytecode
->len
)) {
425 /* load from immediate operand */
426 case FILTER_OP_LOAD_STRING
:
427 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
429 struct load_op
*insn
= (struct load_op
*) pc
;
430 uint32_t str_len
, maxlen
;
432 if (unlikely(pc
+ sizeof(struct load_op
)
433 > start_pc
+ bytecode
->len
)) {
438 maxlen
= start_pc
+ bytecode
->len
- pc
- sizeof(struct load_op
);
439 str_len
= strnlen(insn
->data
, maxlen
);
440 if (unlikely(str_len
>= maxlen
)) {
441 /* Final '\0' not found within range */
447 case FILTER_OP_LOAD_S64
:
449 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct literal_numeric
)
450 > start_pc
+ bytecode
->len
)) {
456 case FILTER_OP_CAST_TO_S64
:
457 case FILTER_OP_CAST_NOP
:
459 if (unlikely(pc
+ sizeof(struct cast_op
)
460 > start_pc
+ bytecode
->len
)) {
467 * Instructions for recursive traversal through composed types.
469 case FILTER_OP_GET_CONTEXT_ROOT
:
470 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
471 case FILTER_OP_GET_PAYLOAD_ROOT
:
472 case FILTER_OP_LOAD_FIELD
:
473 case FILTER_OP_LOAD_FIELD_S8
:
474 case FILTER_OP_LOAD_FIELD_S16
:
475 case FILTER_OP_LOAD_FIELD_S32
:
476 case FILTER_OP_LOAD_FIELD_S64
:
477 case FILTER_OP_LOAD_FIELD_U8
:
478 case FILTER_OP_LOAD_FIELD_U16
:
479 case FILTER_OP_LOAD_FIELD_U32
:
480 case FILTER_OP_LOAD_FIELD_U64
:
481 case FILTER_OP_LOAD_FIELD_STRING
:
482 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
483 case FILTER_OP_LOAD_FIELD_DOUBLE
:
484 if (unlikely(pc
+ sizeof(struct load_op
)
485 > start_pc
+ bytecode
->len
)) {
490 case FILTER_OP_GET_SYMBOL
:
492 struct load_op
*insn
= (struct load_op
*) pc
;
493 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
495 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_symbol
)
496 > start_pc
+ bytecode
->len
)) {
500 ret
= validate_get_symbol(bytecode
, sym
);
504 case FILTER_OP_GET_SYMBOL_FIELD
:
505 printk(KERN_WARNING
"LTTng: filter: Unexpected get symbol field\n");
509 case FILTER_OP_GET_INDEX_U16
:
510 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_index_u16
)
511 > start_pc
+ bytecode
->len
)) {
516 case FILTER_OP_GET_INDEX_U64
:
517 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_index_u64
)
518 > start_pc
+ bytecode
->len
)) {
528 unsigned long delete_all_nodes(struct mp_table
*mp_table
)
530 struct mp_node
*mp_node
;
531 struct hlist_node
*tmp
;
532 unsigned long nr_nodes
= 0;
535 for (i
= 0; i
< MERGE_POINT_TABLE_SIZE
; i
++) {
536 struct hlist_head
*head
;
538 head
= &mp_table
->mp_head
[i
];
539 lttng_hlist_for_each_entry_safe(mp_node
, tmp
, head
, node
) {
553 int validate_instruction_context(struct bytecode_runtime
*bytecode
,
554 struct vstack
*stack
,
559 const filter_opcode_t opcode
= *(filter_opcode_t
*) pc
;
562 case FILTER_OP_UNKNOWN
:
565 printk(KERN_WARNING
"LTTng: filter: unknown bytecode op %u\n",
566 (unsigned int) *(filter_opcode_t
*) pc
);
571 case FILTER_OP_RETURN
:
572 case FILTER_OP_RETURN_S64
:
582 case FILTER_OP_MINUS
:
584 case FILTER_OP_EQ_DOUBLE
:
585 case FILTER_OP_NE_DOUBLE
:
586 case FILTER_OP_GT_DOUBLE
:
587 case FILTER_OP_LT_DOUBLE
:
588 case FILTER_OP_GE_DOUBLE
:
589 case FILTER_OP_LE_DOUBLE
:
590 case FILTER_OP_EQ_DOUBLE_S64
:
591 case FILTER_OP_NE_DOUBLE_S64
:
592 case FILTER_OP_GT_DOUBLE_S64
:
593 case FILTER_OP_LT_DOUBLE_S64
:
594 case FILTER_OP_GE_DOUBLE_S64
:
595 case FILTER_OP_LE_DOUBLE_S64
:
596 case FILTER_OP_EQ_S64_DOUBLE
:
597 case FILTER_OP_NE_S64_DOUBLE
:
598 case FILTER_OP_GT_S64_DOUBLE
:
599 case FILTER_OP_LT_S64_DOUBLE
:
600 case FILTER_OP_GE_S64_DOUBLE
:
601 case FILTER_OP_LE_S64_DOUBLE
:
602 case FILTER_OP_UNARY_PLUS_DOUBLE
:
603 case FILTER_OP_UNARY_MINUS_DOUBLE
:
604 case FILTER_OP_UNARY_NOT_DOUBLE
:
605 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
606 case FILTER_OP_LOAD_DOUBLE
:
607 case FILTER_OP_CAST_DOUBLE_TO_S64
:
608 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
610 printk(KERN_WARNING
"LTTng: filter: unsupported bytecode op %u\n",
611 (unsigned int) *(filter_opcode_t
*) pc
);
618 ret
= bin_op_compare_check(stack
, opcode
, "==");
625 ret
= bin_op_compare_check(stack
, opcode
, "!=");
632 ret
= bin_op_compare_check(stack
, opcode
, ">");
639 ret
= bin_op_compare_check(stack
, opcode
, "<");
646 ret
= bin_op_compare_check(stack
, opcode
, ">=");
653 ret
= bin_op_compare_check(stack
, opcode
, "<=");
659 case FILTER_OP_EQ_STRING
:
660 case FILTER_OP_NE_STRING
:
661 case FILTER_OP_GT_STRING
:
662 case FILTER_OP_LT_STRING
:
663 case FILTER_OP_GE_STRING
:
664 case FILTER_OP_LE_STRING
:
666 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
667 printk(KERN_WARNING
"LTTng: filter: Empty stack\n");
671 if (vstack_ax(stack
)->type
!= REG_STRING
672 || vstack_bx(stack
)->type
!= REG_STRING
) {
673 printk(KERN_WARNING
"LTTng: filter: Unexpected register type for string comparator\n");
681 case FILTER_OP_EQ_STAR_GLOB_STRING
:
682 case FILTER_OP_NE_STAR_GLOB_STRING
:
684 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
685 printk(KERN_WARNING
"LTTng: filter: Empty stack\n");
689 if (vstack_ax(stack
)->type
!= REG_STAR_GLOB_STRING
690 && vstack_bx(stack
)->type
!= REG_STAR_GLOB_STRING
) {
691 printk(KERN_WARNING
"LTTng: filter: Unexpected register type for globbing pattern comparator\n");
698 case FILTER_OP_EQ_S64
:
699 case FILTER_OP_NE_S64
:
700 case FILTER_OP_GT_S64
:
701 case FILTER_OP_LT_S64
:
702 case FILTER_OP_GE_S64
:
703 case FILTER_OP_LE_S64
:
705 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
706 printk(KERN_WARNING
"LTTng: filter: Empty stack\n");
710 if (vstack_ax(stack
)->type
!= REG_S64
711 || vstack_bx(stack
)->type
!= REG_S64
) {
712 printk(KERN_WARNING
"LTTng: filter: Unexpected register type for s64 comparator\n");
719 case FILTER_OP_BIT_RSHIFT
:
720 ret
= bin_op_bitwise_check(stack
, opcode
, ">>");
724 case FILTER_OP_BIT_LSHIFT
:
725 ret
= bin_op_bitwise_check(stack
, opcode
, "<<");
729 case FILTER_OP_BIT_AND
:
730 ret
= bin_op_bitwise_check(stack
, opcode
, "&");
734 case FILTER_OP_BIT_OR
:
735 ret
= bin_op_bitwise_check(stack
, opcode
, "|");
739 case FILTER_OP_BIT_XOR
:
740 ret
= bin_op_bitwise_check(stack
, opcode
, "^");
746 case FILTER_OP_UNARY_PLUS
:
747 case FILTER_OP_UNARY_MINUS
:
748 case FILTER_OP_UNARY_NOT
:
750 if (!vstack_ax(stack
)) {
751 printk(KERN_WARNING
"LTTng: filter: Empty stack\n");
755 switch (vstack_ax(stack
)->type
) {
758 printk(KERN_WARNING
"LTTng: filter: unknown register type\n");
763 case REG_STAR_GLOB_STRING
:
764 printk(KERN_WARNING
"LTTng: filter: Unary op can only be applied to numeric or floating point registers\n");
768 case REG_TYPE_UNKNOWN
:
773 case FILTER_OP_UNARY_BIT_NOT
:
775 if (!vstack_ax(stack
)) {
776 printk(KERN_WARNING
"LTTng: filter: Empty stack\n");
780 switch (vstack_ax(stack
)->type
) {
782 printk(KERN_WARNING
"LTTng: filter: unknown register type\n");
787 case REG_STAR_GLOB_STRING
:
789 printk(KERN_WARNING
"LTTng: filter: Unary bitwise op can only be applied to numeric registers\n");
794 case REG_TYPE_UNKNOWN
:
800 case FILTER_OP_UNARY_PLUS_S64
:
801 case FILTER_OP_UNARY_MINUS_S64
:
802 case FILTER_OP_UNARY_NOT_S64
:
804 if (!vstack_ax(stack
)) {
805 printk(KERN_WARNING
"LTTng: filter: Empty stack\n");
809 if (vstack_ax(stack
)->type
!= REG_S64
) {
810 printk(KERN_WARNING
"LTTng: filter: Invalid register type\n");
821 struct logical_op
*insn
= (struct logical_op
*) pc
;
823 if (!vstack_ax(stack
)) {
824 printk(KERN_WARNING
"LTTng: filter: Empty stack\n");
828 if (vstack_ax(stack
)->type
!= REG_S64
) {
829 printk(KERN_WARNING
"LTTng: filter: Logical comparator expects S64 register\n");
834 dbg_printk("Validate jumping to bytecode offset %u\n",
835 (unsigned int) insn
->skip_offset
);
836 if (unlikely(start_pc
+ insn
->skip_offset
<= pc
)) {
837 printk(KERN_WARNING
"LTTng: filter: Loops are not allowed in bytecode\n");
845 case FILTER_OP_LOAD_FIELD_REF
:
847 printk(KERN_WARNING
"LTTng: filter: Unknown field ref type\n");
851 case FILTER_OP_LOAD_FIELD_REF_STRING
:
852 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
853 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
854 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
856 struct load_op
*insn
= (struct load_op
*) pc
;
857 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
859 dbg_printk("Validate load field ref offset %u type string\n",
863 case FILTER_OP_LOAD_FIELD_REF_S64
:
865 struct load_op
*insn
= (struct load_op
*) pc
;
866 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
868 dbg_printk("Validate load field ref offset %u type s64\n",
873 /* load from immediate operand */
874 case FILTER_OP_LOAD_STRING
:
875 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
880 case FILTER_OP_LOAD_S64
:
885 case FILTER_OP_CAST_TO_S64
:
887 struct cast_op
*insn
= (struct cast_op
*) pc
;
889 if (!vstack_ax(stack
)) {
890 printk(KERN_WARNING
"LTTng: filter: Empty stack\n");
894 switch (vstack_ax(stack
)->type
) {
897 printk(KERN_WARNING
"LTTng: filter: unknown register type\n");
902 case REG_STAR_GLOB_STRING
:
903 printk(KERN_WARNING
"LTTng: filter: Cast op can only be applied to numeric or floating point registers\n");
909 if (insn
->op
== FILTER_OP_CAST_DOUBLE_TO_S64
) {
910 if (vstack_ax(stack
)->type
!= REG_DOUBLE
) {
911 printk(KERN_WARNING
"LTTng: filter: Cast expects double\n");
918 case FILTER_OP_CAST_NOP
:
923 /* get context ref */
924 case FILTER_OP_GET_CONTEXT_REF
:
926 printk(KERN_WARNING
"LTTng: filter: Unknown get context ref type\n");
930 case FILTER_OP_GET_CONTEXT_REF_STRING
:
932 struct load_op
*insn
= (struct load_op
*) pc
;
933 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
935 dbg_printk("Validate get context ref offset %u type string\n",
939 case FILTER_OP_GET_CONTEXT_REF_S64
:
941 struct load_op
*insn
= (struct load_op
*) pc
;
942 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
944 dbg_printk("Validate get context ref offset %u type s64\n",
950 * Instructions for recursive traversal through composed types.
952 case FILTER_OP_GET_CONTEXT_ROOT
:
954 dbg_printk("Validate get context root\n");
957 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
959 dbg_printk("Validate get app context root\n");
962 case FILTER_OP_GET_PAYLOAD_ROOT
:
964 dbg_printk("Validate get payload root\n");
967 case FILTER_OP_LOAD_FIELD
:
970 * We tolerate that field type is unknown at validation,
971 * because we are performing the load specialization in
972 * a phase after validation.
974 dbg_printk("Validate load field\n");
977 case FILTER_OP_LOAD_FIELD_S8
:
979 dbg_printk("Validate load field s8\n");
982 case FILTER_OP_LOAD_FIELD_S16
:
984 dbg_printk("Validate load field s16\n");
987 case FILTER_OP_LOAD_FIELD_S32
:
989 dbg_printk("Validate load field s32\n");
992 case FILTER_OP_LOAD_FIELD_S64
:
994 dbg_printk("Validate load field s64\n");
997 case FILTER_OP_LOAD_FIELD_U8
:
999 dbg_printk("Validate load field u8\n");
1002 case FILTER_OP_LOAD_FIELD_U16
:
1004 dbg_printk("Validate load field u16\n");
1007 case FILTER_OP_LOAD_FIELD_U32
:
1009 dbg_printk("Validate load field u32\n");
1012 case FILTER_OP_LOAD_FIELD_U64
:
1014 dbg_printk("Validate load field u64\n");
1017 case FILTER_OP_LOAD_FIELD_STRING
:
1019 dbg_printk("Validate load field string\n");
1022 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1024 dbg_printk("Validate load field sequence\n");
1027 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1029 dbg_printk("Validate load field double\n");
1033 case FILTER_OP_GET_SYMBOL
:
1035 struct load_op
*insn
= (struct load_op
*) pc
;
1036 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
1038 dbg_printk("Validate get symbol offset %u\n", sym
->offset
);
1042 case FILTER_OP_GET_SYMBOL_FIELD
:
1044 struct load_op
*insn
= (struct load_op
*) pc
;
1045 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
1047 dbg_printk("Validate get symbol field offset %u\n", sym
->offset
);
1051 case FILTER_OP_GET_INDEX_U16
:
1053 struct load_op
*insn
= (struct load_op
*) pc
;
1054 struct get_index_u16
*get_index
= (struct get_index_u16
*) insn
->data
;
1056 dbg_printk("Validate get index u16 index %u\n", get_index
->index
);
1060 case FILTER_OP_GET_INDEX_U64
:
1062 struct load_op
*insn
= (struct load_op
*) pc
;
1063 struct get_index_u64
*get_index
= (struct get_index_u64
*) insn
->data
;
1065 dbg_printk("Validate get index u64 index %llu\n",
1066 (unsigned long long) get_index
->index
);
1080 int validate_instruction_all_contexts(struct bytecode_runtime
*bytecode
,
1081 struct mp_table
*mp_table
,
1082 struct vstack
*stack
,
1087 unsigned long target_pc
= pc
- start_pc
;
1089 struct hlist_head
*head
;
1090 struct mp_node
*mp_node
;
1092 /* Validate the context resulting from the previous instruction */
1093 ret
= validate_instruction_context(bytecode
, stack
, start_pc
, pc
);
1097 /* Validate merge points */
1098 hash
= jhash_1word(target_pc
, 0);
1099 head
= &mp_table
->mp_head
[hash
& (MERGE_POINT_TABLE_SIZE
- 1)];
1100 lttng_hlist_for_each_entry(mp_node
, head
, node
) {
1101 if (lttng_hash_match(mp_node
, target_pc
)) {
1107 dbg_printk("Filter: validate merge point at offset %lu\n",
1109 if (merge_points_compare(stack
, &mp_node
->stack
)) {
1110 printk(KERN_WARNING
"LTTng: filter: Merge points differ for offset %lu\n",
1114 /* Once validated, we can remove the merge point */
1115 dbg_printk("Filter: remove merge point at offset %lu\n",
1117 hlist_del(&mp_node
->node
);
1124 * >0: going to next insn.
1125 * 0: success, stop iteration.
1129 int exec_insn(struct bytecode_runtime
*bytecode
,
1130 struct mp_table
*mp_table
,
1131 struct vstack
*stack
,
1136 char *next_pc
= *_next_pc
;
1138 switch (*(filter_opcode_t
*) pc
) {
1139 case FILTER_OP_UNKNOWN
:
1142 printk(KERN_WARNING
"LTTng: filter: unknown bytecode op %u\n",
1143 (unsigned int) *(filter_opcode_t
*) pc
);
1148 case FILTER_OP_RETURN
:
1150 if (!vstack_ax(stack
)) {
1151 printk(KERN_WARNING
"LTTng: filter: Empty stack\n");
1155 switch (vstack_ax(stack
)->type
) {
1157 case REG_TYPE_UNKNOWN
:
1160 printk(KERN_WARNING
"LTTng: filter: Unexpected register type %d at end of bytecode\n",
1161 (int) vstack_ax(stack
)->type
);
1170 case FILTER_OP_RETURN_S64
:
1172 if (!vstack_ax(stack
)) {
1173 printk(KERN_WARNING
"LTTng: filter: Empty stack\n");
1177 switch (vstack_ax(stack
)->type
) {
1181 case REG_TYPE_UNKNOWN
:
1182 printk(KERN_WARNING
"LTTng: filter: Unexpected register type %d at end of bytecode\n",
1183 (int) vstack_ax(stack
)->type
);
1196 case FILTER_OP_PLUS
:
1197 case FILTER_OP_MINUS
:
1198 /* Floating point */
1199 case FILTER_OP_EQ_DOUBLE
:
1200 case FILTER_OP_NE_DOUBLE
:
1201 case FILTER_OP_GT_DOUBLE
:
1202 case FILTER_OP_LT_DOUBLE
:
1203 case FILTER_OP_GE_DOUBLE
:
1204 case FILTER_OP_LE_DOUBLE
:
1205 case FILTER_OP_EQ_DOUBLE_S64
:
1206 case FILTER_OP_NE_DOUBLE_S64
:
1207 case FILTER_OP_GT_DOUBLE_S64
:
1208 case FILTER_OP_LT_DOUBLE_S64
:
1209 case FILTER_OP_GE_DOUBLE_S64
:
1210 case FILTER_OP_LE_DOUBLE_S64
:
1211 case FILTER_OP_EQ_S64_DOUBLE
:
1212 case FILTER_OP_NE_S64_DOUBLE
:
1213 case FILTER_OP_GT_S64_DOUBLE
:
1214 case FILTER_OP_LT_S64_DOUBLE
:
1215 case FILTER_OP_GE_S64_DOUBLE
:
1216 case FILTER_OP_LE_S64_DOUBLE
:
1217 case FILTER_OP_UNARY_PLUS_DOUBLE
:
1218 case FILTER_OP_UNARY_MINUS_DOUBLE
:
1219 case FILTER_OP_UNARY_NOT_DOUBLE
:
1220 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
1221 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
1222 case FILTER_OP_LOAD_DOUBLE
:
1223 case FILTER_OP_CAST_DOUBLE_TO_S64
:
1225 printk(KERN_WARNING
"LTTng: filter: unsupported bytecode op %u\n",
1226 (unsigned int) *(filter_opcode_t
*) pc
);
1237 case FILTER_OP_EQ_STRING
:
1238 case FILTER_OP_NE_STRING
:
1239 case FILTER_OP_GT_STRING
:
1240 case FILTER_OP_LT_STRING
:
1241 case FILTER_OP_GE_STRING
:
1242 case FILTER_OP_LE_STRING
:
1243 case FILTER_OP_EQ_STAR_GLOB_STRING
:
1244 case FILTER_OP_NE_STAR_GLOB_STRING
:
1245 case FILTER_OP_EQ_S64
:
1246 case FILTER_OP_NE_S64
:
1247 case FILTER_OP_GT_S64
:
1248 case FILTER_OP_LT_S64
:
1249 case FILTER_OP_GE_S64
:
1250 case FILTER_OP_LE_S64
:
1251 case FILTER_OP_BIT_RSHIFT
:
1252 case FILTER_OP_BIT_LSHIFT
:
1253 case FILTER_OP_BIT_AND
:
1254 case FILTER_OP_BIT_OR
:
1255 case FILTER_OP_BIT_XOR
:
1258 if (vstack_pop(stack
)) {
1262 if (!vstack_ax(stack
)) {
1263 printk(KERN_WARNING
"LTTng: filter: Empty stack\n");
1267 switch (vstack_ax(stack
)->type
) {
1271 case REG_STAR_GLOB_STRING
:
1272 case REG_TYPE_UNKNOWN
:
1275 printk(KERN_WARNING
"LTTng: filter: Unexpected register type %d for operation\n",
1276 (int) vstack_ax(stack
)->type
);
1281 vstack_ax(stack
)->type
= REG_S64
;
1282 next_pc
+= sizeof(struct binary_op
);
1287 case FILTER_OP_UNARY_PLUS
:
1288 case FILTER_OP_UNARY_MINUS
:
1291 if (!vstack_ax(stack
)) {
1292 printk(KERN_WARNING
"LTTng: filter: Empty stack\n\n");
1296 switch (vstack_ax(stack
)->type
) {
1298 case REG_TYPE_UNKNOWN
:
1301 printk(KERN_WARNING
"LTTng: filter: Unexpected register type %d for operation\n",
1302 (int) vstack_ax(stack
)->type
);
1307 vstack_ax(stack
)->type
= REG_TYPE_UNKNOWN
;
1308 next_pc
+= sizeof(struct unary_op
);
1312 case FILTER_OP_UNARY_PLUS_S64
:
1313 case FILTER_OP_UNARY_MINUS_S64
:
1314 case FILTER_OP_UNARY_NOT_S64
:
1317 if (!vstack_ax(stack
)) {
1318 printk(KERN_WARNING
"LTTng: filter: Empty stack\n\n");
1322 switch (vstack_ax(stack
)->type
) {
1326 printk(KERN_WARNING
"LTTng: filter: Unexpected register type %d for operation\n",
1327 (int) vstack_ax(stack
)->type
);
1332 vstack_ax(stack
)->type
= REG_S64
;
1333 next_pc
+= sizeof(struct unary_op
);
1337 case FILTER_OP_UNARY_NOT
:
1340 if (!vstack_ax(stack
)) {
1341 printk(KERN_WARNING
"LTTng: filter: Empty stack\n\n");
1345 switch (vstack_ax(stack
)->type
) {
1347 case REG_TYPE_UNKNOWN
:
1350 printk(KERN_WARNING
"LTTng: filter: Unexpected register type %d for operation\n",
1351 (int) vstack_ax(stack
)->type
);
1356 vstack_ax(stack
)->type
= REG_S64
;
1357 next_pc
+= sizeof(struct unary_op
);
1361 case FILTER_OP_UNARY_BIT_NOT
:
1364 if (!vstack_ax(stack
)) {
1365 printk(KERN_WARNING
"LTTng: filter: Empty stack\n");
1369 switch (vstack_ax(stack
)->type
) {
1371 case REG_TYPE_UNKNOWN
:
1375 printk(KERN_WARNING
"LTTng: filter: Unexpected register type %d for operation\n",
1376 (int) vstack_ax(stack
)->type
);
1381 vstack_ax(stack
)->type
= REG_S64
;
1382 next_pc
+= sizeof(struct unary_op
);
1390 struct logical_op
*insn
= (struct logical_op
*) pc
;
1393 /* Add merge point to table */
1394 merge_ret
= merge_point_add_check(mp_table
,
1395 insn
->skip_offset
, stack
);
1401 if (!vstack_ax(stack
)) {
1402 printk(KERN_WARNING
"LTTng: filter: Empty stack\n\n");
1406 /* There is always a cast-to-s64 operation before a or/and op. */
1407 switch (vstack_ax(stack
)->type
) {
1411 printk(KERN_WARNING
"LTTng: filter: Incorrect register type %d for operation\n",
1412 (int) vstack_ax(stack
)->type
);
1417 /* Continue to next instruction */
1418 /* Pop 1 when jump not taken */
1419 if (vstack_pop(stack
)) {
1423 next_pc
+= sizeof(struct logical_op
);
1427 /* load field ref */
1428 case FILTER_OP_LOAD_FIELD_REF
:
1430 printk(KERN_WARNING
"LTTng: filter: Unknown field ref type\n");
1434 /* get context ref */
1435 case FILTER_OP_GET_CONTEXT_REF
:
1437 printk(KERN_WARNING
"LTTng: filter: Unknown get context ref type\n");
1441 case FILTER_OP_LOAD_FIELD_REF_STRING
:
1442 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
1443 case FILTER_OP_GET_CONTEXT_REF_STRING
:
1444 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
1445 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
1447 if (vstack_push(stack
)) {
1451 vstack_ax(stack
)->type
= REG_STRING
;
1452 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1455 case FILTER_OP_LOAD_FIELD_REF_S64
:
1456 case FILTER_OP_GET_CONTEXT_REF_S64
:
1458 if (vstack_push(stack
)) {
1462 vstack_ax(stack
)->type
= REG_S64
;
1463 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1467 /* load from immediate operand */
1468 case FILTER_OP_LOAD_STRING
:
1470 struct load_op
*insn
= (struct load_op
*) pc
;
1472 if (vstack_push(stack
)) {
1476 vstack_ax(stack
)->type
= REG_STRING
;
1477 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1481 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
1483 struct load_op
*insn
= (struct load_op
*) pc
;
1485 if (vstack_push(stack
)) {
1489 vstack_ax(stack
)->type
= REG_STAR_GLOB_STRING
;
1490 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1494 case FILTER_OP_LOAD_S64
:
1496 if (vstack_push(stack
)) {
1500 vstack_ax(stack
)->type
= REG_S64
;
1501 next_pc
+= sizeof(struct load_op
)
1502 + sizeof(struct literal_numeric
);
1506 case FILTER_OP_CAST_TO_S64
:
1509 if (!vstack_ax(stack
)) {
1510 printk(KERN_WARNING
"LTTng: filter: Empty stack\n");
1514 switch (vstack_ax(stack
)->type
) {
1517 case REG_TYPE_UNKNOWN
:
1520 printk(KERN_WARNING
"LTTng: filter: Incorrect register type %d for cast\n",
1521 (int) vstack_ax(stack
)->type
);
1525 vstack_ax(stack
)->type
= REG_S64
;
1526 next_pc
+= sizeof(struct cast_op
);
1529 case FILTER_OP_CAST_NOP
:
1531 next_pc
+= sizeof(struct cast_op
);
1536 * Instructions for recursive traversal through composed types.
1538 case FILTER_OP_GET_CONTEXT_ROOT
:
1539 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1540 case FILTER_OP_GET_PAYLOAD_ROOT
:
1542 if (vstack_push(stack
)) {
1546 vstack_ax(stack
)->type
= REG_PTR
;
1547 next_pc
+= sizeof(struct load_op
);
1551 case FILTER_OP_LOAD_FIELD
:
1554 if (!vstack_ax(stack
)) {
1555 printk(KERN_WARNING
"LTTng: filter: Empty stack\n\n");
1559 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1560 printk(KERN_WARNING
"LTTng: filter: Expecting pointer on top of stack\n\n");
1564 vstack_ax(stack
)->type
= REG_TYPE_UNKNOWN
;
1565 next_pc
+= sizeof(struct load_op
);
1569 case FILTER_OP_LOAD_FIELD_S8
:
1570 case FILTER_OP_LOAD_FIELD_S16
:
1571 case FILTER_OP_LOAD_FIELD_S32
:
1572 case FILTER_OP_LOAD_FIELD_S64
:
1573 case FILTER_OP_LOAD_FIELD_U8
:
1574 case FILTER_OP_LOAD_FIELD_U16
:
1575 case FILTER_OP_LOAD_FIELD_U32
:
1576 case FILTER_OP_LOAD_FIELD_U64
:
1579 if (!vstack_ax(stack
)) {
1580 printk(KERN_WARNING
"LTTng: filter: Empty stack\n\n");
1584 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1585 printk(KERN_WARNING
"LTTng: filter: Expecting pointer on top of stack\n\n");
1589 vstack_ax(stack
)->type
= REG_S64
;
1590 next_pc
+= sizeof(struct load_op
);
1594 case FILTER_OP_LOAD_FIELD_STRING
:
1595 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1598 if (!vstack_ax(stack
)) {
1599 printk(KERN_WARNING
"LTTng: filter: Empty stack\n\n");
1603 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1604 printk(KERN_WARNING
"LTTng: filter: Expecting pointer on top of stack\n\n");
1608 vstack_ax(stack
)->type
= REG_STRING
;
1609 next_pc
+= sizeof(struct load_op
);
1613 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1616 if (!vstack_ax(stack
)) {
1617 printk(KERN_WARNING
"LTTng: filter: Empty stack\n\n");
1621 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1622 printk(KERN_WARNING
"LTTng: filter: Expecting pointer on top of stack\n\n");
1626 vstack_ax(stack
)->type
= REG_DOUBLE
;
1627 next_pc
+= sizeof(struct load_op
);
1631 case FILTER_OP_GET_SYMBOL
:
1632 case FILTER_OP_GET_SYMBOL_FIELD
:
1635 if (!vstack_ax(stack
)) {
1636 printk(KERN_WARNING
"LTTng: filter: Empty stack\n\n");
1640 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1641 printk(KERN_WARNING
"LTTng: filter: Expecting pointer on top of stack\n\n");
1645 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1649 case FILTER_OP_GET_INDEX_U16
:
1652 if (!vstack_ax(stack
)) {
1653 printk(KERN_WARNING
"LTTng: filter: Empty stack\n\n");
1657 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1658 printk(KERN_WARNING
"LTTng: filter: Expecting pointer on top of stack\n\n");
1662 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
1666 case FILTER_OP_GET_INDEX_U64
:
1669 if (!vstack_ax(stack
)) {
1670 printk(KERN_WARNING
"LTTng: filter: Empty stack\n\n");
1674 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1675 printk(KERN_WARNING
"LTTng: filter: Expecting pointer on top of stack\n\n");
1679 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);
1685 *_next_pc
= next_pc
;
1690 * Never called concurrently (hash seed is shared).
1692 int lttng_filter_validate_bytecode(struct bytecode_runtime
*bytecode
)
1694 struct mp_table
*mp_table
;
1695 char *pc
, *next_pc
, *start_pc
;
1697 struct vstack stack
;
1699 vstack_init(&stack
);
1701 mp_table
= kzalloc(sizeof(*mp_table
), GFP_KERNEL
);
1703 printk(KERN_WARNING
"LTTng: filter: Error allocating hash table for bytecode validation\n");
1706 start_pc
= &bytecode
->code
[0];
1707 for (pc
= next_pc
= start_pc
; pc
- start_pc
< bytecode
->len
;
1709 ret
= bytecode_validate_overflow(bytecode
, start_pc
, pc
);
1712 printk(KERN_WARNING
"LTTng: filter: filter bytecode overflow\n");
1715 dbg_printk("Validating op %s (%u)\n",
1716 lttng_filter_print_op((unsigned int) *(filter_opcode_t
*) pc
),
1717 (unsigned int) *(filter_opcode_t
*) pc
);
1720 * For each instruction, validate the current context
1721 * (traversal of entire execution flow), and validate
1722 * all merge points targeting this instruction.
1724 ret
= validate_instruction_all_contexts(bytecode
, mp_table
,
1725 &stack
, start_pc
, pc
);
1728 ret
= exec_insn(bytecode
, mp_table
, &stack
, &next_pc
, pc
);
1733 if (delete_all_nodes(mp_table
)) {
1735 printk(KERN_WARNING
"LTTng: filter: Unexpected merge points\n");