/*
 * lttng-filter-validator.c
 *
 * LTTng modules filter bytecode validator.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
27 #include <linux/types.h>
28 #include <linux/jhash.h>
29 #include <linux/slab.h>
31 #include <wrapper/list.h>
32 #include <lttng-filter.h>
34 #define MERGE_POINT_TABLE_BITS 7
35 #define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)
37 /* merge point table node */
39 struct hlist_node node
;
41 /* Context at merge point */
43 unsigned long target_pc
;
47 struct hlist_head mp_head
[MERGE_POINT_TABLE_SIZE
];
51 int lttng_hash_match(struct mp_node
*mp_node
, unsigned long key_pc
)
53 if (mp_node
->target_pc
== key_pc
)
60 int merge_points_compare(const struct vstack
*stacka
,
61 const struct vstack
*stackb
)
65 if (stacka
->top
!= stackb
->top
)
67 len
= stacka
->top
+ 1;
68 WARN_ON_ONCE(len
< 0);
69 for (i
= 0; i
< len
; i
++) {
70 if (stacka
->e
[i
].type
!= stackb
->e
[i
].type
)
77 int merge_point_add_check(struct mp_table
*mp_table
, unsigned long target_pc
,
78 const struct vstack
*stack
)
80 struct mp_node
*mp_node
;
81 unsigned long hash
= jhash_1word(target_pc
, 0);
82 struct hlist_head
*head
;
83 struct mp_node
*lookup_node
;
86 dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
88 mp_node
= kzalloc(sizeof(struct mp_node
), GFP_KERNEL
);
91 mp_node
->target_pc
= target_pc
;
92 memcpy(&mp_node
->stack
, stack
, sizeof(mp_node
->stack
));
94 head
= &mp_table
->mp_head
[hash
& (MERGE_POINT_TABLE_SIZE
- 1)];
95 lttng_hlist_for_each_entry(lookup_node
, head
, node
) {
96 if (lttng_hash_match(lookup_node
, target_pc
)) {
102 /* Key already present */
103 dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
106 if (merge_points_compare(stack
, &lookup_node
->stack
)) {
107 printk(KERN_WARNING
"Merge points differ for offset %lu\n",
112 hlist_add_head(&mp_node
->node
, head
);
118 * Binary comparators use top of stack and top of stack -1.
121 int bin_op_compare_check(struct vstack
*stack
, const filter_opcode_t opcode
,
124 if (unlikely(!vstack_ax(stack
) || !vstack_bx(stack
)))
127 switch (vstack_ax(stack
)->type
) {
133 switch (vstack_bx(stack
)->type
) {
137 case REG_TYPE_UNKNOWN
:
141 case REG_STAR_GLOB_STRING
:
142 if (opcode
!= FILTER_OP_EQ
&& opcode
!= FILTER_OP_NE
) {
150 case REG_STAR_GLOB_STRING
:
151 switch (vstack_bx(stack
)->type
) {
155 case REG_TYPE_UNKNOWN
:
158 if (opcode
!= FILTER_OP_EQ
&& opcode
!= FILTER_OP_NE
) {
162 case REG_STAR_GLOB_STRING
:
168 switch (vstack_bx(stack
)->type
) {
172 case REG_TYPE_UNKNOWN
:
175 case REG_STAR_GLOB_STRING
:
181 case REG_TYPE_UNKNOWN
:
182 switch (vstack_bx(stack
)->type
) {
186 case REG_TYPE_UNKNOWN
:
188 case REG_STAR_GLOB_STRING
:
200 printk(KERN_WARNING
"empty stack for '%s' binary operator\n", str
);
204 printk(KERN_WARNING
"type mismatch for '%s' binary operator\n", str
);
208 printk(KERN_WARNING
"unknown type for '%s' binary operator\n", str
);
213 * Binary bitwise operators use top of stack and top of stack -1.
214 * Return 0 if typing is known to match, 1 if typing is dynamic
215 * (unknown), negative error value on error.
218 int bin_op_bitwise_check(struct vstack
*stack
, filter_opcode_t opcode
,
221 if (unlikely(!vstack_ax(stack
) || !vstack_bx(stack
)))
224 switch (vstack_ax(stack
)->type
) {
229 case REG_TYPE_UNKNOWN
:
230 switch (vstack_bx(stack
)->type
) {
234 case REG_TYPE_UNKNOWN
:
236 case REG_STAR_GLOB_STRING
:
242 switch (vstack_bx(stack
)->type
) {
246 case REG_TYPE_UNKNOWN
:
259 printk(KERN_WARNING
"empty stack for '%s' binary operator\n", str
);
263 printk(KERN_WARNING
"unknown type for '%s' binary operator\n", str
);
268 int validate_get_symbol(struct bytecode_runtime
*bytecode
,
269 const struct get_symbol
*sym
)
271 const char *str
, *str_limit
;
274 if (sym
->offset
>= bytecode
->p
.bc
->bc
.len
- bytecode
->p
.bc
->bc
.reloc_offset
)
277 str
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.reloc_offset
+ sym
->offset
;
278 str_limit
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.len
;
279 len_limit
= str_limit
- str
;
280 if (strnlen(str
, len_limit
) == len_limit
)
286 * Validate bytecode range overflow within the validation pass.
287 * Called for each instruction encountered.
290 int bytecode_validate_overflow(struct bytecode_runtime
*bytecode
,
291 char *start_pc
, char *pc
)
295 switch (*(filter_opcode_t
*) pc
) {
296 case FILTER_OP_UNKNOWN
:
299 printk(KERN_WARNING
"unknown bytecode op %u\n",
300 (unsigned int) *(filter_opcode_t
*) pc
);
305 case FILTER_OP_RETURN
:
306 case FILTER_OP_RETURN_S64
:
308 if (unlikely(pc
+ sizeof(struct return_op
)
309 > start_pc
+ bytecode
->len
)) {
320 case FILTER_OP_MINUS
:
321 case FILTER_OP_EQ_DOUBLE
:
322 case FILTER_OP_NE_DOUBLE
:
323 case FILTER_OP_GT_DOUBLE
:
324 case FILTER_OP_LT_DOUBLE
:
325 case FILTER_OP_GE_DOUBLE
:
326 case FILTER_OP_LE_DOUBLE
:
328 case FILTER_OP_EQ_DOUBLE_S64
:
329 case FILTER_OP_NE_DOUBLE_S64
:
330 case FILTER_OP_GT_DOUBLE_S64
:
331 case FILTER_OP_LT_DOUBLE_S64
:
332 case FILTER_OP_GE_DOUBLE_S64
:
333 case FILTER_OP_LE_DOUBLE_S64
:
334 case FILTER_OP_EQ_S64_DOUBLE
:
335 case FILTER_OP_NE_S64_DOUBLE
:
336 case FILTER_OP_GT_S64_DOUBLE
:
337 case FILTER_OP_LT_S64_DOUBLE
:
338 case FILTER_OP_GE_S64_DOUBLE
:
339 case FILTER_OP_LE_S64_DOUBLE
:
340 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
341 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
342 case FILTER_OP_LOAD_DOUBLE
:
343 case FILTER_OP_CAST_DOUBLE_TO_S64
:
344 case FILTER_OP_UNARY_PLUS_DOUBLE
:
345 case FILTER_OP_UNARY_MINUS_DOUBLE
:
346 case FILTER_OP_UNARY_NOT_DOUBLE
:
348 printk(KERN_WARNING
"unsupported bytecode op %u\n",
349 (unsigned int) *(filter_opcode_t
*) pc
);
360 case FILTER_OP_EQ_STRING
:
361 case FILTER_OP_NE_STRING
:
362 case FILTER_OP_GT_STRING
:
363 case FILTER_OP_LT_STRING
:
364 case FILTER_OP_GE_STRING
:
365 case FILTER_OP_LE_STRING
:
366 case FILTER_OP_EQ_STAR_GLOB_STRING
:
367 case FILTER_OP_NE_STAR_GLOB_STRING
:
368 case FILTER_OP_EQ_S64
:
369 case FILTER_OP_NE_S64
:
370 case FILTER_OP_GT_S64
:
371 case FILTER_OP_LT_S64
:
372 case FILTER_OP_GE_S64
:
373 case FILTER_OP_LE_S64
:
374 case FILTER_OP_BIT_RSHIFT
:
375 case FILTER_OP_BIT_LSHIFT
:
376 case FILTER_OP_BIT_AND
:
377 case FILTER_OP_BIT_OR
:
378 case FILTER_OP_BIT_XOR
:
380 if (unlikely(pc
+ sizeof(struct binary_op
)
381 > start_pc
+ bytecode
->len
)) {
388 case FILTER_OP_UNARY_PLUS
:
389 case FILTER_OP_UNARY_MINUS
:
390 case FILTER_OP_UNARY_NOT
:
391 case FILTER_OP_UNARY_PLUS_S64
:
392 case FILTER_OP_UNARY_MINUS_S64
:
393 case FILTER_OP_UNARY_NOT_S64
:
394 case FILTER_OP_UNARY_BIT_NOT
:
396 if (unlikely(pc
+ sizeof(struct unary_op
)
397 > start_pc
+ bytecode
->len
)) {
407 if (unlikely(pc
+ sizeof(struct logical_op
)
408 > start_pc
+ bytecode
->len
)) {
415 case FILTER_OP_LOAD_FIELD_REF
:
417 printk(KERN_WARNING
"Unknown field ref type\n");
422 /* get context ref */
423 case FILTER_OP_GET_CONTEXT_REF
:
425 printk(KERN_WARNING
"Unknown field ref type\n");
429 case FILTER_OP_LOAD_FIELD_REF_STRING
:
430 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
431 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
432 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
433 case FILTER_OP_LOAD_FIELD_REF_S64
:
434 case FILTER_OP_GET_CONTEXT_REF_STRING
:
435 case FILTER_OP_GET_CONTEXT_REF_S64
:
437 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct field_ref
)
438 > start_pc
+ bytecode
->len
)) {
444 /* load from immediate operand */
445 case FILTER_OP_LOAD_STRING
:
446 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
448 struct load_op
*insn
= (struct load_op
*) pc
;
449 uint32_t str_len
, maxlen
;
451 if (unlikely(pc
+ sizeof(struct load_op
)
452 > start_pc
+ bytecode
->len
)) {
457 maxlen
= start_pc
+ bytecode
->len
- pc
- sizeof(struct load_op
);
458 str_len
= strnlen(insn
->data
, maxlen
);
459 if (unlikely(str_len
>= maxlen
)) {
460 /* Final '\0' not found within range */
466 case FILTER_OP_LOAD_S64
:
468 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct literal_numeric
)
469 > start_pc
+ bytecode
->len
)) {
475 case FILTER_OP_CAST_TO_S64
:
476 case FILTER_OP_CAST_NOP
:
478 if (unlikely(pc
+ sizeof(struct cast_op
)
479 > start_pc
+ bytecode
->len
)) {
486 * Instructions for recursive traversal through composed types.
488 case FILTER_OP_GET_CONTEXT_ROOT
:
489 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
490 case FILTER_OP_GET_PAYLOAD_ROOT
:
491 case FILTER_OP_LOAD_FIELD
:
492 case FILTER_OP_LOAD_FIELD_S8
:
493 case FILTER_OP_LOAD_FIELD_S16
:
494 case FILTER_OP_LOAD_FIELD_S32
:
495 case FILTER_OP_LOAD_FIELD_S64
:
496 case FILTER_OP_LOAD_FIELD_U8
:
497 case FILTER_OP_LOAD_FIELD_U16
:
498 case FILTER_OP_LOAD_FIELD_U32
:
499 case FILTER_OP_LOAD_FIELD_U64
:
500 case FILTER_OP_LOAD_FIELD_STRING
:
501 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
502 case FILTER_OP_LOAD_FIELD_DOUBLE
:
503 if (unlikely(pc
+ sizeof(struct load_op
)
504 > start_pc
+ bytecode
->len
)) {
509 case FILTER_OP_GET_SYMBOL
:
511 struct load_op
*insn
= (struct load_op
*) pc
;
512 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
514 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_symbol
)
515 > start_pc
+ bytecode
->len
)) {
519 ret
= validate_get_symbol(bytecode
, sym
);
523 case FILTER_OP_GET_SYMBOL_FIELD
:
524 printk(KERN_WARNING
"Unexpected get symbol field\n");
528 case FILTER_OP_GET_INDEX_U16
:
529 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_index_u16
)
530 > start_pc
+ bytecode
->len
)) {
535 case FILTER_OP_GET_INDEX_U64
:
536 if (unlikely(pc
+ sizeof(struct load_op
) + sizeof(struct get_index_u64
)
537 > start_pc
+ bytecode
->len
)) {
547 unsigned long delete_all_nodes(struct mp_table
*mp_table
)
549 struct mp_node
*mp_node
;
550 struct hlist_node
*tmp
;
551 unsigned long nr_nodes
= 0;
554 for (i
= 0; i
< MERGE_POINT_TABLE_SIZE
; i
++) {
555 struct hlist_head
*head
;
557 head
= &mp_table
->mp_head
[i
];
558 lttng_hlist_for_each_entry_safe(mp_node
, tmp
, head
, node
) {
572 int validate_instruction_context(struct bytecode_runtime
*bytecode
,
573 struct vstack
*stack
,
578 const filter_opcode_t opcode
= *(filter_opcode_t
*) pc
;
581 case FILTER_OP_UNKNOWN
:
584 printk(KERN_WARNING
"unknown bytecode op %u\n",
585 (unsigned int) *(filter_opcode_t
*) pc
);
590 case FILTER_OP_RETURN
:
591 case FILTER_OP_RETURN_S64
:
601 case FILTER_OP_MINUS
:
603 case FILTER_OP_EQ_DOUBLE
:
604 case FILTER_OP_NE_DOUBLE
:
605 case FILTER_OP_GT_DOUBLE
:
606 case FILTER_OP_LT_DOUBLE
:
607 case FILTER_OP_GE_DOUBLE
:
608 case FILTER_OP_LE_DOUBLE
:
609 case FILTER_OP_EQ_DOUBLE_S64
:
610 case FILTER_OP_NE_DOUBLE_S64
:
611 case FILTER_OP_GT_DOUBLE_S64
:
612 case FILTER_OP_LT_DOUBLE_S64
:
613 case FILTER_OP_GE_DOUBLE_S64
:
614 case FILTER_OP_LE_DOUBLE_S64
:
615 case FILTER_OP_EQ_S64_DOUBLE
:
616 case FILTER_OP_NE_S64_DOUBLE
:
617 case FILTER_OP_GT_S64_DOUBLE
:
618 case FILTER_OP_LT_S64_DOUBLE
:
619 case FILTER_OP_GE_S64_DOUBLE
:
620 case FILTER_OP_LE_S64_DOUBLE
:
621 case FILTER_OP_UNARY_PLUS_DOUBLE
:
622 case FILTER_OP_UNARY_MINUS_DOUBLE
:
623 case FILTER_OP_UNARY_NOT_DOUBLE
:
624 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
625 case FILTER_OP_LOAD_DOUBLE
:
626 case FILTER_OP_CAST_DOUBLE_TO_S64
:
627 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
629 printk(KERN_WARNING
"unsupported bytecode op %u\n",
630 (unsigned int) *(filter_opcode_t
*) pc
);
637 ret
= bin_op_compare_check(stack
, opcode
, "==");
644 ret
= bin_op_compare_check(stack
, opcode
, "!=");
651 ret
= bin_op_compare_check(stack
, opcode
, ">");
658 ret
= bin_op_compare_check(stack
, opcode
, "<");
665 ret
= bin_op_compare_check(stack
, opcode
, ">=");
672 ret
= bin_op_compare_check(stack
, opcode
, "<=");
678 case FILTER_OP_EQ_STRING
:
679 case FILTER_OP_NE_STRING
:
680 case FILTER_OP_GT_STRING
:
681 case FILTER_OP_LT_STRING
:
682 case FILTER_OP_GE_STRING
:
683 case FILTER_OP_LE_STRING
:
685 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
686 printk(KERN_WARNING
"Empty stack\n");
690 if (vstack_ax(stack
)->type
!= REG_STRING
691 || vstack_bx(stack
)->type
!= REG_STRING
) {
692 printk(KERN_WARNING
"Unexpected register type for string comparator\n");
700 case FILTER_OP_EQ_STAR_GLOB_STRING
:
701 case FILTER_OP_NE_STAR_GLOB_STRING
:
703 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
704 printk(KERN_WARNING
"Empty stack\n");
708 if (vstack_ax(stack
)->type
!= REG_STAR_GLOB_STRING
709 && vstack_bx(stack
)->type
!= REG_STAR_GLOB_STRING
) {
710 printk(KERN_WARNING
"Unexpected register type for globbing pattern comparator\n");
717 case FILTER_OP_EQ_S64
:
718 case FILTER_OP_NE_S64
:
719 case FILTER_OP_GT_S64
:
720 case FILTER_OP_LT_S64
:
721 case FILTER_OP_GE_S64
:
722 case FILTER_OP_LE_S64
:
724 if (!vstack_ax(stack
) || !vstack_bx(stack
)) {
725 printk(KERN_WARNING
"Empty stack\n");
729 if (vstack_ax(stack
)->type
!= REG_S64
730 || vstack_bx(stack
)->type
!= REG_S64
) {
731 printk(KERN_WARNING
"Unexpected register type for s64 comparator\n");
738 case FILTER_OP_BIT_RSHIFT
:
739 ret
= bin_op_bitwise_check(stack
, opcode
, ">>");
743 case FILTER_OP_BIT_LSHIFT
:
744 ret
= bin_op_bitwise_check(stack
, opcode
, "<<");
748 case FILTER_OP_BIT_AND
:
749 ret
= bin_op_bitwise_check(stack
, opcode
, "&");
753 case FILTER_OP_BIT_OR
:
754 ret
= bin_op_bitwise_check(stack
, opcode
, "|");
758 case FILTER_OP_BIT_XOR
:
759 ret
= bin_op_bitwise_check(stack
, opcode
, "^");
765 case FILTER_OP_UNARY_PLUS
:
766 case FILTER_OP_UNARY_MINUS
:
767 case FILTER_OP_UNARY_NOT
:
769 if (!vstack_ax(stack
)) {
770 printk(KERN_WARNING
"Empty stack\n");
774 switch (vstack_ax(stack
)->type
) {
777 printk(KERN_WARNING
"unknown register type\n");
782 case REG_STAR_GLOB_STRING
:
783 printk(KERN_WARNING
"Unary op can only be applied to numeric or floating point registers\n");
787 case REG_TYPE_UNKNOWN
:
792 case FILTER_OP_UNARY_BIT_NOT
:
794 if (!vstack_ax(stack
)) {
795 printk(KERN_WARNING
"Empty stack\n");
799 switch (vstack_ax(stack
)->type
) {
801 printk(KERN_WARNING
"unknown register type\n");
806 case REG_STAR_GLOB_STRING
:
808 printk(KERN_WARNING
"Unary bitwise op can only be applied to numeric registers\n");
813 case REG_TYPE_UNKNOWN
:
819 case FILTER_OP_UNARY_PLUS_S64
:
820 case FILTER_OP_UNARY_MINUS_S64
:
821 case FILTER_OP_UNARY_NOT_S64
:
823 if (!vstack_ax(stack
)) {
824 printk(KERN_WARNING
"Empty stack\n");
828 if (vstack_ax(stack
)->type
!= REG_S64
) {
829 printk(KERN_WARNING
"Invalid register type\n");
840 struct logical_op
*insn
= (struct logical_op
*) pc
;
842 if (!vstack_ax(stack
)) {
843 printk(KERN_WARNING
"Empty stack\n");
847 if (vstack_ax(stack
)->type
!= REG_S64
) {
848 printk(KERN_WARNING
"Logical comparator expects S64 register\n");
853 dbg_printk("Validate jumping to bytecode offset %u\n",
854 (unsigned int) insn
->skip_offset
);
855 if (unlikely(start_pc
+ insn
->skip_offset
<= pc
)) {
856 printk(KERN_WARNING
"Loops are not allowed in bytecode\n");
864 case FILTER_OP_LOAD_FIELD_REF
:
866 printk(KERN_WARNING
"Unknown field ref type\n");
870 case FILTER_OP_LOAD_FIELD_REF_STRING
:
871 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
872 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
873 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
875 struct load_op
*insn
= (struct load_op
*) pc
;
876 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
878 dbg_printk("Validate load field ref offset %u type string\n",
882 case FILTER_OP_LOAD_FIELD_REF_S64
:
884 struct load_op
*insn
= (struct load_op
*) pc
;
885 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
887 dbg_printk("Validate load field ref offset %u type s64\n",
892 /* load from immediate operand */
893 case FILTER_OP_LOAD_STRING
:
894 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
899 case FILTER_OP_LOAD_S64
:
904 case FILTER_OP_CAST_TO_S64
:
906 struct cast_op
*insn
= (struct cast_op
*) pc
;
908 if (!vstack_ax(stack
)) {
909 printk(KERN_WARNING
"Empty stack\n");
913 switch (vstack_ax(stack
)->type
) {
916 printk(KERN_WARNING
"unknown register type\n");
921 case REG_STAR_GLOB_STRING
:
922 printk(KERN_WARNING
"Cast op can only be applied to numeric or floating point registers\n");
928 if (insn
->op
== FILTER_OP_CAST_DOUBLE_TO_S64
) {
929 if (vstack_ax(stack
)->type
!= REG_DOUBLE
) {
930 printk(KERN_WARNING
"Cast expects double\n");
937 case FILTER_OP_CAST_NOP
:
942 /* get context ref */
943 case FILTER_OP_GET_CONTEXT_REF
:
945 printk(KERN_WARNING
"Unknown get context ref type\n");
949 case FILTER_OP_GET_CONTEXT_REF_STRING
:
951 struct load_op
*insn
= (struct load_op
*) pc
;
952 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
954 dbg_printk("Validate get context ref offset %u type string\n",
958 case FILTER_OP_GET_CONTEXT_REF_S64
:
960 struct load_op
*insn
= (struct load_op
*) pc
;
961 struct field_ref
*ref
= (struct field_ref
*) insn
->data
;
963 dbg_printk("Validate get context ref offset %u type s64\n",
969 * Instructions for recursive traversal through composed types.
971 case FILTER_OP_GET_CONTEXT_ROOT
:
973 dbg_printk("Validate get context root\n");
976 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
978 dbg_printk("Validate get app context root\n");
981 case FILTER_OP_GET_PAYLOAD_ROOT
:
983 dbg_printk("Validate get payload root\n");
986 case FILTER_OP_LOAD_FIELD
:
989 * We tolerate that field type is unknown at validation,
990 * because we are performing the load specialization in
991 * a phase after validation.
993 dbg_printk("Validate load field\n");
996 case FILTER_OP_LOAD_FIELD_S8
:
998 dbg_printk("Validate load field s8\n");
1001 case FILTER_OP_LOAD_FIELD_S16
:
1003 dbg_printk("Validate load field s16\n");
1006 case FILTER_OP_LOAD_FIELD_S32
:
1008 dbg_printk("Validate load field s32\n");
1011 case FILTER_OP_LOAD_FIELD_S64
:
1013 dbg_printk("Validate load field s64\n");
1016 case FILTER_OP_LOAD_FIELD_U8
:
1018 dbg_printk("Validate load field u8\n");
1021 case FILTER_OP_LOAD_FIELD_U16
:
1023 dbg_printk("Validate load field u16\n");
1026 case FILTER_OP_LOAD_FIELD_U32
:
1028 dbg_printk("Validate load field u32\n");
1031 case FILTER_OP_LOAD_FIELD_U64
:
1033 dbg_printk("Validate load field u64\n");
1036 case FILTER_OP_LOAD_FIELD_STRING
:
1038 dbg_printk("Validate load field string\n");
1041 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1043 dbg_printk("Validate load field sequence\n");
1046 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1048 dbg_printk("Validate load field double\n");
1052 case FILTER_OP_GET_SYMBOL
:
1054 struct load_op
*insn
= (struct load_op
*) pc
;
1055 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
1057 dbg_printk("Validate get symbol offset %u\n", sym
->offset
);
1061 case FILTER_OP_GET_SYMBOL_FIELD
:
1063 struct load_op
*insn
= (struct load_op
*) pc
;
1064 struct get_symbol
*sym
= (struct get_symbol
*) insn
->data
;
1066 dbg_printk("Validate get symbol field offset %u\n", sym
->offset
);
1070 case FILTER_OP_GET_INDEX_U16
:
1072 struct load_op
*insn
= (struct load_op
*) pc
;
1073 struct get_index_u16
*get_index
= (struct get_index_u16
*) insn
->data
;
1075 dbg_printk("Validate get index u16 index %u\n", get_index
->index
);
1079 case FILTER_OP_GET_INDEX_U64
:
1081 struct load_op
*insn
= (struct load_op
*) pc
;
1082 struct get_index_u64
*get_index
= (struct get_index_u64
*) insn
->data
;
1084 dbg_printk("Validate get index u64 index %llu\n",
1085 (unsigned long long) get_index
->index
);
1099 int validate_instruction_all_contexts(struct bytecode_runtime
*bytecode
,
1100 struct mp_table
*mp_table
,
1101 struct vstack
*stack
,
1106 unsigned long target_pc
= pc
- start_pc
;
1108 struct hlist_head
*head
;
1109 struct mp_node
*mp_node
;
1111 /* Validate the context resulting from the previous instruction */
1112 ret
= validate_instruction_context(bytecode
, stack
, start_pc
, pc
);
1116 /* Validate merge points */
1117 hash
= jhash_1word(target_pc
, 0);
1118 head
= &mp_table
->mp_head
[hash
& (MERGE_POINT_TABLE_SIZE
- 1)];
1119 lttng_hlist_for_each_entry(mp_node
, head
, node
) {
1120 if (lttng_hash_match(mp_node
, target_pc
)) {
1126 dbg_printk("Filter: validate merge point at offset %lu\n",
1128 if (merge_points_compare(stack
, &mp_node
->stack
)) {
1129 printk(KERN_WARNING
"Merge points differ for offset %lu\n",
1133 /* Once validated, we can remove the merge point */
1134 dbg_printk("Filter: remove merge point at offset %lu\n",
1136 hlist_del(&mp_node
->node
);
1143 * >0: going to next insn.
1144 * 0: success, stop iteration.
1148 int exec_insn(struct bytecode_runtime
*bytecode
,
1149 struct mp_table
*mp_table
,
1150 struct vstack
*stack
,
1155 char *next_pc
= *_next_pc
;
1157 switch (*(filter_opcode_t
*) pc
) {
1158 case FILTER_OP_UNKNOWN
:
1161 printk(KERN_WARNING
"unknown bytecode op %u\n",
1162 (unsigned int) *(filter_opcode_t
*) pc
);
1167 case FILTER_OP_RETURN
:
1169 if (!vstack_ax(stack
)) {
1170 printk(KERN_WARNING
"Empty stack\n");
1174 switch (vstack_ax(stack
)->type
) {
1176 case REG_TYPE_UNKNOWN
:
1179 printk(KERN_WARNING
"Unexpected register type %d at end of bytecode\n",
1180 (int) vstack_ax(stack
)->type
);
1189 case FILTER_OP_RETURN_S64
:
1191 if (!vstack_ax(stack
)) {
1192 printk(KERN_WARNING
"Empty stack\n");
1196 switch (vstack_ax(stack
)->type
) {
1200 case REG_TYPE_UNKNOWN
:
1201 printk(KERN_WARNING
"Unexpected register type %d at end of bytecode\n",
1202 (int) vstack_ax(stack
)->type
);
1215 case FILTER_OP_PLUS
:
1216 case FILTER_OP_MINUS
:
1217 /* Floating point */
1218 case FILTER_OP_EQ_DOUBLE
:
1219 case FILTER_OP_NE_DOUBLE
:
1220 case FILTER_OP_GT_DOUBLE
:
1221 case FILTER_OP_LT_DOUBLE
:
1222 case FILTER_OP_GE_DOUBLE
:
1223 case FILTER_OP_LE_DOUBLE
:
1224 case FILTER_OP_EQ_DOUBLE_S64
:
1225 case FILTER_OP_NE_DOUBLE_S64
:
1226 case FILTER_OP_GT_DOUBLE_S64
:
1227 case FILTER_OP_LT_DOUBLE_S64
:
1228 case FILTER_OP_GE_DOUBLE_S64
:
1229 case FILTER_OP_LE_DOUBLE_S64
:
1230 case FILTER_OP_EQ_S64_DOUBLE
:
1231 case FILTER_OP_NE_S64_DOUBLE
:
1232 case FILTER_OP_GT_S64_DOUBLE
:
1233 case FILTER_OP_LT_S64_DOUBLE
:
1234 case FILTER_OP_GE_S64_DOUBLE
:
1235 case FILTER_OP_LE_S64_DOUBLE
:
1236 case FILTER_OP_UNARY_PLUS_DOUBLE
:
1237 case FILTER_OP_UNARY_MINUS_DOUBLE
:
1238 case FILTER_OP_UNARY_NOT_DOUBLE
:
1239 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
1240 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
1241 case FILTER_OP_LOAD_DOUBLE
:
1242 case FILTER_OP_CAST_DOUBLE_TO_S64
:
1244 printk(KERN_WARNING
"unsupported bytecode op %u\n",
1245 (unsigned int) *(filter_opcode_t
*) pc
);
1256 case FILTER_OP_EQ_STRING
:
1257 case FILTER_OP_NE_STRING
:
1258 case FILTER_OP_GT_STRING
:
1259 case FILTER_OP_LT_STRING
:
1260 case FILTER_OP_GE_STRING
:
1261 case FILTER_OP_LE_STRING
:
1262 case FILTER_OP_EQ_STAR_GLOB_STRING
:
1263 case FILTER_OP_NE_STAR_GLOB_STRING
:
1264 case FILTER_OP_EQ_S64
:
1265 case FILTER_OP_NE_S64
:
1266 case FILTER_OP_GT_S64
:
1267 case FILTER_OP_LT_S64
:
1268 case FILTER_OP_GE_S64
:
1269 case FILTER_OP_LE_S64
:
1270 case FILTER_OP_BIT_RSHIFT
:
1271 case FILTER_OP_BIT_LSHIFT
:
1272 case FILTER_OP_BIT_AND
:
1273 case FILTER_OP_BIT_OR
:
1274 case FILTER_OP_BIT_XOR
:
1277 if (vstack_pop(stack
)) {
1281 if (!vstack_ax(stack
)) {
1282 printk(KERN_WARNING
"Empty stack\n");
1286 switch (vstack_ax(stack
)->type
) {
1290 case REG_STAR_GLOB_STRING
:
1291 case REG_TYPE_UNKNOWN
:
1294 printk(KERN_WARNING
"Unexpected register type %d for operation\n",
1295 (int) vstack_ax(stack
)->type
);
1300 vstack_ax(stack
)->type
= REG_S64
;
1301 next_pc
+= sizeof(struct binary_op
);
1306 case FILTER_OP_UNARY_PLUS
:
1307 case FILTER_OP_UNARY_MINUS
:
1310 if (!vstack_ax(stack
)) {
1311 printk(KERN_WARNING
"Empty stack\n\n");
1315 switch (vstack_ax(stack
)->type
) {
1317 case REG_TYPE_UNKNOWN
:
1320 printk(KERN_WARNING
"Unexpected register type %d for operation\n",
1321 (int) vstack_ax(stack
)->type
);
1326 vstack_ax(stack
)->type
= REG_TYPE_UNKNOWN
;
1327 next_pc
+= sizeof(struct unary_op
);
1331 case FILTER_OP_UNARY_PLUS_S64
:
1332 case FILTER_OP_UNARY_MINUS_S64
:
1333 case FILTER_OP_UNARY_NOT_S64
:
1336 if (!vstack_ax(stack
)) {
1337 printk(KERN_WARNING
"Empty stack\n\n");
1341 switch (vstack_ax(stack
)->type
) {
1345 printk(KERN_WARNING
"Unexpected register type %d for operation\n",
1346 (int) vstack_ax(stack
)->type
);
1351 vstack_ax(stack
)->type
= REG_S64
;
1352 next_pc
+= sizeof(struct unary_op
);
1356 case FILTER_OP_UNARY_NOT
:
1359 if (!vstack_ax(stack
)) {
1360 printk(KERN_WARNING
"Empty stack\n\n");
1364 switch (vstack_ax(stack
)->type
) {
1366 case REG_TYPE_UNKNOWN
:
1369 printk(KERN_WARNING
"Unexpected register type %d for operation\n",
1370 (int) vstack_ax(stack
)->type
);
1375 vstack_ax(stack
)->type
= REG_S64
;
1376 next_pc
+= sizeof(struct unary_op
);
1380 case FILTER_OP_UNARY_BIT_NOT
:
1383 if (!vstack_ax(stack
)) {
1384 printk(KERN_WARNING
"Empty stack\n");
1388 switch (vstack_ax(stack
)->type
) {
1390 case REG_TYPE_UNKNOWN
:
1394 printk(KERN_WARNING
"Unexpected register type %d for operation\n",
1395 (int) vstack_ax(stack
)->type
);
1400 vstack_ax(stack
)->type
= REG_S64
;
1401 next_pc
+= sizeof(struct unary_op
);
1409 struct logical_op
*insn
= (struct logical_op
*) pc
;
1412 /* Add merge point to table */
1413 merge_ret
= merge_point_add_check(mp_table
,
1414 insn
->skip_offset
, stack
);
1420 if (!vstack_ax(stack
)) {
1421 printk(KERN_WARNING
"Empty stack\n\n");
1425 /* There is always a cast-to-s64 operation before a or/and op. */
1426 switch (vstack_ax(stack
)->type
) {
1430 printk(KERN_WARNING
"Incorrect register type %d for operation\n",
1431 (int) vstack_ax(stack
)->type
);
1436 /* Continue to next instruction */
1437 /* Pop 1 when jump not taken */
1438 if (vstack_pop(stack
)) {
1442 next_pc
+= sizeof(struct logical_op
);
1446 /* load field ref */
1447 case FILTER_OP_LOAD_FIELD_REF
:
1449 printk(KERN_WARNING
"Unknown field ref type\n");
1453 /* get context ref */
1454 case FILTER_OP_GET_CONTEXT_REF
:
1456 printk(KERN_WARNING
"Unknown get context ref type\n");
1460 case FILTER_OP_LOAD_FIELD_REF_STRING
:
1461 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
1462 case FILTER_OP_GET_CONTEXT_REF_STRING
:
1463 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
1464 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
1466 if (vstack_push(stack
)) {
1470 vstack_ax(stack
)->type
= REG_STRING
;
1471 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1474 case FILTER_OP_LOAD_FIELD_REF_S64
:
1475 case FILTER_OP_GET_CONTEXT_REF_S64
:
1477 if (vstack_push(stack
)) {
1481 vstack_ax(stack
)->type
= REG_S64
;
1482 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
1486 /* load from immediate operand */
1487 case FILTER_OP_LOAD_STRING
:
1489 struct load_op
*insn
= (struct load_op
*) pc
;
1491 if (vstack_push(stack
)) {
1495 vstack_ax(stack
)->type
= REG_STRING
;
1496 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1500 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
1502 struct load_op
*insn
= (struct load_op
*) pc
;
1504 if (vstack_push(stack
)) {
1508 vstack_ax(stack
)->type
= REG_STAR_GLOB_STRING
;
1509 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1513 case FILTER_OP_LOAD_S64
:
1515 if (vstack_push(stack
)) {
1519 vstack_ax(stack
)->type
= REG_S64
;
1520 next_pc
+= sizeof(struct load_op
)
1521 + sizeof(struct literal_numeric
);
1525 case FILTER_OP_CAST_TO_S64
:
1528 if (!vstack_ax(stack
)) {
1529 printk(KERN_WARNING
"Empty stack\n");
1533 switch (vstack_ax(stack
)->type
) {
1536 case REG_TYPE_UNKNOWN
:
1539 printk(KERN_WARNING
"Incorrect register type %d for cast\n",
1540 (int) vstack_ax(stack
)->type
);
1544 vstack_ax(stack
)->type
= REG_S64
;
1545 next_pc
+= sizeof(struct cast_op
);
1548 case FILTER_OP_CAST_NOP
:
1550 next_pc
+= sizeof(struct cast_op
);
1555 * Instructions for recursive traversal through composed types.
1557 case FILTER_OP_GET_CONTEXT_ROOT
:
1558 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1559 case FILTER_OP_GET_PAYLOAD_ROOT
:
1561 if (vstack_push(stack
)) {
1565 vstack_ax(stack
)->type
= REG_PTR
;
1566 next_pc
+= sizeof(struct load_op
);
1570 case FILTER_OP_LOAD_FIELD
:
1573 if (!vstack_ax(stack
)) {
1574 printk(KERN_WARNING
"Empty stack\n\n");
1578 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1579 printk(KERN_WARNING
"Expecting pointer on top of stack\n\n");
1583 vstack_ax(stack
)->type
= REG_TYPE_UNKNOWN
;
1584 next_pc
+= sizeof(struct load_op
);
1588 case FILTER_OP_LOAD_FIELD_S8
:
1589 case FILTER_OP_LOAD_FIELD_S16
:
1590 case FILTER_OP_LOAD_FIELD_S32
:
1591 case FILTER_OP_LOAD_FIELD_S64
:
1592 case FILTER_OP_LOAD_FIELD_U8
:
1593 case FILTER_OP_LOAD_FIELD_U16
:
1594 case FILTER_OP_LOAD_FIELD_U32
:
1595 case FILTER_OP_LOAD_FIELD_U64
:
1598 if (!vstack_ax(stack
)) {
1599 printk(KERN_WARNING
"Empty stack\n\n");
1603 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1604 printk(KERN_WARNING
"Expecting pointer on top of stack\n\n");
1608 vstack_ax(stack
)->type
= REG_S64
;
1609 next_pc
+= sizeof(struct load_op
);
1613 case FILTER_OP_LOAD_FIELD_STRING
:
1614 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1617 if (!vstack_ax(stack
)) {
1618 printk(KERN_WARNING
"Empty stack\n\n");
1622 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1623 printk(KERN_WARNING
"Expecting pointer on top of stack\n\n");
1627 vstack_ax(stack
)->type
= REG_STRING
;
1628 next_pc
+= sizeof(struct load_op
);
1632 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1635 if (!vstack_ax(stack
)) {
1636 printk(KERN_WARNING
"Empty stack\n\n");
1640 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1641 printk(KERN_WARNING
"Expecting pointer on top of stack\n\n");
1645 vstack_ax(stack
)->type
= REG_DOUBLE
;
1646 next_pc
+= sizeof(struct load_op
);
1650 case FILTER_OP_GET_SYMBOL
:
1651 case FILTER_OP_GET_SYMBOL_FIELD
:
1654 if (!vstack_ax(stack
)) {
1655 printk(KERN_WARNING
"Empty stack\n\n");
1659 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1660 printk(KERN_WARNING
"Expecting pointer on top of stack\n\n");
1664 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1668 case FILTER_OP_GET_INDEX_U16
:
1671 if (!vstack_ax(stack
)) {
1672 printk(KERN_WARNING
"Empty stack\n\n");
1676 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1677 printk(KERN_WARNING
"Expecting pointer on top of stack\n\n");
1681 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
1685 case FILTER_OP_GET_INDEX_U64
:
1688 if (!vstack_ax(stack
)) {
1689 printk(KERN_WARNING
"Empty stack\n\n");
1693 if (vstack_ax(stack
)->type
!= REG_PTR
) {
1694 printk(KERN_WARNING
"Expecting pointer on top of stack\n\n");
1698 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);
1704 *_next_pc
= next_pc
;
1709 * Never called concurrently (hash seed is shared).
1711 int lttng_filter_validate_bytecode(struct bytecode_runtime
*bytecode
)
1713 struct mp_table
*mp_table
;
1714 char *pc
, *next_pc
, *start_pc
;
1716 struct vstack stack
;
1718 vstack_init(&stack
);
1720 mp_table
= kzalloc(sizeof(*mp_table
), GFP_KERNEL
);
1722 printk(KERN_WARNING
"Error allocating hash table for bytecode validation\n");
1725 start_pc
= &bytecode
->code
[0];
1726 for (pc
= next_pc
= start_pc
; pc
- start_pc
< bytecode
->len
;
1728 ret
= bytecode_validate_overflow(bytecode
, start_pc
, pc
);
1731 printk(KERN_WARNING
"filter bytecode overflow\n");
1734 dbg_printk("Validating op %s (%u)\n",
1735 lttng_filter_print_op((unsigned int) *(filter_opcode_t
*) pc
),
1736 (unsigned int) *(filter_opcode_t
*) pc
);
1739 * For each instruction, validate the current context
1740 * (traversal of entire execution flow), and validate
1741 * all merge points targeting this instruction.
1743 ret
= validate_instruction_all_contexts(bytecode
, mp_table
,
1744 &stack
, start_pc
, pc
);
1747 ret
= exec_insn(bytecode
, mp_table
, &stack
, &next_pc
, pc
);
1752 if (delete_all_nodes(mp_table
)) {
1754 printk(KERN_WARNING
"Unexpected merge points\n");