/*
 * lttng-filter-specialize.c
 *
 * LTTng modules filter code specializer.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
27 #include <linux/slab.h>
28 #include <lttng-filter.h>
29 #include "lib/align.h"
31 static ssize_t
bytecode_reserve_data(struct bytecode_runtime
*runtime
,
32 size_t align
, size_t len
)
35 size_t padding
= offset_align(runtime
->data_len
, align
);
36 size_t new_len
= runtime
->data_len
+ padding
+ len
;
37 size_t new_alloc_len
= new_len
;
38 size_t old_alloc_len
= runtime
->data_alloc_len
;
40 if (new_len
> FILTER_MAX_DATA_LEN
)
43 if (new_alloc_len
> old_alloc_len
) {
47 max_t(size_t, 1U << get_count_order(new_alloc_len
), old_alloc_len
<< 1);
48 newptr
= krealloc(runtime
->data
, new_alloc_len
, GFP_KERNEL
);
51 runtime
->data
= newptr
;
52 /* We zero directly the memory from start of allocation. */
53 memset(&runtime
->data
[old_alloc_len
], 0, new_alloc_len
- old_alloc_len
);
54 runtime
->data_alloc_len
= new_alloc_len
;
56 runtime
->data_len
+= padding
;
57 ret
= runtime
->data_len
;
58 runtime
->data_len
+= len
;
62 static ssize_t
bytecode_push_data(struct bytecode_runtime
*runtime
,
63 const void *p
, size_t align
, size_t len
)
67 offset
= bytecode_reserve_data(runtime
, align
, len
);
70 memcpy(&runtime
->data
[offset
], p
, len
);
74 static int specialize_load_field(struct vstack_entry
*stack_top
,
79 switch (stack_top
->load
.type
) {
82 case LOAD_ROOT_CONTEXT
:
83 case LOAD_ROOT_APP_CONTEXT
:
84 case LOAD_ROOT_PAYLOAD
:
86 dbg_printk("Filter warning: cannot load root, missing field name.\n");
90 switch (stack_top
->load
.object_type
) {
92 dbg_printk("op load field s8\n");
93 stack_top
->type
= REG_S64
;
94 if (!stack_top
->load
.rev_bo
)
95 insn
->op
= FILTER_OP_LOAD_FIELD_S8
;
98 dbg_printk("op load field s16\n");
99 stack_top
->type
= REG_S64
;
100 if (!stack_top
->load
.rev_bo
)
101 insn
->op
= FILTER_OP_LOAD_FIELD_S16
;
103 case OBJECT_TYPE_S32
:
104 dbg_printk("op load field s32\n");
105 stack_top
->type
= REG_S64
;
106 if (!stack_top
->load
.rev_bo
)
107 insn
->op
= FILTER_OP_LOAD_FIELD_S32
;
109 case OBJECT_TYPE_S64
:
110 dbg_printk("op load field s64\n");
111 stack_top
->type
= REG_S64
;
112 if (!stack_top
->load
.rev_bo
)
113 insn
->op
= FILTER_OP_LOAD_FIELD_S64
;
116 dbg_printk("op load field u8\n");
117 stack_top
->type
= REG_S64
;
118 insn
->op
= FILTER_OP_LOAD_FIELD_U8
;
120 case OBJECT_TYPE_U16
:
121 dbg_printk("op load field u16\n");
122 stack_top
->type
= REG_S64
;
123 if (!stack_top
->load
.rev_bo
)
124 insn
->op
= FILTER_OP_LOAD_FIELD_U16
;
126 case OBJECT_TYPE_U32
:
127 dbg_printk("op load field u32\n");
128 stack_top
->type
= REG_S64
;
129 if (!stack_top
->load
.rev_bo
)
130 insn
->op
= FILTER_OP_LOAD_FIELD_U32
;
132 case OBJECT_TYPE_U64
:
133 dbg_printk("op load field u64\n");
134 stack_top
->type
= REG_S64
;
135 if (!stack_top
->load
.rev_bo
)
136 insn
->op
= FILTER_OP_LOAD_FIELD_U64
;
138 case OBJECT_TYPE_DOUBLE
:
139 printk(KERN_WARNING
"Double type unsupported\n\n");
142 case OBJECT_TYPE_STRING
:
143 dbg_printk("op load field string\n");
144 stack_top
->type
= REG_STRING
;
145 insn
->op
= FILTER_OP_LOAD_FIELD_STRING
;
147 case OBJECT_TYPE_STRING_SEQUENCE
:
148 dbg_printk("op load field string sequence\n");
149 stack_top
->type
= REG_STRING
;
150 insn
->op
= FILTER_OP_LOAD_FIELD_SEQUENCE
;
152 case OBJECT_TYPE_DYNAMIC
:
155 case OBJECT_TYPE_SEQUENCE
:
156 case OBJECT_TYPE_ARRAY
:
157 case OBJECT_TYPE_STRUCT
:
158 case OBJECT_TYPE_VARIANT
:
159 printk(KERN_WARNING
"Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
169 static int specialize_get_index_object_type(enum object_type
*otype
,
170 int signedness
, uint32_t elem_len
)
175 *otype
= OBJECT_TYPE_S8
;
177 *otype
= OBJECT_TYPE_U8
;
181 *otype
= OBJECT_TYPE_S16
;
183 *otype
= OBJECT_TYPE_U16
;
187 *otype
= OBJECT_TYPE_S32
;
189 *otype
= OBJECT_TYPE_U32
;
193 *otype
= OBJECT_TYPE_S64
;
195 *otype
= OBJECT_TYPE_U64
;
203 static int specialize_get_index(struct bytecode_runtime
*runtime
,
204 struct load_op
*insn
, uint64_t index
,
205 struct vstack_entry
*stack_top
,
209 struct filter_get_index_data gid
;
212 memset(&gid
, 0, sizeof(gid
));
213 switch (stack_top
->load
.type
) {
215 switch (stack_top
->load
.object_type
) {
216 case OBJECT_TYPE_ARRAY
:
218 const struct lttng_event_field
*field
;
219 uint32_t elem_len
, num_elems
;
222 field
= stack_top
->load
.field
;
223 elem_len
= field
->type
.u
.array
.elem_type
.u
.basic
.integer
.size
;
224 signedness
= field
->type
.u
.array
.elem_type
.u
.basic
.integer
.signedness
;
225 num_elems
= field
->type
.u
.array
.length
;
226 if (index
>= num_elems
) {
230 ret
= specialize_get_index_object_type(&stack_top
->load
.object_type
,
231 signedness
, elem_len
);
234 gid
.offset
= index
* (elem_len
/ CHAR_BIT
);
235 gid
.array_len
= num_elems
* (elem_len
/ CHAR_BIT
);
236 gid
.elem
.type
= stack_top
->load
.object_type
;
237 gid
.elem
.len
= elem_len
;
238 if (field
->type
.u
.array
.elem_type
.u
.basic
.integer
.reverse_byte_order
)
239 gid
.elem
.rev_bo
= true;
240 stack_top
->load
.rev_bo
= gid
.elem
.rev_bo
;
243 case OBJECT_TYPE_SEQUENCE
:
245 const struct lttng_event_field
*field
;
249 field
= stack_top
->load
.field
;
250 elem_len
= field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.size
;
251 signedness
= field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.signedness
;
252 ret
= specialize_get_index_object_type(&stack_top
->load
.object_type
,
253 signedness
, elem_len
);
256 gid
.offset
= index
* (elem_len
/ CHAR_BIT
);
257 gid
.elem
.type
= stack_top
->load
.object_type
;
258 gid
.elem
.len
= elem_len
;
259 if (field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.reverse_byte_order
)
260 gid
.elem
.rev_bo
= true;
261 stack_top
->load
.rev_bo
= gid
.elem
.rev_bo
;
264 case OBJECT_TYPE_STRUCT
:
265 /* Only generated by the specialize phase. */
266 case OBJECT_TYPE_VARIANT
: /* Fall-through */
268 printk(KERN_WARNING
"Unexpected get index type %d",
269 (int) stack_top
->load
.object_type
);
274 case LOAD_ROOT_CONTEXT
:
275 case LOAD_ROOT_APP_CONTEXT
:
276 case LOAD_ROOT_PAYLOAD
:
277 printk(KERN_WARNING
"Index lookup for root field not implemented yet.\n");
281 data_offset
= bytecode_push_data(runtime
, &gid
,
282 __alignof__(gid
), sizeof(gid
));
283 if (data_offset
< 0) {
289 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
292 ((struct get_index_u64
*) insn
->data
)->index
= data_offset
;
305 static int specialize_context_lookup_name(struct bytecode_runtime
*bytecode
,
306 struct load_op
*insn
)
311 offset
= ((struct get_symbol
*) insn
->data
)->offset
;
312 name
= bytecode
->p
.bc
->bc
.data
+ bytecode
->p
.bc
->bc
.reloc_offset
+ offset
;
313 return lttng_get_context_index(lttng_static_ctx
, name
);
316 static int specialize_load_object(const struct lttng_event_field
*field
,
317 struct vstack_load
*load
, bool is_context
)
319 load
->type
= LOAD_OBJECT
;
321 * LTTng-UST layout all integer fields as s64 on the stack for the filter.
323 switch (field
->type
.atype
) {
325 if (field
->type
.u
.basic
.integer
.signedness
)
326 load
->object_type
= OBJECT_TYPE_S64
;
328 load
->object_type
= OBJECT_TYPE_U64
;
329 load
->rev_bo
= false;
333 const struct lttng_integer_type
*itype
=
334 &field
->type
.u
.basic
.enumeration
.container_type
;
336 if (itype
->signedness
)
337 load
->object_type
= OBJECT_TYPE_S64
;
339 load
->object_type
= OBJECT_TYPE_U64
;
340 load
->rev_bo
= false;
344 if (field
->type
.u
.array
.elem_type
.atype
!= atype_integer
) {
345 printk(KERN_WARNING
"Array nesting only supports integer types.\n");
349 load
->object_type
= OBJECT_TYPE_STRING
;
351 if (field
->type
.u
.array
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
352 load
->object_type
= OBJECT_TYPE_ARRAY
;
355 load
->object_type
= OBJECT_TYPE_STRING_SEQUENCE
;
360 if (field
->type
.u
.sequence
.elem_type
.atype
!= atype_integer
) {
361 printk(KERN_WARNING
"Sequence nesting only supports integer types.\n");
365 load
->object_type
= OBJECT_TYPE_STRING
;
367 if (field
->type
.u
.sequence
.elem_type
.u
.basic
.integer
.encoding
== lttng_encode_none
) {
368 load
->object_type
= OBJECT_TYPE_SEQUENCE
;
371 load
->object_type
= OBJECT_TYPE_STRING_SEQUENCE
;
375 case atype_array_bitfield
:
376 printk(KERN_WARNING
"Bitfield array type is not supported.\n");
378 case atype_sequence_bitfield
:
379 printk(KERN_WARNING
"Bitfield sequence type is not supported.\n");
382 load
->object_type
= OBJECT_TYPE_STRING
;
385 printk(KERN_WARNING
"Structure type cannot be loaded.\n");
388 printk(KERN_WARNING
"Unknown type: %d", (int) field
->type
.atype
);
394 static int specialize_context_lookup(struct bytecode_runtime
*runtime
,
395 struct load_op
*insn
,
396 struct vstack_load
*load
)
399 struct lttng_ctx_field
*ctx_field
;
400 struct lttng_event_field
*field
;
401 struct filter_get_index_data gid
;
404 idx
= specialize_context_lookup_name(runtime
, insn
);
408 ctx_field
= <tng_static_ctx
->fields
[idx
];
409 field
= &ctx_field
->event_field
;
410 ret
= specialize_load_object(field
, load
, true);
413 /* Specialize each get_symbol into a get_index. */
414 insn
->op
= FILTER_OP_GET_INDEX_U16
;
415 memset(&gid
, 0, sizeof(gid
));
417 gid
.elem
.type
= load
->object_type
;
418 data_offset
= bytecode_push_data(runtime
, &gid
,
419 __alignof__(gid
), sizeof(gid
));
420 if (data_offset
< 0) {
423 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
427 static int specialize_event_payload_lookup(struct lttng_event
*event
,
428 struct bytecode_runtime
*runtime
,
429 struct load_op
*insn
,
430 struct vstack_load
*load
)
434 const struct lttng_event_desc
*desc
= event
->desc
;
435 unsigned int i
, nr_fields
;
437 uint32_t field_offset
= 0;
438 const struct lttng_event_field
*field
;
440 struct filter_get_index_data gid
;
443 nr_fields
= desc
->nr_fields
;
444 offset
= ((struct get_symbol
*) insn
->data
)->offset
;
445 name
= runtime
->p
.bc
->bc
.data
+ runtime
->p
.bc
->bc
.reloc_offset
+ offset
;
446 for (i
= 0; i
< nr_fields
; i
++) {
447 field
= &desc
->fields
[i
];
448 if (!strcmp(field
->name
, name
)) {
452 /* compute field offset on stack */
453 switch (field
->type
.atype
) {
456 field_offset
+= sizeof(int64_t);
460 case atype_array_bitfield
:
461 case atype_sequence_bitfield
:
462 field_offset
+= sizeof(unsigned long);
463 field_offset
+= sizeof(void *);
466 field_offset
+= sizeof(void *);
478 ret
= specialize_load_object(field
, load
, false);
482 /* Specialize each get_symbol into a get_index. */
483 insn
->op
= FILTER_OP_GET_INDEX_U16
;
484 memset(&gid
, 0, sizeof(gid
));
485 gid
.offset
= field_offset
;
486 gid
.elem
.type
= load
->object_type
;
487 data_offset
= bytecode_push_data(runtime
, &gid
,
488 __alignof__(gid
), sizeof(gid
));
489 if (data_offset
< 0) {
493 ((struct get_index_u16
*) insn
->data
)->index
= data_offset
;
499 int lttng_filter_specialize_bytecode(struct lttng_event
*event
,
500 struct bytecode_runtime
*bytecode
)
502 void *pc
, *next_pc
, *start_pc
;
504 struct vstack _stack
;
505 struct vstack
*stack
= &_stack
;
509 start_pc
= &bytecode
->code
[0];
510 for (pc
= next_pc
= start_pc
; pc
- start_pc
< bytecode
->len
;
512 switch (*(filter_opcode_t
*) pc
) {
513 case FILTER_OP_UNKNOWN
:
515 printk(KERN_WARNING
"unknown bytecode op %u\n",
516 (unsigned int) *(filter_opcode_t
*) pc
);
520 case FILTER_OP_RETURN
:
521 case FILTER_OP_RETURN_S64
:
530 case FILTER_OP_MINUS
:
531 printk(KERN_WARNING
"unsupported bytecode op %u\n",
532 (unsigned int) *(filter_opcode_t
*) pc
);
538 struct binary_op
*insn
= (struct binary_op
*) pc
;
540 switch(vstack_ax(stack
)->type
) {
542 printk(KERN_WARNING
"unknown register type\n");
547 if (vstack_bx(stack
)->type
== REG_STAR_GLOB_STRING
)
548 insn
->op
= FILTER_OP_EQ_STAR_GLOB_STRING
;
550 insn
->op
= FILTER_OP_EQ_STRING
;
552 case REG_STAR_GLOB_STRING
:
553 insn
->op
= FILTER_OP_EQ_STAR_GLOB_STRING
;
556 if (vstack_bx(stack
)->type
== REG_S64
)
557 insn
->op
= FILTER_OP_EQ_S64
;
559 insn
->op
= FILTER_OP_EQ_DOUBLE_S64
;
562 if (vstack_bx(stack
)->type
== REG_S64
)
563 insn
->op
= FILTER_OP_EQ_S64_DOUBLE
;
565 insn
->op
= FILTER_OP_EQ_DOUBLE
;
569 if (vstack_pop(stack
)) {
573 vstack_ax(stack
)->type
= REG_S64
;
574 next_pc
+= sizeof(struct binary_op
);
580 struct binary_op
*insn
= (struct binary_op
*) pc
;
582 switch(vstack_ax(stack
)->type
) {
584 printk(KERN_WARNING
"unknown register type\n");
589 if (vstack_bx(stack
)->type
== REG_STAR_GLOB_STRING
)
590 insn
->op
= FILTER_OP_NE_STAR_GLOB_STRING
;
592 insn
->op
= FILTER_OP_NE_STRING
;
594 case REG_STAR_GLOB_STRING
:
595 insn
->op
= FILTER_OP_NE_STAR_GLOB_STRING
;
598 if (vstack_bx(stack
)->type
== REG_S64
)
599 insn
->op
= FILTER_OP_NE_S64
;
601 insn
->op
= FILTER_OP_NE_DOUBLE_S64
;
604 if (vstack_bx(stack
)->type
== REG_S64
)
605 insn
->op
= FILTER_OP_NE_S64_DOUBLE
;
607 insn
->op
= FILTER_OP_NE_DOUBLE
;
611 if (vstack_pop(stack
)) {
615 vstack_ax(stack
)->type
= REG_S64
;
616 next_pc
+= sizeof(struct binary_op
);
622 struct binary_op
*insn
= (struct binary_op
*) pc
;
624 switch(vstack_ax(stack
)->type
) {
626 printk(KERN_WARNING
"unknown register type\n");
630 case REG_STAR_GLOB_STRING
:
631 printk(KERN_WARNING
"invalid register type for > binary operator\n");
635 insn
->op
= FILTER_OP_GT_STRING
;
638 if (vstack_bx(stack
)->type
== REG_S64
)
639 insn
->op
= FILTER_OP_GT_S64
;
641 insn
->op
= FILTER_OP_GT_DOUBLE_S64
;
644 if (vstack_bx(stack
)->type
== REG_S64
)
645 insn
->op
= FILTER_OP_GT_S64_DOUBLE
;
647 insn
->op
= FILTER_OP_GT_DOUBLE
;
651 if (vstack_pop(stack
)) {
655 vstack_ax(stack
)->type
= REG_S64
;
656 next_pc
+= sizeof(struct binary_op
);
662 struct binary_op
*insn
= (struct binary_op
*) pc
;
664 switch(vstack_ax(stack
)->type
) {
666 printk(KERN_WARNING
"unknown register type\n");
670 case REG_STAR_GLOB_STRING
:
671 printk(KERN_WARNING
"invalid register type for < binary operator\n");
675 insn
->op
= FILTER_OP_LT_STRING
;
678 if (vstack_bx(stack
)->type
== REG_S64
)
679 insn
->op
= FILTER_OP_LT_S64
;
681 insn
->op
= FILTER_OP_LT_DOUBLE_S64
;
684 if (vstack_bx(stack
)->type
== REG_S64
)
685 insn
->op
= FILTER_OP_LT_S64_DOUBLE
;
687 insn
->op
= FILTER_OP_LT_DOUBLE
;
691 if (vstack_pop(stack
)) {
695 vstack_ax(stack
)->type
= REG_S64
;
696 next_pc
+= sizeof(struct binary_op
);
702 struct binary_op
*insn
= (struct binary_op
*) pc
;
704 switch(vstack_ax(stack
)->type
) {
706 printk(KERN_WARNING
"unknown register type\n");
710 case REG_STAR_GLOB_STRING
:
711 printk(KERN_WARNING
"invalid register type for >= binary operator\n");
715 insn
->op
= FILTER_OP_GE_STRING
;
718 if (vstack_bx(stack
)->type
== REG_S64
)
719 insn
->op
= FILTER_OP_GE_S64
;
721 insn
->op
= FILTER_OP_GE_DOUBLE_S64
;
724 if (vstack_bx(stack
)->type
== REG_S64
)
725 insn
->op
= FILTER_OP_GE_S64_DOUBLE
;
727 insn
->op
= FILTER_OP_GE_DOUBLE
;
731 if (vstack_pop(stack
)) {
735 vstack_ax(stack
)->type
= REG_S64
;
736 next_pc
+= sizeof(struct binary_op
);
741 struct binary_op
*insn
= (struct binary_op
*) pc
;
743 switch(vstack_ax(stack
)->type
) {
745 printk(KERN_WARNING
"unknown register type\n");
749 case REG_STAR_GLOB_STRING
:
750 printk(KERN_WARNING
"invalid register type for <= binary operator\n");
754 insn
->op
= FILTER_OP_LE_STRING
;
757 if (vstack_bx(stack
)->type
== REG_S64
)
758 insn
->op
= FILTER_OP_LE_S64
;
760 insn
->op
= FILTER_OP_LE_DOUBLE_S64
;
763 if (vstack_bx(stack
)->type
== REG_S64
)
764 insn
->op
= FILTER_OP_LE_S64_DOUBLE
;
766 insn
->op
= FILTER_OP_LE_DOUBLE
;
769 vstack_ax(stack
)->type
= REG_S64
;
770 next_pc
+= sizeof(struct binary_op
);
774 case FILTER_OP_EQ_STRING
:
775 case FILTER_OP_NE_STRING
:
776 case FILTER_OP_GT_STRING
:
777 case FILTER_OP_LT_STRING
:
778 case FILTER_OP_GE_STRING
:
779 case FILTER_OP_LE_STRING
:
780 case FILTER_OP_EQ_STAR_GLOB_STRING
:
781 case FILTER_OP_NE_STAR_GLOB_STRING
:
782 case FILTER_OP_EQ_S64
:
783 case FILTER_OP_NE_S64
:
784 case FILTER_OP_GT_S64
:
785 case FILTER_OP_LT_S64
:
786 case FILTER_OP_GE_S64
:
787 case FILTER_OP_LE_S64
:
788 case FILTER_OP_EQ_DOUBLE
:
789 case FILTER_OP_NE_DOUBLE
:
790 case FILTER_OP_GT_DOUBLE
:
791 case FILTER_OP_LT_DOUBLE
:
792 case FILTER_OP_GE_DOUBLE
:
793 case FILTER_OP_LE_DOUBLE
:
794 case FILTER_OP_EQ_DOUBLE_S64
:
795 case FILTER_OP_NE_DOUBLE_S64
:
796 case FILTER_OP_GT_DOUBLE_S64
:
797 case FILTER_OP_LT_DOUBLE_S64
:
798 case FILTER_OP_GE_DOUBLE_S64
:
799 case FILTER_OP_LE_DOUBLE_S64
:
800 case FILTER_OP_EQ_S64_DOUBLE
:
801 case FILTER_OP_NE_S64_DOUBLE
:
802 case FILTER_OP_GT_S64_DOUBLE
:
803 case FILTER_OP_LT_S64_DOUBLE
:
804 case FILTER_OP_GE_S64_DOUBLE
:
805 case FILTER_OP_LE_S64_DOUBLE
:
806 case FILTER_OP_BIT_RSHIFT
:
807 case FILTER_OP_BIT_LSHIFT
:
808 case FILTER_OP_BIT_AND
:
809 case FILTER_OP_BIT_OR
:
810 case FILTER_OP_BIT_XOR
:
813 if (vstack_pop(stack
)) {
817 vstack_ax(stack
)->type
= REG_S64
;
818 next_pc
+= sizeof(struct binary_op
);
823 case FILTER_OP_UNARY_PLUS
:
825 struct unary_op
*insn
= (struct unary_op
*) pc
;
827 switch(vstack_ax(stack
)->type
) {
829 printk(KERN_WARNING
"unknown register type\n");
834 insn
->op
= FILTER_OP_UNARY_PLUS_S64
;
837 insn
->op
= FILTER_OP_UNARY_PLUS_DOUBLE
;
841 next_pc
+= sizeof(struct unary_op
);
845 case FILTER_OP_UNARY_MINUS
:
847 struct unary_op
*insn
= (struct unary_op
*) pc
;
849 switch(vstack_ax(stack
)->type
) {
851 printk(KERN_WARNING
"unknown register type\n");
856 insn
->op
= FILTER_OP_UNARY_MINUS_S64
;
859 insn
->op
= FILTER_OP_UNARY_MINUS_DOUBLE
;
863 next_pc
+= sizeof(struct unary_op
);
867 case FILTER_OP_UNARY_NOT
:
869 struct unary_op
*insn
= (struct unary_op
*) pc
;
871 switch(vstack_ax(stack
)->type
) {
873 printk(KERN_WARNING
"unknown register type\n");
878 insn
->op
= FILTER_OP_UNARY_NOT_S64
;
881 insn
->op
= FILTER_OP_UNARY_NOT_DOUBLE
;
885 next_pc
+= sizeof(struct unary_op
);
889 case FILTER_OP_UNARY_BIT_NOT
:
892 next_pc
+= sizeof(struct unary_op
);
896 case FILTER_OP_UNARY_PLUS_S64
:
897 case FILTER_OP_UNARY_MINUS_S64
:
898 case FILTER_OP_UNARY_NOT_S64
:
899 case FILTER_OP_UNARY_PLUS_DOUBLE
:
900 case FILTER_OP_UNARY_MINUS_DOUBLE
:
901 case FILTER_OP_UNARY_NOT_DOUBLE
:
904 next_pc
+= sizeof(struct unary_op
);
912 /* Continue to next instruction */
913 /* Pop 1 when jump not taken */
914 if (vstack_pop(stack
)) {
918 next_pc
+= sizeof(struct logical_op
);
923 case FILTER_OP_LOAD_FIELD_REF
:
925 printk(KERN_WARNING
"Unknown field ref type\n");
929 /* get context ref */
930 case FILTER_OP_GET_CONTEXT_REF
:
932 printk(KERN_WARNING
"Unknown get context ref type\n");
936 case FILTER_OP_LOAD_FIELD_REF_STRING
:
937 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE
:
938 case FILTER_OP_GET_CONTEXT_REF_STRING
:
939 case FILTER_OP_LOAD_FIELD_REF_USER_STRING
:
940 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE
:
942 if (vstack_push(stack
)) {
946 vstack_ax(stack
)->type
= REG_STRING
;
947 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
950 case FILTER_OP_LOAD_FIELD_REF_S64
:
951 case FILTER_OP_GET_CONTEXT_REF_S64
:
953 if (vstack_push(stack
)) {
957 vstack_ax(stack
)->type
= REG_S64
;
958 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
961 case FILTER_OP_LOAD_FIELD_REF_DOUBLE
:
962 case FILTER_OP_GET_CONTEXT_REF_DOUBLE
:
964 if (vstack_push(stack
)) {
968 vstack_ax(stack
)->type
= REG_DOUBLE
;
969 next_pc
+= sizeof(struct load_op
) + sizeof(struct field_ref
);
973 /* load from immediate operand */
974 case FILTER_OP_LOAD_STRING
:
976 struct load_op
*insn
= (struct load_op
*) pc
;
978 if (vstack_push(stack
)) {
982 vstack_ax(stack
)->type
= REG_STRING
;
983 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
987 case FILTER_OP_LOAD_STAR_GLOB_STRING
:
989 struct load_op
*insn
= (struct load_op
*) pc
;
991 if (vstack_push(stack
)) {
995 vstack_ax(stack
)->type
= REG_STAR_GLOB_STRING
;
996 next_pc
+= sizeof(struct load_op
) + strlen(insn
->data
) + 1;
1000 case FILTER_OP_LOAD_S64
:
1002 if (vstack_push(stack
)) {
1006 vstack_ax(stack
)->type
= REG_S64
;
1007 next_pc
+= sizeof(struct load_op
)
1008 + sizeof(struct literal_numeric
);
1012 case FILTER_OP_LOAD_DOUBLE
:
1014 if (vstack_push(stack
)) {
1018 vstack_ax(stack
)->type
= REG_DOUBLE
;
1019 next_pc
+= sizeof(struct load_op
)
1020 + sizeof(struct literal_double
);
1025 case FILTER_OP_CAST_TO_S64
:
1027 struct cast_op
*insn
= (struct cast_op
*) pc
;
1029 switch (vstack_ax(stack
)->type
) {
1031 printk(KERN_WARNING
"unknown register type\n");
1036 case REG_STAR_GLOB_STRING
:
1037 printk(KERN_WARNING
"Cast op can only be applied to numeric or floating point registers\n");
1041 insn
->op
= FILTER_OP_CAST_NOP
;
1044 insn
->op
= FILTER_OP_CAST_DOUBLE_TO_S64
;
1048 vstack_ax(stack
)->type
= REG_S64
;
1049 next_pc
+= sizeof(struct cast_op
);
1052 case FILTER_OP_CAST_DOUBLE_TO_S64
:
1055 vstack_ax(stack
)->type
= REG_S64
;
1056 next_pc
+= sizeof(struct cast_op
);
1059 case FILTER_OP_CAST_NOP
:
1061 next_pc
+= sizeof(struct cast_op
);
1066 * Instructions for recursive traversal through composed types.
1068 case FILTER_OP_GET_CONTEXT_ROOT
:
1070 if (vstack_push(stack
)) {
1074 vstack_ax(stack
)->type
= REG_PTR
;
1075 vstack_ax(stack
)->load
.type
= LOAD_ROOT_CONTEXT
;
1076 next_pc
+= sizeof(struct load_op
);
1079 case FILTER_OP_GET_APP_CONTEXT_ROOT
:
1081 if (vstack_push(stack
)) {
1085 vstack_ax(stack
)->type
= REG_PTR
;
1086 vstack_ax(stack
)->load
.type
= LOAD_ROOT_APP_CONTEXT
;
1087 next_pc
+= sizeof(struct load_op
);
1090 case FILTER_OP_GET_PAYLOAD_ROOT
:
1092 if (vstack_push(stack
)) {
1096 vstack_ax(stack
)->type
= REG_PTR
;
1097 vstack_ax(stack
)->load
.type
= LOAD_ROOT_PAYLOAD
;
1098 next_pc
+= sizeof(struct load_op
);
1102 case FILTER_OP_LOAD_FIELD
:
1104 struct load_op
*insn
= (struct load_op
*) pc
;
1106 WARN_ON_ONCE(vstack_ax(stack
)->type
!= REG_PTR
);
1108 ret
= specialize_load_field(vstack_ax(stack
), insn
);
1112 next_pc
+= sizeof(struct load_op
);
1116 case FILTER_OP_LOAD_FIELD_S8
:
1117 case FILTER_OP_LOAD_FIELD_S16
:
1118 case FILTER_OP_LOAD_FIELD_S32
:
1119 case FILTER_OP_LOAD_FIELD_S64
:
1120 case FILTER_OP_LOAD_FIELD_U8
:
1121 case FILTER_OP_LOAD_FIELD_U16
:
1122 case FILTER_OP_LOAD_FIELD_U32
:
1123 case FILTER_OP_LOAD_FIELD_U64
:
1126 vstack_ax(stack
)->type
= REG_S64
;
1127 next_pc
+= sizeof(struct load_op
);
1131 case FILTER_OP_LOAD_FIELD_STRING
:
1132 case FILTER_OP_LOAD_FIELD_SEQUENCE
:
1135 vstack_ax(stack
)->type
= REG_STRING
;
1136 next_pc
+= sizeof(struct load_op
);
1140 case FILTER_OP_LOAD_FIELD_DOUBLE
:
1143 vstack_ax(stack
)->type
= REG_DOUBLE
;
1144 next_pc
+= sizeof(struct load_op
);
1148 case FILTER_OP_GET_SYMBOL
:
1150 struct load_op
*insn
= (struct load_op
*) pc
;
1152 dbg_printk("op get symbol\n");
1153 switch (vstack_ax(stack
)->load
.type
) {
1155 printk(KERN_WARNING
"Nested fields not implemented yet.\n");
1158 case LOAD_ROOT_CONTEXT
:
1159 /* Lookup context field. */
1160 ret
= specialize_context_lookup(bytecode
, insn
,
1161 &vstack_ax(stack
)->load
);
1165 case LOAD_ROOT_APP_CONTEXT
:
1168 case LOAD_ROOT_PAYLOAD
:
1169 /* Lookup event payload field. */
1170 ret
= specialize_event_payload_lookup(event
,
1172 &vstack_ax(stack
)->load
);
1177 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_symbol
);
1181 case FILTER_OP_GET_SYMBOL_FIELD
:
1183 /* Always generated by specialize phase. */
1188 case FILTER_OP_GET_INDEX_U16
:
1190 struct load_op
*insn
= (struct load_op
*) pc
;
1191 struct get_index_u16
*index
= (struct get_index_u16
*) insn
->data
;
1193 dbg_printk("op get index u16\n");
1195 ret
= specialize_get_index(bytecode
, insn
, index
->index
,
1196 vstack_ax(stack
), sizeof(*index
));
1199 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u16
);
1203 case FILTER_OP_GET_INDEX_U64
:
1205 struct load_op
*insn
= (struct load_op
*) pc
;
1206 struct get_index_u64
*index
= (struct get_index_u64
*) insn
->data
;
1208 dbg_printk("op get index u64\n");
1210 ret
= specialize_get_index(bytecode
, insn
, index
->index
,
1211 vstack_ax(stack
), sizeof(*index
));
1214 next_pc
+= sizeof(struct load_op
) + sizeof(struct get_index_u64
);