/*
 * lttng-filter-specialize.c
 *
 * LTTng modules filter code specializer.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/slab.h>
#include <lttng-filter.h>
#include "lib/align.h"

static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
		size_t align, size_t len)
{
	ssize_t ret;
	size_t padding = offset_align(runtime->data_len, align);
	size_t new_len = runtime->data_len + padding + len;
	size_t new_alloc_len = new_len;
	size_t old_alloc_len = runtime->data_alloc_len;

	if (new_len > FILTER_MAX_DATA_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		char *newptr;

		new_alloc_len =
			max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = krealloc(runtime->data, new_alloc_len, GFP_KERNEL);
		if (!newptr)
			return -ENOMEM;
		runtime->data = newptr;
		/* Zero the newly allocated memory, from the end of the old allocation onwards. */
		memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		runtime->data_alloc_len = new_alloc_len;
	}
	runtime->data_len += padding;
	ret = runtime->data_len;
	runtime->data_len += len;
	return ret;
}

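/*
 * Worked example of the reservation arithmetic above (a sketch, assuming
 * offset_align() returns the padding needed to reach the next multiple of
 * "align"): with data_len = 5 and align = 8, padding is 3, the reserved
 * offset is 8, and data_len becomes 8 + len afterwards. The allocation
 * grows to the next power of two (or at least doubles), so repeated
 * reservations stay amortized.
 */
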
static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
		const void *p, size_t align, size_t len)
{
	ssize_t offset;

	offset = bytecode_reserve_data(runtime, align, len);
	if (offset < 0)
		return -ENOMEM;
	memcpy(&runtime->data[offset], p, len);
	return offset;
}

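/*
 * Callers below store the offset returned by bytecode_push_data() into the
 * index operand of a get_index instruction, so that the record pushed here
 * (a struct filter_get_index_data) can be located again in the runtime
 * data area when the filter is interpreted.
 */
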
static int specialize_load_field(struct vstack_entry *stack_top,
		struct load_op *insn)
{
	int ret;

	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printk("Filter warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	switch (stack_top->load.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printk("op load field s8\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S8;
		break;
	case OBJECT_TYPE_S16:
		dbg_printk("op load field s16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S16;
		break;
	case OBJECT_TYPE_S32:
		dbg_printk("op load field s32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S32;
		break;
	case OBJECT_TYPE_S64:
		dbg_printk("op load field s64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S64;
		break;
	case OBJECT_TYPE_U8:
		dbg_printk("op load field u8\n");
		stack_top->type = REG_S64;
		insn->op = FILTER_OP_LOAD_FIELD_U8;
		break;
	case OBJECT_TYPE_U16:
		dbg_printk("op load field u16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U16;
		break;
	case OBJECT_TYPE_U32:
		dbg_printk("op load field u32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U32;
		break;
	case OBJECT_TYPE_U64:
		dbg_printk("op load field u64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U64;
		break;
	case OBJECT_TYPE_DOUBLE:
		printk(KERN_WARNING "Double type unsupported\n");
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_STRING:
		dbg_printk("op load field string\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_STRING;
		break;
	case OBJECT_TYPE_STRING_SEQUENCE:
		dbg_printk("op load field string sequence\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
		break;
	case OBJECT_TYPE_DYNAMIC:
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		printk(KERN_WARNING "Sequences, arrays, structs and variants cannot be loaded (nested types).\n");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}

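/*
 * specialize_load_field() rewrites a generic FILTER_OP_LOAD_FIELD
 * instruction in place into its type-specific variant (e.g.
 * FILTER_OP_LOAD_FIELD_S32) and records the resulting register type on the
 * virtual stack, so the object type does not have to be re-dispatched at
 * run time. Most integer cases only rewrite the opcode when the field is
 * not flagged as reverse byte order.
 */
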
static int specialize_get_index_object_type(enum object_type *otype,
		int signedness, uint32_t elem_len)
{
	switch (elem_len) {
	case 8:
		if (signedness)
			*otype = OBJECT_TYPE_S8;
		else
			*otype = OBJECT_TYPE_U8;
		break;
	case 16:
		if (signedness)
			*otype = OBJECT_TYPE_S16;
		else
			*otype = OBJECT_TYPE_U16;
		break;
	case 32:
		if (signedness)
			*otype = OBJECT_TYPE_S32;
		else
			*otype = OBJECT_TYPE_U32;
		break;
	case 64:
		if (signedness)
			*otype = OBJECT_TYPE_S64;
		else
			*otype = OBJECT_TYPE_U64;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

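/*
 * Helper for the array/sequence cases below: elem_len is the element size
 * in bits, so a signed 32-bit element maps to OBJECT_TYPE_S32. Widths other
 * than 8, 16, 32 and 64 bits are rejected with -EINVAL.
 */
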
static int specialize_get_index(struct bytecode_runtime *runtime,
		struct load_op *insn, uint64_t index,
		struct vstack_entry *stack_top,
		int idx_len)
{
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	memset(&gid, 0, sizeof(gid));
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		switch (stack_top->load.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len, num_elems;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.array.elem_type.u.basic.integer.size;
			signedness = field->type.u.array.elem_type.u.basic.integer.signedness;
			num_elems = field->type.u.array.length;
			if (index >= num_elems) {
				ret = -EINVAL;
				goto end;
			}
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.array_len = num_elems * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (field->type.u.array.elem_type.u.basic.integer.reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.sequence.elem_type.u.basic.integer.size;
			signedness = field->type.u.sequence.elem_type.u.basic.integer.signedness;
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (field->type.u.sequence.elem_type.u.basic.integer.reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			/* Only generated by the specialize phase. */
		case OBJECT_TYPE_VARIANT:	/* Fall-through */
		default:
			printk(KERN_WARNING "Unexpected get index type %d\n",
				(int) stack_top->load.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
		printk(KERN_WARNING "Index lookup for root field not implemented yet.\n");
		ret = -EINVAL;
		goto end;
	}
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	switch (idx_len) {
	case 2:
		((struct get_index_u16 *) insn->data)->index = data_offset;
		break;
	case 8:
		((struct get_index_u64 *) insn->data)->index = data_offset;
		break;
	default:
		ret = -EINVAL;
		goto end;
	}

	return 0;

end:
	return ret;
}

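/*
 * An index access is specialized by pushing a struct filter_get_index_data
 * record (element type, byte offset and length, byte-order flag) into the
 * runtime data area, then patching the instruction's index operand with the
 * returned data offset. For an array of 32-bit integers, indexing element 3
 * gives gid.offset = 3 * (32 / CHAR_BIT) = 12 bytes.
 */
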
static int specialize_context_lookup_name(struct bytecode_runtime *bytecode,
		struct load_op *insn)
{
	uint16_t offset;
	const char *name;

	offset = ((struct get_symbol *) insn->data)->offset;
	name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
	return lttng_get_context_index(lttng_static_ctx, name);
}

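/*
 * The get_symbol operand does not carry the field name itself: it is a
 * 16-bit offset into the name data stored after reloc_offset in the
 * bytecode blob. The name found there is resolved to an index into the
 * static context fields.
 */
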
static int specialize_load_object(const struct lttng_event_field *field,
		struct vstack_load *load, bool is_context)
{
	load->type = LOAD_OBJECT;
	/*
	 * The filter lays out all integer fields as s64 on the stack.
	 */
	switch (field->type.atype) {
	case atype_integer:
		if (field->type.u.basic.integer.signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	case atype_enum:
	{
		const struct lttng_integer_type *itype =
			&field->type.u.basic.enumeration.container_type;

		if (itype->signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	}
	case atype_array:
		if (field->type.u.array.elem_type.atype != atype_integer) {
			printk(KERN_WARNING "Array nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_sequence:
		if (field->type.u.sequence.elem_type.atype != atype_integer) {
			printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_array_bitfield:
		printk(KERN_WARNING "Bitfield array type is not supported.\n");
		return -EINVAL;
	case atype_sequence_bitfield:
		printk(KERN_WARNING "Bitfield sequence type is not supported.\n");
		return -EINVAL;
	case atype_string:
		load->object_type = OBJECT_TYPE_STRING;
		break;
	case atype_struct:
		printk(KERN_WARNING "Structure type cannot be loaded.\n");
		return -EINVAL;
	default:
		printk(KERN_WARNING "Unknown type: %d\n", (int) field->type.atype);
		return -EINVAL;
	}
	return 0;
}

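/*
 * Summary of the mapping above: integers and enums are always loaded as
 * 64-bit (S64/U64) objects; arrays and sequences of non-encoded integers
 * become indexable ARRAY/SEQUENCE objects (keeping a pointer to the field
 * description); encoded (text) arrays and sequences, context arrays and
 * sequences, and plain strings are all treated as strings.
 */
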
static int specialize_context_lookup(struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	int idx, ret;
	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	idx = specialize_context_lookup_name(runtime, insn);
	if (idx < 0) {
		return -ENOENT;
	}
	ctx_field = &lttng_static_ctx->fields[idx];
	field = &ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		return ret;
	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		return -EINVAL;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	return 0;
}

static int specialize_event_payload_lookup(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	const char *name;
	uint16_t offset;
	const struct lttng_event_desc *desc = event->desc;
	unsigned int i, nr_fields;
	bool found = false;
	uint32_t field_offset = 0;
	const struct lttng_event_field *field;
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	nr_fields = desc->nr_fields;
	offset = ((struct get_symbol *) insn->data)->offset;
	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	for (i = 0; i < nr_fields; i++) {
		field = &desc->fields[i];
		if (!strcmp(field->name, name)) {
			found = true;
			break;
		}
		/* compute field offset on stack */
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_sequence:
		case atype_array_bitfield:
		case atype_sequence_bitfield:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		default:
			ret = -EINVAL;
			goto end;
		}
	}
	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	ret = specialize_load_object(field, load, false);
	if (ret)
		goto end;

	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.offset = field_offset;
	gid.elem.type = load->object_type;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	return ret;
}

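/*
 * The field_offset computed above follows the layout of the per-event
 * filter stack data, as implied by the sizes used in the loop: integer and
 * enum fields each take sizeof(int64_t), array and sequence fields a length
 * (unsigned long) followed by a data pointer, and string fields a single
 * pointer. That offset is what the resulting get_index uses to reach the
 * requested payload field.
 */
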
int lttng_filter_specialize_bytecode(struct lttng_event *event,
		struct bytecode_runtime *bytecode)
{
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	struct vstack _stack;
	struct vstack *stack = &_stack;

	vstack_init(stack);

	start_pc = &bytecode->code[0];
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
			pc = next_pc) {
		switch (*(filter_opcode_t *) pc) {
		case FILTER_OP_UNKNOWN:
		default:
			printk(KERN_WARNING "unknown bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case FILTER_OP_RETURN:
		case FILTER_OP_RETURN_S64:
			ret = 0;
			goto end;

		/* binary */
		case FILTER_OP_MUL:
		case FILTER_OP_DIV:
		case FILTER_OP_MOD:
		case FILTER_OP_PLUS:
		case FILTER_OP_MINUS:
			printk(KERN_WARNING "unsupported bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case FILTER_OP_EQ:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
				else
					insn->op = FILTER_OP_EQ_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_EQ_S64;
				else
					insn->op = FILTER_OP_EQ_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_EQ_S64_DOUBLE;
				else
					insn->op = FILTER_OP_EQ_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}
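		/*
		 * The comparison operators below (NE, GT, LT, GE, LE) follow the
		 * same pattern as EQ above: pick the specialized opcode from the
		 * types of the two virtual-stack registers (string, star-glob
		 * string, s64 or double), then pop both operands and push a
		 * single s64 result.
		 */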

		case FILTER_OP_NE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
				else
					insn->op = FILTER_OP_NE_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_NE_S64;
				else
					insn->op = FILTER_OP_NE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_NE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_NE_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_GT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for > binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_GT_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GT_S64;
				else
					insn->op = FILTER_OP_GT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GT_S64_DOUBLE;
				else
					insn->op = FILTER_OP_GT_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_LT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for < binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_LT_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LT_S64;
				else
					insn->op = FILTER_OP_LT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LT_S64_DOUBLE;
				else
					insn->op = FILTER_OP_LT_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_GE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for >= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_GE_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GE_S64;
				else
					insn->op = FILTER_OP_GE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_GE_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}
		case FILTER_OP_LE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for <= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_LE_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LE_S64;
				else
					insn->op = FILTER_OP_LE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_LE_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_EQ_STRING:
		case FILTER_OP_NE_STRING:
		case FILTER_OP_GT_STRING:
		case FILTER_OP_LT_STRING:
		case FILTER_OP_GE_STRING:
		case FILTER_OP_LE_STRING:
		case FILTER_OP_EQ_STAR_GLOB_STRING:
		case FILTER_OP_NE_STAR_GLOB_STRING:
		case FILTER_OP_EQ_S64:
		case FILTER_OP_NE_S64:
		case FILTER_OP_GT_S64:
		case FILTER_OP_LT_S64:
		case FILTER_OP_GE_S64:
		case FILTER_OP_LE_S64:
		case FILTER_OP_EQ_DOUBLE:
		case FILTER_OP_NE_DOUBLE:
		case FILTER_OP_GT_DOUBLE:
		case FILTER_OP_LT_DOUBLE:
		case FILTER_OP_GE_DOUBLE:
		case FILTER_OP_LE_DOUBLE:
		case FILTER_OP_EQ_DOUBLE_S64:
		case FILTER_OP_NE_DOUBLE_S64:
		case FILTER_OP_GT_DOUBLE_S64:
		case FILTER_OP_LT_DOUBLE_S64:
		case FILTER_OP_GE_DOUBLE_S64:
		case FILTER_OP_LE_DOUBLE_S64:
		case FILTER_OP_EQ_S64_DOUBLE:
		case FILTER_OP_NE_S64_DOUBLE:
		case FILTER_OP_GT_S64_DOUBLE:
		case FILTER_OP_LT_S64_DOUBLE:
		case FILTER_OP_GE_S64_DOUBLE:
		case FILTER_OP_LE_S64_DOUBLE:
		case FILTER_OP_BIT_RSHIFT:
		case FILTER_OP_BIT_LSHIFT:
		case FILTER_OP_BIT_AND:
		case FILTER_OP_BIT_OR:
		case FILTER_OP_BIT_XOR:
		{
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}
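		/*
		 * The already-typed comparison opcodes and the bitwise operators
		 * need no further specialization: each pops two operands and
		 * pushes one s64 result, so only the virtual stack is updated.
		 */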

		/* unary */
		case FILTER_OP_UNARY_PLUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_PLUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_MINUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_MINUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_NOT:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_NOT_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_BIT_NOT:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_PLUS_S64:
		case FILTER_OP_UNARY_MINUS_S64:
		case FILTER_OP_UNARY_NOT_S64:
		case FILTER_OP_UNARY_PLUS_DOUBLE:
		case FILTER_OP_UNARY_MINUS_DOUBLE:
		case FILTER_OP_UNARY_NOT_DOUBLE:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		/* logical */
		case FILTER_OP_AND:
		case FILTER_OP_OR:
		{
			/* Continue to next instruction */
			/* Pop 1 when jump not taken */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			next_pc += sizeof(struct logical_op);
			break;
		}

		/* load field ref */
		case FILTER_OP_LOAD_FIELD_REF:
		{
			printk(KERN_WARNING "Unknown field ref type\n");
			ret = -EINVAL;
			goto end;
		}
		/* get context ref */
		case FILTER_OP_GET_CONTEXT_REF:
		{
			printk(KERN_WARNING "Unknown get context ref type\n");
			ret = -EINVAL;
			goto end;
		}
		case FILTER_OP_LOAD_FIELD_REF_STRING:
		case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
		case FILTER_OP_GET_CONTEXT_REF_STRING:
		case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
		case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_S64:
		case FILTER_OP_GET_CONTEXT_REF_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
		case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}

		/* load from immediate operand */
		case FILTER_OP_LOAD_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case FILTER_OP_LOAD_STAR_GLOB_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case FILTER_OP_LOAD_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_numeric);
			break;
		}

		case FILTER_OP_LOAD_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_double);
			break;
		}

		/* cast */
		case FILTER_OP_CAST_TO_S64:
		{
			struct cast_op *insn = (struct cast_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
				ret = -EINVAL;
				goto end;
			case REG_S64:
				insn->op = FILTER_OP_CAST_NOP;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_CAST_DOUBLE_TO_S64;
				break;
			}
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case FILTER_OP_CAST_DOUBLE_TO_S64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case FILTER_OP_CAST_NOP:
		{
			next_pc += sizeof(struct cast_op);
			break;
		}

		/*
		 * Instructions for recursive traversal through composed types.
		 */
		case FILTER_OP_GET_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case FILTER_OP_GET_APP_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case FILTER_OP_GET_PAYLOAD_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD:
		{
			struct load_op *insn = (struct load_op *) pc;

			WARN_ON_ONCE(vstack_ax(stack)->type != REG_PTR);
			/* Pop 1, push 1 */
			ret = specialize_load_field(vstack_ax(stack), insn);
			if (ret)
				goto end;

			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_S8:
		case FILTER_OP_LOAD_FIELD_S16:
		case FILTER_OP_LOAD_FIELD_S32:
		case FILTER_OP_LOAD_FIELD_S64:
		case FILTER_OP_LOAD_FIELD_U8:
		case FILTER_OP_LOAD_FIELD_U16:
		case FILTER_OP_LOAD_FIELD_U32:
		case FILTER_OP_LOAD_FIELD_U64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_STRING:
		case FILTER_OP_LOAD_FIELD_SEQUENCE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_DOUBLE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_GET_SYMBOL:
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printk("op get symbol\n");
			switch (vstack_ax(stack)->load.type) {
			case LOAD_OBJECT:
				printk(KERN_WARNING "Nested fields not implemented yet.\n");
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_CONTEXT:
				/* Lookup context field. */
				ret = specialize_context_lookup(bytecode, insn,
					&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_APP_CONTEXT:
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_PAYLOAD:
				/* Lookup event payload field. */
				ret = specialize_event_payload_lookup(event,
					bytecode, insn,
					&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			}
			next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
			break;
		}

		case FILTER_OP_GET_SYMBOL_FIELD:
		{
			/* Always generated by specialize phase. */
			ret = -EINVAL;
			goto end;
		}

		case FILTER_OP_GET_INDEX_U16:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u16 *index = (struct get_index_u16 *) insn->data;

			dbg_printk("op get index u16\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
			break;
		}

		case FILTER_OP_GET_INDEX_U64:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u64 *index = (struct get_index_u64 *) insn->data;

			dbg_printk("op get index u64\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
			break;
		}

		}
	}
end:
	return ret;
}
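
/*
 * Note: lttng_filter_specialize_bytecode() is a single forward pass over the
 * bytecode which mirrors the interpreter's stack with a small virtual stack
 * of register types, rewrites generic opcodes into type-specific ones in
 * place, and appends lookup metadata to the runtime data area. It is
 * presumably run once at bytecode link time, after the validation pass, so
 * that the interpreter's fast path stays free of type dispatch.
 */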