/* SPDX-License-Identifier: MIT
 *
 * lttng-filter-specialize.c
 *
 * LTTng modules filter code specializer.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/slab.h>
#include <lttng-filter.h>
#include "lib/align.h"

#include <wrapper/compiler_attributes.h>

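/*
 * Reserve @len bytes, aligned on @align, within the runtime data area,
 * growing the backing buffer as needed (power-of-two growth, at least
 * doubling). Newly allocated memory is zeroed. Returns the offset of the
 * reserved region, or a negative error code.
 */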
static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
		size_t align, size_t len)
{
	ssize_t ret;
	size_t padding = offset_align(runtime->data_len, align);
	size_t new_len = runtime->data_len + padding + len;
	size_t new_alloc_len = new_len;
	size_t old_alloc_len = runtime->data_alloc_len;

	if (new_len > FILTER_MAX_DATA_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		char *newptr;

		new_alloc_len =
			max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = krealloc(runtime->data, new_alloc_len, GFP_KERNEL);
		if (!newptr)
			return -ENOMEM;
		runtime->data = newptr;
		/* Zero the newly allocated memory, from the end of the old allocation onward. */
		memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		runtime->data_alloc_len = new_alloc_len;
	}
	runtime->data_len += padding;
	ret = runtime->data_len;
	runtime->data_len += len;
	return ret;
}

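/*
 * Append a copy of the @len bytes at @p, aligned on @align, to the runtime
 * data area. Returns the offset at which the data was stored, or a negative
 * error code.
 */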
static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
		const void *p, size_t align, size_t len)
{
	ssize_t offset;

	offset = bytecode_reserve_data(runtime, align, len);
	if (offset < 0)
		return -ENOMEM;
	memcpy(&runtime->data[offset], p, len);
	return offset;
}

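/*
 * Specialize a generic FILTER_OP_LOAD_FIELD according to the object type at
 * the top of the virtual stack, selecting a typed load opcode and recording
 * the resulting register type. Fields that live in user-space buffers
 * (load.user) keep the generic opcode.
 */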
static int specialize_load_field(struct vstack_entry *stack_top,
		struct load_op *insn)
{
	int ret;

	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printk("Filter warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	switch (stack_top->load.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printk("op load field s8\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.user)
			insn->op = FILTER_OP_LOAD_FIELD_S8;
		break;
	case OBJECT_TYPE_S16:
		dbg_printk("op load field s16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo && !stack_top->load.user)
			insn->op = FILTER_OP_LOAD_FIELD_S16;
		break;
	case OBJECT_TYPE_S32:
		dbg_printk("op load field s32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo && !stack_top->load.user)
			insn->op = FILTER_OP_LOAD_FIELD_S32;
		break;
	case OBJECT_TYPE_S64:
		dbg_printk("op load field s64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo && !stack_top->load.user)
			insn->op = FILTER_OP_LOAD_FIELD_S64;
		break;
	case OBJECT_TYPE_U8:
		dbg_printk("op load field u8\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.user)
			insn->op = FILTER_OP_LOAD_FIELD_U8;
		break;
	case OBJECT_TYPE_U16:
		dbg_printk("op load field u16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo && !stack_top->load.user)
			insn->op = FILTER_OP_LOAD_FIELD_U16;
		break;
	case OBJECT_TYPE_U32:
		dbg_printk("op load field u32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo && !stack_top->load.user)
			insn->op = FILTER_OP_LOAD_FIELD_U32;
		break;
	case OBJECT_TYPE_U64:
		dbg_printk("op load field u64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo && !stack_top->load.user)
			insn->op = FILTER_OP_LOAD_FIELD_U64;
		break;
	case OBJECT_TYPE_DOUBLE:
		printk(KERN_WARNING "Double type unsupported\n");
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_STRING:
		dbg_printk("op load field string\n");
		stack_top->type = REG_STRING;
		if (!stack_top->load.user)
			insn->op = FILTER_OP_LOAD_FIELD_STRING;
		break;
	case OBJECT_TYPE_STRING_SEQUENCE:
		dbg_printk("op load field string sequence\n");
		stack_top->type = REG_STRING;
		if (!stack_top->load.user)
			insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
		break;
	case OBJECT_TYPE_DYNAMIC:
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		printk(KERN_WARNING "Sequences, arrays, structs and variants cannot be loaded (nested types).\n");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}

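/*
 * Map an integer element bit-size and signedness to the corresponding typed
 * object type. Only 8, 16, 32 and 64-bit elements are supported.
 */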
static int specialize_get_index_object_type(enum object_type *otype,
		int signedness, uint32_t elem_len)
{
	switch (elem_len) {
	case 8:
		if (signedness)
			*otype = OBJECT_TYPE_S8;
		else
			*otype = OBJECT_TYPE_U8;
		break;
	case 16:
		if (signedness)
			*otype = OBJECT_TYPE_S16;
		else
			*otype = OBJECT_TYPE_U16;
		break;
	case 32:
		if (signedness)
			*otype = OBJECT_TYPE_S32;
		else
			*otype = OBJECT_TYPE_U32;
		break;
	case 64:
		if (signedness)
			*otype = OBJECT_TYPE_S64;
		else
			*otype = OBJECT_TYPE_U64;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

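/*
 * Specialize a get_index operation applied to the object at the top of the
 * virtual stack. For integer arrays and sequences, compute the byte offset
 * of the indexed element, push a struct filter_get_index_data descriptor
 * into the runtime data area, and rewrite the instruction's index field to
 * reference that descriptor.
 */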
static int specialize_get_index(struct bytecode_runtime *runtime,
		struct load_op *insn, uint64_t index,
		struct vstack_entry *stack_top,
		int idx_len)
{
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	memset(&gid, 0, sizeof(gid));
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		switch (stack_top->load.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len, num_elems;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.array.elem_type.u.basic.integer.size;
			signedness = field->type.u.array.elem_type.u.basic.integer.signedness;
			num_elems = field->type.u.array.length;
			if (index >= num_elems) {
				ret = -EINVAL;
				goto end;
			}
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.array_len = num_elems * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			stack_top->load.rev_bo = gid.elem.rev_bo = field->type.u.array.elem_type.u.basic.integer.reverse_byte_order;
			stack_top->load.user = gid.elem.user = field->type.u.array.elem_type.u.basic.integer.user;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.sequence.elem_type.u.basic.integer.size;
			signedness = field->type.u.sequence.elem_type.u.basic.integer.signedness;
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			stack_top->load.rev_bo = gid.elem.rev_bo = field->type.u.sequence.elem_type.u.basic.integer.reverse_byte_order;
			stack_top->load.user = gid.elem.user = field->type.u.sequence.elem_type.u.basic.integer.user;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			/* Only generated by the specialize phase. */
		case OBJECT_TYPE_VARIANT:
			lttng_fallthrough;
		default:
			printk(KERN_WARNING "Unexpected get index type %d\n",
				(int) stack_top->load.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
		printk(KERN_WARNING "Index lookup for root field not implemented yet.\n");
		ret = -EINVAL;
		goto end;
	}
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	switch (idx_len) {
	case 2:
		((struct get_index_u16 *) insn->data)->index = data_offset;
		break;
	case 8:
		((struct get_index_u64 *) insn->data)->index = data_offset;
		break;
	default:
		ret = -EINVAL;
		goto end;
	}

	return 0;

end:
	return ret;
}

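/*
 * Resolve the context field name referenced by a get_symbol instruction to
 * its index within the static context. Returns a negative value when the
 * name is not found.
 */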
static int specialize_context_lookup_name(struct bytecode_runtime *bytecode,
		struct load_op *insn)
{
	uint16_t offset;
	const char *name;

	offset = ((struct get_symbol *) insn->data)->offset;
	name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
	return lttng_get_context_index(lttng_static_ctx, name);
}

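/*
 * Fill the vstack load information for a field based on its event field
 * type. Integer and enumeration fields are presented as 64-bit registers;
 * arrays and sequences are only supported when their elements are integers.
 */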
static int specialize_load_object(const struct lttng_event_field *field,
		struct vstack_load *load, bool is_context)
{
	load->type = LOAD_OBJECT;
	/*
	 * LTTng-UST lays out all integer fields as s64 on the stack for the filter.
	 */
	switch (field->type.atype) {
	case atype_integer:
		if (field->type.u.basic.integer.signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = field->type.u.basic.integer.reverse_byte_order;
		load->user = field->type.u.basic.integer.user;
		break;
	case atype_enum:
	{
		const struct lttng_integer_type *itype =
			&field->type.u.basic.enumeration.container_type;

		if (itype->signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = itype->reverse_byte_order;
		load->user = itype->user;
		break;
	}
	case atype_array:
		if (field->type.u.array.elem_type.atype != atype_integer) {
			printk(KERN_WARNING "Array nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
			load->user = field->type.u.array.elem_type.u.basic.integer.user;
		} else {
			if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
				load->user = field->type.u.array.elem_type.u.basic.integer.user;
			}
		}
		break;
	case atype_sequence:
		if (field->type.u.sequence.elem_type.atype != atype_integer) {
			printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
			load->user = field->type.u.sequence.elem_type.u.basic.integer.user;
		} else {
			if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
				load->user = field->type.u.sequence.elem_type.u.basic.integer.user;
			}
		}
		break;
	case atype_array_bitfield:
		printk(KERN_WARNING "Bitfield array type is not supported.\n");
		return -EINVAL;
	case atype_sequence_bitfield:
		printk(KERN_WARNING "Bitfield sequence type is not supported.\n");
		return -EINVAL;
	case atype_string:
		load->object_type = OBJECT_TYPE_STRING;
		load->user = field->type.u.basic.string.user;
		break;
	case atype_struct:
		printk(KERN_WARNING "Structure type cannot be loaded.\n");
		return -EINVAL;
	default:
		printk(KERN_WARNING "Unknown type: %d\n", (int) field->type.atype);
		return -EINVAL;
	}
	return 0;
}

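/*
 * Specialize a get_symbol lookup on the context root: resolve the context
 * field by name, then rewrite the instruction into a
 * FILTER_OP_GET_INDEX_U16 referencing a descriptor carrying the context
 * index and element type.
 */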
static int specialize_context_lookup(struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	int idx, ret;
	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	idx = specialize_context_lookup_name(runtime, insn);
	if (idx < 0) {
		return -ENOENT;
	}
	ctx_field = &lttng_static_ctx->fields[idx];
	field = &ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		return ret;
	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	gid.elem.user = load->user;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		return -EINVAL;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	return 0;
}

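/*
 * Specialize a get_symbol lookup on the payload root: find the named field
 * in the event descriptor, compute its byte offset within the interpreter's
 * on-stack field layout, and rewrite the instruction into a
 * FILTER_OP_GET_INDEX_U16 referencing a descriptor for that field.
 */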
static int specialize_event_payload_lookup(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	const char *name;
	uint16_t offset;
	const struct lttng_event_desc *desc = event->desc;
	unsigned int i, nr_fields;
	bool found = false;
	uint32_t field_offset = 0;
	const struct lttng_event_field *field;
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	nr_fields = desc->nr_fields;
	offset = ((struct get_symbol *) insn->data)->offset;
	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	for (i = 0; i < nr_fields; i++) {
		field = &desc->fields[i];
		if (!strcmp(field->name, name)) {
			found = true;
			break;
		}
		/* compute field offset on stack */
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_sequence:
		case atype_array_bitfield:
		case atype_sequence_bitfield:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		default:
			ret = -EINVAL;
			goto end;
		}
	}
	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	ret = specialize_load_object(field, load, false);
	if (ret)
		goto end;

	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.offset = field_offset;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	gid.elem.user = load->user;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	return ret;
}

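/*
 * Single pass over the bytecode: track the register type at the top of the
 * virtual stack and rewrite generic, dynamically-typed instructions into
 * their statically-typed variants. For example, a filter such as
 * "count == 42" would have its generic FILTER_OP_EQ rewritten to
 * FILTER_OP_EQ_S64 once both operands are known to be 64-bit integers.
 * Returns 0 on success, a negative error code on invalid or unsupported
 * bytecode.
 */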
int lttng_filter_specialize_bytecode(struct lttng_event *event,
		struct bytecode_runtime *bytecode)
{
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	struct vstack _stack;
	struct vstack *stack = &_stack;

	vstack_init(stack);

	start_pc = &bytecode->code[0];
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
			pc = next_pc) {
		switch (*(filter_opcode_t *) pc) {
		case FILTER_OP_UNKNOWN:
		default:
			printk(KERN_WARNING "unknown bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case FILTER_OP_RETURN:
		case FILTER_OP_RETURN_S64:
			ret = 0;
			goto end;

		/* binary */
		case FILTER_OP_MUL:
		case FILTER_OP_DIV:
		case FILTER_OP_MOD:
		case FILTER_OP_PLUS:
		case FILTER_OP_MINUS:
			printk(KERN_WARNING "unsupported bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case FILTER_OP_EQ:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
				else
					insn->op = FILTER_OP_EQ_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_EQ_S64;
				else
					insn->op = FILTER_OP_EQ_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_EQ_S64_DOUBLE;
				else
					insn->op = FILTER_OP_EQ_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_NE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
				else
					insn->op = FILTER_OP_NE_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_NE_S64;
				else
					insn->op = FILTER_OP_NE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_NE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_NE_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_GT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for > binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_GT_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GT_S64;
				else
					insn->op = FILTER_OP_GT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GT_S64_DOUBLE;
				else
					insn->op = FILTER_OP_GT_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_LT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for < binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_LT_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LT_S64;
				else
					insn->op = FILTER_OP_LT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LT_S64_DOUBLE;
				else
					insn->op = FILTER_OP_LT_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_GE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for >= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_GE_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GE_S64;
				else
					insn->op = FILTER_OP_GE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_GE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_GE_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case FILTER_OP_LE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "invalid register type for <= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				insn->op = FILTER_OP_LE_STRING;
				break;
			case REG_S64:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LE_S64;
				else
					insn->op = FILTER_OP_LE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_S64)
					insn->op = FILTER_OP_LE_S64_DOUBLE;
				else
					insn->op = FILTER_OP_LE_DOUBLE;
				break;
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

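		/*
		 * Already-typed comparison and bitwise opcodes: only the
		 * stack effect (pop 2, push 1) needs to be tracked here.
		 */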
		case FILTER_OP_EQ_STRING:
		case FILTER_OP_NE_STRING:
		case FILTER_OP_GT_STRING:
		case FILTER_OP_LT_STRING:
		case FILTER_OP_GE_STRING:
		case FILTER_OP_LE_STRING:
		case FILTER_OP_EQ_STAR_GLOB_STRING:
		case FILTER_OP_NE_STAR_GLOB_STRING:
		case FILTER_OP_EQ_S64:
		case FILTER_OP_NE_S64:
		case FILTER_OP_GT_S64:
		case FILTER_OP_LT_S64:
		case FILTER_OP_GE_S64:
		case FILTER_OP_LE_S64:
		case FILTER_OP_EQ_DOUBLE:
		case FILTER_OP_NE_DOUBLE:
		case FILTER_OP_GT_DOUBLE:
		case FILTER_OP_LT_DOUBLE:
		case FILTER_OP_GE_DOUBLE:
		case FILTER_OP_LE_DOUBLE:
		case FILTER_OP_EQ_DOUBLE_S64:
		case FILTER_OP_NE_DOUBLE_S64:
		case FILTER_OP_GT_DOUBLE_S64:
		case FILTER_OP_LT_DOUBLE_S64:
		case FILTER_OP_GE_DOUBLE_S64:
		case FILTER_OP_LE_DOUBLE_S64:
		case FILTER_OP_EQ_S64_DOUBLE:
		case FILTER_OP_NE_S64_DOUBLE:
		case FILTER_OP_GT_S64_DOUBLE:
		case FILTER_OP_LT_S64_DOUBLE:
		case FILTER_OP_GE_S64_DOUBLE:
		case FILTER_OP_LE_S64_DOUBLE:
		case FILTER_OP_BIT_RSHIFT:
		case FILTER_OP_BIT_LSHIFT:
		case FILTER_OP_BIT_AND:
		case FILTER_OP_BIT_OR:
		case FILTER_OP_BIT_XOR:
		{
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		/* unary */
		case FILTER_OP_UNARY_PLUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_PLUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_MINUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_MINUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_NOT:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
				insn->op = FILTER_OP_UNARY_NOT_S64;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_BIT_NOT:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case FILTER_OP_UNARY_PLUS_S64:
		case FILTER_OP_UNARY_MINUS_S64:
		case FILTER_OP_UNARY_NOT_S64:
		case FILTER_OP_UNARY_PLUS_DOUBLE:
		case FILTER_OP_UNARY_MINUS_DOUBLE:
		case FILTER_OP_UNARY_NOT_DOUBLE:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		/* logical */
		case FILTER_OP_AND:
		case FILTER_OP_OR:
		{
			/* Continue to next instruction */
			/* Pop 1 when jump not taken */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			next_pc += sizeof(struct logical_op);
			break;
		}

		/* load field ref */
		case FILTER_OP_LOAD_FIELD_REF:
		{
			printk(KERN_WARNING "Unknown field ref type\n");
			ret = -EINVAL;
			goto end;
		}
		/* get context ref */
		case FILTER_OP_GET_CONTEXT_REF:
		{
			printk(KERN_WARNING "Unknown get context ref type\n");
			ret = -EINVAL;
			goto end;
		}
		case FILTER_OP_LOAD_FIELD_REF_STRING:
		case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
		case FILTER_OP_GET_CONTEXT_REF_STRING:
		case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
		case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_S64:
		case FILTER_OP_GET_CONTEXT_REF_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
		case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}

		/* load from immediate operand */
		case FILTER_OP_LOAD_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case FILTER_OP_LOAD_STAR_GLOB_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case FILTER_OP_LOAD_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_numeric);
			break;
		}

		case FILTER_OP_LOAD_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_double);
			break;
		}

		/* cast */
		case FILTER_OP_CAST_TO_S64:
		{
			struct cast_op *insn = (struct cast_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				printk(KERN_WARNING "unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
			case REG_STAR_GLOB_STRING:
				printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
				ret = -EINVAL;
				goto end;
			case REG_S64:
				insn->op = FILTER_OP_CAST_NOP;
				break;
			case REG_DOUBLE:
				insn->op = FILTER_OP_CAST_DOUBLE_TO_S64;
				break;
			}
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case FILTER_OP_CAST_DOUBLE_TO_S64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case FILTER_OP_CAST_NOP:
		{
			next_pc += sizeof(struct cast_op);
			break;
		}

		/*
		 * Instructions for recursive traversal through composed types.
		 */
		case FILTER_OP_GET_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case FILTER_OP_GET_APP_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case FILTER_OP_GET_PAYLOAD_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
			next_pc += sizeof(struct load_op);
			break;
		}

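		/*
		 * A typical field access chain is GET_*_ROOT, then GET_SYMBOL
		 * (rewritten by this pass into GET_INDEX_U16), then
		 * LOAD_FIELD (rewritten into a typed load) to fetch the
		 * value.
		 */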
		case FILTER_OP_LOAD_FIELD:
		{
			struct load_op *insn = (struct load_op *) pc;

			WARN_ON_ONCE(vstack_ax(stack)->type != REG_PTR);
			/* Pop 1, push 1 */
			ret = specialize_load_field(vstack_ax(stack), insn);
			if (ret)
				goto end;

			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_S8:
		case FILTER_OP_LOAD_FIELD_S16:
		case FILTER_OP_LOAD_FIELD_S32:
		case FILTER_OP_LOAD_FIELD_S64:
		case FILTER_OP_LOAD_FIELD_U8:
		case FILTER_OP_LOAD_FIELD_U16:
		case FILTER_OP_LOAD_FIELD_U32:
		case FILTER_OP_LOAD_FIELD_U64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_STRING:
		case FILTER_OP_LOAD_FIELD_SEQUENCE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_LOAD_FIELD_DOUBLE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op);
			break;
		}

		case FILTER_OP_GET_SYMBOL:
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printk("op get symbol\n");
			switch (vstack_ax(stack)->load.type) {
			case LOAD_OBJECT:
				printk(KERN_WARNING "Nested fields not implemented yet.\n");
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_CONTEXT:
				/* Lookup context field. */
				ret = specialize_context_lookup(bytecode, insn,
						&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_APP_CONTEXT:
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_PAYLOAD:
				/* Lookup event payload field. */
				ret = specialize_event_payload_lookup(event,
						bytecode, insn,
						&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			}
			next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
			break;
		}

		case FILTER_OP_GET_SYMBOL_FIELD:
		{
			/* Always generated by specialize phase. */
			ret = -EINVAL;
			goto end;
		}

		case FILTER_OP_GET_INDEX_U16:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u16 *index = (struct get_index_u16 *) insn->data;

			dbg_printk("op get index u16\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
			break;
		}

		case FILTER_OP_GET_INDEX_U64:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u64 *index = (struct get_index_u64 *) insn->data;

			dbg_printk("op get index u64\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
			break;
		}

		}
	}
end:
	return ret;
}