Add `lttng_bytecode_interpret_format_output()` for top of stack extraction
[lttng-modules.git] / src / lttng-filter-specialize.c
1 /* SPDX-License-Identifier: MIT
2 *
3 * lttng-filter-specialize.c
4 *
5 * LTTng modules filter code specializer.
6 *
7 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 #include <linux/slab.h>
11 #include <lttng/filter.h>
12 #include <lttng/align.h>
13
/*
 * Reserve @len bytes, aligned on @align, at the end of the runtime
 * data area, growing the backing allocation when needed.  Growth is
 * to the next power of two of the required length, and at least a
 * doubling of the previous allocation, to amortize reallocations.
 *
 * Returns the byte offset of the reserved region within runtime->data
 * on success, -EINVAL if the total length would exceed
 * FILTER_MAX_DATA_LEN, or -ENOMEM on allocation failure.
 */
static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
		size_t align, size_t len)
{
	ssize_t ret;
	size_t padding = offset_align(runtime->data_len, align);
	size_t new_len = runtime->data_len + padding + len;
	size_t new_alloc_len = new_len;
	size_t old_alloc_len = runtime->data_alloc_len;

	if (new_len > FILTER_MAX_DATA_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		char *newptr;

		new_alloc_len =
			max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = krealloc(runtime->data, new_alloc_len, GFP_KERNEL);
		if (!newptr)
			return -ENOMEM;
		runtime->data = newptr;
		/* We zero directly the memory from start of allocation. */
		memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		runtime->data_alloc_len = new_alloc_len;
	}
	/* Consume the alignment padding, then hand out the aligned offset. */
	runtime->data_len += padding;
	ret = runtime->data_len;
	runtime->data_len += len;
	return ret;
}
44
45 static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
46 const void *p, size_t align, size_t len)
47 {
48 ssize_t offset;
49
50 offset = bytecode_reserve_data(runtime, align, len);
51 if (offset < 0)
52 return -ENOMEM;
53 memcpy(&runtime->data[offset], p, len);
54 return offset;
55 }
56
/*
 * Specialize a generic FILTER_OP_LOAD_FIELD according to the object
 * type currently on top of the virtual stack: select the matching
 * typed load opcode in @insn and set the resulting register type in
 * @stack_top.
 *
 * Returns 0 on success, -EINVAL when the top of stack is a root
 * (field name not resolved yet) or an object type that cannot be
 * loaded directly (double, dynamic, nested types).
 */
static int specialize_load_field(struct vstack_entry *stack_top,
		struct load_op *insn)
{
	int ret;

	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printk("Filter warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	/*
	 * NOTE(review): for reverse-byte-order integers the generic
	 * FILTER_OP_LOAD_FIELD opcode is kept (no typed opcode assigned) —
	 * presumably the generic interpreter path handles the byte swap;
	 * confirm against the interpreter.
	 */
	switch (stack_top->load.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printk("op load field s8\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S8;
		break;
	case OBJECT_TYPE_S16:
		dbg_printk("op load field s16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S16;
		break;
	case OBJECT_TYPE_S32:
		dbg_printk("op load field s32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S32;
		break;
	case OBJECT_TYPE_S64:
		dbg_printk("op load field s64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S64;
		break;
	case OBJECT_TYPE_U8:
		dbg_printk("op load field u8\n");
		stack_top->type = REG_S64;
		/* Single byte: byte order is irrelevant. */
		insn->op = FILTER_OP_LOAD_FIELD_U8;
		break;
	case OBJECT_TYPE_U16:
		dbg_printk("op load field u16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U16;
		break;
	case OBJECT_TYPE_U32:
		dbg_printk("op load field u32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U32;
		break;
	case OBJECT_TYPE_U64:
		dbg_printk("op load field u64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U64;
		break;
	case OBJECT_TYPE_DOUBLE:
		/* Floating point is not supported by the kernel interpreter. */
		printk(KERN_WARNING "LTTng: filter: Double type unsupported\n\n");
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_STRING:
		dbg_printk("op load field string\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_STRING;
		break;
	case OBJECT_TYPE_STRING_SEQUENCE:
		dbg_printk("op load field string sequence\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
		break;
	case OBJECT_TYPE_DYNAMIC:
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		printk(KERN_WARNING "LTTng: filter: Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}
151
152 static int specialize_get_index_object_type(enum object_type *otype,
153 int signedness, uint32_t elem_len)
154 {
155 switch (elem_len) {
156 case 8:
157 if (signedness)
158 *otype = OBJECT_TYPE_S8;
159 else
160 *otype = OBJECT_TYPE_U8;
161 break;
162 case 16:
163 if (signedness)
164 *otype = OBJECT_TYPE_S16;
165 else
166 *otype = OBJECT_TYPE_U16;
167 break;
168 case 32:
169 if (signedness)
170 *otype = OBJECT_TYPE_S32;
171 else
172 *otype = OBJECT_TYPE_U32;
173 break;
174 case 64:
175 if (signedness)
176 *otype = OBJECT_TYPE_S64;
177 else
178 *otype = OBJECT_TYPE_U64;
179 break;
180 default:
181 return -EINVAL;
182 }
183 return 0;
184 }
185
/*
 * Specialize a get_index instruction applied to the object on top of
 * the virtual stack.  For arrays and sequences of byte-wise integers,
 * build a struct filter_get_index_data describing the element access
 * (byte offset, element type/length, byte order), push it into the
 * runtime data area, and patch the instruction's index operand to
 * reference it.  The stack top's object type is narrowed to the
 * element type.
 *
 * @idx_len selects the instruction encoding being patched:
 * sizeof(struct get_index_u16) or sizeof(struct get_index_u64).
 *
 * Returns 0 on success, -EINVAL on unsupported object types,
 * out-of-bounds constant index, or data-area overflow.
 */
static int specialize_get_index(struct bytecode_runtime *runtime,
		struct load_op *insn, uint64_t index,
		struct vstack_entry *stack_top,
		int idx_len)
{
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	memset(&gid, 0, sizeof(gid));
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		switch (stack_top->load.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const struct lttng_integer_type *integer_type;
			const struct lttng_event_field *field;
			uint32_t elem_len, num_elems;
			int signedness;

			field = stack_top->load.field;
			/* Only arrays of byte-wise integers can be indexed. */
			if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
				ret = -EINVAL;
				goto end;
			}
			integer_type = &field->type.u.array_nestable.elem_type->u.integer;
			num_elems = field->type.u.array_nestable.length;
			elem_len = integer_type->size;
			signedness = integer_type->signedness;
			/* Constant index: bounds-checked at specialization time. */
			if (index >= num_elems) {
				ret = -EINVAL;
				goto end;
			}
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			/* elem_len is in bits; offsets are in bytes. */
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.array_len = num_elems * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (integer_type->reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const struct lttng_integer_type *integer_type;
			const struct lttng_event_field *field;
			uint32_t elem_len;
			int signedness;

			field = stack_top->load.field;
			if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
				ret = -EINVAL;
				goto end;
			}
			integer_type = &field->type.u.sequence_nestable.elem_type->u.integer;
			elem_len = integer_type->size;
			signedness = integer_type->signedness;
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			/*
			 * Sequence length is only known at runtime: no static
			 * bounds check, and array_len is left at 0.
			 */
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (integer_type->reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			/* Only generated by the specialize phase. */
		case OBJECT_TYPE_VARIANT: /* Fall-through */
		default:
			printk(KERN_WARNING "LTTng: filter: Unexpected get index type %d",
				(int) stack_top->load.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
		printk(KERN_WARNING "LTTng: filter: Index lookup for root field not implemented yet.\n");
		ret = -EINVAL;
		goto end;
	}
	/* Store gid in the data area; the instruction references it by offset. */
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	switch (idx_len) {
	case 2:
		((struct get_index_u16 *) insn->data)->index = data_offset;
		break;
	case 8:
		((struct get_index_u64 *) insn->data)->index = data_offset;
		break;
	default:
		ret = -EINVAL;
		goto end;
	}

	return 0;

end:
	return ret;
}
299
300 static int specialize_context_lookup_name(struct lttng_ctx *ctx,
301 struct bytecode_runtime *bytecode,
302 struct load_op *insn)
303 {
304 uint16_t offset;
305 const char *name;
306
307 offset = ((struct get_symbol *) insn->data)->offset;
308 name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
309 return lttng_get_context_index(ctx, name);
310 }
311
/*
 * Map an event field's abstract type to a vstack load descriptor
 * (object type, byte order, backing field).  Context fields
 * (@is_context true) expose arrays and sequences as strings; payload
 * arrays/sequences of non-encoded integers keep their element field
 * for later index specialization.
 *
 * Returns 0 on success, -EINVAL for types that cannot be loaded
 * (non-integer array/sequence elements, struct, variant, unknown).
 */
static int specialize_load_object(const struct lttng_event_field *field,
		struct vstack_load *load, bool is_context)
{
	load->type = LOAD_OBJECT;

	switch (field->type.atype) {
	case atype_integer:
		/* Integers are widened to 64-bit on the interpreter stack. */
		if (field->type.u.integer.signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	case atype_enum_nestable:
	{
		/* Enums load as their integer container type. */
		const struct lttng_integer_type *itype =
			&field->type.u.enum_nestable.container_type->u.integer;

		if (itype->signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	}
	case atype_array_nestable:
		if (!lttng_is_bytewise_integer(field->type.u.array_nestable.elem_type)) {
			printk(KERN_WARNING "LTTng: filter Array nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				/* Character-encoded arrays behave as strings. */
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_sequence_nestable:
		if (!lttng_is_bytewise_integer(field->type.u.sequence_nestable.elem_type)) {
			printk(KERN_WARNING "LTTng: filter Sequence nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_string:
		load->object_type = OBJECT_TYPE_STRING;
		break;
	case atype_struct_nestable:
		printk(KERN_WARNING "LTTng: filter: Structure type cannot be loaded.\n");
		return -EINVAL;
	case atype_variant_nestable:
		printk(KERN_WARNING "LTTng: filter: Variant type cannot be loaded.\n");
		return -EINVAL;
	default:
		printk(KERN_WARNING "LTTng: filter: Unknown type: %d", (int) field->type.atype);
		return -EINVAL;
	}
	return 0;
}
384
/*
 * Specialize a get_symbol instruction resolving against the context:
 * look up the named context field, then rewrite the instruction into
 * a FILTER_OP_GET_INDEX_U16 whose filter_get_index_data (stored in
 * the runtime data area) carries the context index, element type and
 * field.
 *
 * Returns 0 on success, -ENOENT when the context field does not
 * exist, or a negative error code on specialization failure.
 */
static int specialize_context_lookup(struct lttng_ctx *ctx,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	int idx, ret;
	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	idx = specialize_context_lookup_name(ctx, runtime, insn);
	if (idx < 0) {
		return -ENOENT;
	}
	/*
	 * NOTE(review): the index is resolved against @ctx but the field
	 * descriptor is fetched from the global lttng_static_ctx — this
	 * assumes both describe the same field layout; confirm.
	 */
	ctx_field = &lttng_static_ctx->fields[idx];
	field = &ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		return ret;
	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	gid.field = field;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		return -EINVAL;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	return 0;
}
419
/*
 * Specialize a get_symbol instruction resolving against the event
 * payload: find the named field in @event_desc, compute its byte
 * offset in the interpreter's stack-frame layout (integers/enums take
 * sizeof(int64_t); arrays/sequences a length word plus a pointer;
 * strings a pointer), then rewrite the instruction into a
 * FILTER_OP_GET_INDEX_U16 referencing a filter_get_index_data record
 * pushed into the runtime data area.
 *
 * Returns 0 on success, -EINVAL when the field is unknown, filtered
 * out, of unsupported type, or on data-area overflow.
 */
static int specialize_payload_lookup(const struct lttng_event_desc *event_desc,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	const char *name;
	uint16_t offset;
	unsigned int i, nr_fields;
	bool found = false;
	uint32_t field_offset = 0;
	const struct lttng_event_field *field;
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	nr_fields = event_desc->nr_fields;
	/* The symbol offset indexes the field name in the relocation table. */
	offset = ((struct get_symbol *) insn->data)->offset;
	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	for (i = 0; i < nr_fields; i++) {
		field = &event_desc->fields[i];
		if (field->nofilter) {
			continue;
		}
		if (!strcmp(field->name, name)) {
			found = true;
			break;
		}
		/* compute field offset on stack */
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum_nestable:
			field_offset += sizeof(int64_t);
			break;
		case atype_array_nestable:
		case atype_sequence_nestable:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		default:
			ret = -EINVAL;
			goto end;
		}
	}
	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	ret = specialize_load_object(field, load, false);
	if (ret)
		goto end;

	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.offset = field_offset;
	gid.elem.type = load->object_type;
	gid.field = field;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	return ret;
}
492
493 int lttng_filter_specialize_bytecode(const struct lttng_event_desc *event_desc,
494 struct bytecode_runtime *bytecode)
495 {
496 void *pc, *next_pc, *start_pc;
497 int ret = -EINVAL;
498 struct vstack _stack;
499 struct vstack *stack = &_stack;
500 struct lttng_ctx *ctx = bytecode->p.ctx;
501
502 vstack_init(stack);
503
504 start_pc = &bytecode->code[0];
505 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
506 pc = next_pc) {
507 switch (*(filter_opcode_t *) pc) {
508 case FILTER_OP_UNKNOWN:
509 default:
510 printk(KERN_WARNING "LTTng: filter: unknown bytecode op %u\n",
511 (unsigned int) *(filter_opcode_t *) pc);
512 ret = -EINVAL;
513 goto end;
514
515 case FILTER_OP_RETURN:
516 case FILTER_OP_RETURN_S64:
517 ret = 0;
518 goto end;
519
520 /* binary */
521 case FILTER_OP_MUL:
522 case FILTER_OP_DIV:
523 case FILTER_OP_MOD:
524 case FILTER_OP_PLUS:
525 case FILTER_OP_MINUS:
526 printk(KERN_WARNING "LTTng: filter: unsupported bytecode op %u\n",
527 (unsigned int) *(filter_opcode_t *) pc);
528 ret = -EINVAL;
529 goto end;
530
531 case FILTER_OP_EQ:
532 {
533 struct binary_op *insn = (struct binary_op *) pc;
534
535 switch(vstack_ax(stack)->type) {
536 default:
537 printk(KERN_WARNING "LTTng: filter: unknown register type\n");
538 ret = -EINVAL;
539 goto end;
540
541 case REG_STRING:
542 if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
543 insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
544 else
545 insn->op = FILTER_OP_EQ_STRING;
546 break;
547 case REG_STAR_GLOB_STRING:
548 insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
549 break;
550 case REG_S64:
551 if (vstack_bx(stack)->type == REG_S64)
552 insn->op = FILTER_OP_EQ_S64;
553 else
554 insn->op = FILTER_OP_EQ_DOUBLE_S64;
555 break;
556 case REG_DOUBLE:
557 if (vstack_bx(stack)->type == REG_S64)
558 insn->op = FILTER_OP_EQ_S64_DOUBLE;
559 else
560 insn->op = FILTER_OP_EQ_DOUBLE;
561 break;
562 }
563 /* Pop 2, push 1 */
564 if (vstack_pop(stack)) {
565 ret = -EINVAL;
566 goto end;
567 }
568 vstack_ax(stack)->type = REG_S64;
569 next_pc += sizeof(struct binary_op);
570 break;
571 }
572
573 case FILTER_OP_NE:
574 {
575 struct binary_op *insn = (struct binary_op *) pc;
576
577 switch(vstack_ax(stack)->type) {
578 default:
579 printk(KERN_WARNING "LTTng: filter: unknown register type\n");
580 ret = -EINVAL;
581 goto end;
582
583 case REG_STRING:
584 if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
585 insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
586 else
587 insn->op = FILTER_OP_NE_STRING;
588 break;
589 case REG_STAR_GLOB_STRING:
590 insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
591 break;
592 case REG_S64:
593 if (vstack_bx(stack)->type == REG_S64)
594 insn->op = FILTER_OP_NE_S64;
595 else
596 insn->op = FILTER_OP_NE_DOUBLE_S64;
597 break;
598 case REG_DOUBLE:
599 if (vstack_bx(stack)->type == REG_S64)
600 insn->op = FILTER_OP_NE_S64_DOUBLE;
601 else
602 insn->op = FILTER_OP_NE_DOUBLE;
603 break;
604 }
605 /* Pop 2, push 1 */
606 if (vstack_pop(stack)) {
607 ret = -EINVAL;
608 goto end;
609 }
610 vstack_ax(stack)->type = REG_S64;
611 next_pc += sizeof(struct binary_op);
612 break;
613 }
614
615 case FILTER_OP_GT:
616 {
617 struct binary_op *insn = (struct binary_op *) pc;
618
619 switch(vstack_ax(stack)->type) {
620 default:
621 printk(KERN_WARNING "LTTng: filter: unknown register type\n");
622 ret = -EINVAL;
623 goto end;
624
625 case REG_STAR_GLOB_STRING:
626 printk(KERN_WARNING "LTTng: filter: invalid register type for '>' binary operator\n");
627 ret = -EINVAL;
628 goto end;
629 case REG_STRING:
630 insn->op = FILTER_OP_GT_STRING;
631 break;
632 case REG_S64:
633 if (vstack_bx(stack)->type == REG_S64)
634 insn->op = FILTER_OP_GT_S64;
635 else
636 insn->op = FILTER_OP_GT_DOUBLE_S64;
637 break;
638 case REG_DOUBLE:
639 if (vstack_bx(stack)->type == REG_S64)
640 insn->op = FILTER_OP_GT_S64_DOUBLE;
641 else
642 insn->op = FILTER_OP_GT_DOUBLE;
643 break;
644 }
645 /* Pop 2, push 1 */
646 if (vstack_pop(stack)) {
647 ret = -EINVAL;
648 goto end;
649 }
650 vstack_ax(stack)->type = REG_S64;
651 next_pc += sizeof(struct binary_op);
652 break;
653 }
654
655 case FILTER_OP_LT:
656 {
657 struct binary_op *insn = (struct binary_op *) pc;
658
659 switch(vstack_ax(stack)->type) {
660 default:
661 printk(KERN_WARNING "LTTng: filter: unknown register type\n");
662 ret = -EINVAL;
663 goto end;
664
665 case REG_STAR_GLOB_STRING:
666 printk(KERN_WARNING "LTTng: filter: invalid register type for '<' binary operator\n");
667 ret = -EINVAL;
668 goto end;
669 case REG_STRING:
670 insn->op = FILTER_OP_LT_STRING;
671 break;
672 case REG_S64:
673 if (vstack_bx(stack)->type == REG_S64)
674 insn->op = FILTER_OP_LT_S64;
675 else
676 insn->op = FILTER_OP_LT_DOUBLE_S64;
677 break;
678 case REG_DOUBLE:
679 if (vstack_bx(stack)->type == REG_S64)
680 insn->op = FILTER_OP_LT_S64_DOUBLE;
681 else
682 insn->op = FILTER_OP_LT_DOUBLE;
683 break;
684 }
685 /* Pop 2, push 1 */
686 if (vstack_pop(stack)) {
687 ret = -EINVAL;
688 goto end;
689 }
690 vstack_ax(stack)->type = REG_S64;
691 next_pc += sizeof(struct binary_op);
692 break;
693 }
694
695 case FILTER_OP_GE:
696 {
697 struct binary_op *insn = (struct binary_op *) pc;
698
699 switch(vstack_ax(stack)->type) {
700 default:
701 printk(KERN_WARNING "LTTng: filter: unknown register type\n");
702 ret = -EINVAL;
703 goto end;
704
705 case REG_STAR_GLOB_STRING:
706 printk(KERN_WARNING "LTTng: filter: invalid register type for '>=' binary operator\n");
707 ret = -EINVAL;
708 goto end;
709 case REG_STRING:
710 insn->op = FILTER_OP_GE_STRING;
711 break;
712 case REG_S64:
713 if (vstack_bx(stack)->type == REG_S64)
714 insn->op = FILTER_OP_GE_S64;
715 else
716 insn->op = FILTER_OP_GE_DOUBLE_S64;
717 break;
718 case REG_DOUBLE:
719 if (vstack_bx(stack)->type == REG_S64)
720 insn->op = FILTER_OP_GE_S64_DOUBLE;
721 else
722 insn->op = FILTER_OP_GE_DOUBLE;
723 break;
724 }
725 /* Pop 2, push 1 */
726 if (vstack_pop(stack)) {
727 ret = -EINVAL;
728 goto end;
729 }
730 vstack_ax(stack)->type = REG_S64;
731 next_pc += sizeof(struct binary_op);
732 break;
733 }
734 case FILTER_OP_LE:
735 {
736 struct binary_op *insn = (struct binary_op *) pc;
737
738 switch(vstack_ax(stack)->type) {
739 default:
740 printk(KERN_WARNING "LTTng: filter: unknown register type\n");
741 ret = -EINVAL;
742 goto end;
743
744 case REG_STAR_GLOB_STRING:
745 printk(KERN_WARNING "LTTng: filter: invalid register type for '<=' binary operator\n");
746 ret = -EINVAL;
747 goto end;
748 case REG_STRING:
749 insn->op = FILTER_OP_LE_STRING;
750 break;
751 case REG_S64:
752 if (vstack_bx(stack)->type == REG_S64)
753 insn->op = FILTER_OP_LE_S64;
754 else
755 insn->op = FILTER_OP_LE_DOUBLE_S64;
756 break;
757 case REG_DOUBLE:
758 if (vstack_bx(stack)->type == REG_S64)
759 insn->op = FILTER_OP_LE_S64_DOUBLE;
760 else
761 insn->op = FILTER_OP_LE_DOUBLE;
762 break;
763 }
764 vstack_ax(stack)->type = REG_S64;
765 next_pc += sizeof(struct binary_op);
766 break;
767 }
768
769 case FILTER_OP_EQ_STRING:
770 case FILTER_OP_NE_STRING:
771 case FILTER_OP_GT_STRING:
772 case FILTER_OP_LT_STRING:
773 case FILTER_OP_GE_STRING:
774 case FILTER_OP_LE_STRING:
775 case FILTER_OP_EQ_STAR_GLOB_STRING:
776 case FILTER_OP_NE_STAR_GLOB_STRING:
777 case FILTER_OP_EQ_S64:
778 case FILTER_OP_NE_S64:
779 case FILTER_OP_GT_S64:
780 case FILTER_OP_LT_S64:
781 case FILTER_OP_GE_S64:
782 case FILTER_OP_LE_S64:
783 case FILTER_OP_EQ_DOUBLE:
784 case FILTER_OP_NE_DOUBLE:
785 case FILTER_OP_GT_DOUBLE:
786 case FILTER_OP_LT_DOUBLE:
787 case FILTER_OP_GE_DOUBLE:
788 case FILTER_OP_LE_DOUBLE:
789 case FILTER_OP_EQ_DOUBLE_S64:
790 case FILTER_OP_NE_DOUBLE_S64:
791 case FILTER_OP_GT_DOUBLE_S64:
792 case FILTER_OP_LT_DOUBLE_S64:
793 case FILTER_OP_GE_DOUBLE_S64:
794 case FILTER_OP_LE_DOUBLE_S64:
795 case FILTER_OP_EQ_S64_DOUBLE:
796 case FILTER_OP_NE_S64_DOUBLE:
797 case FILTER_OP_GT_S64_DOUBLE:
798 case FILTER_OP_LT_S64_DOUBLE:
799 case FILTER_OP_GE_S64_DOUBLE:
800 case FILTER_OP_LE_S64_DOUBLE:
801 case FILTER_OP_BIT_RSHIFT:
802 case FILTER_OP_BIT_LSHIFT:
803 case FILTER_OP_BIT_AND:
804 case FILTER_OP_BIT_OR:
805 case FILTER_OP_BIT_XOR:
806 {
807 /* Pop 2, push 1 */
808 if (vstack_pop(stack)) {
809 ret = -EINVAL;
810 goto end;
811 }
812 vstack_ax(stack)->type = REG_S64;
813 next_pc += sizeof(struct binary_op);
814 break;
815 }
816
817 /* unary */
818 case FILTER_OP_UNARY_PLUS:
819 {
820 struct unary_op *insn = (struct unary_op *) pc;
821
822 switch(vstack_ax(stack)->type) {
823 default:
824 printk(KERN_WARNING "LTTng: filter: unknown register type\n");
825 ret = -EINVAL;
826 goto end;
827
828 case REG_S64:
829 insn->op = FILTER_OP_UNARY_PLUS_S64;
830 break;
831 case REG_DOUBLE:
832 insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
833 break;
834 }
835 /* Pop 1, push 1 */
836 next_pc += sizeof(struct unary_op);
837 break;
838 }
839
840 case FILTER_OP_UNARY_MINUS:
841 {
842 struct unary_op *insn = (struct unary_op *) pc;
843
844 switch(vstack_ax(stack)->type) {
845 default:
846 printk(KERN_WARNING "LTTng: filter: unknown register type\n");
847 ret = -EINVAL;
848 goto end;
849
850 case REG_S64:
851 insn->op = FILTER_OP_UNARY_MINUS_S64;
852 break;
853 case REG_DOUBLE:
854 insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
855 break;
856 }
857 /* Pop 1, push 1 */
858 next_pc += sizeof(struct unary_op);
859 break;
860 }
861
862 case FILTER_OP_UNARY_NOT:
863 {
864 struct unary_op *insn = (struct unary_op *) pc;
865
866 switch(vstack_ax(stack)->type) {
867 default:
868 printk(KERN_WARNING "LTTng: filter: unknown register type\n");
869 ret = -EINVAL;
870 goto end;
871
872 case REG_S64:
873 insn->op = FILTER_OP_UNARY_NOT_S64;
874 break;
875 case REG_DOUBLE:
876 insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
877 break;
878 }
879 /* Pop 1, push 1 */
880 next_pc += sizeof(struct unary_op);
881 break;
882 }
883
884 case FILTER_OP_UNARY_BIT_NOT:
885 {
886 /* Pop 1, push 1 */
887 next_pc += sizeof(struct unary_op);
888 break;
889 }
890
891 case FILTER_OP_UNARY_PLUS_S64:
892 case FILTER_OP_UNARY_MINUS_S64:
893 case FILTER_OP_UNARY_NOT_S64:
894 case FILTER_OP_UNARY_PLUS_DOUBLE:
895 case FILTER_OP_UNARY_MINUS_DOUBLE:
896 case FILTER_OP_UNARY_NOT_DOUBLE:
897 {
898 /* Pop 1, push 1 */
899 next_pc += sizeof(struct unary_op);
900 break;
901 }
902
903 /* logical */
904 case FILTER_OP_AND:
905 case FILTER_OP_OR:
906 {
907 /* Continue to next instruction */
908 /* Pop 1 when jump not taken */
909 if (vstack_pop(stack)) {
910 ret = -EINVAL;
911 goto end;
912 }
913 next_pc += sizeof(struct logical_op);
914 break;
915 }
916
917 /* load field ref */
918 case FILTER_OP_LOAD_FIELD_REF:
919 {
920 printk(KERN_WARNING "LTTng: filter: Unknown field ref type\n");
921 ret = -EINVAL;
922 goto end;
923 }
924 /* get context ref */
925 case FILTER_OP_GET_CONTEXT_REF:
926 {
927 printk(KERN_WARNING "LTTng: filter: Unknown get context ref type\n");
928 ret = -EINVAL;
929 goto end;
930 }
931 case FILTER_OP_LOAD_FIELD_REF_STRING:
932 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
933 case FILTER_OP_GET_CONTEXT_REF_STRING:
934 case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
935 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
936 {
937 if (vstack_push(stack)) {
938 ret = -EINVAL;
939 goto end;
940 }
941 vstack_ax(stack)->type = REG_STRING;
942 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
943 break;
944 }
945 case FILTER_OP_LOAD_FIELD_REF_S64:
946 case FILTER_OP_GET_CONTEXT_REF_S64:
947 {
948 if (vstack_push(stack)) {
949 ret = -EINVAL;
950 goto end;
951 }
952 vstack_ax(stack)->type = REG_S64;
953 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
954 break;
955 }
956 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
957 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
958 {
959 if (vstack_push(stack)) {
960 ret = -EINVAL;
961 goto end;
962 }
963 vstack_ax(stack)->type = REG_DOUBLE;
964 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
965 break;
966 }
967
968 /* load from immediate operand */
969 case FILTER_OP_LOAD_STRING:
970 {
971 struct load_op *insn = (struct load_op *) pc;
972
973 if (vstack_push(stack)) {
974 ret = -EINVAL;
975 goto end;
976 }
977 vstack_ax(stack)->type = REG_STRING;
978 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
979 break;
980 }
981
982 case FILTER_OP_LOAD_STAR_GLOB_STRING:
983 {
984 struct load_op *insn = (struct load_op *) pc;
985
986 if (vstack_push(stack)) {
987 ret = -EINVAL;
988 goto end;
989 }
990 vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
991 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
992 break;
993 }
994
995 case FILTER_OP_LOAD_S64:
996 {
997 if (vstack_push(stack)) {
998 ret = -EINVAL;
999 goto end;
1000 }
1001 vstack_ax(stack)->type = REG_S64;
1002 next_pc += sizeof(struct load_op)
1003 + sizeof(struct literal_numeric);
1004 break;
1005 }
1006
1007 case FILTER_OP_LOAD_DOUBLE:
1008 {
1009 if (vstack_push(stack)) {
1010 ret = -EINVAL;
1011 goto end;
1012 }
1013 vstack_ax(stack)->type = REG_DOUBLE;
1014 next_pc += sizeof(struct load_op)
1015 + sizeof(struct literal_double);
1016 break;
1017 }
1018
1019 /* cast */
1020 case FILTER_OP_CAST_TO_S64:
1021 {
1022 struct cast_op *insn = (struct cast_op *) pc;
1023
1024 switch (vstack_ax(stack)->type) {
1025 default:
1026 printk(KERN_WARNING "LTTng: filter: unknown register type\n");
1027 ret = -EINVAL;
1028 goto end;
1029
1030 case REG_STRING:
1031 case REG_STAR_GLOB_STRING:
1032 printk(KERN_WARNING "LTTng: filter: Cast op can only be applied to numeric or floating point registers\n");
1033 ret = -EINVAL;
1034 goto end;
1035 case REG_S64:
1036 insn->op = FILTER_OP_CAST_NOP;
1037 break;
1038 case REG_DOUBLE:
1039 insn->op = FILTER_OP_CAST_DOUBLE_TO_S64;
1040 break;
1041 }
1042 /* Pop 1, push 1 */
1043 vstack_ax(stack)->type = REG_S64;
1044 next_pc += sizeof(struct cast_op);
1045 break;
1046 }
1047 case FILTER_OP_CAST_DOUBLE_TO_S64:
1048 {
1049 /* Pop 1, push 1 */
1050 vstack_ax(stack)->type = REG_S64;
1051 next_pc += sizeof(struct cast_op);
1052 break;
1053 }
1054 case FILTER_OP_CAST_NOP:
1055 {
1056 next_pc += sizeof(struct cast_op);
1057 break;
1058 }
1059
1060 /*
1061 * Instructions for recursive traversal through composed types.
1062 */
1063 case FILTER_OP_GET_CONTEXT_ROOT:
1064 {
1065 if (vstack_push(stack)) {
1066 ret = -EINVAL;
1067 goto end;
1068 }
1069 vstack_ax(stack)->type = REG_PTR;
1070 vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
1071 next_pc += sizeof(struct load_op);
1072 break;
1073 }
1074 case FILTER_OP_GET_APP_CONTEXT_ROOT:
1075 {
1076 if (vstack_push(stack)) {
1077 ret = -EINVAL;
1078 goto end;
1079 }
1080 vstack_ax(stack)->type = REG_PTR;
1081 vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
1082 next_pc += sizeof(struct load_op);
1083 break;
1084 }
1085 case FILTER_OP_GET_PAYLOAD_ROOT:
1086 {
1087 if (vstack_push(stack)) {
1088 ret = -EINVAL;
1089 goto end;
1090 }
1091 vstack_ax(stack)->type = REG_PTR;
1092 vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
1093 next_pc += sizeof(struct load_op);
1094 break;
1095 }
1096
1097 case FILTER_OP_LOAD_FIELD:
1098 {
1099 struct load_op *insn = (struct load_op *) pc;
1100
1101 WARN_ON_ONCE(vstack_ax(stack)->type != REG_PTR);
1102 /* Pop 1, push 1 */
1103 ret = specialize_load_field(vstack_ax(stack), insn);
1104 if (ret)
1105 goto end;
1106
1107 next_pc += sizeof(struct load_op);
1108 break;
1109 }
1110
1111 case FILTER_OP_LOAD_FIELD_S8:
1112 case FILTER_OP_LOAD_FIELD_S16:
1113 case FILTER_OP_LOAD_FIELD_S32:
1114 case FILTER_OP_LOAD_FIELD_S64:
1115 case FILTER_OP_LOAD_FIELD_U8:
1116 case FILTER_OP_LOAD_FIELD_U16:
1117 case FILTER_OP_LOAD_FIELD_U32:
1118 case FILTER_OP_LOAD_FIELD_U64:
1119 {
1120 /* Pop 1, push 1 */
1121 vstack_ax(stack)->type = REG_S64;
1122 next_pc += sizeof(struct load_op);
1123 break;
1124 }
1125
1126 case FILTER_OP_LOAD_FIELD_STRING:
1127 case FILTER_OP_LOAD_FIELD_SEQUENCE:
1128 {
1129 /* Pop 1, push 1 */
1130 vstack_ax(stack)->type = REG_STRING;
1131 next_pc += sizeof(struct load_op);
1132 break;
1133 }
1134
1135 case FILTER_OP_LOAD_FIELD_DOUBLE:
1136 {
1137 /* Pop 1, push 1 */
1138 vstack_ax(stack)->type = REG_DOUBLE;
1139 next_pc += sizeof(struct load_op);
1140 break;
1141 }
1142
1143 case FILTER_OP_GET_SYMBOL:
1144 {
1145 struct load_op *insn = (struct load_op *) pc;
1146
1147 dbg_printk("op get symbol\n");
1148 switch (vstack_ax(stack)->load.type) {
1149 case LOAD_OBJECT:
1150 printk(KERN_WARNING "LTTng: filter: Nested fields not implemented yet.\n");
1151 ret = -EINVAL;
1152 goto end;
1153 case LOAD_ROOT_CONTEXT:
1154 /* Lookup context field. */
1155 ret = specialize_context_lookup(ctx, bytecode, insn,
1156 &vstack_ax(stack)->load);
1157 if (ret)
1158 goto end;
1159 break;
1160 case LOAD_ROOT_APP_CONTEXT:
1161 ret = -EINVAL;
1162 goto end;
1163 case LOAD_ROOT_PAYLOAD:
1164 /* Lookup event payload field. */
1165 ret = specialize_payload_lookup(event_desc,
1166 bytecode, insn,
1167 &vstack_ax(stack)->load);
1168 if (ret)
1169 goto end;
1170 break;
1171 }
1172 next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
1173 break;
1174 }
1175
1176 case FILTER_OP_GET_SYMBOL_FIELD:
1177 {
1178 /* Always generated by specialize phase. */
1179 ret = -EINVAL;
1180 goto end;
1181 }
1182
1183 case FILTER_OP_GET_INDEX_U16:
1184 {
1185 struct load_op *insn = (struct load_op *) pc;
1186 struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
1187
1188 dbg_printk("op get index u16\n");
1189 /* Pop 1, push 1 */
1190 ret = specialize_get_index(bytecode, insn, index->index,
1191 vstack_ax(stack), sizeof(*index));
1192 if (ret)
1193 goto end;
1194 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
1195 break;
1196 }
1197
1198 case FILTER_OP_GET_INDEX_U64:
1199 {
1200 struct load_op *insn = (struct load_op *) pc;
1201 struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
1202
1203 dbg_printk("op get index u64\n");
1204 /* Pop 1, push 1 */
1205 ret = specialize_get_index(bytecode, insn, index->index,
1206 vstack_ax(stack), sizeof(*index));
1207 if (ret)
1208 goto end;
1209 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
1210 break;
1211 }
1212
1213 }
1214 }
1215 end:
1216 return ret;
1217 }
This page took 0.084002 seconds and 4 git commands to generate.