21e35923859e37f7a3a7a4e68aaa14cc7b4470c0
[lttng-ust.git] / liblttng-ust / lttng-filter-specialize.c
1 /*
2 * lttng-filter-specialize.c
3 *
4 * LTTng UST filter code specializer.
5 *
6 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 * SOFTWARE.
25 */
26
27 #define _LGPL_SOURCE
28 #include <stddef.h>
29
30 #include "lttng-filter.h"
31 #include <lttng/align.h>
32
/*
 * Find last (most significant) bit set, 1-based.
 * Returns 0 when no bit is set, 32 when the top bit is set.
 */
static int lttng_fls(int val)
{
	unsigned int v = (unsigned int) val;
	int pos = 32;
	int shift;

	if (!v)
		return 0;
	/*
	 * Binary search for the highest set bit: at each step, if the
	 * top `shift` bits are all clear, shift them out and lower the
	 * candidate position accordingly.
	 */
	for (shift = 16; shift >= 1; shift >>= 1) {
		if (!(v & (~0U << (32 - shift)))) {
			v <<= shift;
			pos -= shift;
		}
	}
	return pos;
}
61
/*
 * Compute ceil(log2(count)): the power-of-two order large enough to
 * hold `count` elements. Note: returns -1 when count == 0.
 */
static int get_count_order(unsigned int count)
{
	int order = lttng_fls(count) - 1;

	/* Round up when count is not an exact power of two. */
	return (count & (count - 1)) ? order + 1 : order;
}
71
72 static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
73 size_t align, size_t len)
74 {
75 ssize_t ret;
76 size_t padding = offset_align(runtime->data_len, align);
77 size_t new_len = runtime->data_len + padding + len;
78 size_t new_alloc_len = new_len;
79 size_t old_alloc_len = runtime->data_alloc_len;
80
81 if (new_len > FILTER_MAX_DATA_LEN)
82 return -EINVAL;
83
84 if (new_alloc_len > old_alloc_len) {
85 char *newptr;
86
87 new_alloc_len =
88 max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
89 newptr = realloc(runtime->data, new_alloc_len);
90 if (!newptr)
91 return -ENOMEM;
92 runtime->data = newptr;
93 /* We zero directly the memory from start of allocation. */
94 memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
95 runtime->data_alloc_len = new_alloc_len;
96 }
97 runtime->data_len += padding;
98 ret = runtime->data_len;
99 runtime->data_len += len;
100 return ret;
101 }
102
103 static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
104 const void *p, size_t align, size_t len)
105 {
106 ssize_t offset;
107
108 offset = bytecode_reserve_data(runtime, align, len);
109 if (offset < 0)
110 return -ENOMEM;
111 memcpy(&runtime->data[offset], p, len);
112 return offset;
113 }
114
/*
 * Specialize a FILTER_OP_LOAD_FIELD instruction based on the object
 * type currently on the virtual stack top, rewriting the opcode to the
 * type-specific variant and updating the stack top's register type.
 *
 * The stack top must describe a LOAD_OBJECT; loading directly from a
 * root (context/app-context/payload) without a field name is an error.
 *
 * Returns 0 on success, -EINVAL on error.
 */
static int specialize_load_field(struct vstack_entry *stack_top,
		struct load_op *insn)
{
	int ret;

	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printf("Filter warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	switch (stack_top->load.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printf("op load field s8\n");
		stack_top->type = REG_S64;
		/* Keep the generic opcode when a byte swap is needed. */
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S8;
		break;
	case OBJECT_TYPE_S16:
		dbg_printf("op load field s16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S16;
		break;
	case OBJECT_TYPE_S32:
		dbg_printf("op load field s32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S32;
		break;
	case OBJECT_TYPE_S64:
		dbg_printf("op load field s64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_S64;
		break;
	case OBJECT_TYPE_U8:
		dbg_printf("op load field u8\n");
		stack_top->type = REG_S64;
		/* Single byte: no byte-order check needed. */
		insn->op = FILTER_OP_LOAD_FIELD_U8;
		break;
	case OBJECT_TYPE_U16:
		dbg_printf("op load field u16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U16;
		break;
	case OBJECT_TYPE_U32:
		dbg_printf("op load field u32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U32;
		break;
	case OBJECT_TYPE_U64:
		dbg_printf("op load field u64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = FILTER_OP_LOAD_FIELD_U64;
		break;
	case OBJECT_TYPE_DOUBLE:
		stack_top->type = REG_DOUBLE;
		insn->op = FILTER_OP_LOAD_FIELD_DOUBLE;
		break;
	case OBJECT_TYPE_STRING:
		dbg_printf("op load field string\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_STRING;
		break;
	case OBJECT_TYPE_STRING_SEQUENCE:
		dbg_printf("op load field string sequence\n");
		stack_top->type = REG_STRING;
		insn->op = FILTER_OP_LOAD_FIELD_SEQUENCE;
		break;
	case OBJECT_TYPE_DYNAMIC:
		dbg_printf("op load field dynamic\n");
		/* Type only known at runtime: leave the opcode generic. */
		stack_top->type = REG_UNKNOWN;
		/* Don't specialize load op. */
		break;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		ERR("Sequences, arrays, struct and variant cannot be loaded (nested types).");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}
211
212 static int specialize_get_index_object_type(enum object_type *otype,
213 int signedness, uint32_t elem_len)
214 {
215 switch (elem_len) {
216 case 8:
217 if (signedness)
218 *otype = OBJECT_TYPE_S8;
219 else
220 *otype = OBJECT_TYPE_U8;
221 break;
222 case 16:
223 if (signedness)
224 *otype = OBJECT_TYPE_S16;
225 else
226 *otype = OBJECT_TYPE_U16;
227 break;
228 case 32:
229 if (signedness)
230 *otype = OBJECT_TYPE_S32;
231 else
232 *otype = OBJECT_TYPE_U32;
233 break;
234 case 64:
235 if (signedness)
236 *otype = OBJECT_TYPE_S64;
237 else
238 *otype = OBJECT_TYPE_U64;
239 break;
240 default:
241 return -EINVAL;
242 }
243 return 0;
244 }
245
/*
 * Specialize a get_index instruction indexing into the object on the
 * virtual stack top (integer array or sequence). A struct
 * filter_get_index_data describing the access is pushed into the
 * runtime data area, and its offset is stored into the instruction's
 * index operand (u16 or u64 depending on idx_len).
 *
 * Returns 0 on success, -EINVAL on error.
 */
static int specialize_get_index(struct bytecode_runtime *runtime,
		struct load_op *insn, uint64_t index,
		struct vstack_entry *stack_top,
		int idx_len)
{
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	memset(&gid, 0, sizeof(gid));
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		switch (stack_top->load.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len, num_elems;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.array.elem_type.u.basic.integer.size;
			signedness = field->type.u.array.elem_type.u.basic.integer.signedness;
			num_elems = field->type.u.array.length;
			/* Array length is static: validate index now. */
			if (index >= num_elems) {
				ret = -EINVAL;
				goto end;
			}
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			/* Byte offset of the element; elem_len is in bits. */
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.array_len = num_elems * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (field->type.u.array.elem_type.u.basic.integer.reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const struct lttng_event_field *field;
			uint32_t elem_len;
			int signedness;

			field = stack_top->load.field;
			elem_len = field->type.u.sequence.elem_type.u.basic.integer.size;
			signedness = field->type.u.sequence.elem_type.u.basic.integer.signedness;
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			/*
			 * Sequence length is only known at runtime: no
			 * array_len recorded here, no static bound check.
			 */
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (field->type.u.sequence.elem_type.u.basic.integer.reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			/* Only generated by the specialize phase. */
		case OBJECT_TYPE_VARIANT:	/* Fall-through */
		default:
			ERR("Unexpected get index type %d",
				(int) stack_top->load.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
		ERR("Index lookup for root field not implemented yet.");
		ret = -EINVAL;
		goto end;
	}
	/* Store the access descriptor in the runtime data area. */
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	switch (idx_len) {
	case 2:
		((struct get_index_u16 *) insn->data)->index = data_offset;
		break;
	case 8:
		((struct get_index_u64 *) insn->data)->index = data_offset;
		break;
	default:
		ret = -EINVAL;
		goto end;
	}

	return 0;

end:
	return ret;
}
347
348 static int specialize_context_lookup_name(struct lttng_ctx *ctx,
349 struct bytecode_runtime *bytecode,
350 struct load_op *insn)
351 {
352 uint16_t offset;
353 const char *name;
354
355 offset = ((struct get_symbol *) insn->data)->offset;
356 name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
357 return lttng_get_context_index(ctx, name);
358 }
359
/*
 * Translate an event/context field type into the virtual-stack object
 * type that a subsequent FILTER_OP_LOAD_FIELD will load.
 *
 * @field: field descriptor.
 * @load: output load state on the virtual stack.
 * @is_context: true for context fields; context arrays and sequences
 * are exposed as strings.
 *
 * Returns 0 on success, -EINVAL for unloadable or unknown types.
 */
static int specialize_load_object(const struct lttng_event_field *field,
		struct vstack_load *load, bool is_context)
{
	load->type = LOAD_OBJECT;
	/*
	 * LTTng-UST layout all integer fields as s64 on the stack for the filter.
	 */
	switch (field->type.atype) {
	case atype_integer:
		if (field->type.u.basic.integer.signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	case atype_enum:
	{
		/* Enumerations are loaded as their integer container type. */
		const struct lttng_integer_type *itype =
			&field->type.u.basic.enumeration.container_type;

		if (itype->signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	}
	case atype_array:
		if (field->type.u.array.elem_type.atype != atype_integer) {
			ERR("Array nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			/*
			 * Unencoded integer arrays stay arrays; encoded
			 * (text) arrays are handled as string sequences.
			 */
			if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_sequence:
		if (field->type.u.sequence.elem_type.atype != atype_integer) {
			ERR("Sequence nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			/* Same encoding distinction as arrays above. */
			if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_string:
		load->object_type = OBJECT_TYPE_STRING;
		break;
	case atype_float:
		load->object_type = OBJECT_TYPE_DOUBLE;
		break;
	case atype_dynamic:
		load->object_type = OBJECT_TYPE_DYNAMIC;
		break;
	case atype_struct:
		ERR("Structure type cannot be loaded.");
		return -EINVAL;
	default:
		ERR("Unknown type: %d", (int) field->type.atype);
		return -EINVAL;
	}
	return 0;
}
437
438 static int specialize_context_lookup(struct lttng_session *session,
439 struct bytecode_runtime *runtime,
440 struct load_op *insn,
441 struct vstack_load *load)
442 {
443 int idx, ret;
444 struct lttng_ctx_field *ctx_field;
445 struct lttng_event_field *field;
446 struct filter_get_index_data gid;
447 ssize_t data_offset;
448
449 idx = specialize_context_lookup_name(session->ctx, runtime, insn);
450 if (idx < 0) {
451 return -ENOENT;
452 }
453 ctx_field = &session->ctx->fields[idx];
454 field = &ctx_field->event_field;
455 ret = specialize_load_object(field, load, true);
456 if (ret)
457 return ret;
458 /* Specialize each get_symbol into a get_index. */
459 insn->op = FILTER_OP_GET_INDEX_U16;
460 memset(&gid, 0, sizeof(gid));
461 gid.ctx_index = idx;
462 gid.elem.type = load->object_type;
463 data_offset = bytecode_push_data(runtime, &gid,
464 __alignof__(gid), sizeof(gid));
465 if (data_offset < 0) {
466 return -EINVAL;
467 }
468 ((struct get_index_u16 *) insn->data)->index = data_offset;
469 return 0;
470 }
471
472 static int specialize_app_context_lookup(struct lttng_session *session,
473 struct bytecode_runtime *runtime,
474 struct load_op *insn,
475 struct vstack_load *load)
476 {
477 uint16_t offset;
478 const char *orig_name;
479 char *name = NULL;
480 int idx, ret;
481 struct lttng_ctx_field *ctx_field;
482 struct lttng_event_field *field;
483 struct filter_get_index_data gid;
484 ssize_t data_offset;
485
486 offset = ((struct get_symbol *) insn->data)->offset;
487 orig_name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
488 name = zmalloc(strlen(orig_name) + strlen("$app.") + 1);
489 if (!name) {
490 ret = -ENOMEM;
491 goto end;
492 }
493 strcpy(name, "$app.");
494 strcat(name, orig_name);
495 idx = lttng_get_context_index(session->ctx, name);
496 if (idx < 0) {
497 assert(lttng_context_is_app(name));
498 ret = lttng_ust_add_app_context_to_ctx_rcu(name,
499 &session->ctx);
500 if (ret)
501 return ret;
502 idx = lttng_get_context_index(session->ctx,
503 name);
504 if (idx < 0)
505 return -ENOENT;
506 }
507 ctx_field = &session->ctx->fields[idx];
508 field = &ctx_field->event_field;
509 ret = specialize_load_object(field, load, true);
510 if (ret)
511 goto end;
512 /* Specialize each get_symbol into a get_index. */
513 insn->op = FILTER_OP_GET_INDEX_U16;
514 memset(&gid, 0, sizeof(gid));
515 gid.ctx_index = idx;
516 gid.elem.type = load->object_type;
517 data_offset = bytecode_push_data(runtime, &gid,
518 __alignof__(gid), sizeof(gid));
519 if (data_offset < 0) {
520 ret = -EINVAL;
521 goto end;
522 }
523 ((struct get_index_u16 *) insn->data)->index = data_offset;
524 ret = 0;
525 end:
526 free(name);
527 return ret;
528 }
529
/*
 * Specialize a get_symbol lookup into the event payload: find the named
 * field in the event descriptor, compute its byte offset within the
 * interpreter's payload layout, and rewrite the instruction into a
 * FILTER_OP_GET_INDEX_U16 referencing a filter_get_index_data pushed
 * into the runtime data area.
 *
 * Returns 0 on success, -EINVAL on error (unknown field or type).
 */
static int specialize_event_payload_lookup(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	const char *name;
	uint16_t offset;
	const struct lttng_event_desc *desc = event->desc;
	unsigned int i, nr_fields;
	bool found = false;
	uint32_t field_offset = 0;
	const struct lttng_event_field *field;
	int ret;
	struct filter_get_index_data gid;
	ssize_t data_offset;

	nr_fields = desc->nr_fields;
	offset = ((struct get_symbol *) insn->data)->offset;
	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	for (i = 0; i < nr_fields; i++) {
		field = &desc->fields[i];
		if (!strcmp(field->name, name)) {
			found = true;
			break;
		}
		/*
		 * compute field offset on stack: accumulate the size each
		 * preceding field occupies in the payload layout.
		 */
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_sequence:
			/* Length word followed by a data pointer. */
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		case atype_float:
			field_offset += sizeof(double);
			break;
		default:
			/* Cannot compute offsets past an unsupported type. */
			ret = -EINVAL;
			goto end;
		}
	}
	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	ret = specialize_load_object(field, load, false);
	if (ret)
		goto end;

	/* Specialize each get_symbol into a get_index. */
	insn->op = FILTER_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.offset = field_offset;
	gid.elem.type = load->object_type;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	return ret;
}
602
603 int lttng_filter_specialize_bytecode(struct lttng_event *event,
604 struct bytecode_runtime *bytecode)
605 {
606 void *pc, *next_pc, *start_pc;
607 int ret = -EINVAL;
608 struct vstack _stack;
609 struct vstack *stack = &_stack;
610 struct lttng_session *session = bytecode->p.session;
611
612 vstack_init(stack);
613
614 start_pc = &bytecode->code[0];
615 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
616 pc = next_pc) {
617 switch (*(filter_opcode_t *) pc) {
618 case FILTER_OP_UNKNOWN:
619 default:
620 ERR("unknown bytecode op %u\n",
621 (unsigned int) *(filter_opcode_t *) pc);
622 ret = -EINVAL;
623 goto end;
624
625 case FILTER_OP_RETURN:
626 if (vstack_ax(stack)->type == REG_S64)
627 *(filter_opcode_t *) pc = FILTER_OP_RETURN_S64;
628 ret = 0;
629 goto end;
630
631 case FILTER_OP_RETURN_S64:
632 if (vstack_ax(stack)->type != REG_S64) {
633 ERR("Unexpected register type\n");
634 ret = -EINVAL;
635 goto end;
636 }
637 ret = 0;
638 goto end;
639
640 /* binary */
641 case FILTER_OP_MUL:
642 case FILTER_OP_DIV:
643 case FILTER_OP_MOD:
644 case FILTER_OP_PLUS:
645 case FILTER_OP_MINUS:
646 ERR("unsupported bytecode op %u\n",
647 (unsigned int) *(filter_opcode_t *) pc);
648 ret = -EINVAL;
649 goto end;
650
651 case FILTER_OP_EQ:
652 {
653 struct binary_op *insn = (struct binary_op *) pc;
654
655 switch(vstack_ax(stack)->type) {
656 default:
657 ERR("unknown register type\n");
658 ret = -EINVAL;
659 goto end;
660
661 case REG_STRING:
662 if (vstack_bx(stack)->type == REG_UNKNOWN)
663 break;
664 if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
665 insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
666 else
667 insn->op = FILTER_OP_EQ_STRING;
668 break;
669 case REG_STAR_GLOB_STRING:
670 if (vstack_bx(stack)->type == REG_UNKNOWN)
671 break;
672 insn->op = FILTER_OP_EQ_STAR_GLOB_STRING;
673 break;
674 case REG_S64:
675 if (vstack_bx(stack)->type == REG_UNKNOWN)
676 break;
677 if (vstack_bx(stack)->type == REG_S64)
678 insn->op = FILTER_OP_EQ_S64;
679 else
680 insn->op = FILTER_OP_EQ_DOUBLE_S64;
681 break;
682 case REG_DOUBLE:
683 if (vstack_bx(stack)->type == REG_UNKNOWN)
684 break;
685 if (vstack_bx(stack)->type == REG_S64)
686 insn->op = FILTER_OP_EQ_S64_DOUBLE;
687 else
688 insn->op = FILTER_OP_EQ_DOUBLE;
689 break;
690 case REG_UNKNOWN:
691 break; /* Dynamic typing. */
692 }
693 /* Pop 2, push 1 */
694 if (vstack_pop(stack)) {
695 ret = -EINVAL;
696 goto end;
697 }
698 vstack_ax(stack)->type = REG_S64;
699 next_pc += sizeof(struct binary_op);
700 break;
701 }
702
703 case FILTER_OP_NE:
704 {
705 struct binary_op *insn = (struct binary_op *) pc;
706
707 switch(vstack_ax(stack)->type) {
708 default:
709 ERR("unknown register type\n");
710 ret = -EINVAL;
711 goto end;
712
713 case REG_STRING:
714 if (vstack_bx(stack)->type == REG_UNKNOWN)
715 break;
716 if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
717 insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
718 else
719 insn->op = FILTER_OP_NE_STRING;
720 break;
721 case REG_STAR_GLOB_STRING:
722 if (vstack_bx(stack)->type == REG_UNKNOWN)
723 break;
724 insn->op = FILTER_OP_NE_STAR_GLOB_STRING;
725 break;
726 case REG_S64:
727 if (vstack_bx(stack)->type == REG_UNKNOWN)
728 break;
729 if (vstack_bx(stack)->type == REG_S64)
730 insn->op = FILTER_OP_NE_S64;
731 else
732 insn->op = FILTER_OP_NE_DOUBLE_S64;
733 break;
734 case REG_DOUBLE:
735 if (vstack_bx(stack)->type == REG_UNKNOWN)
736 break;
737 if (vstack_bx(stack)->type == REG_S64)
738 insn->op = FILTER_OP_NE_S64_DOUBLE;
739 else
740 insn->op = FILTER_OP_NE_DOUBLE;
741 break;
742 case REG_UNKNOWN:
743 break; /* Dynamic typing. */
744 }
745 /* Pop 2, push 1 */
746 if (vstack_pop(stack)) {
747 ret = -EINVAL;
748 goto end;
749 }
750 vstack_ax(stack)->type = REG_S64;
751 next_pc += sizeof(struct binary_op);
752 break;
753 }
754
755 case FILTER_OP_GT:
756 {
757 struct binary_op *insn = (struct binary_op *) pc;
758
759 switch(vstack_ax(stack)->type) {
760 default:
761 ERR("unknown register type\n");
762 ret = -EINVAL;
763 goto end;
764
765 case REG_STAR_GLOB_STRING:
766 ERR("invalid register type for > binary operator\n");
767 ret = -EINVAL;
768 goto end;
769 case REG_STRING:
770 if (vstack_bx(stack)->type == REG_UNKNOWN)
771 break;
772 insn->op = FILTER_OP_GT_STRING;
773 break;
774 case REG_S64:
775 if (vstack_bx(stack)->type == REG_UNKNOWN)
776 break;
777 if (vstack_bx(stack)->type == REG_S64)
778 insn->op = FILTER_OP_GT_S64;
779 else
780 insn->op = FILTER_OP_GT_DOUBLE_S64;
781 break;
782 case REG_DOUBLE:
783 if (vstack_bx(stack)->type == REG_UNKNOWN)
784 break;
785 if (vstack_bx(stack)->type == REG_S64)
786 insn->op = FILTER_OP_GT_S64_DOUBLE;
787 else
788 insn->op = FILTER_OP_GT_DOUBLE;
789 break;
790 case REG_UNKNOWN:
791 break; /* Dynamic typing. */
792 }
793 /* Pop 2, push 1 */
794 if (vstack_pop(stack)) {
795 ret = -EINVAL;
796 goto end;
797 }
798 vstack_ax(stack)->type = REG_S64;
799 next_pc += sizeof(struct binary_op);
800 break;
801 }
802
803 case FILTER_OP_LT:
804 {
805 struct binary_op *insn = (struct binary_op *) pc;
806
807 switch(vstack_ax(stack)->type) {
808 default:
809 ERR("unknown register type\n");
810 ret = -EINVAL;
811 goto end;
812
813 case REG_STAR_GLOB_STRING:
814 ERR("invalid register type for < binary operator\n");
815 ret = -EINVAL;
816 goto end;
817 case REG_STRING:
818 if (vstack_bx(stack)->type == REG_UNKNOWN)
819 break;
820 insn->op = FILTER_OP_LT_STRING;
821 break;
822 case REG_S64:
823 if (vstack_bx(stack)->type == REG_UNKNOWN)
824 break;
825 if (vstack_bx(stack)->type == REG_S64)
826 insn->op = FILTER_OP_LT_S64;
827 else
828 insn->op = FILTER_OP_LT_DOUBLE_S64;
829 break;
830 case REG_DOUBLE:
831 if (vstack_bx(stack)->type == REG_UNKNOWN)
832 break;
833 if (vstack_bx(stack)->type == REG_S64)
834 insn->op = FILTER_OP_LT_S64_DOUBLE;
835 else
836 insn->op = FILTER_OP_LT_DOUBLE;
837 break;
838 case REG_UNKNOWN:
839 break; /* Dynamic typing. */
840 }
841 /* Pop 2, push 1 */
842 if (vstack_pop(stack)) {
843 ret = -EINVAL;
844 goto end;
845 }
846 vstack_ax(stack)->type = REG_S64;
847 next_pc += sizeof(struct binary_op);
848 break;
849 }
850
851 case FILTER_OP_GE:
852 {
853 struct binary_op *insn = (struct binary_op *) pc;
854
855 switch(vstack_ax(stack)->type) {
856 default:
857 ERR("unknown register type\n");
858 ret = -EINVAL;
859 goto end;
860
861 case REG_STAR_GLOB_STRING:
862 ERR("invalid register type for >= binary operator\n");
863 ret = -EINVAL;
864 goto end;
865 case REG_STRING:
866 if (vstack_bx(stack)->type == REG_UNKNOWN)
867 break;
868 insn->op = FILTER_OP_GE_STRING;
869 break;
870 case REG_S64:
871 if (vstack_bx(stack)->type == REG_UNKNOWN)
872 break;
873 if (vstack_bx(stack)->type == REG_S64)
874 insn->op = FILTER_OP_GE_S64;
875 else
876 insn->op = FILTER_OP_GE_DOUBLE_S64;
877 break;
878 case REG_DOUBLE:
879 if (vstack_bx(stack)->type == REG_UNKNOWN)
880 break;
881 if (vstack_bx(stack)->type == REG_S64)
882 insn->op = FILTER_OP_GE_S64_DOUBLE;
883 else
884 insn->op = FILTER_OP_GE_DOUBLE;
885 break;
886 case REG_UNKNOWN:
887 break; /* Dynamic typing. */
888 }
889 /* Pop 2, push 1 */
890 if (vstack_pop(stack)) {
891 ret = -EINVAL;
892 goto end;
893 }
894 vstack_ax(stack)->type = REG_S64;
895 next_pc += sizeof(struct binary_op);
896 break;
897 }
898 case FILTER_OP_LE:
899 {
900 struct binary_op *insn = (struct binary_op *) pc;
901
902 switch(vstack_ax(stack)->type) {
903 default:
904 ERR("unknown register type\n");
905 ret = -EINVAL;
906 goto end;
907
908 case REG_STAR_GLOB_STRING:
909 ERR("invalid register type for <= binary operator\n");
910 ret = -EINVAL;
911 goto end;
912 case REG_STRING:
913 if (vstack_bx(stack)->type == REG_UNKNOWN)
914 break;
915 insn->op = FILTER_OP_LE_STRING;
916 break;
917 case REG_S64:
918 if (vstack_bx(stack)->type == REG_UNKNOWN)
919 break;
920 if (vstack_bx(stack)->type == REG_S64)
921 insn->op = FILTER_OP_LE_S64;
922 else
923 insn->op = FILTER_OP_LE_DOUBLE_S64;
924 break;
925 case REG_DOUBLE:
926 if (vstack_bx(stack)->type == REG_UNKNOWN)
927 break;
928 if (vstack_bx(stack)->type == REG_S64)
929 insn->op = FILTER_OP_LE_S64_DOUBLE;
930 else
931 insn->op = FILTER_OP_LE_DOUBLE;
932 break;
933 case REG_UNKNOWN:
934 break; /* Dynamic typing. */
935 }
936 vstack_ax(stack)->type = REG_S64;
937 next_pc += sizeof(struct binary_op);
938 break;
939 }
940
941 case FILTER_OP_EQ_STRING:
942 case FILTER_OP_NE_STRING:
943 case FILTER_OP_GT_STRING:
944 case FILTER_OP_LT_STRING:
945 case FILTER_OP_GE_STRING:
946 case FILTER_OP_LE_STRING:
947 case FILTER_OP_EQ_STAR_GLOB_STRING:
948 case FILTER_OP_NE_STAR_GLOB_STRING:
949 case FILTER_OP_EQ_S64:
950 case FILTER_OP_NE_S64:
951 case FILTER_OP_GT_S64:
952 case FILTER_OP_LT_S64:
953 case FILTER_OP_GE_S64:
954 case FILTER_OP_LE_S64:
955 case FILTER_OP_EQ_DOUBLE:
956 case FILTER_OP_NE_DOUBLE:
957 case FILTER_OP_GT_DOUBLE:
958 case FILTER_OP_LT_DOUBLE:
959 case FILTER_OP_GE_DOUBLE:
960 case FILTER_OP_LE_DOUBLE:
961 case FILTER_OP_EQ_DOUBLE_S64:
962 case FILTER_OP_NE_DOUBLE_S64:
963 case FILTER_OP_GT_DOUBLE_S64:
964 case FILTER_OP_LT_DOUBLE_S64:
965 case FILTER_OP_GE_DOUBLE_S64:
966 case FILTER_OP_LE_DOUBLE_S64:
967 case FILTER_OP_EQ_S64_DOUBLE:
968 case FILTER_OP_NE_S64_DOUBLE:
969 case FILTER_OP_GT_S64_DOUBLE:
970 case FILTER_OP_LT_S64_DOUBLE:
971 case FILTER_OP_GE_S64_DOUBLE:
972 case FILTER_OP_LE_S64_DOUBLE:
973 case FILTER_OP_BIT_RSHIFT:
974 case FILTER_OP_BIT_LSHIFT:
975 case FILTER_OP_BIT_AND:
976 case FILTER_OP_BIT_OR:
977 case FILTER_OP_BIT_XOR:
978 {
979 /* Pop 2, push 1 */
980 if (vstack_pop(stack)) {
981 ret = -EINVAL;
982 goto end;
983 }
984 vstack_ax(stack)->type = REG_S64;
985 next_pc += sizeof(struct binary_op);
986 break;
987 }
988
989 /* unary */
990 case FILTER_OP_UNARY_PLUS:
991 {
992 struct unary_op *insn = (struct unary_op *) pc;
993
994 switch(vstack_ax(stack)->type) {
995 default:
996 ERR("unknown register type\n");
997 ret = -EINVAL;
998 goto end;
999
1000 case REG_S64:
1001 insn->op = FILTER_OP_UNARY_PLUS_S64;
1002 break;
1003 case REG_DOUBLE:
1004 insn->op = FILTER_OP_UNARY_PLUS_DOUBLE;
1005 break;
1006 case REG_UNKNOWN: /* Dynamic typing. */
1007 break;
1008 }
1009 /* Pop 1, push 1 */
1010 next_pc += sizeof(struct unary_op);
1011 break;
1012 }
1013
1014 case FILTER_OP_UNARY_MINUS:
1015 {
1016 struct unary_op *insn = (struct unary_op *) pc;
1017
1018 switch(vstack_ax(stack)->type) {
1019 default:
1020 ERR("unknown register type\n");
1021 ret = -EINVAL;
1022 goto end;
1023
1024 case REG_S64:
1025 insn->op = FILTER_OP_UNARY_MINUS_S64;
1026 break;
1027 case REG_DOUBLE:
1028 insn->op = FILTER_OP_UNARY_MINUS_DOUBLE;
1029 break;
1030 case REG_UNKNOWN: /* Dynamic typing. */
1031 break;
1032 }
1033 /* Pop 1, push 1 */
1034 next_pc += sizeof(struct unary_op);
1035 break;
1036 }
1037
1038 case FILTER_OP_UNARY_NOT:
1039 {
1040 struct unary_op *insn = (struct unary_op *) pc;
1041
1042 switch(vstack_ax(stack)->type) {
1043 default:
1044 ERR("unknown register type\n");
1045 ret = -EINVAL;
1046 goto end;
1047
1048 case REG_S64:
1049 insn->op = FILTER_OP_UNARY_NOT_S64;
1050 break;
1051 case REG_DOUBLE:
1052 insn->op = FILTER_OP_UNARY_NOT_DOUBLE;
1053 break;
1054 case REG_UNKNOWN: /* Dynamic typing. */
1055 break;
1056 }
1057 /* Pop 1, push 1 */
1058 next_pc += sizeof(struct unary_op);
1059 break;
1060 }
1061
1062 case FILTER_OP_UNARY_BIT_NOT:
1063 {
1064 /* Pop 1, push 1 */
1065 next_pc += sizeof(struct unary_op);
1066 break;
1067 }
1068
1069 case FILTER_OP_UNARY_PLUS_S64:
1070 case FILTER_OP_UNARY_MINUS_S64:
1071 case FILTER_OP_UNARY_NOT_S64:
1072 case FILTER_OP_UNARY_PLUS_DOUBLE:
1073 case FILTER_OP_UNARY_MINUS_DOUBLE:
1074 case FILTER_OP_UNARY_NOT_DOUBLE:
1075 {
1076 /* Pop 1, push 1 */
1077 next_pc += sizeof(struct unary_op);
1078 break;
1079 }
1080
1081 /* logical */
1082 case FILTER_OP_AND:
1083 case FILTER_OP_OR:
1084 {
1085 /* Continue to next instruction */
1086 /* Pop 1 when jump not taken */
1087 if (vstack_pop(stack)) {
1088 ret = -EINVAL;
1089 goto end;
1090 }
1091 next_pc += sizeof(struct logical_op);
1092 break;
1093 }
1094
1095 /* load field ref */
1096 case FILTER_OP_LOAD_FIELD_REF:
1097 {
1098 ERR("Unknown field ref type\n");
1099 ret = -EINVAL;
1100 goto end;
1101 }
1102 /* get context ref */
1103 case FILTER_OP_GET_CONTEXT_REF:
1104 {
1105 if (vstack_push(stack)) {
1106 ret = -EINVAL;
1107 goto end;
1108 }
1109 vstack_ax(stack)->type = REG_UNKNOWN;
1110 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1111 break;
1112 }
1113 case FILTER_OP_LOAD_FIELD_REF_STRING:
1114 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
1115 case FILTER_OP_GET_CONTEXT_REF_STRING:
1116 {
1117 if (vstack_push(stack)) {
1118 ret = -EINVAL;
1119 goto end;
1120 }
1121 vstack_ax(stack)->type = REG_STRING;
1122 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1123 break;
1124 }
1125 case FILTER_OP_LOAD_FIELD_REF_S64:
1126 case FILTER_OP_GET_CONTEXT_REF_S64:
1127 {
1128 if (vstack_push(stack)) {
1129 ret = -EINVAL;
1130 goto end;
1131 }
1132 vstack_ax(stack)->type = REG_S64;
1133 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1134 break;
1135 }
1136 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
1137 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
1138 {
1139 if (vstack_push(stack)) {
1140 ret = -EINVAL;
1141 goto end;
1142 }
1143 vstack_ax(stack)->type = REG_DOUBLE;
1144 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1145 break;
1146 }
1147
1148 /* load from immediate operand */
1149 case FILTER_OP_LOAD_STRING:
1150 {
1151 struct load_op *insn = (struct load_op *) pc;
1152
1153 if (vstack_push(stack)) {
1154 ret = -EINVAL;
1155 goto end;
1156 }
1157 vstack_ax(stack)->type = REG_STRING;
1158 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1159 break;
1160 }
1161
1162 case FILTER_OP_LOAD_STAR_GLOB_STRING:
1163 {
1164 struct load_op *insn = (struct load_op *) pc;
1165
1166 if (vstack_push(stack)) {
1167 ret = -EINVAL;
1168 goto end;
1169 }
1170 vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
1171 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1172 break;
1173 }
1174
1175 case FILTER_OP_LOAD_S64:
1176 {
1177 if (vstack_push(stack)) {
1178 ret = -EINVAL;
1179 goto end;
1180 }
1181 vstack_ax(stack)->type = REG_S64;
1182 next_pc += sizeof(struct load_op)
1183 + sizeof(struct literal_numeric);
1184 break;
1185 }
1186
1187 case FILTER_OP_LOAD_DOUBLE:
1188 {
1189 if (vstack_push(stack)) {
1190 ret = -EINVAL;
1191 goto end;
1192 }
1193 vstack_ax(stack)->type = REG_DOUBLE;
1194 next_pc += sizeof(struct load_op)
1195 + sizeof(struct literal_double);
1196 break;
1197 }
1198
1199 /* cast */
1200 case FILTER_OP_CAST_TO_S64:
1201 {
1202 struct cast_op *insn = (struct cast_op *) pc;
1203
1204 switch (vstack_ax(stack)->type) {
1205 default:
1206 ERR("unknown register type\n");
1207 ret = -EINVAL;
1208 goto end;
1209
1210 case REG_STRING:
1211 case REG_STAR_GLOB_STRING:
1212 ERR("Cast op can only be applied to numeric or floating point registers\n");
1213 ret = -EINVAL;
1214 goto end;
1215 case REG_S64:
1216 insn->op = FILTER_OP_CAST_NOP;
1217 break;
1218 case REG_DOUBLE:
1219 insn->op = FILTER_OP_CAST_DOUBLE_TO_S64;
1220 break;
1221 case REG_UNKNOWN:
1222 break;
1223 }
1224 /* Pop 1, push 1 */
1225 vstack_ax(stack)->type = REG_S64;
1226 next_pc += sizeof(struct cast_op);
1227 break;
1228 }
1229 case FILTER_OP_CAST_DOUBLE_TO_S64:
1230 {
1231 /* Pop 1, push 1 */
1232 vstack_ax(stack)->type = REG_S64;
1233 next_pc += sizeof(struct cast_op);
1234 break;
1235 }
1236 case FILTER_OP_CAST_NOP:
1237 {
1238 next_pc += sizeof(struct cast_op);
1239 break;
1240 }
1241
1242 /*
1243 * Instructions for recursive traversal through composed types.
1244 */
1245 case FILTER_OP_GET_CONTEXT_ROOT:
1246 {
1247 if (vstack_push(stack)) {
1248 ret = -EINVAL;
1249 goto end;
1250 }
1251 vstack_ax(stack)->type = REG_PTR;
1252 vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
1253 next_pc += sizeof(struct load_op);
1254 break;
1255 }
1256 case FILTER_OP_GET_APP_CONTEXT_ROOT:
1257 {
1258 if (vstack_push(stack)) {
1259 ret = -EINVAL;
1260 goto end;
1261 }
1262 vstack_ax(stack)->type = REG_PTR;
1263 vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
1264 next_pc += sizeof(struct load_op);
1265 break;
1266 }
1267 case FILTER_OP_GET_PAYLOAD_ROOT:
1268 {
1269 if (vstack_push(stack)) {
1270 ret = -EINVAL;
1271 goto end;
1272 }
1273 vstack_ax(stack)->type = REG_PTR;
1274 vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
1275 next_pc += sizeof(struct load_op);
1276 break;
1277 }
1278
1279 case FILTER_OP_LOAD_FIELD:
1280 {
1281 struct load_op *insn = (struct load_op *) pc;
1282
1283 assert(vstack_ax(stack)->type == REG_PTR);
1284 /* Pop 1, push 1 */
1285 ret = specialize_load_field(vstack_ax(stack), insn);
1286 if (ret)
1287 goto end;
1288
1289 next_pc += sizeof(struct load_op);
1290 break;
1291 }
1292
1293 case FILTER_OP_LOAD_FIELD_S8:
1294 case FILTER_OP_LOAD_FIELD_S16:
1295 case FILTER_OP_LOAD_FIELD_S32:
1296 case FILTER_OP_LOAD_FIELD_S64:
1297 case FILTER_OP_LOAD_FIELD_U8:
1298 case FILTER_OP_LOAD_FIELD_U16:
1299 case FILTER_OP_LOAD_FIELD_U32:
1300 case FILTER_OP_LOAD_FIELD_U64:
1301 {
1302 /* Pop 1, push 1 */
1303 vstack_ax(stack)->type = REG_S64;
1304 next_pc += sizeof(struct load_op);
1305 break;
1306 }
1307
1308 case FILTER_OP_LOAD_FIELD_STRING:
1309 case FILTER_OP_LOAD_FIELD_SEQUENCE:
1310 {
1311 /* Pop 1, push 1 */
1312 vstack_ax(stack)->type = REG_STRING;
1313 next_pc += sizeof(struct load_op);
1314 break;
1315 }
1316
1317 case FILTER_OP_LOAD_FIELD_DOUBLE:
1318 {
1319 /* Pop 1, push 1 */
1320 vstack_ax(stack)->type = REG_DOUBLE;
1321 next_pc += sizeof(struct load_op);
1322 break;
1323 }
1324
1325 case FILTER_OP_GET_SYMBOL:
1326 {
1327 struct load_op *insn = (struct load_op *) pc;
1328
1329 dbg_printf("op get symbol\n");
1330 switch (vstack_ax(stack)->load.type) {
1331 case LOAD_OBJECT:
1332 ERR("Nested fields not implemented yet.");
1333 ret = -EINVAL;
1334 goto end;
1335 case LOAD_ROOT_CONTEXT:
1336 /* Lookup context field. */
1337 ret = specialize_context_lookup(session,
1338 bytecode, insn,
1339 &vstack_ax(stack)->load);
1340 if (ret)
1341 goto end;
1342 break;
1343 case LOAD_ROOT_APP_CONTEXT:
1344 /* Lookup app context field. */
1345 ret = specialize_app_context_lookup(session,
1346 bytecode, insn,
1347 &vstack_ax(stack)->load);
1348 if (ret)
1349 goto end;
1350 break;
1351 case LOAD_ROOT_PAYLOAD:
1352 /* Lookup event payload field. */
1353 ret = specialize_event_payload_lookup(event,
1354 bytecode, insn,
1355 &vstack_ax(stack)->load);
1356 if (ret)
1357 goto end;
1358 break;
1359 }
1360 next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
1361 break;
1362 }
1363
1364 case FILTER_OP_GET_SYMBOL_FIELD:
1365 {
1366 /* Always generated by specialize phase. */
1367 ret = -EINVAL;
1368 goto end;
1369 }
1370
1371 case FILTER_OP_GET_INDEX_U16:
1372 {
1373 struct load_op *insn = (struct load_op *) pc;
1374 struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
1375
1376 dbg_printf("op get index u16\n");
1377 /* Pop 1, push 1 */
1378 ret = specialize_get_index(bytecode, insn, index->index,
1379 vstack_ax(stack), sizeof(*index));
1380 if (ret)
1381 goto end;
1382 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
1383 break;
1384 }
1385
1386 case FILTER_OP_GET_INDEX_U64:
1387 {
1388 struct load_op *insn = (struct load_op *) pc;
1389 struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
1390
1391 dbg_printf("op get index u64\n");
1392 /* Pop 1, push 1 */
1393 ret = specialize_get_index(bytecode, insn, index->index,
1394 vstack_ax(stack), sizeof(*index));
1395 if (ret)
1396 goto end;
1397 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
1398 break;
1399 }
1400
1401 }
1402 }
1403 end:
1404 return ret;
1405 }