liblttng-ust/lttng-bytecode-specialize.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng UST bytecode specializer.
 */

#define _LGPL_SOURCE
/*
 * Standard headers for assert(), errno values, CHAR_BIT, bool,
 * realloc()/free() and the memcpy/strcpy family used below; some of
 * these may also be pulled in indirectly through lttng-bytecode.h.
 */
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "lttng-bytecode.h"
#include <lttng/align.h>
#include "ust-events-internal.h"

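/*
 * Find-last-set: return the 1-based position of the most significant
 * set bit in "val" (e.g. lttng_fls(8) == 4), or 0 if no bit is set.
 * Branch-based equivalent of the kernel's fls().
 */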
static int lttng_fls(int val)
{
	int r = 32;
	unsigned int x = (unsigned int) val;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		r -= 1;
	}
	return r;
}

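/*
 * Return ceil(log2(count)): the smallest order such that
 * (1U << order) >= count, e.g. get_count_order(5) == 3. Used to grow
 * the runtime data area in power-of-two steps.
 */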
static int get_count_order(unsigned int count)
{
	int order;

	order = lttng_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

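/*
 * Reserve "len" bytes, aligned on "align", at the end of the runtime
 * data area, growing the zero-filled allocation in power-of-two steps
 * as needed. Returns the offset of the reserved bytes within
 * runtime->data, or a negative errno value on error.
 */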
static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
		size_t align, size_t len)
{
	ssize_t ret;
	size_t padding = lttng_ust_offset_align(runtime->data_len, align);
	size_t new_len = runtime->data_len + padding + len;
	size_t new_alloc_len = new_len;
	size_t old_alloc_len = runtime->data_alloc_len;

	if (new_len > BYTECODE_MAX_DATA_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		char *newptr;

		new_alloc_len =
			max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = realloc(runtime->data, new_alloc_len);
		if (!newptr)
			return -ENOMEM;
		runtime->data = newptr;
		/* Zero the newly allocated tail of the buffer. */
		memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		runtime->data_alloc_len = new_alloc_len;
	}
	runtime->data_len += padding;
	ret = runtime->data_len;
	runtime->data_len += len;
	return ret;
}

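/*
 * Append "len" bytes from "p" to the runtime data area, aligned on
 * "align". Returns the offset at which the data was copied, or a
 * negative errno value on error.
 */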
static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
		const void *p, size_t align, size_t len)
{
	ssize_t offset;

	offset = bytecode_reserve_data(runtime, align, len);
	if (offset < 0)
		return -ENOMEM;
	memcpy(&runtime->data[offset], p, len);
	return offset;
}

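/*
 * Specialize a generic BYTECODE_OP_LOAD_FIELD instruction according to
 * the object type found on top of the virtual stack: pick the typed
 * load opcode and set the resulting register type. Byte-swapped
 * (rev_bo) integer fields keep the generic opcode.
 */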
static int specialize_load_field(struct vstack_entry *stack_top,
		struct load_op *insn)
{
	int ret;

	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printf("Bytecode warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	switch (stack_top->load.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printf("op load field s8\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_S8;
		break;
	case OBJECT_TYPE_S16:
		dbg_printf("op load field s16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_S16;
		break;
	case OBJECT_TYPE_S32:
		dbg_printf("op load field s32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_S32;
		break;
	case OBJECT_TYPE_S64:
		dbg_printf("op load field s64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_S64;
		break;
	case OBJECT_TYPE_SIGNED_ENUM:
		dbg_printf("op load field signed enumeration\n");
		stack_top->type = REG_PTR;
		break;
	case OBJECT_TYPE_U8:
		dbg_printf("op load field u8\n");
		stack_top->type = REG_U64;
		insn->op = BYTECODE_OP_LOAD_FIELD_U8;
		break;
	case OBJECT_TYPE_U16:
		dbg_printf("op load field u16\n");
		stack_top->type = REG_U64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_U16;
		break;
	case OBJECT_TYPE_U32:
		dbg_printf("op load field u32\n");
		stack_top->type = REG_U64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_U32;
		break;
	case OBJECT_TYPE_U64:
		dbg_printf("op load field u64\n");
		stack_top->type = REG_U64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_U64;
		break;
	case OBJECT_TYPE_UNSIGNED_ENUM:
		dbg_printf("op load field unsigned enumeration\n");
		stack_top->type = REG_PTR;
		break;
	case OBJECT_TYPE_DOUBLE:
		stack_top->type = REG_DOUBLE;
		insn->op = BYTECODE_OP_LOAD_FIELD_DOUBLE;
		break;
	case OBJECT_TYPE_STRING:
		dbg_printf("op load field string\n");
		stack_top->type = REG_STRING;
		insn->op = BYTECODE_OP_LOAD_FIELD_STRING;
		break;
	case OBJECT_TYPE_STRING_SEQUENCE:
		dbg_printf("op load field string sequence\n");
		stack_top->type = REG_STRING;
		insn->op = BYTECODE_OP_LOAD_FIELD_SEQUENCE;
		break;
	case OBJECT_TYPE_DYNAMIC:
		dbg_printf("op load field dynamic\n");
		stack_top->type = REG_UNKNOWN;
		/* Don't specialize load op. */
		break;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		ERR("Sequences, arrays, structs and variants cannot be loaded (nested types).");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}

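/*
 * Map an integer element size/signedness pair to the corresponding
 * typed object type (8/16/32/64-bit, signed or unsigned).
 */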
static int specialize_get_index_object_type(enum object_type *otype,
		int signedness, uint32_t elem_len)
{
	switch (elem_len) {
	case 8:
		if (signedness)
			*otype = OBJECT_TYPE_S8;
		else
			*otype = OBJECT_TYPE_U8;
		break;
	case 16:
		if (signedness)
			*otype = OBJECT_TYPE_S16;
		else
			*otype = OBJECT_TYPE_U16;
		break;
	case 32:
		if (signedness)
			*otype = OBJECT_TYPE_S32;
		else
			*otype = OBJECT_TYPE_U32;
		break;
	case 64:
		if (signedness)
			*otype = OBJECT_TYPE_S64;
		else
			*otype = OBJECT_TYPE_U64;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

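/*
 * Specialize a get_index operation on an array or sequence object:
 * validate the integer element type, precompute the byte offset of the
 * indexed element, and push the resulting bytecode_get_index_data
 * descriptor into the runtime data area. The instruction's inline
 * index field is rewritten to point at that descriptor.
 */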
static int specialize_get_index(struct bytecode_runtime *runtime,
		struct load_op *insn, uint64_t index,
		struct vstack_entry *stack_top,
		int idx_len)
{
	int ret;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	memset(&gid, 0, sizeof(gid));
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		switch (stack_top->load.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const struct lttng_integer_type *integer_type;
			const struct lttng_event_field *field;
			uint32_t elem_len, num_elems;
			int signedness;

			field = stack_top->load.field;
			switch (field->type.atype) {
			case atype_array:
				integer_type = &field->type.u.legacy.array.elem_type.u.basic.integer;
				num_elems = field->type.u.legacy.array.length;
				break;
			case atype_array_nestable:
				if (field->type.u.array_nestable.elem_type->atype != atype_integer) {
					ret = -EINVAL;
					goto end;
				}
				integer_type = &field->type.u.array_nestable.elem_type->u.integer;
				num_elems = field->type.u.array_nestable.length;
				break;
			default:
				ret = -EINVAL;
				goto end;
			}
			elem_len = integer_type->size;
			signedness = integer_type->signedness;
			if (index >= num_elems) {
				ret = -EINVAL;
				goto end;
			}
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.array_len = num_elems * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (integer_type->reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const struct lttng_integer_type *integer_type;
			const struct lttng_event_field *field;
			uint32_t elem_len;
			int signedness;

			field = stack_top->load.field;
			switch (field->type.atype) {
			case atype_sequence:
				integer_type = &field->type.u.legacy.sequence.elem_type.u.basic.integer;
				break;
			case atype_sequence_nestable:
				if (field->type.u.sequence_nestable.elem_type->atype != atype_integer) {
					ret = -EINVAL;
					goto end;
				}
				integer_type = &field->type.u.sequence_nestable.elem_type->u.integer;
				break;
			default:
				ret = -EINVAL;
				goto end;
			}
			elem_len = integer_type->size;
			signedness = integer_type->signedness;
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (integer_type->reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			/* Only generated by the specialize phase. */
		case OBJECT_TYPE_VARIANT:	/* Fall-through */
		default:
			ERR("Unexpected get index type %d",
				(int) stack_top->load.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
		ERR("Index lookup for root field not implemented yet.");
		ret = -EINVAL;
		goto end;
	}
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	switch (idx_len) {
	case 2:
		((struct get_index_u16 *) insn->data)->index = data_offset;
		break;
	case 8:
		((struct get_index_u64 *) insn->data)->index = data_offset;
		break;
	default:
		ret = -EINVAL;
		goto end;
	}

	return 0;

end:
	return ret;
}

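/*
 * Resolve the context field name referenced by a get_symbol
 * instruction (stored in the bytecode's data section) to its index in
 * the context array.
 */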
static int specialize_context_lookup_name(struct lttng_ctx *ctx,
		struct bytecode_runtime *bytecode,
		struct load_op *insn)
{
	uint16_t offset;
	const char *name;

	offset = ((struct get_symbol *) insn->data)->offset;
	name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
	return lttng_get_context_index(ctx, name);
}

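/*
 * Derive the vstack load object type from an event (or context) field
 * type. Context fields are a special case: array and sequence context
 * fields are always exposed as strings.
 */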
static int specialize_load_object(const struct lttng_event_field *field,
		struct vstack_load *load, bool is_context)
{
	load->type = LOAD_OBJECT;

	switch (field->type.atype) {
	case atype_integer:
		if (field->type.u.integer.signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	case atype_enum:
	case atype_enum_nestable:
	{
		const struct lttng_integer_type *itype;

		if (field->type.atype == atype_enum) {
			itype = &field->type.u.legacy.basic.enumeration.container_type;
		} else {
			itype = &field->type.u.enum_nestable.container_type->u.integer;
		}
		if (itype->signedness)
			load->object_type = OBJECT_TYPE_SIGNED_ENUM;
		else
			load->object_type = OBJECT_TYPE_UNSIGNED_ENUM;
		load->rev_bo = false;
		break;
	}
	case atype_array:
		if (field->type.u.legacy.array.elem_type.atype != atype_integer) {
			ERR("Array nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.legacy.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_array_nestable:
		if (field->type.u.array_nestable.elem_type->atype != atype_integer) {
			ERR("Array nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_sequence:
		if (field->type.u.legacy.sequence.elem_type.atype != atype_integer) {
			ERR("Sequence nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.legacy.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_sequence_nestable:
		if (field->type.u.sequence_nestable.elem_type->atype != atype_integer) {
			ERR("Sequence nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_string:
		load->object_type = OBJECT_TYPE_STRING;
		break;
	case atype_float:
		load->object_type = OBJECT_TYPE_DOUBLE;
		break;
	case atype_dynamic:
		load->object_type = OBJECT_TYPE_DYNAMIC;
		break;
	case atype_struct:
		ERR("Structure type cannot be loaded.");
		return -EINVAL;
	default:
		ERR("Unknown type: %d", (int) field->type.atype);
		return -EINVAL;
	}
	return 0;
}

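/*
 * Specialize a get_symbol lookup on the static context ($ctx.*): find
 * the context field by name and rewrite the instruction into a
 * BYTECODE_OP_GET_INDEX_U16 referring to a get_index descriptor.
 */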
static int specialize_context_lookup(struct lttng_ctx *ctx,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	int idx, ret;
	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	idx = specialize_context_lookup_name(ctx, runtime, insn);
	if (idx < 0) {
		return -ENOENT;
	}
	ctx_field = &ctx->fields[idx];
	field = &ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		return ret;
	/* Specialize each get_symbol into a get_index. */
	insn->op = BYTECODE_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	gid.field = field;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		return -EINVAL;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	return 0;
}

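/*
 * Specialize a get_symbol lookup on the application context
 * ($app.provider:name). The context field is registered on demand when
 * it is not known yet, which is why this function can update *pctx.
 */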
static int specialize_app_context_lookup(struct lttng_ctx **pctx,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	uint16_t offset;
	const char *orig_name;
	char *name = NULL;
	int idx, ret;
	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	offset = ((struct get_symbol *) insn->data)->offset;
	orig_name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	name = zmalloc(strlen(orig_name) + strlen("$app.") + 1);
	if (!name) {
		ret = -ENOMEM;
		goto end;
	}
	strcpy(name, "$app.");
	strcat(name, orig_name);
	idx = lttng_get_context_index(*pctx, name);
	if (idx < 0) {
		assert(lttng_context_is_app(name));
		ret = lttng_ust_add_app_context_to_ctx_rcu(name,
				pctx);
		if (ret)
			goto end;
		idx = lttng_get_context_index(*pctx, name);
		if (idx < 0) {
			ret = -ENOENT;
			goto end;
		}
	}
	ctx_field = &(*pctx)->fields[idx];
	field = &ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		goto end;
	/* Specialize each get_symbol into a get_index. */
	insn->op = BYTECODE_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	gid.field = field;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	free(name);
	return ret;
}

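/*
 * Specialize a get_symbol lookup on the event payload: walk the event
 * descriptor's fields to find the named field, accumulating the byte
 * offset each skipped field occupies on the interpreter stack, then
 * rewrite the instruction into a BYTECODE_OP_GET_INDEX_U16.
 */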
static int specialize_payload_lookup(const struct lttng_event_desc *event_desc,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	const char *name;
	uint16_t offset;
	unsigned int i, nr_fields;
	bool found = false;
	uint32_t field_offset = 0;
	const struct lttng_event_field *field;
	int ret;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	nr_fields = event_desc->nr_fields;
	offset = ((struct get_symbol *) insn->data)->offset;
	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	for (i = 0; i < nr_fields; i++) {
		field = &event_desc->fields[i];
		if (field->u.ext.nofilter) {
			continue;
		}
		if (!strcmp(field->name, name)) {
			found = true;
			break;
		}
		/* compute field offset on stack */
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum:
		case atype_enum_nestable:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_array_nestable:
		case atype_sequence:
		case atype_sequence_nestable:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		case atype_float:
			field_offset += sizeof(double);
			break;
		default:
			ret = -EINVAL;
			goto end;
		}
	}
	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	ret = specialize_load_object(field, load, false);
	if (ret)
		goto end;

	/* Specialize each get_symbol into a get_index. */
	insn->op = BYTECODE_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.offset = field_offset;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	gid.field = field;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	return ret;
}

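/*
 * Single pass over the bytecode: track value types on a virtual stack
 * and rewrite generic (dynamically-typed) instructions into their
 * statically-typed variants whenever the operand types are known.
 */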
int lttng_bytecode_specialize(const struct lttng_event_desc *event_desc,
		struct bytecode_runtime *bytecode)
{
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	struct vstack _stack;
	struct vstack *stack = &_stack;
	struct lttng_ctx **pctx = bytecode->p.pctx;

	vstack_init(stack);

	start_pc = &bytecode->code[0];
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
			pc = next_pc) {
		switch (*(bytecode_opcode_t *) pc) {
		case BYTECODE_OP_UNKNOWN:
		default:
			ERR("unknown bytecode op %u\n",
				(unsigned int) *(bytecode_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case BYTECODE_OP_RETURN:
			if (vstack_ax(stack)->type == REG_S64 ||
					vstack_ax(stack)->type == REG_U64)
				*(bytecode_opcode_t *) pc = BYTECODE_OP_RETURN_S64;
			ret = 0;
			goto end;

		case BYTECODE_OP_RETURN_S64:
			if (vstack_ax(stack)->type != REG_S64 &&
					vstack_ax(stack)->type != REG_U64) {
				ERR("Unexpected register type\n");
				ret = -EINVAL;
				goto end;
			}
			ret = 0;
			goto end;

		/* binary */
		case BYTECODE_OP_MUL:
		case BYTECODE_OP_DIV:
		case BYTECODE_OP_MOD:
		case BYTECODE_OP_PLUS:
		case BYTECODE_OP_MINUS:
			ERR("unsupported bytecode op %u\n",
				(unsigned int) *(bytecode_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case BYTECODE_OP_EQ:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
				else
					insn->op = BYTECODE_OP_EQ_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_EQ_S64;
				else
					insn->op = BYTECODE_OP_EQ_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_EQ_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_EQ_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_NE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
				else
					insn->op = BYTECODE_OP_NE_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_NE_S64;
				else
					insn->op = BYTECODE_OP_NE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_NE_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_NE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_GT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for > binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_GT_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_GT_S64;
				else
					insn->op = BYTECODE_OP_GT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_GT_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_GT_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_LT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for < binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_LT_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_LT_S64;
				else
					insn->op = BYTECODE_OP_LT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_LT_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_LT_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_GE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for >= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_GE_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_GE_S64;
				else
					insn->op = BYTECODE_OP_GE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_GE_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_GE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_U64;
			next_pc += sizeof(struct binary_op);
			break;
		}
		case BYTECODE_OP_LE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for <= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_LE_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_LE_S64;
				else
					insn->op = BYTECODE_OP_LE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_LE_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_LE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_EQ_STRING:
		case BYTECODE_OP_NE_STRING:
		case BYTECODE_OP_GT_STRING:
		case BYTECODE_OP_LT_STRING:
		case BYTECODE_OP_GE_STRING:
		case BYTECODE_OP_LE_STRING:
		case BYTECODE_OP_EQ_STAR_GLOB_STRING:
		case BYTECODE_OP_NE_STAR_GLOB_STRING:
		case BYTECODE_OP_EQ_S64:
		case BYTECODE_OP_NE_S64:
		case BYTECODE_OP_GT_S64:
		case BYTECODE_OP_LT_S64:
		case BYTECODE_OP_GE_S64:
		case BYTECODE_OP_LE_S64:
		case BYTECODE_OP_EQ_DOUBLE:
		case BYTECODE_OP_NE_DOUBLE:
		case BYTECODE_OP_GT_DOUBLE:
		case BYTECODE_OP_LT_DOUBLE:
		case BYTECODE_OP_GE_DOUBLE:
		case BYTECODE_OP_LE_DOUBLE:
		case BYTECODE_OP_EQ_DOUBLE_S64:
		case BYTECODE_OP_NE_DOUBLE_S64:
		case BYTECODE_OP_GT_DOUBLE_S64:
		case BYTECODE_OP_LT_DOUBLE_S64:
		case BYTECODE_OP_GE_DOUBLE_S64:
		case BYTECODE_OP_LE_DOUBLE_S64:
		case BYTECODE_OP_EQ_S64_DOUBLE:
		case BYTECODE_OP_NE_S64_DOUBLE:
		case BYTECODE_OP_GT_S64_DOUBLE:
		case BYTECODE_OP_LT_S64_DOUBLE:
		case BYTECODE_OP_GE_S64_DOUBLE:
		case BYTECODE_OP_LE_S64_DOUBLE:
		{
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_BIT_RSHIFT:
		case BYTECODE_OP_BIT_LSHIFT:
		case BYTECODE_OP_BIT_AND:
		case BYTECODE_OP_BIT_OR:
		case BYTECODE_OP_BIT_XOR:
		{
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		/* unary */
		case BYTECODE_OP_UNARY_PLUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
			case REG_U64:
				insn->op = BYTECODE_OP_UNARY_PLUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = BYTECODE_OP_UNARY_PLUS_DOUBLE;
				break;
			case REG_UNKNOWN:	/* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case BYTECODE_OP_UNARY_MINUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
			case REG_U64:
				insn->op = BYTECODE_OP_UNARY_MINUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = BYTECODE_OP_UNARY_MINUS_DOUBLE;
				break;
			case REG_UNKNOWN:	/* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case BYTECODE_OP_UNARY_NOT:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
			case REG_U64:
				insn->op = BYTECODE_OP_UNARY_NOT_S64;
				break;
			case REG_DOUBLE:
				insn->op = BYTECODE_OP_UNARY_NOT_DOUBLE;
				break;
			case REG_UNKNOWN:	/* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case BYTECODE_OP_UNARY_BIT_NOT:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case BYTECODE_OP_UNARY_PLUS_S64:
		case BYTECODE_OP_UNARY_MINUS_S64:
		case BYTECODE_OP_UNARY_NOT_S64:
		case BYTECODE_OP_UNARY_PLUS_DOUBLE:
		case BYTECODE_OP_UNARY_MINUS_DOUBLE:
		case BYTECODE_OP_UNARY_NOT_DOUBLE:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		/* logical */
		case BYTECODE_OP_AND:
		case BYTECODE_OP_OR:
		{
			/* Continue to next instruction */
			/* Pop 1 when jump not taken */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			next_pc += sizeof(struct logical_op);
			break;
		}

		/* load field ref */
		case BYTECODE_OP_LOAD_FIELD_REF:
		{
			ERR("Unknown field ref type\n");
			ret = -EINVAL;
			goto end;
		}
		/* get context ref */
		case BYTECODE_OP_GET_CONTEXT_REF:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_UNKNOWN;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case BYTECODE_OP_LOAD_FIELD_REF_STRING:
		case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
		case BYTECODE_OP_GET_CONTEXT_REF_STRING:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case BYTECODE_OP_LOAD_FIELD_REF_S64:
		case BYTECODE_OP_GET_CONTEXT_REF_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
		case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}

		/* load from immediate operand */
		case BYTECODE_OP_LOAD_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case BYTECODE_OP_LOAD_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_numeric);
			break;
		}

		case BYTECODE_OP_LOAD_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_double);
			break;
		}

		/* cast */
		case BYTECODE_OP_CAST_TO_S64:
		{
			struct cast_op *insn = (struct cast_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
			case REG_STAR_GLOB_STRING:
				ERR("Cast op can only be applied to numeric or floating point registers\n");
				ret = -EINVAL;
				goto end;
			case REG_S64:
				insn->op = BYTECODE_OP_CAST_NOP;
				break;
			case REG_DOUBLE:
				insn->op = BYTECODE_OP_CAST_DOUBLE_TO_S64;
				break;
			case REG_UNKNOWN:
			case REG_U64:
				break;
			}
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case BYTECODE_OP_CAST_DOUBLE_TO_S64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case BYTECODE_OP_CAST_NOP:
		{
			next_pc += sizeof(struct cast_op);
			break;
		}

		/*
		 * Instructions for recursive traversal through composed types.
		 */
		case BYTECODE_OP_GET_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case BYTECODE_OP_GET_PAYLOAD_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD:
		{
			struct load_op *insn = (struct load_op *) pc;

			assert(vstack_ax(stack)->type == REG_PTR);
			/* Pop 1, push 1 */
			ret = specialize_load_field(vstack_ax(stack), insn);
			if (ret)
				goto end;

			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD_S8:
		case BYTECODE_OP_LOAD_FIELD_S16:
		case BYTECODE_OP_LOAD_FIELD_S32:
		case BYTECODE_OP_LOAD_FIELD_S64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD_U8:
		case BYTECODE_OP_LOAD_FIELD_U16:
		case BYTECODE_OP_LOAD_FIELD_U32:
		case BYTECODE_OP_LOAD_FIELD_U64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_U64;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD_STRING:
		case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD_DOUBLE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_GET_SYMBOL:
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printf("op get symbol\n");
			switch (vstack_ax(stack)->load.type) {
			case LOAD_OBJECT:
				ERR("Nested fields not implemented yet.");
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_CONTEXT:
				/* Lookup context field. */
				ret = specialize_context_lookup(*pctx,
					bytecode, insn,
					&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_APP_CONTEXT:
				/* Lookup app context field. */
				ret = specialize_app_context_lookup(pctx,
					bytecode, insn,
					&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_PAYLOAD:
				/* Lookup event payload field. */
				ret = specialize_payload_lookup(event_desc,
					bytecode, insn,
					&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			}
			next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
			break;
		}

		case BYTECODE_OP_GET_SYMBOL_FIELD:
		{
			/* Always generated by specialize phase. */
			ret = -EINVAL;
			goto end;
		}

		case BYTECODE_OP_GET_INDEX_U16:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u16 *index = (struct get_index_u16 *) insn->data;

			dbg_printf("op get index u16\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
			break;
		}

		case BYTECODE_OP_GET_INDEX_U64:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u64 *index = (struct get_index_u64 *) insn->data;

			dbg_printf("op get index u64\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
			break;
		}

		}
	}
end:
	return ret;
}