/*
 * lttng-bytecode-specialize.c
 *
 * LTTng UST bytecode specializer.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define _LGPL_SOURCE
#include <assert.h>
#include <limits.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include "lttng-bytecode.h"
#include <lttng/align.h>
#include "ust-events-internal.h"

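/*
 * Find last (most significant) bit set, as a 1-based bit position.
 * Returns 0 if no bit is set, e.g. lttng_fls(1) == 1, lttng_fls(8) == 4.
 */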
static int lttng_fls(int val)
{
	int r = 32;
	unsigned int x = (unsigned int) val;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		r -= 1;
	}
	return r;
}

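/*
 * Compute ceil(log2(count)): the smallest order such that
 * (1 << order) >= count, e.g. get_count_order(5) == 3 since 8 >= 5.
 */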
static int get_count_order(unsigned int count)
{
	int order;

	order = lttng_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

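/*
 * Reserve `len` bytes, aligned on `align`, in the runtime data area,
 * growing the backing allocation geometrically (power-of-two sizes) as
 * needed. Returns the offset of the reserved space, or a negative
 * errno-style value on error.
 */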
static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
		size_t align, size_t len)
{
	ssize_t ret;
	size_t padding = lttng_ust_offset_align(runtime->data_len, align);
	size_t new_len = runtime->data_len + padding + len;
	size_t new_alloc_len = new_len;
	size_t old_alloc_len = runtime->data_alloc_len;

	if (new_len > BYTECODE_MAX_DATA_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		char *newptr;

		new_alloc_len =
			max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = realloc(runtime->data, new_alloc_len);
		if (!newptr)
			return -ENOMEM;
		runtime->data = newptr;
		/* Zero the newly allocated memory. */
		memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		runtime->data_alloc_len = new_alloc_len;
	}
	runtime->data_len += padding;
	ret = runtime->data_len;
	runtime->data_len += len;
	return ret;
}

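/*
 * Append `len` bytes from `p` to the runtime data area, aligned on
 * `align`. Returns the offset at which the data was stored, or a
 * negative errno-style value on error.
 */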
static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
		const void *p, size_t align, size_t len)
{
	ssize_t offset;

	offset = bytecode_reserve_data(runtime, align, len);
	if (offset < 0)
		return offset;
	memcpy(&runtime->data[offset], p, len);
	return offset;
}

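/*
 * Specialize a generic BYTECODE_OP_LOAD_FIELD into the typed load
 * instruction matching the object found on top of the virtual stack,
 * and record the resulting register type.
 */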
static int specialize_load_field(struct vstack_entry *stack_top,
		struct load_op *insn)
{
	int ret;

	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printf("Bytecode warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	switch (stack_top->load.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printf("op load field s8\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_S8;
		break;
	case OBJECT_TYPE_S16:
		dbg_printf("op load field s16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_S16;
		break;
	case OBJECT_TYPE_S32:
		dbg_printf("op load field s32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_S32;
		break;
	case OBJECT_TYPE_S64:
		dbg_printf("op load field s64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_S64;
		break;
	case OBJECT_TYPE_U8:
		dbg_printf("op load field u8\n");
		stack_top->type = REG_U64;
		insn->op = BYTECODE_OP_LOAD_FIELD_U8;
		break;
	case OBJECT_TYPE_U16:
		dbg_printf("op load field u16\n");
		stack_top->type = REG_U64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_U16;
		break;
	case OBJECT_TYPE_U32:
		dbg_printf("op load field u32\n");
		stack_top->type = REG_U64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_U32;
		break;
	case OBJECT_TYPE_U64:
		dbg_printf("op load field u64\n");
		stack_top->type = REG_U64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_U64;
		break;
	case OBJECT_TYPE_DOUBLE:
		dbg_printf("op load field double\n");
		stack_top->type = REG_DOUBLE;
		insn->op = BYTECODE_OP_LOAD_FIELD_DOUBLE;
		break;
	case OBJECT_TYPE_STRING:
		dbg_printf("op load field string\n");
		stack_top->type = REG_STRING;
		insn->op = BYTECODE_OP_LOAD_FIELD_STRING;
		break;
	case OBJECT_TYPE_STRING_SEQUENCE:
		dbg_printf("op load field string sequence\n");
		stack_top->type = REG_STRING;
		insn->op = BYTECODE_OP_LOAD_FIELD_SEQUENCE;
		break;
	case OBJECT_TYPE_DYNAMIC:
		dbg_printf("op load field dynamic\n");
		stack_top->type = REG_UNKNOWN;
		/* Don't specialize load op. */
		break;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		ERR("Sequence, array, struct and variant types cannot be loaded (nested types).");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}

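/*
 * Map an integer element's signedness and bit width (8/16/32/64) to the
 * corresponding typed object type.
 */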
static int specialize_get_index_object_type(enum object_type *otype,
		int signedness, uint32_t elem_len)
{
	switch (elem_len) {
	case 8:
		if (signedness)
			*otype = OBJECT_TYPE_S8;
		else
			*otype = OBJECT_TYPE_U8;
		break;
	case 16:
		if (signedness)
			*otype = OBJECT_TYPE_S16;
		else
			*otype = OBJECT_TYPE_U16;
		break;
	case 32:
		if (signedness)
			*otype = OBJECT_TYPE_S32;
		else
			*otype = OBJECT_TYPE_U32;
		break;
	case 64:
		if (signedness)
			*otype = OBJECT_TYPE_S64;
		else
			*otype = OBJECT_TYPE_U64;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

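/*
 * Specialize a get_index operation on the object found on top of the
 * virtual stack: precompute the element offset and byte order for
 * array/sequence accesses, push the resulting descriptor into the
 * runtime data area, and patch the instruction's data offset.
 */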
static int specialize_get_index(struct bytecode_runtime *runtime,
		struct load_op *insn, uint64_t index,
		struct vstack_entry *stack_top,
		int idx_len)
{
	int ret;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	memset(&gid, 0, sizeof(gid));
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		switch (stack_top->load.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const struct lttng_integer_type *integer_type;
			const struct lttng_event_field *field;
			uint32_t elem_len, num_elems;
			int signedness;

			field = stack_top->load.field;
			switch (field->type.atype) {
			case atype_array:
				integer_type = &field->type.u.legacy.array.elem_type.u.basic.integer;
				num_elems = field->type.u.legacy.array.length;
				break;
			case atype_array_nestable:
				if (field->type.u.array_nestable.elem_type->atype != atype_integer) {
					ret = -EINVAL;
					goto end;
				}
				integer_type = &field->type.u.array_nestable.elem_type->u.integer;
				num_elems = field->type.u.array_nestable.length;
				break;
			default:
				ret = -EINVAL;
				goto end;
			}
			elem_len = integer_type->size;
			signedness = integer_type->signedness;
			if (index >= num_elems) {
				ret = -EINVAL;
				goto end;
			}
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.array_len = num_elems * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (integer_type->reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const struct lttng_integer_type *integer_type;
			const struct lttng_event_field *field;
			uint32_t elem_len;
			int signedness;

			field = stack_top->load.field;
			switch (field->type.atype) {
			case atype_sequence:
				integer_type = &field->type.u.legacy.sequence.elem_type.u.basic.integer;
				break;
			case atype_sequence_nestable:
				if (field->type.u.sequence_nestable.elem_type->atype != atype_integer) {
					ret = -EINVAL;
					goto end;
				}
				integer_type = &field->type.u.sequence_nestable.elem_type->u.integer;
				break;
			default:
				ret = -EINVAL;
				goto end;
			}
			elem_len = integer_type->size;
			signedness = integer_type->signedness;
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (integer_type->reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			/* Only generated by the specialize phase. */
		case OBJECT_TYPE_VARIANT:	/* Fall-through */
		default:
			ERR("Unexpected get index type %d",
				(int) stack_top->load.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
		ERR("Index lookup for root field not implemented yet.");
		ret = -EINVAL;
		goto end;
	}
	data_offset = bytecode_push_data(runtime, &gid,
		__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	switch (idx_len) {
	case 2:
		((struct get_index_u16 *) insn->data)->index = data_offset;
		break;
	case 8:
		((struct get_index_u64 *) insn->data)->index = data_offset;
		break;
	default:
		ret = -EINVAL;
		goto end;
	}

	return 0;

end:
	return ret;
}

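/*
 * Resolve the context field name referenced by a get_symbol instruction
 * into its index within the context array.
 */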
static int specialize_context_lookup_name(struct lttng_ctx *ctx,
		struct bytecode_runtime *bytecode,
		struct load_op *insn)
{
	uint16_t offset;
	const char *name;

	offset = ((struct get_symbol *) insn->data)->offset;
	name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
	return lttng_get_context_index(ctx, name);
}

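/*
 * Determine the object type (and thus the specialized load behavior)
 * for an event or context field, based on its abstract type. Nested
 * compound types other than integer arrays/sequences are refused.
 */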
static int specialize_load_object(const struct lttng_event_field *field,
		struct vstack_load *load, bool is_context)
{
	load->type = LOAD_OBJECT;

	switch (field->type.atype) {
	case atype_integer:
		if (field->type.u.integer.signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	case atype_enum:
	case atype_enum_nestable:
	{
		const struct lttng_integer_type *itype;

		if (field->type.atype == atype_enum) {
			itype = &field->type.u.legacy.basic.enumeration.container_type;
		} else {
			itype = &field->type.u.enum_nestable.container_type->u.integer;
		}
		if (itype->signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	}
	case atype_array:
		if (field->type.u.legacy.array.elem_type.atype != atype_integer) {
			ERR("Array nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.legacy.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_array_nestable:
		if (field->type.u.array_nestable.elem_type->atype != atype_integer) {
			ERR("Array nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.array_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_sequence:
		if (field->type.u.legacy.sequence.elem_type.atype != atype_integer) {
			ERR("Sequence nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.legacy.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case atype_sequence_nestable:
		if (field->type.u.sequence_nestable.elem_type->atype != atype_integer) {
			ERR("Sequence nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (field->type.u.sequence_nestable.elem_type->u.integer.encoding == lttng_encode_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;

	case atype_string:
		load->object_type = OBJECT_TYPE_STRING;
		break;
	case atype_float:
		load->object_type = OBJECT_TYPE_DOUBLE;
		break;
	case atype_dynamic:
		load->object_type = OBJECT_TYPE_DYNAMIC;
		break;
	case atype_struct:
		ERR("Structure type cannot be loaded.");
		return -EINVAL;
	default:
		ERR("Unknown type: %d", (int) field->type.atype);
		return -EINVAL;
	}
	return 0;
}

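/*
 * Specialize a get_symbol on the context root into a get_index
 * instruction referencing the resolved context field.
 */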
static int specialize_context_lookup(struct lttng_ctx *ctx,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	int idx, ret;
	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	idx = specialize_context_lookup_name(ctx, runtime, insn);
	if (idx < 0) {
		return -ENOENT;
	}
	ctx_field = &ctx->fields[idx];
	field = &ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		return ret;
	/* Specialize each get_symbol into a get_index. */
	insn->op = BYTECODE_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	gid.field = field;
	data_offset = bytecode_push_data(runtime, &gid,
		__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		return -EINVAL;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	return 0;
}

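/*
 * Specialize a get_symbol on the application context root: prefix the
 * symbol with "$app." to form the full context name, registering the
 * application context on the fly if it is not known yet, then emit a
 * get_index instruction referencing the resolved field.
 */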
static int specialize_app_context_lookup(struct lttng_ctx **pctx,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	uint16_t offset;
	const char *orig_name;
	char *name = NULL;
	int idx, ret;
	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	offset = ((struct get_symbol *) insn->data)->offset;
	orig_name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	name = zmalloc(strlen(orig_name) + strlen("$app.") + 1);
	if (!name) {
		ret = -ENOMEM;
		goto end;
	}
	strcpy(name, "$app.");
	strcat(name, orig_name);
	idx = lttng_get_context_index(*pctx, name);
	if (idx < 0) {
		assert(lttng_context_is_app(name));
		ret = lttng_ust_add_app_context_to_ctx_rcu(name,
				pctx);
		if (ret)
			goto end;	/* Free name on error. */
		idx = lttng_get_context_index(*pctx, name);
		if (idx < 0) {
			ret = -ENOENT;
			goto end;
		}
	}
	ctx_field = &(*pctx)->fields[idx];
	field = &ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		goto end;
	/* Specialize each get_symbol into a get_index. */
	insn->op = BYTECODE_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	gid.field = field;
	data_offset = bytecode_push_data(runtime, &gid,
		__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	free(name);
	return ret;
}

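/*
 * Specialize a get_symbol on the event payload root: find the named
 * field in the event description, accumulate the byte offsets of the
 * fields preceding it, and emit a get_index instruction.
 */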
static int specialize_payload_lookup(const struct lttng_event_desc *event_desc,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	const char *name;
	uint16_t offset;
	unsigned int i, nr_fields;
	bool found = false;
	uint32_t field_offset = 0;
	const struct lttng_event_field *field;
	int ret;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	nr_fields = event_desc->nr_fields;
	offset = ((struct get_symbol *) insn->data)->offset;
	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	for (i = 0; i < nr_fields; i++) {
		field = &event_desc->fields[i];
		if (field->u.ext.nofilter) {
			continue;
		}
		if (!strcmp(field->name, name)) {
			found = true;
			break;
		}
		/* Compute the field offset on the stack. */
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum:
		case atype_enum_nestable:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_array_nestable:
		case atype_sequence:
		case atype_sequence_nestable:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		case atype_float:
			field_offset += sizeof(double);
			break;
		default:
			ret = -EINVAL;
			goto end;
		}
	}
	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	ret = specialize_load_object(field, load, false);
	if (ret)
		goto end;

	/* Specialize each get_symbol into a get_index. */
	insn->op = BYTECODE_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.offset = field_offset;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	gid.field = field;
	data_offset = bytecode_push_data(runtime, &gid,
		__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	return ret;
}

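/*
 * Single pass over the bytecode: track register types on a virtual
 * stack and rewrite generic opcodes into their type-specialized
 * variants so the interpreter avoids dynamic type checks at runtime.
 */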
int lttng_bytecode_specialize(const struct lttng_event_desc *event_desc,
		struct bytecode_runtime *bytecode)
{
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	struct vstack _stack;
	struct vstack *stack = &_stack;
	struct lttng_ctx **pctx = bytecode->p.pctx;

	vstack_init(stack);

	start_pc = &bytecode->code[0];
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
			pc = next_pc) {
		switch (*(bytecode_opcode_t *) pc) {
		case BYTECODE_OP_UNKNOWN:
		default:
			ERR("unknown bytecode op %u\n",
				(unsigned int) *(bytecode_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case BYTECODE_OP_RETURN:
			if (vstack_ax(stack)->type == REG_S64 ||
					vstack_ax(stack)->type == REG_U64)
				*(bytecode_opcode_t *) pc = BYTECODE_OP_RETURN_S64;
			ret = 0;
			goto end;

		case BYTECODE_OP_RETURN_S64:
			if (vstack_ax(stack)->type != REG_S64 &&
					vstack_ax(stack)->type != REG_U64) {
				ERR("Unexpected register type\n");
				ret = -EINVAL;
				goto end;
			}
			ret = 0;
			goto end;

		/* binary */
		case BYTECODE_OP_MUL:
		case BYTECODE_OP_DIV:
		case BYTECODE_OP_MOD:
		case BYTECODE_OP_PLUS:
		case BYTECODE_OP_MINUS:
			ERR("unsupported bytecode op %u\n",
				(unsigned int) *(bytecode_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case BYTECODE_OP_EQ:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
				else
					insn->op = BYTECODE_OP_EQ_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_EQ_S64;
				else
					insn->op = BYTECODE_OP_EQ_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_EQ_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_EQ_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_NE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
				else
					insn->op = BYTECODE_OP_NE_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_NE_S64;
				else
					insn->op = BYTECODE_OP_NE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_NE_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_NE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_GT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for > binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_GT_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_GT_S64;
				else
					insn->op = BYTECODE_OP_GT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_GT_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_GT_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_LT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for < binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_LT_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_LT_S64;
				else
					insn->op = BYTECODE_OP_LT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_LT_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_LT_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_GE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for >= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_GE_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_GE_S64;
				else
					insn->op = BYTECODE_OP_GE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_GE_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_GE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_U64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_LE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for <= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_LE_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_LE_S64;
				else
					insn->op = BYTECODE_OP_LE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_LE_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_LE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break;	/* Dynamic typing. */
			}
			/* Pop 2, push 1, like the other comparison operators. */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_EQ_STRING:
		case BYTECODE_OP_NE_STRING:
		case BYTECODE_OP_GT_STRING:
		case BYTECODE_OP_LT_STRING:
		case BYTECODE_OP_GE_STRING:
		case BYTECODE_OP_LE_STRING:
		case BYTECODE_OP_EQ_STAR_GLOB_STRING:
		case BYTECODE_OP_NE_STAR_GLOB_STRING:
		case BYTECODE_OP_EQ_S64:
		case BYTECODE_OP_NE_S64:
		case BYTECODE_OP_GT_S64:
		case BYTECODE_OP_LT_S64:
		case BYTECODE_OP_GE_S64:
		case BYTECODE_OP_LE_S64:
		case BYTECODE_OP_EQ_DOUBLE:
		case BYTECODE_OP_NE_DOUBLE:
		case BYTECODE_OP_GT_DOUBLE:
		case BYTECODE_OP_LT_DOUBLE:
		case BYTECODE_OP_GE_DOUBLE:
		case BYTECODE_OP_LE_DOUBLE:
		case BYTECODE_OP_EQ_DOUBLE_S64:
		case BYTECODE_OP_NE_DOUBLE_S64:
		case BYTECODE_OP_GT_DOUBLE_S64:
		case BYTECODE_OP_LT_DOUBLE_S64:
		case BYTECODE_OP_GE_DOUBLE_S64:
		case BYTECODE_OP_LE_DOUBLE_S64:
		case BYTECODE_OP_EQ_S64_DOUBLE:
		case BYTECODE_OP_NE_S64_DOUBLE:
		case BYTECODE_OP_GT_S64_DOUBLE:
		case BYTECODE_OP_LT_S64_DOUBLE:
		case BYTECODE_OP_GE_S64_DOUBLE:
		case BYTECODE_OP_LE_S64_DOUBLE:
		{
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_BIT_RSHIFT:
		case BYTECODE_OP_BIT_LSHIFT:
		case BYTECODE_OP_BIT_AND:
		case BYTECODE_OP_BIT_OR:
		case BYTECODE_OP_BIT_XOR:
		{
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		/* unary */
		case BYTECODE_OP_UNARY_PLUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
			case REG_U64:
				insn->op = BYTECODE_OP_UNARY_PLUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = BYTECODE_OP_UNARY_PLUS_DOUBLE;
				break;
			case REG_UNKNOWN:	/* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case BYTECODE_OP_UNARY_MINUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
			case REG_U64:
				insn->op = BYTECODE_OP_UNARY_MINUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = BYTECODE_OP_UNARY_MINUS_DOUBLE;
				break;
			case REG_UNKNOWN:	/* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case BYTECODE_OP_UNARY_NOT:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
			case REG_U64:
				insn->op = BYTECODE_OP_UNARY_NOT_S64;
				break;
			case REG_DOUBLE:
				insn->op = BYTECODE_OP_UNARY_NOT_DOUBLE;
				break;
			case REG_UNKNOWN:	/* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case BYTECODE_OP_UNARY_BIT_NOT:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case BYTECODE_OP_UNARY_PLUS_S64:
		case BYTECODE_OP_UNARY_MINUS_S64:
		case BYTECODE_OP_UNARY_NOT_S64:
		case BYTECODE_OP_UNARY_PLUS_DOUBLE:
		case BYTECODE_OP_UNARY_MINUS_DOUBLE:
		case BYTECODE_OP_UNARY_NOT_DOUBLE:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		/* logical */
		case BYTECODE_OP_AND:
		case BYTECODE_OP_OR:
		{
			/* Continue to next instruction */
			/* Pop 1 when jump not taken */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			next_pc += sizeof(struct logical_op);
			break;
		}

		/* load field ref */
		case BYTECODE_OP_LOAD_FIELD_REF:
		{
			ERR("Unknown field ref type\n");
			ret = -EINVAL;
			goto end;
		}
		/* get context ref */
		case BYTECODE_OP_GET_CONTEXT_REF:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_UNKNOWN;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case BYTECODE_OP_LOAD_FIELD_REF_STRING:
		case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
		case BYTECODE_OP_GET_CONTEXT_REF_STRING:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case BYTECODE_OP_LOAD_FIELD_REF_S64:
		case BYTECODE_OP_GET_CONTEXT_REF_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
		case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}

		/* load from immediate operand */
		case BYTECODE_OP_LOAD_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case BYTECODE_OP_LOAD_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_numeric);
			break;
		}

		case BYTECODE_OP_LOAD_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_double);
			break;
		}

		/* cast */
		case BYTECODE_OP_CAST_TO_S64:
		{
			struct cast_op *insn = (struct cast_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
			case REG_STAR_GLOB_STRING:
				ERR("Cast op can only be applied to numeric or floating point registers\n");
				ret = -EINVAL;
				goto end;
			case REG_S64:
				insn->op = BYTECODE_OP_CAST_NOP;
				break;
			case REG_DOUBLE:
				insn->op = BYTECODE_OP_CAST_DOUBLE_TO_S64;
				break;
			case REG_UNKNOWN:
			case REG_U64:
				break;
			}
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case BYTECODE_OP_CAST_DOUBLE_TO_S64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case BYTECODE_OP_CAST_NOP:
		{
			next_pc += sizeof(struct cast_op);
			break;
		}

		/*
		 * Instructions for recursive traversal through composed types.
		 */
		case BYTECODE_OP_GET_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case BYTECODE_OP_GET_PAYLOAD_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD:
		{
			struct load_op *insn = (struct load_op *) pc;

			assert(vstack_ax(stack)->type == REG_PTR);
			/* Pop 1, push 1 */
			ret = specialize_load_field(vstack_ax(stack), insn);
			if (ret)
				goto end;

			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD_S8:
		case BYTECODE_OP_LOAD_FIELD_S16:
		case BYTECODE_OP_LOAD_FIELD_S32:
		case BYTECODE_OP_LOAD_FIELD_S64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD_U8:
		case BYTECODE_OP_LOAD_FIELD_U16:
		case BYTECODE_OP_LOAD_FIELD_U32:
		case BYTECODE_OP_LOAD_FIELD_U64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_U64;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD_STRING:
		case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD_DOUBLE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_GET_SYMBOL:
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printf("op get symbol\n");
			switch (vstack_ax(stack)->load.type) {
			case LOAD_OBJECT:
				ERR("Nested fields not implemented yet.");
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_CONTEXT:
				/* Lookup context field. */
				ret = specialize_context_lookup(*pctx,
					bytecode, insn,
					&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_APP_CONTEXT:
				/* Lookup app context field. */
				ret = specialize_app_context_lookup(pctx,
					bytecode, insn,
					&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_PAYLOAD:
				/* Lookup event payload field. */
				ret = specialize_payload_lookup(event_desc,
					bytecode, insn,
					&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			}
			next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
			break;
		}

		case BYTECODE_OP_GET_SYMBOL_FIELD:
		{
			/* Always generated by specialize phase. */
			ret = -EINVAL;
			goto end;
		}

		case BYTECODE_OP_GET_INDEX_U16:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u16 *index = (struct get_index_u16 *) insn->data;

			dbg_printf("op get index u16\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
			break;
		}

		case BYTECODE_OP_GET_INDEX_U64:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u64 *index = (struct get_index_u64 *) insn->data;

			dbg_printf("op get index u64\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
			break;
		}

		}
	}
end:
	return ret;
}