src/lib/lttng-ust/lttng-bytecode-specialize.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng UST bytecode specializer.
 */

#define _LGPL_SOURCE
#include <assert.h>
#include <errno.h>
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <lttng/ust-utils.h>

#include "context-internal.h"
#include "lttng-bytecode.h"
#include "ust-events-internal.h"
#include "common/macros.h"

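/*
 * Find last (most significant) bit set, 1-based; return 0 when no bit
 * is set. Portable equivalent of fls(3), implemented as a binary
 * search over the 32-bit value.
 */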
static int lttng_fls(int val)
{
	int r = 32;
	unsigned int x = (unsigned int) val;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		r -= 1;
	}
	return r;
}

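/*
 * Return the order of the smallest power of two greater than or equal
 * to count, i.e. ceil(log2(count)).
 */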
static int get_count_order(unsigned int count)
{
	int order;

	order = lttng_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

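/*
 * Reserve len bytes (aligned on align) in the runtime data area,
 * growing the backing allocation geometrically as needed. Return the
 * offset of the reserved space, or a negative error code.
 */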
static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
		size_t align, size_t len)
{
	ssize_t ret;
	size_t padding = lttng_ust_offset_align(runtime->data_len, align);
	size_t new_len = runtime->data_len + padding + len;
	size_t new_alloc_len = new_len;
	size_t old_alloc_len = runtime->data_alloc_len;

	if (new_len > BYTECODE_MAX_DATA_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		char *newptr;

		new_alloc_len =
			max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = realloc(runtime->data, new_alloc_len);
		if (!newptr)
			return -ENOMEM;
		runtime->data = newptr;
		/* Zero the newly allocated memory past the old allocation length. */
		memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		runtime->data_alloc_len = new_alloc_len;
	}
	runtime->data_len += padding;
	ret = runtime->data_len;
	runtime->data_len += len;
	return ret;
}

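/*
 * Append len bytes from p (aligned on align) to the runtime data area.
 * Return the offset at which the data was copied, or a negative error
 * code.
 */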
static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
		const void *p, size_t align, size_t len)
{
	ssize_t offset;

	offset = bytecode_reserve_data(runtime, align, len);
	if (offset < 0)
		return -ENOMEM;
	memcpy(&runtime->data[offset], p, len);
	return offset;
}

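/*
 * Specialize a generic load-field instruction into its typed variant
 * based on the object type found at the top of the virtual stack, and
 * update the virtual stack register type accordingly.
 */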
static int specialize_load_field(struct vstack_entry *stack_top,
		struct load_op *insn)
{
	int ret;

	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printf("Bytecode warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	switch (stack_top->load.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printf("op load field s8\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_S8;
		break;
	case OBJECT_TYPE_S16:
		dbg_printf("op load field s16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_S16;
		break;
	case OBJECT_TYPE_S32:
		dbg_printf("op load field s32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_S32;
		break;
	case OBJECT_TYPE_S64:
		dbg_printf("op load field s64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_S64;
		break;
	case OBJECT_TYPE_SIGNED_ENUM:
		dbg_printf("op load field signed enumeration\n");
		stack_top->type = REG_PTR;
		break;
	case OBJECT_TYPE_U8:
		dbg_printf("op load field u8\n");
		stack_top->type = REG_U64;
		insn->op = BYTECODE_OP_LOAD_FIELD_U8;
		break;
	case OBJECT_TYPE_U16:
		dbg_printf("op load field u16\n");
		stack_top->type = REG_U64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_U16;
		break;
	case OBJECT_TYPE_U32:
		dbg_printf("op load field u32\n");
		stack_top->type = REG_U64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_U32;
		break;
	case OBJECT_TYPE_U64:
		dbg_printf("op load field u64\n");
		stack_top->type = REG_U64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_U64;
		break;
	case OBJECT_TYPE_UNSIGNED_ENUM:
		dbg_printf("op load field unsigned enumeration\n");
		stack_top->type = REG_PTR;
		break;
	case OBJECT_TYPE_DOUBLE:
		stack_top->type = REG_DOUBLE;
		insn->op = BYTECODE_OP_LOAD_FIELD_DOUBLE;
		break;
	case OBJECT_TYPE_STRING:
		dbg_printf("op load field string\n");
		stack_top->type = REG_STRING;
		insn->op = BYTECODE_OP_LOAD_FIELD_STRING;
		break;
	case OBJECT_TYPE_STRING_SEQUENCE:
		dbg_printf("op load field string sequence\n");
		stack_top->type = REG_STRING;
		insn->op = BYTECODE_OP_LOAD_FIELD_SEQUENCE;
		break;
	case OBJECT_TYPE_DYNAMIC:
		dbg_printf("op load field dynamic\n");
		stack_top->type = REG_UNKNOWN;
		/* Don't specialize load op. */
		break;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		ERR("Sequences, arrays, structs and variants cannot be loaded (nested types).");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}

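/*
 * Map an integer element's signedness and bit width to the matching
 * object type for array/sequence element accesses.
 */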
static int specialize_get_index_object_type(enum object_type *otype,
		int signedness, uint32_t elem_len)
{
	switch (elem_len) {
	case 8:
		if (signedness)
			*otype = OBJECT_TYPE_S8;
		else
			*otype = OBJECT_TYPE_U8;
		break;
	case 16:
		if (signedness)
			*otype = OBJECT_TYPE_S16;
		else
			*otype = OBJECT_TYPE_U16;
		break;
	case 32:
		if (signedness)
			*otype = OBJECT_TYPE_S32;
		else
			*otype = OBJECT_TYPE_U32;
		break;
	case 64:
		if (signedness)
			*otype = OBJECT_TYPE_S64;
		else
			*otype = OBJECT_TYPE_U64;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

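/*
 * Specialize a get-index instruction: for array and sequence objects,
 * compute the element offset, push a bytecode_get_index_data
 * descriptor into the runtime data area, and patch the instruction's
 * index operand to reference it.
 */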
static int specialize_get_index(struct bytecode_runtime *runtime,
		struct load_op *insn, uint64_t index,
		struct vstack_entry *stack_top,
		int idx_len)
{
	int ret;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	memset(&gid, 0, sizeof(gid));
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		switch (stack_top->load.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const struct lttng_ust_type_integer *integer_type;
			const struct lttng_ust_event_field *field;
			uint32_t elem_len, num_elems;
			int signedness;

			field = stack_top->load.field;
			switch (field->type->type) {
			case lttng_ust_type_array:
				if (lttng_ust_get_type_array(field->type)->elem_type->type != lttng_ust_type_integer) {
					ret = -EINVAL;
					goto end;
				}
				integer_type = lttng_ust_get_type_integer(lttng_ust_get_type_array(field->type)->elem_type);
				num_elems = lttng_ust_get_type_array(field->type)->length;
				break;
			default:
				ret = -EINVAL;
				goto end;
			}
			elem_len = integer_type->size;
			signedness = integer_type->signedness;
			if (index >= num_elems) {
				ret = -EINVAL;
				goto end;
			}
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.array_len = num_elems * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (integer_type->reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const struct lttng_ust_type_integer *integer_type;
			const struct lttng_ust_event_field *field;
			uint32_t elem_len;
			int signedness;

			field = stack_top->load.field;
			switch (field->type->type) {
			case lttng_ust_type_sequence:
				if (lttng_ust_get_type_sequence(field->type)->elem_type->type != lttng_ust_type_integer) {
					ret = -EINVAL;
					goto end;
				}
				integer_type = lttng_ust_get_type_integer(lttng_ust_get_type_sequence(field->type)->elem_type);
				break;
			default:
				ret = -EINVAL;
				goto end;
			}
			elem_len = integer_type->size;
			signedness = integer_type->signedness;
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (integer_type->reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			/* Only generated by the specialize phase. */
		case OBJECT_TYPE_VARIANT:	/* Fall-through */
		default:
			ERR("Unexpected get index type %d",
				(int) stack_top->load.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
		ERR("Index lookup for root field not implemented yet.");
		ret = -EINVAL;
		goto end;
	}
	data_offset = bytecode_push_data(runtime, &gid,
		__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	switch (idx_len) {
	case 2:
		((struct get_index_u16 *) insn->data)->index = data_offset;
		break;
	case 8:
		((struct get_index_u64 *) insn->data)->index = data_offset;
		break;
	default:
		ret = -EINVAL;
		goto end;
	}

	return 0;

end:
	return ret;
}

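/*
 * Resolve the context field name referenced by a get_symbol
 * instruction into its index within the context array.
 */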
static int specialize_context_lookup_name(struct lttng_ust_ctx *ctx,
		struct bytecode_runtime *bytecode,
		struct load_op *insn)
{
	uint16_t offset;
	const char *name;

	offset = ((struct get_symbol *) insn->data)->offset;
	name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
	return lttng_get_context_index(ctx, name);
}

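/*
 * Derive the object type to load from an event field type. Context
 * fields (is_context set) expose array/sequence data as strings.
 */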
static int specialize_load_object(const struct lttng_ust_event_field *field,
		struct vstack_load *load, bool is_context)
{
	load->type = LOAD_OBJECT;

	switch (field->type->type) {
	case lttng_ust_type_integer:
		if (lttng_ust_get_type_integer(field->type)->signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	case lttng_ust_type_enum:
	{
		const struct lttng_ust_type_integer *itype;

		itype = lttng_ust_get_type_integer(lttng_ust_get_type_enum(field->type)->container_type);
		if (itype->signedness)
			load->object_type = OBJECT_TYPE_SIGNED_ENUM;
		else
			load->object_type = OBJECT_TYPE_UNSIGNED_ENUM;
		load->rev_bo = false;
		break;
	}
	case lttng_ust_type_array:
		if (lttng_ust_get_type_array(field->type)->elem_type->type != lttng_ust_type_integer) {
			ERR("Array nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (lttng_ust_get_type_array(field->type)->encoding == lttng_ust_string_encoding_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case lttng_ust_type_sequence:
		if (lttng_ust_get_type_sequence(field->type)->elem_type->type != lttng_ust_type_integer) {
			ERR("Sequence nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (lttng_ust_get_type_sequence(field->type)->encoding == lttng_ust_string_encoding_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;

	case lttng_ust_type_string:
		load->object_type = OBJECT_TYPE_STRING;
		break;
	case lttng_ust_type_float:
		load->object_type = OBJECT_TYPE_DOUBLE;
		break;
	case lttng_ust_type_dynamic:
		load->object_type = OBJECT_TYPE_DYNAMIC;
		break;
	default:
		ERR("Unknown type: %d", (int) field->type->type);
		return -EINVAL;
	}
	return 0;
}

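/*
 * Specialize a get_symbol lookup on the static context root into a
 * get_index instruction referencing the resolved context field.
 */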
static int specialize_context_lookup(struct lttng_ust_ctx *ctx,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	int idx, ret;
	const struct lttng_ust_ctx_field *ctx_field;
	const struct lttng_ust_event_field *field;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	idx = specialize_context_lookup_name(ctx, runtime, insn);
	if (idx < 0) {
		return -ENOENT;
	}
	ctx_field = &ctx->fields[idx];
	field = ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		return ret;
	/* Specialize each get_symbol into a get_index. */
	insn->op = BYTECODE_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	gid.field = field;
	data_offset = bytecode_push_data(runtime, &gid,
		__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		return -EINVAL;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	return 0;
}

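/*
 * Specialize a get_symbol lookup on the application context root. The
 * "$app." prefix is prepended to the symbol name; the context field is
 * created on demand if it does not exist yet.
 */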
static int specialize_app_context_lookup(struct lttng_ust_ctx **pctx,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	uint16_t offset;
	const char *orig_name;
	char *name = NULL;
	int idx, ret;
	const struct lttng_ust_ctx_field *ctx_field;
	const struct lttng_ust_event_field *field;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	offset = ((struct get_symbol *) insn->data)->offset;
	orig_name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	name = zmalloc(strlen(orig_name) + strlen("$app.") + 1);
	if (!name) {
		ret = -ENOMEM;
		goto end;
	}
	strcpy(name, "$app.");
	strcat(name, orig_name);
	idx = lttng_get_context_index(*pctx, name);
	if (idx < 0) {
		assert(lttng_context_is_app(name));
		ret = lttng_ust_add_app_context_to_ctx_rcu(name,
				pctx);
		if (ret)
			goto end;	/* Free name on error. */
		idx = lttng_get_context_index(*pctx, name);
		if (idx < 0) {
			ret = -ENOENT;
			goto end;
		}
	}
	ctx_field = &(*pctx)->fields[idx];
	field = ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		goto end;
	/* Specialize each get_symbol into a get_index. */
	insn->op = BYTECODE_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	gid.field = field;
	data_offset = bytecode_push_data(runtime, &gid,
		__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	free(name);
	return ret;
}

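/*
 * Specialize a get_symbol lookup on the event payload root: walk the
 * event fields to find the named field, compute its offset on the
 * interpreter stack, and emit a get_index instruction for it.
 */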
static int specialize_payload_lookup(const struct lttng_ust_event_desc *event_desc,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	const char *name;
	uint16_t offset;
	unsigned int i, nr_fields;
	bool found = false;
	uint32_t field_offset = 0;
	const struct lttng_ust_event_field *field;
	int ret;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	nr_fields = event_desc->nr_fields;
	offset = ((struct get_symbol *) insn->data)->offset;
	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	for (i = 0; i < nr_fields; i++) {
		field = event_desc->fields[i];
		if (field->nofilter) {
			continue;
		}
		if (!strcmp(field->name, name)) {
			found = true;
			break;
		}
		/* compute field offset on stack */
		switch (field->type->type) {
		case lttng_ust_type_integer:
		case lttng_ust_type_enum:
			field_offset += sizeof(int64_t);
			break;
		case lttng_ust_type_array:
		case lttng_ust_type_sequence:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case lttng_ust_type_string:
			field_offset += sizeof(void *);
			break;
		case lttng_ust_type_float:
			field_offset += sizeof(double);
			break;
		default:
			ret = -EINVAL;
			goto end;
		}
	}
	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	ret = specialize_load_object(field, load, false);
	if (ret)
		goto end;

	/* Specialize each get_symbol into a get_index. */
	insn->op = BYTECODE_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.offset = field_offset;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	gid.field = field;
	data_offset = bytecode_push_data(runtime, &gid,
		__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	return ret;
}

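/*
 * Single pass over the bytecode which tracks register types on a
 * virtual stack and rewrites generic opcodes into their specialized,
 * typed variants. Returns 0 on success, a negative error code on
 * failure.
 */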
int lttng_bytecode_specialize(const struct lttng_ust_event_desc *event_desc,
		struct bytecode_runtime *bytecode)
{
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	struct vstack _stack;
	struct vstack *stack = &_stack;
	struct lttng_ust_ctx **pctx = bytecode->p.pctx;

	vstack_init(stack);

	start_pc = &bytecode->code[0];
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
			pc = next_pc) {
		switch (*(bytecode_opcode_t *) pc) {
		case BYTECODE_OP_UNKNOWN:
		default:
			ERR("unknown bytecode op %u\n",
				(unsigned int) *(bytecode_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case BYTECODE_OP_RETURN:
			if (vstack_ax(stack)->type == REG_S64 ||
					vstack_ax(stack)->type == REG_U64)
				*(bytecode_opcode_t *) pc = BYTECODE_OP_RETURN_S64;
			ret = 0;
			goto end;

		case BYTECODE_OP_RETURN_S64:
			if (vstack_ax(stack)->type != REG_S64 &&
					vstack_ax(stack)->type != REG_U64) {
				ERR("Unexpected register type\n");
				ret = -EINVAL;
				goto end;
			}
			ret = 0;
			goto end;

		/* binary */
		case BYTECODE_OP_MUL:
		case BYTECODE_OP_DIV:
		case BYTECODE_OP_MOD:
		case BYTECODE_OP_PLUS:
		case BYTECODE_OP_MINUS:
			ERR("unsupported bytecode op %u\n",
				(unsigned int) *(bytecode_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case BYTECODE_OP_EQ:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch(vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
				else
					insn->op = BYTECODE_OP_EQ_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_EQ_S64;
				else
					insn->op = BYTECODE_OP_EQ_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_EQ_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_EQ_DOUBLE;
				break;
			case REG_UNKNOWN:
				break; /* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_NE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch(vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
				else
					insn->op = BYTECODE_OP_NE_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_NE_S64;
				else
					insn->op = BYTECODE_OP_NE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_NE_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_NE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break; /* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_GT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch(vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for > binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_GT_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_GT_S64;
				else
					insn->op = BYTECODE_OP_GT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_GT_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_GT_DOUBLE;
				break;
			case REG_UNKNOWN:
				break; /* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_LT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch(vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for < binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_LT_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_LT_S64;
				else
					insn->op = BYTECODE_OP_LT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_LT_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_LT_DOUBLE;
				break;
			case REG_UNKNOWN:
				break; /* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_GE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch(vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for >= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_GE_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_GE_S64;
				else
					insn->op = BYTECODE_OP_GE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_GE_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_GE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break; /* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}
		case BYTECODE_OP_LE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch(vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for <= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_LE_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_LE_S64;
				else
					insn->op = BYTECODE_OP_LE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_LE_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_LE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break; /* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_EQ_STRING:
		case BYTECODE_OP_NE_STRING:
		case BYTECODE_OP_GT_STRING:
		case BYTECODE_OP_LT_STRING:
		case BYTECODE_OP_GE_STRING:
		case BYTECODE_OP_LE_STRING:
		case BYTECODE_OP_EQ_STAR_GLOB_STRING:
		case BYTECODE_OP_NE_STAR_GLOB_STRING:
		case BYTECODE_OP_EQ_S64:
		case BYTECODE_OP_NE_S64:
		case BYTECODE_OP_GT_S64:
		case BYTECODE_OP_LT_S64:
		case BYTECODE_OP_GE_S64:
		case BYTECODE_OP_LE_S64:
		case BYTECODE_OP_EQ_DOUBLE:
		case BYTECODE_OP_NE_DOUBLE:
		case BYTECODE_OP_GT_DOUBLE:
		case BYTECODE_OP_LT_DOUBLE:
		case BYTECODE_OP_GE_DOUBLE:
		case BYTECODE_OP_LE_DOUBLE:
		case BYTECODE_OP_EQ_DOUBLE_S64:
		case BYTECODE_OP_NE_DOUBLE_S64:
		case BYTECODE_OP_GT_DOUBLE_S64:
		case BYTECODE_OP_LT_DOUBLE_S64:
		case BYTECODE_OP_GE_DOUBLE_S64:
		case BYTECODE_OP_LE_DOUBLE_S64:
		case BYTECODE_OP_EQ_S64_DOUBLE:
		case BYTECODE_OP_NE_S64_DOUBLE:
		case BYTECODE_OP_GT_S64_DOUBLE:
		case BYTECODE_OP_LT_S64_DOUBLE:
		case BYTECODE_OP_GE_S64_DOUBLE:
		case BYTECODE_OP_LE_S64_DOUBLE:
		{
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_BIT_RSHIFT:
		case BYTECODE_OP_BIT_LSHIFT:
		case BYTECODE_OP_BIT_AND:
		case BYTECODE_OP_BIT_OR:
		case BYTECODE_OP_BIT_XOR:
		{
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			/* Bitwise operators produce an unsigned 64-bit result. */
			vstack_ax(stack)->type = REG_U64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		/* unary */
		case BYTECODE_OP_UNARY_PLUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch(vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
			case REG_U64:
				insn->op = BYTECODE_OP_UNARY_PLUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = BYTECODE_OP_UNARY_PLUS_DOUBLE;
				break;
			case REG_UNKNOWN: /* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case BYTECODE_OP_UNARY_MINUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch(vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
			case REG_U64:
				insn->op = BYTECODE_OP_UNARY_MINUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = BYTECODE_OP_UNARY_MINUS_DOUBLE;
				break;
			case REG_UNKNOWN: /* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case BYTECODE_OP_UNARY_NOT:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch(vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
			case REG_U64:
				insn->op = BYTECODE_OP_UNARY_NOT_S64;
				break;
			case REG_DOUBLE:
				insn->op = BYTECODE_OP_UNARY_NOT_DOUBLE;
				break;
			case REG_UNKNOWN: /* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case BYTECODE_OP_UNARY_BIT_NOT:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case BYTECODE_OP_UNARY_PLUS_S64:
		case BYTECODE_OP_UNARY_MINUS_S64:
		case BYTECODE_OP_UNARY_NOT_S64:
		case BYTECODE_OP_UNARY_PLUS_DOUBLE:
		case BYTECODE_OP_UNARY_MINUS_DOUBLE:
		case BYTECODE_OP_UNARY_NOT_DOUBLE:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		/* logical */
		case BYTECODE_OP_AND:
		case BYTECODE_OP_OR:
		{
			/* Continue to next instruction */
			/* Pop 1 when jump not taken */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			next_pc += sizeof(struct logical_op);
			break;
		}

		/* load field ref */
		case BYTECODE_OP_LOAD_FIELD_REF:
		{
			ERR("Unknown field ref type\n");
			ret = -EINVAL;
			goto end;
		}
		/* get context ref */
		case BYTECODE_OP_GET_CONTEXT_REF:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_UNKNOWN;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case BYTECODE_OP_LOAD_FIELD_REF_STRING:
		case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
		case BYTECODE_OP_GET_CONTEXT_REF_STRING:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case BYTECODE_OP_LOAD_FIELD_REF_S64:
		case BYTECODE_OP_GET_CONTEXT_REF_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
		case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}

		/* load from immediate operand */
		case BYTECODE_OP_LOAD_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case BYTECODE_OP_LOAD_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_numeric);
			break;
		}

		case BYTECODE_OP_LOAD_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_double);
			break;
		}

		/* cast */
		case BYTECODE_OP_CAST_TO_S64:
		{
			struct cast_op *insn = (struct cast_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
			case REG_STAR_GLOB_STRING:
				ERR("Cast op can only be applied to numeric or floating point registers\n");
				ret = -EINVAL;
				goto end;
			case REG_S64:
				insn->op = BYTECODE_OP_CAST_NOP;
				break;
			case REG_DOUBLE:
				insn->op = BYTECODE_OP_CAST_DOUBLE_TO_S64;
				break;
			case REG_UNKNOWN:
			case REG_U64:
				break;
			}
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case BYTECODE_OP_CAST_DOUBLE_TO_S64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case BYTECODE_OP_CAST_NOP:
		{
			next_pc += sizeof(struct cast_op);
			break;
		}

		/*
		 * Instructions for recursive traversal through composed types.
		 */
		case BYTECODE_OP_GET_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case BYTECODE_OP_GET_PAYLOAD_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD:
		{
			struct load_op *insn = (struct load_op *) pc;

			assert(vstack_ax(stack)->type == REG_PTR);
			/* Pop 1, push 1 */
			ret = specialize_load_field(vstack_ax(stack), insn);
			if (ret)
				goto end;

			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD_S8:
		case BYTECODE_OP_LOAD_FIELD_S16:
		case BYTECODE_OP_LOAD_FIELD_S32:
		case BYTECODE_OP_LOAD_FIELD_S64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD_U8:
		case BYTECODE_OP_LOAD_FIELD_U16:
		case BYTECODE_OP_LOAD_FIELD_U32:
		case BYTECODE_OP_LOAD_FIELD_U64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_U64;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD_STRING:
		case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD_DOUBLE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_GET_SYMBOL:
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printf("op get symbol\n");
			switch (vstack_ax(stack)->load.type) {
			case LOAD_OBJECT:
				ERR("Nested fields not implemented yet.");
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_CONTEXT:
				/* Lookup context field. */
				ret = specialize_context_lookup(*pctx,
					bytecode, insn,
					&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_APP_CONTEXT:
				/* Lookup app context field. */
				ret = specialize_app_context_lookup(pctx,
					bytecode, insn,
					&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_PAYLOAD:
				/* Lookup event payload field. */
				ret = specialize_payload_lookup(event_desc,
					bytecode, insn,
					&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			}
			next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
			break;
		}

		case BYTECODE_OP_GET_SYMBOL_FIELD:
		{
			/* Always generated by specialize phase. */
			ret = -EINVAL;
			goto end;
		}

		case BYTECODE_OP_GET_INDEX_U16:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u16 *index = (struct get_index_u16 *) insn->data;

			dbg_printf("op get index u16\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
			break;
		}

		case BYTECODE_OP_GET_INDEX_U64:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u64 *index = (struct get_index_u64 *) insn->data;

			dbg_printf("op get index u64\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
			break;
		}

		}
	}
end:
	return ret;
}