src/lib/lttng-ust/lttng-bytecode-specialize.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * LTTng UST bytecode specializer.
 */

#define _LGPL_SOURCE
#include <limits.h>
#include <stddef.h>
#include <stdint.h>
/*
 * Made explicit for direct uses below (assert, errno values, bool,
 * realloc/free, mem*/str* functions); upstream may also receive these
 * transitively through the internal headers.
 */
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

#include <lttng/ust-utils.h>

#include "context-internal.h"
#include "lttng-bytecode.h"
#include "lib/lttng-ust/events.h"
#include "common/macros.h"
#include "common/tracer.h"

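/*
 * Find last (most significant) bit set in val, 1-based; returns 0 when
 * no bit is set. Same semantics as the Linux kernel's fls().
 */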
static int lttng_fls(int val)
{
	int r = 32;
	unsigned int x = (unsigned int) val;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		r -= 1;
	}
	return r;
}

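/* Order of the smallest power of two >= count (assumes count >= 1). */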
static int get_count_order(unsigned int count)
{
	int order;

	order = lttng_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

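/*
 * Reserve "len" bytes at "align" alignment in the runtime data area,
 * growing the allocation geometrically as needed; newly allocated
 * memory is zero-filled. Returns the offset of the reservation, or a
 * negative errno value on error.
 */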
static ssize_t bytecode_reserve_data(struct bytecode_runtime *runtime,
		size_t align, size_t len)
{
	ssize_t ret;
	size_t padding = lttng_ust_offset_align(runtime->data_len, align);
	size_t new_len = runtime->data_len + padding + len;
	size_t new_alloc_len = new_len;
	size_t old_alloc_len = runtime->data_alloc_len;

	if (new_len > BYTECODE_MAX_DATA_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		char *newptr;

		new_alloc_len =
			max_t(size_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = realloc(runtime->data, new_alloc_len);
		if (!newptr)
			return -ENOMEM;
		runtime->data = newptr;
		/* Zero the newly allocated region; realloc does not initialize it. */
		memset(&runtime->data[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		runtime->data_alloc_len = new_alloc_len;
	}
	runtime->data_len += padding;
	ret = runtime->data_len;
	runtime->data_len += len;
	return ret;
}

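/*
 * Copy "len" bytes from "p" into newly reserved space in the runtime
 * data area. Returns the data offset, or a negative errno value.
 */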
static ssize_t bytecode_push_data(struct bytecode_runtime *runtime,
		const void *p, size_t align, size_t len)
{
	ssize_t offset;

	offset = bytecode_reserve_data(runtime, align, len);
	if (offset < 0)
		return offset;	/* Propagate -EINVAL or -ENOMEM from the reservation. */
	memcpy(&runtime->data[offset], p, len);
	return offset;
}

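/*
 * Specialize a generic LOAD_FIELD instruction: pick the typed load
 * opcode matching the object type on top of the virtual stack, and
 * record the register type the load will produce.
 */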
static int specialize_load_field(struct vstack_entry *stack_top,
		struct load_op *insn)
{
	int ret;

	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printf("Bytecode warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	switch (stack_top->load.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printf("op load field s8\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_S8;
		break;
	case OBJECT_TYPE_S16:
		dbg_printf("op load field s16\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_S16;
		break;
	case OBJECT_TYPE_S32:
		dbg_printf("op load field s32\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_S32;
		break;
	case OBJECT_TYPE_S64:
		dbg_printf("op load field s64\n");
		stack_top->type = REG_S64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_S64;
		break;
	case OBJECT_TYPE_SIGNED_ENUM:
		dbg_printf("op load field signed enumeration\n");
		stack_top->type = REG_PTR;
		break;
	case OBJECT_TYPE_U8:
		dbg_printf("op load field u8\n");
		stack_top->type = REG_U64;
		insn->op = BYTECODE_OP_LOAD_FIELD_U8;
		break;
	case OBJECT_TYPE_U16:
		dbg_printf("op load field u16\n");
		stack_top->type = REG_U64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_U16;
		break;
	case OBJECT_TYPE_U32:
		dbg_printf("op load field u32\n");
		stack_top->type = REG_U64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_U32;
		break;
	case OBJECT_TYPE_U64:
		dbg_printf("op load field u64\n");
		stack_top->type = REG_U64;
		if (!stack_top->load.rev_bo)
			insn->op = BYTECODE_OP_LOAD_FIELD_U64;
		break;
	case OBJECT_TYPE_UNSIGNED_ENUM:
		dbg_printf("op load field unsigned enumeration\n");
		stack_top->type = REG_PTR;
		break;
	case OBJECT_TYPE_DOUBLE:
		stack_top->type = REG_DOUBLE;
		insn->op = BYTECODE_OP_LOAD_FIELD_DOUBLE;
		break;
	case OBJECT_TYPE_STRING:
		dbg_printf("op load field string\n");
		stack_top->type = REG_STRING;
		insn->op = BYTECODE_OP_LOAD_FIELD_STRING;
		break;
	case OBJECT_TYPE_STRING_SEQUENCE:
		dbg_printf("op load field string sequence\n");
		stack_top->type = REG_STRING;
		insn->op = BYTECODE_OP_LOAD_FIELD_SEQUENCE;
		break;
	case OBJECT_TYPE_DYNAMIC:
		dbg_printf("op load field dynamic\n");
		stack_top->type = REG_UNKNOWN;
		/* Don't specialize load op. */
		break;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		ERR("Sequences, arrays, structs and variants cannot be loaded (nested types).");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}

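/* Map an integer element's signedness and bit-width to a typed object. */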
static int specialize_get_index_object_type(enum object_type *otype,
		int signedness, uint32_t elem_len)
{
	switch (elem_len) {
	case 8:
		if (signedness)
			*otype = OBJECT_TYPE_S8;
		else
			*otype = OBJECT_TYPE_U8;
		break;
	case 16:
		if (signedness)
			*otype = OBJECT_TYPE_S16;
		else
			*otype = OBJECT_TYPE_U16;
		break;
	case 32:
		if (signedness)
			*otype = OBJECT_TYPE_S32;
		else
			*otype = OBJECT_TYPE_U32;
		break;
	case 64:
		if (signedness)
			*otype = OBJECT_TYPE_S64;
		else
			*otype = OBJECT_TYPE_U64;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

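/*
 * Specialize a get-index operation on an array or sequence of
 * integers: check the element type (and, for arrays, the index bound),
 * compute the element byte offset, then emit a bytecode_get_index_data
 * descriptor into the runtime data area and patch the instruction's
 * data offset to reference it.
 */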
static int specialize_get_index(struct bytecode_runtime *runtime,
		struct load_op *insn, uint64_t index,
		struct vstack_entry *stack_top,
		int idx_len)
{
	int ret;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	memset(&gid, 0, sizeof(gid));
	switch (stack_top->load.type) {
	case LOAD_OBJECT:
		switch (stack_top->load.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const struct lttng_ust_type_integer *integer_type;
			const struct lttng_ust_event_field *field;
			uint32_t elem_len, num_elems;
			int signedness;

			field = stack_top->load.field;
			switch (field->type->type) {
			case lttng_ust_type_array:
				if (lttng_ust_get_type_array(field->type)->elem_type->type != lttng_ust_type_integer) {
					ret = -EINVAL;
					goto end;
				}
				integer_type = lttng_ust_get_type_integer(lttng_ust_get_type_array(field->type)->elem_type);
				num_elems = lttng_ust_get_type_array(field->type)->length;
				break;
			default:
				ret = -EINVAL;
				goto end;
			}
			elem_len = integer_type->size;
			signedness = integer_type->signedness;
			if (index >= num_elems) {
				ret = -EINVAL;
				goto end;
			}
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.array_len = num_elems * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (integer_type->reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const struct lttng_ust_type_integer *integer_type;
			const struct lttng_ust_event_field *field;
			uint32_t elem_len;
			int signedness;

			field = stack_top->load.field;
			switch (field->type->type) {
			case lttng_ust_type_sequence:
				if (lttng_ust_get_type_sequence(field->type)->elem_type->type != lttng_ust_type_integer) {
					ret = -EINVAL;
					goto end;
				}
				integer_type = lttng_ust_get_type_integer(lttng_ust_get_type_sequence(field->type)->elem_type);
				break;
			default:
				ret = -EINVAL;
				goto end;
			}
			elem_len = integer_type->size;
			signedness = integer_type->signedness;
			ret = specialize_get_index_object_type(&stack_top->load.object_type,
					signedness, elem_len);
			if (ret)
				goto end;
			gid.offset = index * (elem_len / CHAR_BIT);
			gid.elem.type = stack_top->load.object_type;
			gid.elem.len = elem_len;
			if (integer_type->reverse_byte_order)
				gid.elem.rev_bo = true;
			stack_top->load.rev_bo = gid.elem.rev_bo;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			/* Only generated by the specialize phase. */
		case OBJECT_TYPE_VARIANT:	/* Fall-through */
		default:
			ERR("Unexpected get index type %d",
				(int) stack_top->load.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
		ERR("Index lookup for root field not implemented yet.");
		ret = -EINVAL;
		goto end;
	}
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	switch (idx_len) {
	case 2:
		((struct get_index_u16 *) insn->data)->index = data_offset;
		break;
	case 8:
		((struct get_index_u64 *) insn->data)->index = data_offset;
		break;
	default:
		ret = -EINVAL;
		goto end;
	}

	return 0;

end:
	return ret;
}

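/* Resolve the symbol referenced by the instruction to a context field index. */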
static int specialize_context_lookup_name(struct lttng_ust_ctx *ctx,
		struct bytecode_runtime *bytecode,
		struct load_op *insn)
{
	uint16_t offset;
	const char *name;

	offset = ((struct get_symbol *) insn->data)->offset;
	name = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + offset;
	return lttng_get_context_index(ctx, name);
}

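/*
 * Set the vstack load object type from an event field's type. Context
 * fields expose array/sequence types as strings; integer payloads are
 * loaded as 64-bit registers.
 */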
static int specialize_load_object(const struct lttng_ust_event_field *field,
		struct vstack_load *load, bool is_context)
{
	load->type = LOAD_OBJECT;

	switch (field->type->type) {
	case lttng_ust_type_integer:
		if (lttng_ust_get_type_integer(field->type)->signedness)
			load->object_type = OBJECT_TYPE_S64;
		else
			load->object_type = OBJECT_TYPE_U64;
		load->rev_bo = false;
		break;
	case lttng_ust_type_enum:
	{
		const struct lttng_ust_type_integer *itype;

		itype = lttng_ust_get_type_integer(lttng_ust_get_type_enum(field->type)->container_type);
		if (itype->signedness)
			load->object_type = OBJECT_TYPE_SIGNED_ENUM;
		else
			load->object_type = OBJECT_TYPE_UNSIGNED_ENUM;
		load->rev_bo = false;
		break;
	}
	case lttng_ust_type_array:
		if (lttng_ust_get_type_array(field->type)->elem_type->type != lttng_ust_type_integer) {
			ERR("Array nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (lttng_ust_get_type_array(field->type)->encoding == lttng_ust_string_encoding_none) {
				load->object_type = OBJECT_TYPE_ARRAY;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case lttng_ust_type_sequence:
		if (lttng_ust_get_type_sequence(field->type)->elem_type->type != lttng_ust_type_integer) {
			ERR("Sequence nesting only supports integer types.");
			return -EINVAL;
		}
		if (is_context) {
			load->object_type = OBJECT_TYPE_STRING;
		} else {
			if (lttng_ust_get_type_sequence(field->type)->encoding == lttng_ust_string_encoding_none) {
				load->object_type = OBJECT_TYPE_SEQUENCE;
				load->field = field;
			} else {
				load->object_type = OBJECT_TYPE_STRING_SEQUENCE;
			}
		}
		break;
	case lttng_ust_type_string:
		load->object_type = OBJECT_TYPE_STRING;
		break;
	case lttng_ust_type_float:
		load->object_type = OBJECT_TYPE_DOUBLE;
		break;
	case lttng_ust_type_dynamic:
		load->object_type = OBJECT_TYPE_DYNAMIC;
		break;
	default:
		ERR("Unknown type: %d", (int) field->type->type);
		return -EINVAL;
	}
	return 0;
}
456
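/*
 * Specialize a get-symbol on the context root into a GET_INDEX_U16
 * referencing the resolved context field.
 */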
static int specialize_context_lookup(struct lttng_ust_ctx *ctx,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	int idx, ret;
	const struct lttng_ust_ctx_field *ctx_field;
	const struct lttng_ust_event_field *field;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	idx = specialize_context_lookup_name(ctx, runtime, insn);
	if (idx < 0) {
		return -ENOENT;
	}
	ctx_field = &ctx->fields[idx];
	field = ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		return ret;
	/* Specialize each get_symbol into a get_index. */
	insn->op = BYTECODE_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	gid.field = field;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		return -EINVAL;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	return 0;
}

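/*
 * Same as specialize_context_lookup(), for application-provided
 * ("$app.") contexts. Registers the context on the fly when it is not
 * known yet.
 */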
static int specialize_app_context_lookup(struct lttng_ust_ctx **pctx,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	uint16_t offset;
	const char *orig_name;
	char *name = NULL;
	int idx, ret;
	const struct lttng_ust_ctx_field *ctx_field;
	const struct lttng_ust_event_field *field;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	offset = ((struct get_symbol *) insn->data)->offset;
	orig_name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	name = zmalloc(strlen(orig_name) + strlen("$app.") + 1);
	if (!name) {
		ret = -ENOMEM;
		goto end;
	}
	strcpy(name, "$app.");
	strcat(name, orig_name);
	idx = lttng_get_context_index(*pctx, name);
	if (idx < 0) {
		assert(lttng_context_is_app(name));
		ret = lttng_ust_add_app_context_to_ctx_rcu(name,
				pctx);
		if (ret)
			goto end;	/* goto end so "name" is freed on error. */
		idx = lttng_get_context_index(*pctx, name);
		if (idx < 0) {
			ret = -ENOENT;
			goto end;
		}
	}
	ctx_field = &(*pctx)->fields[idx];
	field = ctx_field->event_field;
	ret = specialize_load_object(field, load, true);
	if (ret)
		goto end;
	/* Specialize each get_symbol into a get_index. */
	insn->op = BYTECODE_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.ctx_index = idx;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	gid.field = field;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	free(name);
	return ret;
}

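/*
 * Specialize a get-symbol on the payload root into a GET_INDEX_U16:
 * look the field up by name in the event description, accumulating the
 * interpreter stack offset of the fields that precede it.
 */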
static int specialize_payload_lookup(const struct lttng_ust_event_desc *event_desc,
		struct bytecode_runtime *runtime,
		struct load_op *insn,
		struct vstack_load *load)
{
	const char *name;
	uint16_t offset;
	unsigned int i, nr_fields;
	bool found = false;
	uint32_t field_offset = 0;
	const struct lttng_ust_event_field *field;
	int ret;
	struct bytecode_get_index_data gid;
	ssize_t data_offset;

	nr_fields = event_desc->nr_fields;
	offset = ((struct get_symbol *) insn->data)->offset;
	name = runtime->p.bc->bc.data + runtime->p.bc->bc.reloc_offset + offset;
	for (i = 0; i < nr_fields; i++) {
		field = event_desc->fields[i];
		if (field->nofilter) {
			continue;
		}
		if (!strcmp(field->name, name)) {
			found = true;
			break;
		}
		/* compute field offset on stack */
		switch (field->type->type) {
		case lttng_ust_type_integer:
		case lttng_ust_type_enum:
			field_offset += sizeof(int64_t);
			break;
		case lttng_ust_type_array:
		case lttng_ust_type_sequence:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case lttng_ust_type_string:
			field_offset += sizeof(void *);
			break;
		case lttng_ust_type_float:
			field_offset += sizeof(double);
			break;
		default:
			ret = -EINVAL;
			goto end;
		}
	}
	if (!found) {
		ret = -EINVAL;
		goto end;
	}

	ret = specialize_load_object(field, load, false);
	if (ret)
		goto end;

	/* Specialize each get_symbol into a get_index. */
	insn->op = BYTECODE_OP_GET_INDEX_U16;
	memset(&gid, 0, sizeof(gid));
	gid.offset = field_offset;
	gid.elem.type = load->object_type;
	gid.elem.rev_bo = load->rev_bo;
	gid.field = field;
	data_offset = bytecode_push_data(runtime, &gid,
			__alignof__(gid), sizeof(gid));
	if (data_offset < 0) {
		ret = -EINVAL;
		goto end;
	}
	((struct get_index_u16 *) insn->data)->index = data_offset;
	ret = 0;
end:
	return ret;
}

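/*
 * Single pass over the bytecode: track register types on a virtual
 * stack and rewrite generic opcodes into their type-specialized
 * variants.
 */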
int lttng_bytecode_specialize(const struct lttng_ust_event_desc *event_desc,
		struct bytecode_runtime *bytecode)
{
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	struct vstack _stack;
	struct vstack *stack = &_stack;
	struct lttng_ust_ctx **pctx = bytecode->p.pctx;

	vstack_init(stack);

	start_pc = &bytecode->code[0];
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
			pc = next_pc) {
		switch (*(bytecode_opcode_t *) pc) {
		case BYTECODE_OP_UNKNOWN:
		default:
			ERR("unknown bytecode op %u\n",
				(unsigned int) *(bytecode_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case BYTECODE_OP_RETURN:
			if (vstack_ax(stack)->type == REG_S64 ||
					vstack_ax(stack)->type == REG_U64)
				*(bytecode_opcode_t *) pc = BYTECODE_OP_RETURN_S64;
			ret = 0;
			goto end;

		case BYTECODE_OP_RETURN_S64:
			if (vstack_ax(stack)->type != REG_S64 &&
					vstack_ax(stack)->type != REG_U64) {
				ERR("Unexpected register type\n");
				ret = -EINVAL;
				goto end;
			}
			ret = 0;
			goto end;

		/* binary */
		case BYTECODE_OP_MUL:
		case BYTECODE_OP_DIV:
		case BYTECODE_OP_MOD:
		case BYTECODE_OP_PLUS:
		case BYTECODE_OP_MINUS:
			ERR("unsupported bytecode op %u\n",
				(unsigned int) *(bytecode_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		case BYTECODE_OP_EQ:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
				else
					insn->op = BYTECODE_OP_EQ_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_EQ_STAR_GLOB_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_EQ_S64;
				else
					insn->op = BYTECODE_OP_EQ_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_EQ_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_EQ_DOUBLE;
				break;
			case REG_UNKNOWN:
				break; /* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_NE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_STAR_GLOB_STRING)
					insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
				else
					insn->op = BYTECODE_OP_NE_STRING;
				break;
			case REG_STAR_GLOB_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_NE_STAR_GLOB_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_NE_S64;
				else
					insn->op = BYTECODE_OP_NE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_NE_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_NE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break; /* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_GT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for > binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_GT_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_GT_S64;
				else
					insn->op = BYTECODE_OP_GT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_GT_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_GT_DOUBLE;
				break;
			case REG_UNKNOWN:
				break; /* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_LT:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for < binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_LT_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_LT_S64;
				else
					insn->op = BYTECODE_OP_LT_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_LT_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_LT_DOUBLE;
				break;
			case REG_UNKNOWN:
				break; /* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_GE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for >= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_GE_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_GE_S64;
				else
					insn->op = BYTECODE_OP_GE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_GE_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_GE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break; /* Dynamic typing. */
			}
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_U64;
			next_pc += sizeof(struct binary_op);
			break;
		}
		case BYTECODE_OP_LE:
		{
			struct binary_op *insn = (struct binary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STAR_GLOB_STRING:
				ERR("invalid register type for <= binary operator\n");
				ret = -EINVAL;
				goto end;
			case REG_STRING:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				insn->op = BYTECODE_OP_LE_STRING;
				break;
			case REG_S64:
			case REG_U64:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_LE_S64;
				else
					insn->op = BYTECODE_OP_LE_DOUBLE_S64;
				break;
			case REG_DOUBLE:
				if (vstack_bx(stack)->type == REG_UNKNOWN)
					break;
				if (vstack_bx(stack)->type == REG_S64 ||
						vstack_bx(stack)->type == REG_U64)
					insn->op = BYTECODE_OP_LE_S64_DOUBLE;
				else
					insn->op = BYTECODE_OP_LE_DOUBLE;
				break;
			case REG_UNKNOWN:
				break; /* Dynamic typing. */
			}
			/* Pop 2, push 1, mirroring the other comparison operators. */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_EQ_STRING:
		case BYTECODE_OP_NE_STRING:
		case BYTECODE_OP_GT_STRING:
		case BYTECODE_OP_LT_STRING:
		case BYTECODE_OP_GE_STRING:
		case BYTECODE_OP_LE_STRING:
		case BYTECODE_OP_EQ_STAR_GLOB_STRING:
		case BYTECODE_OP_NE_STAR_GLOB_STRING:
		case BYTECODE_OP_EQ_S64:
		case BYTECODE_OP_NE_S64:
		case BYTECODE_OP_GT_S64:
		case BYTECODE_OP_LT_S64:
		case BYTECODE_OP_GE_S64:
		case BYTECODE_OP_LE_S64:
		case BYTECODE_OP_EQ_DOUBLE:
		case BYTECODE_OP_NE_DOUBLE:
		case BYTECODE_OP_GT_DOUBLE:
		case BYTECODE_OP_LT_DOUBLE:
		case BYTECODE_OP_GE_DOUBLE:
		case BYTECODE_OP_LE_DOUBLE:
		case BYTECODE_OP_EQ_DOUBLE_S64:
		case BYTECODE_OP_NE_DOUBLE_S64:
		case BYTECODE_OP_GT_DOUBLE_S64:
		case BYTECODE_OP_LT_DOUBLE_S64:
		case BYTECODE_OP_GE_DOUBLE_S64:
		case BYTECODE_OP_LE_DOUBLE_S64:
		case BYTECODE_OP_EQ_S64_DOUBLE:
		case BYTECODE_OP_NE_S64_DOUBLE:
		case BYTECODE_OP_GT_S64_DOUBLE:
		case BYTECODE_OP_LT_S64_DOUBLE:
		case BYTECODE_OP_GE_S64_DOUBLE:
		case BYTECODE_OP_LE_S64_DOUBLE:
		{
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		case BYTECODE_OP_BIT_RSHIFT:
		case BYTECODE_OP_BIT_LSHIFT:
		case BYTECODE_OP_BIT_AND:
		case BYTECODE_OP_BIT_OR:
		case BYTECODE_OP_BIT_XOR:
		{
			/* Pop 2, push 1 */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct binary_op);
			break;
		}

		/* unary */
		case BYTECODE_OP_UNARY_PLUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
			case REG_U64:
				insn->op = BYTECODE_OP_UNARY_PLUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = BYTECODE_OP_UNARY_PLUS_DOUBLE;
				break;
			case REG_UNKNOWN: /* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case BYTECODE_OP_UNARY_MINUS:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
			case REG_U64:
				insn->op = BYTECODE_OP_UNARY_MINUS_S64;
				break;
			case REG_DOUBLE:
				insn->op = BYTECODE_OP_UNARY_MINUS_DOUBLE;
				break;
			case REG_UNKNOWN: /* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case BYTECODE_OP_UNARY_NOT:
		{
			struct unary_op *insn = (struct unary_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_S64:
			case REG_U64:
				insn->op = BYTECODE_OP_UNARY_NOT_S64;
				break;
			case REG_DOUBLE:
				insn->op = BYTECODE_OP_UNARY_NOT_DOUBLE;
				break;
			case REG_UNKNOWN: /* Dynamic typing. */
				break;
			}
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case BYTECODE_OP_UNARY_BIT_NOT:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		case BYTECODE_OP_UNARY_PLUS_S64:
		case BYTECODE_OP_UNARY_MINUS_S64:
		case BYTECODE_OP_UNARY_NOT_S64:
		case BYTECODE_OP_UNARY_PLUS_DOUBLE:
		case BYTECODE_OP_UNARY_MINUS_DOUBLE:
		case BYTECODE_OP_UNARY_NOT_DOUBLE:
		{
			/* Pop 1, push 1 */
			next_pc += sizeof(struct unary_op);
			break;
		}

		/* logical */
		case BYTECODE_OP_AND:
		case BYTECODE_OP_OR:
		{
			/* Continue to next instruction */
			/* Pop 1 when jump not taken */
			if (vstack_pop(stack)) {
				ret = -EINVAL;
				goto end;
			}
			next_pc += sizeof(struct logical_op);
			break;
		}

		/* load field ref */
		case BYTECODE_OP_LOAD_FIELD_REF:
		{
			ERR("Unknown field ref type\n");
			ret = -EINVAL;
			goto end;
		}
		/* get context ref */
		case BYTECODE_OP_GET_CONTEXT_REF:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_UNKNOWN;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case BYTECODE_OP_LOAD_FIELD_REF_STRING:
		case BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE:
		case BYTECODE_OP_GET_CONTEXT_REF_STRING:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case BYTECODE_OP_LOAD_FIELD_REF_S64:
		case BYTECODE_OP_GET_CONTEXT_REF_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}
		case BYTECODE_OP_LOAD_FIELD_REF_DOUBLE:
		case BYTECODE_OP_GET_CONTEXT_REF_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			break;
		}

		/* load from immediate operand */
		case BYTECODE_OP_LOAD_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case BYTECODE_OP_LOAD_STAR_GLOB_STRING:
		{
			struct load_op *insn = (struct load_op *) pc;

			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			break;
		}

		case BYTECODE_OP_LOAD_S64:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_numeric);
			break;
		}

		case BYTECODE_OP_LOAD_DOUBLE:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_double);
			break;
		}

		/* cast */
		case BYTECODE_OP_CAST_TO_S64:
		{
			struct cast_op *insn = (struct cast_op *) pc;

			switch (vstack_ax(stack)->type) {
			default:
				ERR("unknown register type\n");
				ret = -EINVAL;
				goto end;

			case REG_STRING:
			case REG_STAR_GLOB_STRING:
				ERR("Cast op can only be applied to numeric or floating point registers\n");
				ret = -EINVAL;
				goto end;
			case REG_S64:
				insn->op = BYTECODE_OP_CAST_NOP;
				break;
			case REG_DOUBLE:
				insn->op = BYTECODE_OP_CAST_DOUBLE_TO_S64;
				break;
			case REG_UNKNOWN:
			case REG_U64:
				break;
			}
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case BYTECODE_OP_CAST_DOUBLE_TO_S64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct cast_op);
			break;
		}
		case BYTECODE_OP_CAST_NOP:
		{
			next_pc += sizeof(struct cast_op);
			break;
		}

		/*
		 * Instructions for recursive traversal through composed types.
		 */
		case BYTECODE_OP_GET_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case BYTECODE_OP_GET_APP_CONTEXT_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_APP_CONTEXT;
			next_pc += sizeof(struct load_op);
			break;
		}
		case BYTECODE_OP_GET_PAYLOAD_ROOT:
		{
			if (vstack_push(stack)) {
				ret = -EINVAL;
				goto end;
			}
			vstack_ax(stack)->type = REG_PTR;
			vstack_ax(stack)->load.type = LOAD_ROOT_PAYLOAD;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD:
		{
			struct load_op *insn = (struct load_op *) pc;

			assert(vstack_ax(stack)->type == REG_PTR);
			/* Pop 1, push 1 */
			ret = specialize_load_field(vstack_ax(stack), insn);
			if (ret)
				goto end;

			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD_S8:
		case BYTECODE_OP_LOAD_FIELD_S16:
		case BYTECODE_OP_LOAD_FIELD_S32:
		case BYTECODE_OP_LOAD_FIELD_S64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_S64;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD_U8:
		case BYTECODE_OP_LOAD_FIELD_U16:
		case BYTECODE_OP_LOAD_FIELD_U32:
		case BYTECODE_OP_LOAD_FIELD_U64:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_U64;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD_STRING:
		case BYTECODE_OP_LOAD_FIELD_SEQUENCE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_STRING;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_LOAD_FIELD_DOUBLE:
		{
			/* Pop 1, push 1 */
			vstack_ax(stack)->type = REG_DOUBLE;
			next_pc += sizeof(struct load_op);
			break;
		}

		case BYTECODE_OP_GET_SYMBOL:
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printf("op get symbol\n");
			switch (vstack_ax(stack)->load.type) {
			case LOAD_OBJECT:
				ERR("Nested fields not implemented yet.");
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_CONTEXT:
				/* Lookup context field. */
				ret = specialize_context_lookup(*pctx,
						bytecode, insn,
						&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_APP_CONTEXT:
				/* Lookup app context field. */
				ret = specialize_app_context_lookup(pctx,
						bytecode, insn,
						&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			case LOAD_ROOT_PAYLOAD:
				/* Lookup event payload field. */
				ret = specialize_payload_lookup(event_desc,
						bytecode, insn,
						&vstack_ax(stack)->load);
				if (ret)
					goto end;
				break;
			}
			next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
			break;
		}

		case BYTECODE_OP_GET_SYMBOL_FIELD:
		{
			/* Always generated by specialize phase. */
			ret = -EINVAL;
			goto end;
		}

		case BYTECODE_OP_GET_INDEX_U16:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u16 *index = (struct get_index_u16 *) insn->data;

			dbg_printf("op get index u16\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
			break;
		}

		case BYTECODE_OP_GET_INDEX_U64:
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u64 *index = (struct get_index_u64 *) insn->data;

			dbg_printf("op get index u64\n");
			/* Pop 1, push 1 */
			ret = specialize_get_index(bytecode, insn, index->index,
					vstack_ax(stack), sizeof(*index));
			if (ret)
				goto end;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
			break;
		}

		}
	}
end:
	return ret;
}