/*
 * filter-visitor-generate-bytecode.c
 *
 * LTTng filter bytecode generation
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License, version 2.1 only,
 * as published by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <common/align.h>
#include <common/compat/string.h>

#include "filter-bytecode.h"
#include "filter-ir.h"
#include "filter-ast.h"

#include <common/macros.h>

#ifndef max_t
#define max_t(type, a, b)	((type) ((a) > (b) ? (a) : (b)))
#endif

#define INIT_ALLOC_SIZE		4

static
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
		struct ir_op *node);

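/*
 * Rounds up to the next power-of-two exponent: returns ceil(log2(count)),
 * assuming lttng_fls() returns the 1-based position of the most
 * significant bit set (e.g. get_count_order(4) == 2,
 * get_count_order(5) == 3).  Used below to grow allocations to a power
 * of two.
 */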
static inline int get_count_order(unsigned int count)
{
	int order;

	order = lttng_fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

static
int bytecode_init(struct lttng_filter_bytecode_alloc **fb)
{
	uint32_t alloc_len;

	alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + INIT_ALLOC_SIZE;
	*fb = calloc(alloc_len, 1);
	if (!*fb) {
		return -ENOMEM;
	} else {
		(*fb)->alloc_len = alloc_len;
		return 0;
	}
}

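/*
 * Reserve "len" bytes in the bytecode stream, preceded by enough padding
 * for the reserved region to start on an "align"-byte boundary, and
 * return the offset of that region.  For example, with b.len == 6 and
 * align == 8, two padding bytes are added and the returned offset is 8.
 * The backing allocation grows geometrically (next power of two, at
 * least doubling), keeping the number of realloc() calls logarithmic.
 */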
static
int32_t bytecode_reserve(struct lttng_filter_bytecode_alloc **fb, uint32_t align, uint32_t len)
{
	int32_t ret;
	uint32_t padding = offset_align((*fb)->b.len, align);
	uint32_t new_len = (*fb)->b.len + padding + len;
	uint32_t new_alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + new_len;
	uint32_t old_alloc_len = (*fb)->alloc_len;

	if (new_len > LTTNG_FILTER_MAX_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		struct lttng_filter_bytecode_alloc *newptr;

		new_alloc_len =
			max_t(uint32_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = realloc(*fb, new_alloc_len);
		if (!newptr)
			return -ENOMEM;
		*fb = newptr;
		/* Zero the newly allocated memory, from the end of the old allocation onward. */
		memset(&((char *) *fb)[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		(*fb)->alloc_len = new_alloc_len;
	}
	(*fb)->b.len += padding;
	ret = (*fb)->b.len;
	(*fb)->b.len += len;
	return ret;
}

static
int bytecode_push(struct lttng_filter_bytecode_alloc **fb, const void *data,
		uint32_t align, uint32_t len)
{
	int32_t offset;

	offset = bytecode_reserve(fb, align, len);
	if (offset < 0)
		return offset;
	memcpy(&(*fb)->b.data[offset], data, len);
	return 0;
}

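/*
 * Like bytecode_push(), but also reports, through *skip_offset, the
 * offset (within the bytecode stream) of the skip_offset field of the
 * logical_op just emitted, so the caller can patch it with
 * bytecode_patch() once the short-circuit jump target is known.
 */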
static
int bytecode_push_logical(struct lttng_filter_bytecode_alloc **fb,
		struct logical_op *data,
		uint32_t align, uint32_t len,
		uint16_t *skip_offset)
{
	int32_t offset;

	offset = bytecode_reserve(fb, align, len);
	if (offset < 0)
		return offset;
	memcpy(&(*fb)->b.data[offset], data, len);
	*skip_offset =
		(void *) &((struct logical_op *) &(*fb)->b.data[offset])->skip_offset
			- (void *) &(*fb)->b.data[0];
	return 0;
}

static
int bytecode_patch(struct lttng_filter_bytecode_alloc **fb,
		const void *data,
		uint16_t offset,
		uint32_t len)
{
	if (offset >= (*fb)->b.len) {
		return -EINVAL;
	}
	memcpy(&(*fb)->b.data[offset], data, len);
	return 0;
}

static
int visit_node_root(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct return_op insn;

	/* Visit child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.root.child);
	if (ret)
		return ret;

	/* Generate end of bytecode instruction */
	insn.op = FILTER_OP_RETURN;
	return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
}

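/*
 * Append the string "append" to the dynamically allocated string *s,
 * which may initially be NULL.  On success *s points to the new
 * concatenation and the old buffer is freed; e.g. appending "$ctx." and
 * then "vpid" leaves *s == "$ctx.vpid".  The caller owns the result.
 */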
static
int append_str(char **s, const char *append)
{
	char *old = *s;
	char *new;
	size_t oldlen = (old == NULL) ? 0 : strlen(old);
	size_t appendlen = strlen(append);

	new = calloc(oldlen + appendlen + 1, 1);
	if (!new) {
		return -ENOMEM;
	}
	if (oldlen) {
		strcpy(new, old);
	}
	strcat(new, append);
	*s = new;
	free(old);
	return 0;
}

/*
 * 1: match
 * 0: no match
 * < 0: error
 */
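/*
 * For instance, the expression "$ctx.vpid" (context root, symbol
 * "vpid", field load) matches the legacy encoding and becomes a single
 * FILTER_OP_GET_CONTEXT_REF referring to the symbol string "$ctx.vpid".
 * Nested symbols are joined with '.'.  Any expression using array
 * indexing (IR_LOAD_EXPRESSION_GET_INDEX) does not match and is emitted
 * with the newer instruction sequence instead.
 */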
static
int load_expression_legacy_match(const struct ir_load_expression *exp,
		enum filter_op *op_type,
		char **symbol)
{
	const struct ir_load_expression_op *op;
	bool need_dot = false;

	op = exp->child;
	switch (op->type) {
	case IR_LOAD_EXPRESSION_GET_CONTEXT_ROOT:
		*op_type = FILTER_OP_GET_CONTEXT_REF;
		if (append_str(symbol, "$ctx.")) {
			return -ENOMEM;
		}
		need_dot = false;
		break;
	case IR_LOAD_EXPRESSION_GET_APP_CONTEXT_ROOT:
		*op_type = FILTER_OP_GET_CONTEXT_REF;
		if (append_str(symbol, "$app.")) {
			return -ENOMEM;
		}
		need_dot = false;
		break;
	case IR_LOAD_EXPRESSION_GET_PAYLOAD_ROOT:
		*op_type = FILTER_OP_LOAD_FIELD_REF;
		need_dot = false;
		break;

	case IR_LOAD_EXPRESSION_GET_SYMBOL:
	case IR_LOAD_EXPRESSION_GET_INDEX:
	case IR_LOAD_EXPRESSION_LOAD_FIELD:
	default:
		return 0;	/* no match */
	}

	for (;;) {
		op = op->next;
		if (!op) {
			return 0;	/* no match */
		}
		switch (op->type) {
		case IR_LOAD_EXPRESSION_LOAD_FIELD:
			goto end;
		case IR_LOAD_EXPRESSION_GET_SYMBOL:
			if (need_dot && append_str(symbol, ".")) {
				return -ENOMEM;
			}
			if (append_str(symbol, op->u.symbol)) {
				return -ENOMEM;
			}
			break;
		default:
			return 0;	/* no match */
		}
		need_dot = true;
	}
end:
	return 1;	/* Legacy match */
}

/*
 * 1: legacy match
 * 0: no legacy match
 * < 0: error
 */
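/*
 * When a legacy field/context reference is emitted, a relocation entry
 * is appended to ctx->bytecode_reloc: a 16-bit offset pointing back at
 * the load_op in the instruction stream, immediately followed by the
 * NUL-terminated symbol name.  The whole relocation table is appended
 * after the instructions by filter_visitor_bytecode_generate(), so the
 * symbol can be resolved and the field_ref offset patched later.
 */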
static
int visit_node_load_expression_legacy(struct filter_parser_ctx *ctx,
		const struct ir_load_expression *exp,
		const struct ir_load_expression_op *op)
{
	struct load_op *insn = NULL;
	uint32_t insn_len = sizeof(struct load_op)
		+ sizeof(struct field_ref);
	struct field_ref ref_offset;
	uint32_t reloc_offset_u32;
	uint16_t reloc_offset;
	enum filter_op op_type;
	char *symbol = NULL;
	int ret;

	ret = load_expression_legacy_match(exp, &op_type, &symbol);
	if (ret <= 0) {
		goto end;
	}
	insn = calloc(insn_len, 1);
	if (!insn) {
		ret = -ENOMEM;
		goto end;
	}
	insn->op = op_type;
	ref_offset.offset = (uint16_t) -1U;
	memcpy(insn->data, &ref_offset, sizeof(ref_offset));
	/* reloc_offset points to struct load_op */
	reloc_offset_u32 = bytecode_get_len(&ctx->bytecode->b);
	if (reloc_offset_u32 > LTTNG_FILTER_MAX_LEN - 1) {
		ret = -EINVAL;
		goto end;
	}
	reloc_offset = (uint16_t) reloc_offset_u32;
	ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
	if (ret) {
		goto end;
	}
	/* append reloc */
	ret = bytecode_push(&ctx->bytecode_reloc, &reloc_offset,
			1, sizeof(reloc_offset));
	if (ret) {
		goto end;
	}
	ret = bytecode_push(&ctx->bytecode_reloc, symbol,
			1, strlen(symbol) + 1);
	if (ret) {
		goto end;
	}
	ret = 1;	/* legacy */
end:
	free(insn);
	free(symbol);
	return ret;
}

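/*
 * Emit the instruction sequence for a load expression.  Expressions that
 * fit the legacy field/context reference encoding are handled by
 * visit_node_load_expression_legacy(); otherwise each IR operation maps
 * to one instruction.  For example, an expression such as a.b[3] would
 * typically become: FILTER_OP_GET_PAYLOAD_ROOT, FILTER_OP_GET_SYMBOL "a",
 * FILTER_OP_GET_SYMBOL "b", FILTER_OP_GET_INDEX_U64 3,
 * FILTER_OP_LOAD_FIELD.
 */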
static
int visit_node_load_expression(struct filter_parser_ctx *ctx,
		const struct ir_op *node)
{
	struct ir_load_expression *exp;
	struct ir_load_expression_op *op;
	int ret;

	exp = node->u.load.u.expression;
	if (!exp) {
		return -EINVAL;
	}
	op = exp->child;
	if (!op) {
		return -EINVAL;
	}

	ret = visit_node_load_expression_legacy(ctx, exp, op);
	if (ret < 0) {
		return ret;
	}
	if (ret > 0) {
		return 0;	/* legacy */
	}

	for (; op != NULL; op = op->next) {
		switch (op->type) {
		case IR_LOAD_EXPRESSION_GET_CONTEXT_ROOT:
		{
			struct load_op *insn;
			uint32_t insn_len = sizeof(struct load_op);
			int ret;

			insn = calloc(insn_len, 1);
			if (!insn)
				return -ENOMEM;
			insn->op = FILTER_OP_GET_CONTEXT_ROOT;
			ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
			free(insn);
			if (ret) {
				return ret;
			}
			break;
		}
		case IR_LOAD_EXPRESSION_GET_APP_CONTEXT_ROOT:
		{
			struct load_op *insn;
			uint32_t insn_len = sizeof(struct load_op);
			int ret;

			insn = calloc(insn_len, 1);
			if (!insn)
				return -ENOMEM;
			insn->op = FILTER_OP_GET_APP_CONTEXT_ROOT;
			ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
			free(insn);
			if (ret) {
				return ret;
			}
			break;
		}
		case IR_LOAD_EXPRESSION_GET_PAYLOAD_ROOT:
		{
			struct load_op *insn;
			uint32_t insn_len = sizeof(struct load_op);
			int ret;

			insn = calloc(insn_len, 1);
			if (!insn)
				return -ENOMEM;
			insn->op = FILTER_OP_GET_PAYLOAD_ROOT;
			ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
			free(insn);
			if (ret) {
				return ret;
			}
			break;
		}
		case IR_LOAD_EXPRESSION_GET_SYMBOL:
		{
			struct load_op *insn;
			uint32_t insn_len = sizeof(struct load_op)
				+ sizeof(struct get_symbol);
			struct get_symbol symbol_offset;
			uint32_t reloc_offset_u32;
			uint16_t reloc_offset;
			uint32_t bytecode_reloc_offset_u32;
			int ret;

			insn = calloc(insn_len, 1);
			if (!insn)
				return -ENOMEM;
			insn->op = FILTER_OP_GET_SYMBOL;
			bytecode_reloc_offset_u32 =
					bytecode_get_len(&ctx->bytecode_reloc->b)
					+ sizeof(reloc_offset);
			symbol_offset.offset =
					(uint16_t) bytecode_reloc_offset_u32;
			memcpy(insn->data, &symbol_offset,
					sizeof(symbol_offset));
			/* reloc_offset points to struct load_op */
			reloc_offset_u32 = bytecode_get_len(&ctx->bytecode->b);
			if (reloc_offset_u32 > LTTNG_FILTER_MAX_LEN - 1) {
				free(insn);
				return -EINVAL;
			}
			reloc_offset = (uint16_t) reloc_offset_u32;
			ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
			if (ret) {
				free(insn);
				return ret;
			}
			/* append reloc */
			ret = bytecode_push(&ctx->bytecode_reloc, &reloc_offset,
					1, sizeof(reloc_offset));
			if (ret) {
				free(insn);
				return ret;
			}
			ret = bytecode_push(&ctx->bytecode_reloc,
					op->u.symbol,
					1, strlen(op->u.symbol) + 1);
			free(insn);
			if (ret) {
				return ret;
			}
			break;
		}
		case IR_LOAD_EXPRESSION_GET_INDEX:
		{
			struct load_op *insn;
			uint32_t insn_len = sizeof(struct load_op)
				+ sizeof(struct get_index_u64);
			struct get_index_u64 index;
			int ret;

			insn = calloc(insn_len, 1);
			if (!insn)
				return -ENOMEM;
			insn->op = FILTER_OP_GET_INDEX_U64;
			index.index = op->u.index;
			memcpy(insn->data, &index, sizeof(index));
			ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
			free(insn);
			if (ret) {
				return ret;
			}
			break;
		}
		case IR_LOAD_EXPRESSION_LOAD_FIELD:
		{
			struct load_op *insn;
			uint32_t insn_len = sizeof(struct load_op);
			int ret;

			insn = calloc(insn_len, 1);
			if (!insn)
				return -ENOMEM;
			insn->op = FILTER_OP_LOAD_FIELD;
			ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
			free(insn);
			if (ret) {
				return ret;
			}
			break;
		}
		}
	}
	return 0;
}

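/*
 * Emit the load instruction for a literal or expression operand.  For
 * example, the numeric constant 42 becomes FILTER_OP_LOAD_S64, a
 * floating-point constant becomes FILTER_OP_LOAD_DOUBLE, the string
 * "foo*" (star only at the end, handled as a legacy string) becomes
 * FILTER_OP_LOAD_STRING, and a full star-globbing pattern such as
 * "*foo*" becomes FILTER_OP_LOAD_STAR_GLOB_STRING.
 */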
static
int visit_node_load(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;

	switch (node->data_type) {
	case IR_DATA_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown data type in %s\n",
			__func__);
		return -EINVAL;

	case IR_DATA_STRING:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ strlen(node->u.load.u.string.value) + 1;

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;

		switch (node->u.load.u.string.type) {
		case IR_LOAD_STRING_TYPE_GLOB_STAR:
			/*
			 * We explicitly tell the interpreter here that
			 * this load is a full star globbing pattern so
			 * that the appropriate matching function can be
			 * called. Also, see comment below.
			 */
			insn->op = FILTER_OP_LOAD_STAR_GLOB_STRING;
			break;
		default:
			/*
			 * This is the "legacy" string, which includes
			 * star globbing patterns with a star only at
			 * the end. Both "plain" and "star at the end"
			 * literal strings are handled at the same place
			 * by the tracer's filter bytecode interpreter,
			 * whereas full star globbing patterns (stars
			 * can be anywhere in the string) are a special
			 * case.
			 */
			insn->op = FILTER_OP_LOAD_STRING;
			break;
		}

		strcpy(insn->data, node->u.load.u.string.value);
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_NUMERIC:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct literal_numeric);

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_S64;
		memcpy(insn->data, &node->u.load.u.num, sizeof(int64_t));
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_FLOAT:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct literal_double);

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_DOUBLE;
		memcpy(insn->data, &node->u.load.u.flt, sizeof(double));
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_EXPRESSION:
		return visit_node_load_expression(ctx, node);
	}
}

static
int visit_node_unary(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct unary_op insn;

	/* Visit child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.unary.child);
	if (ret)
		return ret;

	/* Generate unary bytecode instruction */
	switch (node->u.unary.type) {
	case AST_UNARY_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown unary node type in %s\n",
			__func__);
		return -EINVAL;
	case AST_UNARY_PLUS:
		/* Nothing to do. */
		return 0;
	case AST_UNARY_MINUS:
		insn.op = FILTER_OP_UNARY_MINUS;
		return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
	case AST_UNARY_NOT:
		insn.op = FILTER_OP_UNARY_NOT;
		return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
	case AST_UNARY_BIT_NOT:
		insn.op = FILTER_OP_UNARY_BIT_NOT;
		return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
	}
}

/*
 * Binary comparator nesting is disallowed. This allows fitting into
 * only 2 registers.
 */
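/*
 * Both operands are emitted first (left, then right), followed by a
 * single binary_op instruction.  For example, "$ctx.vpid > 42" roughly
 * becomes: a context reference load for "$ctx.vpid", FILTER_OP_LOAD_S64
 * 42, FILTER_OP_GT.
 */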
static
int visit_node_binary(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct binary_op insn;

	/* Visit child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
	if (ret)
		return ret;
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
	if (ret)
		return ret;

	switch (node->u.binary.type) {
	case AST_OP_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown binary node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_AND:
	case AST_OP_OR:
		fprintf(stderr, "[error] Unexpected logical node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_MUL:
		insn.op = FILTER_OP_MUL;
		break;
	case AST_OP_DIV:
		insn.op = FILTER_OP_DIV;
		break;
	case AST_OP_MOD:
		insn.op = FILTER_OP_MOD;
		break;
	case AST_OP_PLUS:
		insn.op = FILTER_OP_PLUS;
		break;
	case AST_OP_MINUS:
		insn.op = FILTER_OP_MINUS;
		break;
	case AST_OP_BIT_RSHIFT:
		insn.op = FILTER_OP_BIT_RSHIFT;
		break;
	case AST_OP_BIT_LSHIFT:
		insn.op = FILTER_OP_BIT_LSHIFT;
		break;
	case AST_OP_BIT_AND:
		insn.op = FILTER_OP_BIT_AND;
		break;
	case AST_OP_BIT_OR:
		insn.op = FILTER_OP_BIT_OR;
		break;
	case AST_OP_BIT_XOR:
		insn.op = FILTER_OP_BIT_XOR;
		break;

	case AST_OP_EQ:
		insn.op = FILTER_OP_EQ;
		break;
	case AST_OP_NE:
		insn.op = FILTER_OP_NE;
		break;
	case AST_OP_GT:
		insn.op = FILTER_OP_GT;
		break;
	case AST_OP_LT:
		insn.op = FILTER_OP_LT;
		break;
	case AST_OP_GE:
		insn.op = FILTER_OP_GE;
		break;
	case AST_OP_LE:
		insn.op = FILTER_OP_LE;
		break;
	}
	return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
}

/*
 * A logical op always returns an s64 (1 or 0).
 */
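/*
 * For "A && B" the generated layout is: [code for A] [cast to s64 if
 * needed] FILTER_OP_AND [code for B] [cast if needed].  The AND
 * instruction's skip_offset is patched to point just past B's code, so
 * the interpreter can skip B entirely (short-circuit) when A alone
 * determines the result.  The same applies to FILTER_OP_OR.
 */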
static
int visit_node_logical(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct logical_op insn;
	uint16_t skip_offset_loc;
	uint16_t target_loc;

	/* Visit left child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
	if (ret)
		return ret;
	/* Cast to s64 if float or field ref */
	if ((node->u.binary.left->data_type == IR_DATA_FIELD_REF
			|| node->u.binary.left->data_type == IR_DATA_GET_CONTEXT_REF
			|| node->u.binary.left->data_type == IR_DATA_EXPRESSION)
			|| node->u.binary.left->data_type == IR_DATA_FLOAT) {
		struct cast_op cast_insn;

		if (node->u.binary.left->data_type == IR_DATA_FIELD_REF
				|| node->u.binary.left->data_type == IR_DATA_GET_CONTEXT_REF
				|| node->u.binary.left->data_type == IR_DATA_EXPRESSION) {
			cast_insn.op = FILTER_OP_CAST_TO_S64;
		} else {
			cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
		}
		ret = bytecode_push(&ctx->bytecode, &cast_insn,
				1, sizeof(cast_insn));
		if (ret)
			return ret;
	}
	switch (node->u.logical.type) {
	default:
		fprintf(stderr, "[error] Unknown node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_AND:
		insn.op = FILTER_OP_AND;
		break;
	case AST_OP_OR:
		insn.op = FILTER_OP_OR;
		break;
	}
	insn.skip_offset = (uint16_t) -1UL;	/* Temporary */
	ret = bytecode_push_logical(&ctx->bytecode, &insn, 1, sizeof(insn),
			&skip_offset_loc);
	if (ret)
		return ret;
	/* Visit right child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
	if (ret)
		return ret;
	/* Cast to s64 if float or field ref */
	if ((node->u.binary.right->data_type == IR_DATA_FIELD_REF
			|| node->u.binary.right->data_type == IR_DATA_GET_CONTEXT_REF
			|| node->u.binary.right->data_type == IR_DATA_EXPRESSION)
			|| node->u.binary.right->data_type == IR_DATA_FLOAT) {
		struct cast_op cast_insn;

		if (node->u.binary.right->data_type == IR_DATA_FIELD_REF
				|| node->u.binary.right->data_type == IR_DATA_GET_CONTEXT_REF
				|| node->u.binary.right->data_type == IR_DATA_EXPRESSION) {
			cast_insn.op = FILTER_OP_CAST_TO_S64;
		} else {
			cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
		}
		ret = bytecode_push(&ctx->bytecode, &cast_insn,
				1, sizeof(cast_insn));
		if (ret)
			return ret;
	}
	/* We now know where the logical op can skip. */
	target_loc = (uint16_t) bytecode_get_len(&ctx->bytecode->b);
	ret = bytecode_patch(&ctx->bytecode,
			&target_loc,		/* Offset to jump to */
			skip_offset_loc,	/* Where to patch */
			sizeof(uint16_t));
	return ret;
}

/*
 * Postorder traversal of the tree. We need the children's results
 * before we can evaluate the parent.
 */
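/*
 * For example, "(a + 1) > b" is emitted as: load a, load 1,
 * FILTER_OP_PLUS, load b, FILTER_OP_GT.  Logical AND/OR nodes are the
 * exception: they emit their own instruction between the left and right
 * subtrees to allow short-circuit evaluation (see visit_node_logical()).
 */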
static
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
		struct ir_op *node)
{
	switch (node->op) {
	case IR_OP_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown node type in %s\n",
			__func__);
		return -EINVAL;

	case IR_OP_ROOT:
		return visit_node_root(ctx, node);
	case IR_OP_LOAD:
		return visit_node_load(ctx, node);
	case IR_OP_UNARY:
		return visit_node_unary(ctx, node);
	case IR_OP_BINARY:
		return visit_node_binary(ctx, node);
	case IR_OP_LOGICAL:
		return visit_node_logical(ctx, node);
	}
}

LTTNG_HIDDEN
void filter_bytecode_free(struct filter_parser_ctx *ctx)
{
	if (!ctx) {
		return;
	}

	if (ctx->bytecode) {
		free(ctx->bytecode);
		ctx->bytecode = NULL;
	}

	if (ctx->bytecode_reloc) {
		free(ctx->bytecode_reloc);
		ctx->bytecode_reloc = NULL;
	}
}

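/*
 * Generate bytecode for the whole IR tree rooted at ctx->ir_root.  The
 * resulting buffer is laid out as the instruction stream followed by
 * the relocation (symbol) table; b.reloc_table_offset records where the
 * relocation table starts.
 */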
LTTNG_HIDDEN
int filter_visitor_bytecode_generate(struct filter_parser_ctx *ctx)
{
	int ret;

	ret = bytecode_init(&ctx->bytecode);
	if (ret)
		return ret;
	ret = bytecode_init(&ctx->bytecode_reloc);
	if (ret)
		goto error;
	ret = recursive_visit_gen_bytecode(ctx, ctx->ir_root);
	if (ret)
		goto error;

	/* Finally, append symbol table to bytecode */
	ctx->bytecode->b.reloc_table_offset = bytecode_get_len(&ctx->bytecode->b);
	return bytecode_push(&ctx->bytecode, ctx->bytecode_reloc->b.data,
			1, bytecode_get_len(&ctx->bytecode_reloc->b));

error:
	filter_bytecode_free(ctx);
	return ret;
}