Implement support for brackets in filter expressions
[lttng-tools.git] / src / lib / lttng-ctl / filter / filter-visitor-generate-bytecode.c
CommitLineData
953192ba
MD
1/*
2 * filter-visitor-generate-bytecode.c
3 *
4 * LTTng filter bytecode generation
5 *
6 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU Lesser General Public License, version 2.1 only,
10 * as published by the Free Software Foundation.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public License
18 * along with this library; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
#include <errno.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#include <common/align.h>
#include <common/compat/string.h>
46820c8b 27
953192ba
MD
28#include "filter-bytecode.h"
29#include "filter-ir.h"
30#include "filter-ast.h"
31
a187da1a
DG
32#include <common/macros.h>
33
953192ba
MD
34#ifndef max_t
35#define max_t(type, a, b) ((type) ((a) > (b) ? (a) : (b)))
36#endif
37
953192ba
MD
38#define INIT_ALLOC_SIZE 4
39
40static
41int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
42 struct ir_op *node);
43
/*
 * Return ceil(log2(count)): the smallest power-of-two exponent that is
 * >= count. Used to round allocation sizes up to a power of two.
 * Assumes count > 0 (lttng_fls(0) would yield a negative order).
 */
static inline int get_count_order(unsigned int count)
{
	int order = lttng_fls(count) - 1;

	/* Values that are not a power of two need one extra bit. */
	if (count & (count - 1))
		order++;
	return order;
}
53
953192ba 54static
53a80697 55int bytecode_init(struct lttng_filter_bytecode_alloc **fb)
953192ba 56{
1029587a
MD
57 uint32_t alloc_len;
58
59 alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + INIT_ALLOC_SIZE;
60 *fb = calloc(alloc_len, 1);
953192ba
MD
61 if (!*fb) {
62 return -ENOMEM;
63 } else {
1029587a 64 (*fb)->alloc_len = alloc_len;
953192ba
MD
65 return 0;
66 }
67}
68
69static
53a80697 70int32_t bytecode_reserve(struct lttng_filter_bytecode_alloc **fb, uint32_t align, uint32_t len)
953192ba
MD
71{
72 int32_t ret;
73 uint32_t padding = offset_align((*fb)->b.len, align);
ec96a8f6 74 uint32_t new_len = (*fb)->b.len + padding + len;
1029587a 75 uint32_t new_alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + new_len;
ec96a8f6 76 uint32_t old_alloc_len = (*fb)->alloc_len;
953192ba 77
ec96a8f6 78 if (new_len > LTTNG_FILTER_MAX_LEN)
5ddb0a08
CB
79 return -EINVAL;
80
ec96a8f6 81 if (new_alloc_len > old_alloc_len) {
d0b96690
DG
82 struct lttng_filter_bytecode_alloc *newptr;
83
ec96a8f6
MD
84 new_alloc_len =
85 max_t(uint32_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
d0b96690
DG
86 newptr = realloc(*fb, new_alloc_len);
87 if (!newptr)
953192ba 88 return -ENOMEM;
d0b96690 89 *fb = newptr;
1029587a 90 /* We zero directly the memory from start of allocation. */
ec96a8f6
MD
91 memset(&((char *) *fb)[old_alloc_len], 0, new_alloc_len - old_alloc_len);
92 (*fb)->alloc_len = new_alloc_len;
953192ba
MD
93 }
94 (*fb)->b.len += padding;
95 ret = (*fb)->b.len;
96 (*fb)->b.len += len;
97 return ret;
98}
99
100static
53a80697 101int bytecode_push(struct lttng_filter_bytecode_alloc **fb, const void *data,
953192ba
MD
102 uint32_t align, uint32_t len)
103{
104 int32_t offset;
105
106 offset = bytecode_reserve(fb, align, len);
107 if (offset < 0)
108 return offset;
109 memcpy(&(*fb)->b.data[offset], data, len);
110 return 0;
111}
112
113static
53a80697 114int bytecode_push_logical(struct lttng_filter_bytecode_alloc **fb,
953192ba
MD
115 struct logical_op *data,
116 uint32_t align, uint32_t len,
117 uint16_t *skip_offset)
118{
119 int32_t offset;
120
121 offset = bytecode_reserve(fb, align, len);
122 if (offset < 0)
123 return offset;
124 memcpy(&(*fb)->b.data[offset], data, len);
125 *skip_offset =
126 (void *) &((struct logical_op *) &(*fb)->b.data[offset])->skip_offset
127 - (void *) &(*fb)->b.data[0];
128 return 0;
129}
130
131static
53a80697 132int bytecode_patch(struct lttng_filter_bytecode_alloc **fb,
953192ba
MD
133 const void *data,
134 uint16_t offset,
135 uint32_t len)
136{
137 if (offset >= (*fb)->b.len) {
138 return -EINVAL;
139 }
140 memcpy(&(*fb)->b.data[offset], data, len);
141 return 0;
142}
143
144static
145int visit_node_root(struct filter_parser_ctx *ctx, struct ir_op *node)
146{
147 int ret;
148 struct return_op insn;
149
150 /* Visit child */
151 ret = recursive_visit_gen_bytecode(ctx, node->u.root.child);
152 if (ret)
153 return ret;
154
155 /* Generate end of bytecode instruction */
156 insn.op = FILTER_OP_RETURN;
157 return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
158}
159
953192ba
MD
160static
161int visit_node_load(struct filter_parser_ctx *ctx, struct ir_op *node)
162{
163 int ret;
164
165 switch (node->data_type) {
166 case IR_DATA_UNKNOWN:
167 default:
168 fprintf(stderr, "[error] Unknown data type in %s\n",
169 __func__);
170 return -EINVAL;
171
172 case IR_DATA_STRING:
173 {
174 struct load_op *insn;
175 uint32_t insn_len = sizeof(struct load_op)
9f449915 176 + strlen(node->u.load.u.string.value) + 1;
953192ba
MD
177
178 insn = calloc(insn_len, 1);
179 if (!insn)
180 return -ENOMEM;
9f449915
PP
181
182 switch (node->u.load.u.string.type) {
183 case IR_LOAD_STRING_TYPE_GLOB_STAR:
184 /*
185 * We explicitly tell the interpreter here that
186 * this load is a full star globbing pattern so
187 * that the appropriate matching function can be
188 * called. Also, see comment below.
189 */
190 insn->op = FILTER_OP_LOAD_STAR_GLOB_STRING;
191 break;
192 default:
193 /*
194 * This is the "legacy" string, which includes
195 * star globbing patterns with a star only at
196 * the end. Both "plain" and "star at the end"
197 * literal strings are handled at the same place
198 * by the tracer's filter bytecode interpreter,
199 * whereas full star globbing patterns (stars
200 * can be anywhere in the string) is a special
201 * case.
202 */
203 insn->op = FILTER_OP_LOAD_STRING;
204 break;
205 }
206
207 strcpy(insn->data, node->u.load.u.string.value);
953192ba
MD
208 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
209 free(insn);
210 return ret;
211 }
212 case IR_DATA_NUMERIC:
213 {
214 struct load_op *insn;
215 uint32_t insn_len = sizeof(struct load_op)
216 + sizeof(struct literal_numeric);
217
218 insn = calloc(insn_len, 1);
219 if (!insn)
220 return -ENOMEM;
221 insn->op = FILTER_OP_LOAD_S64;
58d494e4 222 memcpy(insn->data, &node->u.load.u.num, sizeof(int64_t));
953192ba
MD
223 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
224 free(insn);
225 return ret;
226 }
e90d8561
MD
227 case IR_DATA_FLOAT:
228 {
229 struct load_op *insn;
230 uint32_t insn_len = sizeof(struct load_op)
231 + sizeof(struct literal_double);
232
233 insn = calloc(insn_len, 1);
234 if (!insn)
235 return -ENOMEM;
236 insn->op = FILTER_OP_LOAD_DOUBLE;
58d494e4 237 memcpy(insn->data, &node->u.load.u.flt, sizeof(double));
e90d8561
MD
238 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
239 free(insn);
240 return ret;
241 }
586dc72f
MD
242 case IR_DATA_FIELD_REF: /* fall-through */
243 case IR_DATA_GET_CONTEXT_REF:
953192ba
MD
244 {
245 struct load_op *insn;
246 uint32_t insn_len = sizeof(struct load_op)
247 + sizeof(struct field_ref);
248 struct field_ref ref_offset;
ec96a8f6
MD
249 uint32_t reloc_offset_u32;
250 uint16_t reloc_offset;
953192ba
MD
251
252 insn = calloc(insn_len, 1);
253 if (!insn)
254 return -ENOMEM;
661dfdd1 255 switch (node->data_type) {
586dc72f
MD
256 case IR_DATA_FIELD_REF:
257 insn->op = FILTER_OP_LOAD_FIELD_REF;
258 break;
259 case IR_DATA_GET_CONTEXT_REF:
260 insn->op = FILTER_OP_GET_CONTEXT_REF;
261 break;
262 default:
3a68137c 263 free(insn);
586dc72f
MD
264 return -EINVAL;
265 }
953192ba
MD
266 ref_offset.offset = (uint16_t) -1U;
267 memcpy(insn->data, &ref_offset, sizeof(ref_offset));
65775683 268 /* reloc_offset points to struct load_op */
ec96a8f6
MD
269 reloc_offset_u32 = bytecode_get_len(&ctx->bytecode->b);
270 if (reloc_offset_u32 > LTTNG_FILTER_MAX_LEN - 1) {
271 free(insn);
272 return -EINVAL;
273 }
274 reloc_offset = (uint16_t) reloc_offset_u32;
953192ba
MD
275 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
276 if (ret) {
277 free(insn);
278 return ret;
279 }
280 /* append reloc */
281 ret = bytecode_push(&ctx->bytecode_reloc, &reloc_offset,
282 1, sizeof(reloc_offset));
283 if (ret) {
284 free(insn);
285 return ret;
286 }
287 ret = bytecode_push(&ctx->bytecode_reloc, node->u.load.u.ref,
288 1, strlen(node->u.load.u.ref) + 1);
289 free(insn);
290 return ret;
291 }
661dfdd1
MD
292 case IR_DATA_FIELD_REF_INDEX: /* fall-through */
293 case IR_DATA_GET_CONTEXT_REF_INDEX:
294 {
295 struct load_op *insn;
296 uint32_t insn_len = sizeof(struct load_op)
297 + sizeof(struct field_ref_index);
298 struct field_ref_index ref_index_offset;
299 uint32_t reloc_offset_u32;
300 uint16_t reloc_offset;
301
302 insn = calloc(insn_len, 1);
303 if (!insn)
304 return -ENOMEM;
305 switch (node->data_type) {
306 case IR_DATA_FIELD_REF_INDEX:
307 insn->op = FILTER_OP_LOAD_FIELD_REF_INDEX;
308 break;
309 case IR_DATA_GET_CONTEXT_REF_INDEX:
310 insn->op = FILTER_OP_GET_CONTEXT_REF_INDEX;
311 break;
312 default:
313 free(insn);
314 return -EINVAL;
315 }
316 ref_index_offset.offset = (uint16_t) -1U;
317 ref_index_offset.index = node->u.load.u.ref_index.index;
318 memcpy(insn->data, &ref_index_offset, sizeof(ref_index_offset));
319 /* reloc_offset points to struct load_op */
320 reloc_offset_u32 = bytecode_get_len(&ctx->bytecode->b);
321 if (reloc_offset_u32 > LTTNG_FILTER_MAX_LEN - 1) {
322 free(insn);
323 return -EINVAL;
324 }
325 reloc_offset = (uint16_t) reloc_offset_u32;
326 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
327 if (ret) {
328 free(insn);
329 return ret;
330 }
331 /* append reloc */
332 ret = bytecode_push(&ctx->bytecode_reloc, &reloc_offset,
333 1, sizeof(reloc_offset));
334 if (ret) {
335 free(insn);
336 return ret;
337 }
338 ret = bytecode_push(&ctx->bytecode_reloc, node->u.load.u.ref_index.symbol,
339 1, strlen(node->u.load.u.ref_index.symbol) + 1);
340 free(insn);
341 return ret;
342 }
953192ba
MD
343 }
344}
345
346static
347int visit_node_unary(struct filter_parser_ctx *ctx, struct ir_op *node)
348{
349 int ret;
350 struct unary_op insn;
351
352 /* Visit child */
353 ret = recursive_visit_gen_bytecode(ctx, node->u.unary.child);
354 if (ret)
355 return ret;
356
357 /* Generate end of bytecode instruction */
358 switch (node->u.unary.type) {
359 case AST_UNARY_UNKNOWN:
360 default:
361 fprintf(stderr, "[error] Unknown unary node type in %s\n",
362 __func__);
363 return -EINVAL;
364 case AST_UNARY_PLUS:
365 /* Nothing to do. */
366 return 0;
367 case AST_UNARY_MINUS:
368 insn.op = FILTER_OP_UNARY_MINUS;
953192ba
MD
369 return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
370 case AST_UNARY_NOT:
371 insn.op = FILTER_OP_UNARY_NOT;
953192ba
MD
372 return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
373 }
374}
375
376/*
377 * Binary comparator nesting is disallowed. This allows fitting into
378 * only 2 registers.
379 */
380static
381int visit_node_binary(struct filter_parser_ctx *ctx, struct ir_op *node)
382{
383 int ret;
384 struct binary_op insn;
385
386 /* Visit child */
387 ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
388 if (ret)
389 return ret;
390 ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
391 if (ret)
392 return ret;
393
394 switch (node->u.binary.type) {
395 case AST_OP_UNKNOWN:
396 default:
397 fprintf(stderr, "[error] Unknown unary node type in %s\n",
398 __func__);
399 return -EINVAL;
400
401 case AST_OP_AND:
402 case AST_OP_OR:
403 fprintf(stderr, "[error] Unexpected logical node type in %s\n",
404 __func__);
405 return -EINVAL;
406
407 case AST_OP_MUL:
408 insn.op = FILTER_OP_MUL;
409 break;
410 case AST_OP_DIV:
411 insn.op = FILTER_OP_DIV;
412 break;
413 case AST_OP_MOD:
414 insn.op = FILTER_OP_MOD;
415 break;
416 case AST_OP_PLUS:
417 insn.op = FILTER_OP_PLUS;
418 break;
419 case AST_OP_MINUS:
420 insn.op = FILTER_OP_MINUS;
421 break;
422 case AST_OP_RSHIFT:
423 insn.op = FILTER_OP_RSHIFT;
424 break;
425 case AST_OP_LSHIFT:
426 insn.op = FILTER_OP_LSHIFT;
427 break;
428 case AST_OP_BIN_AND:
429 insn.op = FILTER_OP_BIN_AND;
430 break;
431 case AST_OP_BIN_OR:
432 insn.op = FILTER_OP_BIN_OR;
433 break;
434 case AST_OP_BIN_XOR:
435 insn.op = FILTER_OP_BIN_XOR;
436 break;
437
438 case AST_OP_EQ:
439 insn.op = FILTER_OP_EQ;
440 break;
441 case AST_OP_NE:
442 insn.op = FILTER_OP_NE;
443 break;
444 case AST_OP_GT:
445 insn.op = FILTER_OP_GT;
446 break;
447 case AST_OP_LT:
448 insn.op = FILTER_OP_LT;
449 break;
450 case AST_OP_GE:
451 insn.op = FILTER_OP_GE;
452 break;
453 case AST_OP_LE:
454 insn.op = FILTER_OP_LE;
455 break;
456 }
457 return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
458}
459
8cf9540a
MD
460/*
461 * A logical op always return a s64 (1 or 0).
462 */
953192ba
MD
463static
464int visit_node_logical(struct filter_parser_ctx *ctx, struct ir_op *node)
465{
466 int ret;
467 struct logical_op insn;
468 uint16_t skip_offset_loc;
469 uint16_t target_loc;
470
471 /* Visit left child */
472 ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
473 if (ret)
474 return ret;
8cf9540a 475 /* Cast to s64 if float or field ref */
586dc72f 476 if ((node->u.binary.left->data_type == IR_DATA_FIELD_REF
661dfdd1
MD
477 || node->u.binary.left->data_type == IR_DATA_GET_CONTEXT_REF
478 || node->u.binary.left->data_type == IR_DATA_FIELD_REF_INDEX
479 || node->u.binary.left->data_type == IR_DATA_GET_CONTEXT_REF_INDEX)
8cf9540a
MD
480 || node->u.binary.left->data_type == IR_DATA_FLOAT) {
481 struct cast_op cast_insn;
482
586dc72f 483 if (node->u.binary.left->data_type == IR_DATA_FIELD_REF
661dfdd1
MD
484 || node->u.binary.left->data_type == IR_DATA_GET_CONTEXT_REF
485 || node->u.binary.left->data_type == IR_DATA_FIELD_REF_INDEX
486 || node->u.binary.left->data_type == IR_DATA_GET_CONTEXT_REF_INDEX) {
29fefef8
MD
487 cast_insn.op = FILTER_OP_CAST_TO_S64;
488 } else {
489 cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
490 }
8cf9540a
MD
491 ret = bytecode_push(&ctx->bytecode, &cast_insn,
492 1, sizeof(cast_insn));
493 if (ret)
494 return ret;
495 }
953192ba
MD
496 switch (node->u.logical.type) {
497 default:
498 fprintf(stderr, "[error] Unknown node type in %s\n",
499 __func__);
500 return -EINVAL;
501
502 case AST_OP_AND:
503 insn.op = FILTER_OP_AND;
504 break;
505 case AST_OP_OR:
506 insn.op = FILTER_OP_OR;
507 break;
508 }
509 insn.skip_offset = (uint16_t) -1UL; /* Temporary */
510 ret = bytecode_push_logical(&ctx->bytecode, &insn, 1, sizeof(insn),
511 &skip_offset_loc);
512 if (ret)
513 return ret;
514 /* Visit right child */
515 ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
516 if (ret)
517 return ret;
8cf9540a 518 /* Cast to s64 if float or field ref */
586dc72f 519 if ((node->u.binary.right->data_type == IR_DATA_FIELD_REF
661dfdd1
MD
520 || node->u.binary.right->data_type == IR_DATA_GET_CONTEXT_REF
521 || node->u.binary.right->data_type == IR_DATA_FIELD_REF_INDEX
522 || node->u.binary.right->data_type == IR_DATA_GET_CONTEXT_REF_INDEX)
8cf9540a
MD
523 || node->u.binary.right->data_type == IR_DATA_FLOAT) {
524 struct cast_op cast_insn;
525
586dc72f 526 if (node->u.binary.right->data_type == IR_DATA_FIELD_REF
661dfdd1
MD
527 || node->u.binary.right->data_type == IR_DATA_GET_CONTEXT_REF
528 || node->u.binary.right->data_type == IR_DATA_FIELD_REF_INDEX
529 || node->u.binary.right->data_type == IR_DATA_GET_CONTEXT_REF_INDEX) {
29fefef8
MD
530 cast_insn.op = FILTER_OP_CAST_TO_S64;
531 } else {
532 cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
533 }
8cf9540a
MD
534 ret = bytecode_push(&ctx->bytecode, &cast_insn,
535 1, sizeof(cast_insn));
536 if (ret)
537 return ret;
538 }
953192ba
MD
539 /* We now know where the logical op can skip. */
540 target_loc = (uint16_t) bytecode_get_len(&ctx->bytecode->b);
541 ret = bytecode_patch(&ctx->bytecode,
542 &target_loc, /* Offset to jump to */
543 skip_offset_loc, /* Where to patch */
544 sizeof(uint16_t));
545 return ret;
546}
547
548/*
549 * Postorder traversal of the tree. We need the children result before
550 * we can evaluate the parent.
551 */
552static
553int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
554 struct ir_op *node)
555{
556 switch (node->op) {
557 case IR_OP_UNKNOWN:
558 default:
559 fprintf(stderr, "[error] Unknown node type in %s\n",
560 __func__);
561 return -EINVAL;
562
563 case IR_OP_ROOT:
564 return visit_node_root(ctx, node);
565 case IR_OP_LOAD:
566 return visit_node_load(ctx, node);
567 case IR_OP_UNARY:
568 return visit_node_unary(ctx, node);
569 case IR_OP_BINARY:
570 return visit_node_binary(ctx, node);
571 case IR_OP_LOGICAL:
572 return visit_node_logical(ctx, node);
573 }
574}
575
a187da1a 576LTTNG_HIDDEN
953192ba
MD
577void filter_bytecode_free(struct filter_parser_ctx *ctx)
578{
7ca1dc6f
DG
579 if (!ctx) {
580 return;
581 }
582
3f0c8837
DG
583 if (ctx->bytecode) {
584 free(ctx->bytecode);
585 ctx->bytecode = NULL;
586 }
587
588 if (ctx->bytecode_reloc) {
589 free(ctx->bytecode_reloc);
590 ctx->bytecode_reloc = NULL;
591 }
953192ba
MD
592}
593
a187da1a 594LTTNG_HIDDEN
953192ba
MD
595int filter_visitor_bytecode_generate(struct filter_parser_ctx *ctx)
596{
597 int ret;
598
599 ret = bytecode_init(&ctx->bytecode);
600 if (ret)
601 return ret;
602 ret = bytecode_init(&ctx->bytecode_reloc);
603 if (ret)
604 goto error;
605 ret = recursive_visit_gen_bytecode(ctx, ctx->ir_root);
606 if (ret)
607 goto error;
608
609 /* Finally, append symbol table to bytecode */
610 ctx->bytecode->b.reloc_table_offset = bytecode_get_len(&ctx->bytecode->b);
611 return bytecode_push(&ctx->bytecode, ctx->bytecode_reloc->b.data,
612 1, bytecode_get_len(&ctx->bytecode_reloc->b));
613
614error:
615 filter_bytecode_free(ctx);
616 return ret;
617}
This page took 0.100478 seconds and 4 git commands to generate.