[lttng-tools.git] / src / lib / lttng-ctl / filter / filter-visitor-generate-bytecode.c
/*
 * filter-visitor-generate-bytecode.c
 *
 * LTTng filter bytecode generation
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License, version 2.1 only,
 * as published by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "align.h"
#include "filter-bytecode.h"
#include "filter-ir.h"
#include "filter-ast.h"

#include <common/macros.h>

#ifndef max_t
#define max_t(type, a, b)	((type) ((a) > (b) ? (a) : (b)))
#endif

//#define INIT_ALLOC_SIZE	PAGE_SIZE
#define INIT_ALLOC_SIZE		4

static
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
		struct ir_op *node);

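/*
 * Bit-twiddling helpers used to size allocations below: fls() returns the
 * 1-based position of the most significant bit set (e.g. fls(5) == 3,
 * fls(0) == 0), and get_count_order() returns the order of the smallest
 * power of two greater than or equal to its argument (e.g.
 * get_count_order(33) == 6, since 2^6 == 64).
 */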
static inline int fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

static inline int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

static
int bytecode_init(struct lttng_filter_bytecode_alloc **fb)
{
	uint32_t alloc_len;

	alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + INIT_ALLOC_SIZE;
	*fb = calloc(alloc_len, 1);
	if (!*fb) {
		return -ENOMEM;
	} else {
		(*fb)->alloc_len = alloc_len;
		return 0;
	}
}

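/*
 * bytecode_reserve() returns the (aligned) offset at which "len" bytes have
 * been reserved, growing the backing buffer as needed: the new allocation
 * size is the larger of the next power of two covering the required size
 * and twice the previous allocation, and the newly allocated bytes are
 * zeroed. Reservations that would push the bytecode past
 * LTTNG_FILTER_MAX_LEN fail with -EINVAL.
 */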
static
int32_t bytecode_reserve(struct lttng_filter_bytecode_alloc **fb, uint32_t align, uint32_t len)
{
	int32_t ret;
	uint32_t padding = offset_align((*fb)->b.len, align);
	uint32_t new_len = (*fb)->b.len + padding + len;
	uint32_t new_alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + new_len;
	uint32_t old_alloc_len = (*fb)->alloc_len;

	if (new_len > LTTNG_FILTER_MAX_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		struct lttng_filter_bytecode_alloc *newptr;

		new_alloc_len =
			max_t(uint32_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = realloc(*fb, new_alloc_len);
		if (!newptr)
			return -ENOMEM;
		*fb = newptr;
		/* Zero the newly allocated memory, from the end of the old allocation onward. */
		memset(&((char *) *fb)[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		(*fb)->alloc_len = new_alloc_len;
	}
	(*fb)->b.len += padding;
	ret = (*fb)->b.len;
	(*fb)->b.len += len;
	return ret;
}

static
int bytecode_push(struct lttng_filter_bytecode_alloc **fb, const void *data,
		uint32_t align, uint32_t len)
{
	int32_t offset;

	offset = bytecode_reserve(fb, align, len);
	if (offset < 0)
		return offset;
	memcpy(&(*fb)->b.data[offset], data, len);
	return 0;
}

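/*
 * Same as bytecode_push(), but also reports, through *skip_offset, the byte
 * offset of the pushed logical_op's skip_offset field within the bytecode,
 * so the caller can patch it with bytecode_patch() once the jump target is
 * known.
 */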
static
int bytecode_push_logical(struct lttng_filter_bytecode_alloc **fb,
		struct logical_op *data,
		uint32_t align, uint32_t len,
		uint16_t *skip_offset)
{
	int32_t offset;

	offset = bytecode_reserve(fb, align, len);
	if (offset < 0)
		return offset;
	memcpy(&(*fb)->b.data[offset], data, len);
	*skip_offset =
		(void *) &((struct logical_op *) &(*fb)->b.data[offset])->skip_offset
			- (void *) &(*fb)->b.data[0];
	return 0;
}

static
int bytecode_patch(struct lttng_filter_bytecode_alloc **fb,
		const void *data,
		uint16_t offset,
		uint32_t len)
{
	if (offset >= (*fb)->b.len) {
		return -EINVAL;
	}
	memcpy(&(*fb)->b.data[offset], data, len);
	return 0;
}

static
int visit_node_root(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct return_op insn;

	/* Visit child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.root.child);
	if (ret)
		return ret;

	/* Generate end of bytecode instruction */
	insn.op = FILTER_OP_RETURN;
	return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
}

static
int visit_node_load(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;

	switch (node->data_type) {
	case IR_DATA_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown data type in %s\n",
			__func__);
		return -EINVAL;

	case IR_DATA_STRING:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ strlen(node->u.load.u.string) + 1;

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_STRING;
		strcpy(insn->data, node->u.load.u.string);
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_NUMERIC:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct literal_numeric);

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_S64;
		*(int64_t *) insn->data = node->u.load.u.num;
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_FLOAT:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct literal_double);

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_DOUBLE;
		*(double *) insn->data = node->u.load.u.flt;
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
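	/*
	 * Field references cannot be resolved to a concrete offset at
	 * generation time: the load_op is emitted with a (uint16_t) -1
	 * placeholder offset, and a relocation entry is appended to
	 * ctx->bytecode_reloc (a 16-bit offset pointing at the load_op,
	 * followed by the null-terminated field name). The placeholder is
	 * presumably patched at a later stage, once the event's field layout
	 * is known; this visitor only records the relocation.
	 */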
	case IR_DATA_FIELD_REF:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct field_ref);
		struct field_ref ref_offset;
		uint32_t reloc_offset_u32;
		uint16_t reloc_offset;

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_FIELD_REF;
		ref_offset.offset = (uint16_t) -1U;
		memcpy(insn->data, &ref_offset, sizeof(ref_offset));
		/* reloc_offset points to struct load_op */
		reloc_offset_u32 = bytecode_get_len(&ctx->bytecode->b);
		if (reloc_offset_u32 > LTTNG_FILTER_MAX_LEN - 1) {
			free(insn);
			return -EINVAL;
		}
		reloc_offset = (uint16_t) reloc_offset_u32;
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		if (ret) {
			free(insn);
			return ret;
		}
		/* Append reloc: 16-bit offset followed by the field name. */
		ret = bytecode_push(&ctx->bytecode_reloc, &reloc_offset,
				1, sizeof(reloc_offset));
		if (ret) {
			free(insn);
			return ret;
		}
		ret = bytecode_push(&ctx->bytecode_reloc, node->u.load.u.ref,
				1, strlen(node->u.load.u.ref) + 1);
		free(insn);
		return ret;
	}
	}
}

static
int visit_node_unary(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct unary_op insn;

	/* Visit child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.unary.child);
	if (ret)
		return ret;

	/* Generate the unary bytecode instruction */
	switch (node->u.unary.type) {
	case AST_UNARY_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown unary node type in %s\n",
			__func__);
		return -EINVAL;
	case AST_UNARY_PLUS:
		/* Nothing to do. */
		return 0;
	case AST_UNARY_MINUS:
		insn.op = FILTER_OP_UNARY_MINUS;
		return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
	case AST_UNARY_NOT:
		insn.op = FILTER_OP_UNARY_NOT;
		return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
	}
}

/*
 * Binary comparator nesting is disallowed. This allows fitting into
 * only 2 registers.
 */
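/*
 * Illustration only (arbitrary field names): an expression nesting
 * comparators, such as "(intfield == 1) == 2", is expected to be rejected
 * before reaching this visitor, whereas "intfield == 1 && otherfield == 2"
 * combines comparators through a logical operator and is handled by
 * visit_node_logical() below.
 */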
static
int visit_node_binary(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct binary_op insn;

	/* Visit children */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
	if (ret)
		return ret;
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
	if (ret)
		return ret;

	switch (node->u.binary.type) {
	case AST_OP_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown binary node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_AND:
	case AST_OP_OR:
		fprintf(stderr, "[error] Unexpected logical node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_MUL:
		insn.op = FILTER_OP_MUL;
		break;
	case AST_OP_DIV:
		insn.op = FILTER_OP_DIV;
		break;
	case AST_OP_MOD:
		insn.op = FILTER_OP_MOD;
		break;
	case AST_OP_PLUS:
		insn.op = FILTER_OP_PLUS;
		break;
	case AST_OP_MINUS:
		insn.op = FILTER_OP_MINUS;
		break;
	case AST_OP_RSHIFT:
		insn.op = FILTER_OP_RSHIFT;
		break;
	case AST_OP_LSHIFT:
		insn.op = FILTER_OP_LSHIFT;
		break;
	case AST_OP_BIN_AND:
		insn.op = FILTER_OP_BIN_AND;
		break;
	case AST_OP_BIN_OR:
		insn.op = FILTER_OP_BIN_OR;
		break;
	case AST_OP_BIN_XOR:
		insn.op = FILTER_OP_BIN_XOR;
		break;

	case AST_OP_EQ:
		insn.op = FILTER_OP_EQ;
		break;
	case AST_OP_NE:
		insn.op = FILTER_OP_NE;
		break;
	case AST_OP_GT:
		insn.op = FILTER_OP_GT;
		break;
	case AST_OP_LT:
		insn.op = FILTER_OP_LT;
		break;
	case AST_OP_GE:
		insn.op = FILTER_OP_GE;
		break;
	case AST_OP_LE:
		insn.op = FILTER_OP_LE;
		break;
	}
	return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
}

/*
 * A logical op always returns an s64 (1 or 0).
 */
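/*
 * Sketch of the layout emitted below for a logical expression such as
 * "x && y" (hypothetical operands): left operand bytecode, optional cast to
 * s64, FILTER_OP_AND with a temporary skip_offset, right operand bytecode,
 * optional cast to s64; the skip_offset is then patched to point just past
 * the right operand, allowing the evaluator to skip it when the left-hand
 * side alone determines the result.
 */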
static
int visit_node_logical(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct logical_op insn;
	uint16_t skip_offset_loc;
	uint16_t target_loc;

	/* Visit left child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
	if (ret)
		return ret;
	/* Cast to s64 if float or field ref */
	if (node->u.binary.left->data_type == IR_DATA_FIELD_REF
			|| node->u.binary.left->data_type == IR_DATA_FLOAT) {
		struct cast_op cast_insn;

		if (node->u.binary.left->data_type == IR_DATA_FIELD_REF) {
			cast_insn.op = FILTER_OP_CAST_TO_S64;
		} else {
			cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
		}
		ret = bytecode_push(&ctx->bytecode, &cast_insn,
				1, sizeof(cast_insn));
		if (ret)
			return ret;
	}
	switch (node->u.logical.type) {
	default:
		fprintf(stderr, "[error] Unknown node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_AND:
		insn.op = FILTER_OP_AND;
		break;
	case AST_OP_OR:
		insn.op = FILTER_OP_OR;
		break;
	}
	insn.skip_offset = (uint16_t) -1UL;	/* Temporary */
	ret = bytecode_push_logical(&ctx->bytecode, &insn, 1, sizeof(insn),
			&skip_offset_loc);
	if (ret)
		return ret;
	/* Visit right child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
	if (ret)
		return ret;
	/* Cast to s64 if float or field ref */
	if (node->u.binary.right->data_type == IR_DATA_FIELD_REF
			|| node->u.binary.right->data_type == IR_DATA_FLOAT) {
		struct cast_op cast_insn;

		if (node->u.binary.right->data_type == IR_DATA_FIELD_REF) {
			cast_insn.op = FILTER_OP_CAST_TO_S64;
		} else {
			cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
		}
		ret = bytecode_push(&ctx->bytecode, &cast_insn,
				1, sizeof(cast_insn));
		if (ret)
			return ret;
	}
	/* We now know where the logical op can skip. */
	target_loc = (uint16_t) bytecode_get_len(&ctx->bytecode->b);
	ret = bytecode_patch(&ctx->bytecode,
			&target_loc,		/* Offset to jump to */
			skip_offset_loc,	/* Where to patch */
			sizeof(uint16_t));
	return ret;
}

/*
 * Postorder traversal of the tree. We need the children's results before
 * we can evaluate the parent.
 */
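/*
 * Illustration only (hypothetical field name): for a filter such as
 * "intfield > 2", the children are emitted before their parent, yielding
 * roughly FILTER_OP_LOAD_FIELD_REF (with a relocation for "intfield"),
 * FILTER_OP_LOAD_S64 (2), FILTER_OP_GT, then FILTER_OP_RETURN from the
 * root node.
 */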
static
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
		struct ir_op *node)
{
	switch (node->op) {
	case IR_OP_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown node type in %s\n",
			__func__);
		return -EINVAL;

	case IR_OP_ROOT:
		return visit_node_root(ctx, node);
	case IR_OP_LOAD:
		return visit_node_load(ctx, node);
	case IR_OP_UNARY:
		return visit_node_unary(ctx, node);
	case IR_OP_BINARY:
		return visit_node_binary(ctx, node);
	case IR_OP_LOGICAL:
		return visit_node_logical(ctx, node);
	}
}

LTTNG_HIDDEN
void filter_bytecode_free(struct filter_parser_ctx *ctx)
{
	free(ctx->bytecode);
	ctx->bytecode = NULL;
	free(ctx->bytecode_reloc);
	ctx->bytecode_reloc = NULL;
}

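/*
 * Layout produced by filter_visitor_bytecode_generate(): the instruction
 * stream comes first, then reloc_table_offset is set to the current length
 * and the relocation entries accumulated in ctx->bytecode_reloc are
 * appended after it:
 *
 *   [ instructions ... ][ (uint16_t offset, "field\0") ... ]
 *                       ^ b.reloc_table_offset
 */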
LTTNG_HIDDEN
int filter_visitor_bytecode_generate(struct filter_parser_ctx *ctx)
{
	int ret;

	ret = bytecode_init(&ctx->bytecode);
	if (ret)
		return ret;
	ret = bytecode_init(&ctx->bytecode_reloc);
	if (ret)
		goto error;
	ret = recursive_visit_gen_bytecode(ctx, ctx->ir_root);
	if (ret)
		goto error;

	/* Finally, append symbol table to bytecode */
	ctx->bytecode->b.reloc_table_offset = bytecode_get_len(&ctx->bytecode->b);
	return bytecode_push(&ctx->bytecode, ctx->bytecode_reloc->b.data,
			1, bytecode_get_len(&ctx->bytecode_reloc->b));

error:
	filter_bytecode_free(ctx);
	return ret;
}