/*
 * filter-visitor-generate-bytecode.c
 *
 * LTTng filter bytecode generation
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License, version 2.1 only,
 * as published by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <common/align.h>

#include "filter-bytecode.h"
#include "filter-ir.h"
#include "filter-ast.h"

#include <common/macros.h>

#ifndef max_t
#define max_t(type, a, b)	((type) ((a) > (b) ? (a) : (b)))
#endif

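/*
 * Initial payload size of the bytecode buffer. Keeping it tiny (rather
 * than the commented-out PAGE_SIZE alternative) presumably ensures the
 * realloc growth path in bytecode_reserve() is exercised even by short
 * filters.
 */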
//#define INIT_ALLOC_SIZE	PAGE_SIZE
#define INIT_ALLOC_SIZE		4

static
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
		struct ir_op *node);

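/*
 * fls: "find last set" -- returns the 1-based index of the most
 * significant set bit of x, or 0 if x == 0, by binary search over
 * half-words, bytes, nibbles, bit pairs and single bits.
 * E.g. fls(1) == 1, fls(0x80000000U) == 32.
 */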
static inline int fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

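/*
 * get_count_order: ceiling of log2(count), i.e. the order of the
 * smallest power of two >= count. Used below to round allocation sizes
 * up to a power of two. E.g. get_count_order(4) == 2,
 * get_count_order(5) == 3.
 */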
static inline int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

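/* Allocate the initial, zeroed bytecode buffer (header + INIT_ALLOC_SIZE bytes). */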
static
int bytecode_init(struct lttng_filter_bytecode_alloc **fb)
{
	uint32_t alloc_len;

	alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + INIT_ALLOC_SIZE;
	*fb = calloc(alloc_len, 1);
	if (!*fb) {
		return -ENOMEM;
	} else {
		(*fb)->alloc_len = alloc_len;
		return 0;
	}
}

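/*
 * Reserve `len` bytes at the current end of the bytecode, after padding
 * it to `align`. Grows the backing buffer geometrically (next power of
 * two, at least doubling) so repeated pushes stay amortized O(1).
 * Returns the offset of the reserved region, or a negative errno value.
 */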
static
int32_t bytecode_reserve(struct lttng_filter_bytecode_alloc **fb, uint32_t align, uint32_t len)
{
	int32_t ret;
	uint32_t padding = offset_align((*fb)->b.len, align);
	uint32_t new_len = (*fb)->b.len + padding + len;
	uint32_t new_alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + new_len;
	uint32_t old_alloc_len = (*fb)->alloc_len;

	if (new_len > LTTNG_FILTER_MAX_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		struct lttng_filter_bytecode_alloc *newptr;

		new_alloc_len =
			max_t(uint32_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = realloc(*fb, new_alloc_len);
		if (!newptr)
			return -ENOMEM;
		*fb = newptr;
		/* Zero the newly allocated memory, from the end of the old allocation onward. */
		memset(&((char *) *fb)[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		(*fb)->alloc_len = new_alloc_len;
	}
	(*fb)->b.len += padding;
	ret = (*fb)->b.len;
	(*fb)->b.len += len;
	return ret;
}

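/* Reserve space for one instruction and copy its `len` bytes into the stream. */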
static
int bytecode_push(struct lttng_filter_bytecode_alloc **fb, const void *data,
		uint32_t align, uint32_t len)
{
	int32_t offset;

	offset = bytecode_reserve(fb, align, len);
	if (offset < 0)
		return offset;
	memcpy(&(*fb)->b.data[offset], data, len);
	return 0;
}

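/*
 * Push a logical (AND/OR) instruction and report, through *skip_offset,
 * the byte offset of its skip_offset field within the bytecode, so the
 * caller can patch in the short-circuit jump target later.
 */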
static
int bytecode_push_logical(struct lttng_filter_bytecode_alloc **fb,
		struct logical_op *data,
		uint32_t align, uint32_t len,
		uint16_t *skip_offset)
{
	int32_t offset;

	offset = bytecode_reserve(fb, align, len);
	if (offset < 0)
		return offset;
	memcpy(&(*fb)->b.data[offset], data, len);
	*skip_offset =
		(void *) &((struct logical_op *) &(*fb)->b.data[offset])->skip_offset
			- (void *) &(*fb)->b.data[0];
	return 0;
}

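/* Overwrite `len` bytes at a previously reserved `offset` within the bytecode. */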
static
int bytecode_patch(struct lttng_filter_bytecode_alloc **fb,
		const void *data,
		uint16_t offset,
		uint32_t len)
{
	if (offset >= (*fb)->b.len) {
		return -EINVAL;
	}
	memcpy(&(*fb)->b.data[offset], data, len);
	return 0;
}

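/* Emit the child's bytecode, then terminate the program with a RETURN op. */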
static
int visit_node_root(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct return_op insn;

	/* Visit child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.root.child);
	if (ret)
		return ret;

	/* Generate end of bytecode instruction */
	insn.op = FILTER_OP_RETURN;
	return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
}

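/*
 * Emit a load instruction for a leaf of the IR: string, s64 and double
 * literals are emitted inline, while field and context references get a
 * placeholder offset (0xFFFF) plus a relocation entry (instruction
 * offset followed by the symbol name), to be resolved later, when the
 * bytecode is linked against the actual event description.
 */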
static
int visit_node_load(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;

	switch (node->data_type) {
	case IR_DATA_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown data type in %s\n",
			__func__);
		return -EINVAL;

	case IR_DATA_STRING:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ strlen(node->u.load.u.string) + 1;

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_STRING;
		strcpy(insn->data, node->u.load.u.string);
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_NUMERIC:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct literal_numeric);

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_S64;
		memcpy(insn->data, &node->u.load.u.num, sizeof(int64_t));
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_FLOAT:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct literal_double);

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_DOUBLE;
		memcpy(insn->data, &node->u.load.u.flt, sizeof(double));
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_FIELD_REF:	/* fall-through */
	case IR_DATA_GET_CONTEXT_REF:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct field_ref);
		struct field_ref ref_offset;
		uint32_t reloc_offset_u32;
		uint16_t reloc_offset;

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		switch (node->data_type) {
		case IR_DATA_FIELD_REF:
			insn->op = FILTER_OP_LOAD_FIELD_REF;
			break;
		case IR_DATA_GET_CONTEXT_REF:
			insn->op = FILTER_OP_GET_CONTEXT_REF;
			break;
		default:
			free(insn);
			return -EINVAL;
		}
		ref_offset.offset = (uint16_t) -1U;
		memcpy(insn->data, &ref_offset, sizeof(ref_offset));
		/* reloc_offset points to struct load_op */
		reloc_offset_u32 = bytecode_get_len(&ctx->bytecode->b);
		if (reloc_offset_u32 > LTTNG_FILTER_MAX_LEN - 1) {
			free(insn);
			return -EINVAL;
		}
		reloc_offset = (uint16_t) reloc_offset_u32;
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		if (ret) {
			free(insn);
			return ret;
		}
		/* append reloc */
		ret = bytecode_push(&ctx->bytecode_reloc, &reloc_offset,
				1, sizeof(reloc_offset));
		if (ret) {
			free(insn);
			return ret;
		}
		ret = bytecode_push(&ctx->bytecode_reloc, node->u.load.u.ref,
				1, strlen(node->u.load.u.ref) + 1);
		free(insn);
		return ret;
	}
	}
}

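/* Emit the operand's bytecode, then the unary op; unary plus is a no-op. */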
static
int visit_node_unary(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct unary_op insn;

	/* Visit child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.unary.child);
	if (ret)
		return ret;

	/* Generate the unary bytecode instruction */
	switch (node->u.unary.type) {
	case AST_UNARY_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown unary node type in %s\n",
			__func__);
		return -EINVAL;
	case AST_UNARY_PLUS:
		/* Nothing to do. */
		return 0;
	case AST_UNARY_MINUS:
		insn.op = FILTER_OP_UNARY_MINUS;
		return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
	case AST_UNARY_NOT:
		insn.op = FILTER_OP_UNARY_NOT;
		return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
	}
}

/*
 * Binary comparator nesting is disallowed. This allows fitting into
 * only 2 registers.
 */
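/*
 * For instance, an expression such as `(a == b) == c` is assumed to be
 * rejected earlier (during IR generation), so by the time a comparator
 * is emitted here its operands are plain loads, unary or arithmetic
 * operations.
 */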
static
int visit_node_binary(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct binary_op insn;

	/* Visit children */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
	if (ret)
		return ret;
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
	if (ret)
		return ret;

	switch (node->u.binary.type) {
	case AST_OP_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown binary node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_AND:
	case AST_OP_OR:
		fprintf(stderr, "[error] Unexpected logical node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_MUL:
		insn.op = FILTER_OP_MUL;
		break;
	case AST_OP_DIV:
		insn.op = FILTER_OP_DIV;
		break;
	case AST_OP_MOD:
		insn.op = FILTER_OP_MOD;
		break;
	case AST_OP_PLUS:
		insn.op = FILTER_OP_PLUS;
		break;
	case AST_OP_MINUS:
		insn.op = FILTER_OP_MINUS;
		break;
	case AST_OP_RSHIFT:
		insn.op = FILTER_OP_RSHIFT;
		break;
	case AST_OP_LSHIFT:
		insn.op = FILTER_OP_LSHIFT;
		break;
	case AST_OP_BIN_AND:
		insn.op = FILTER_OP_BIN_AND;
		break;
	case AST_OP_BIN_OR:
		insn.op = FILTER_OP_BIN_OR;
		break;
	case AST_OP_BIN_XOR:
		insn.op = FILTER_OP_BIN_XOR;
		break;

	case AST_OP_EQ:
		insn.op = FILTER_OP_EQ;
		break;
	case AST_OP_NE:
		insn.op = FILTER_OP_NE;
		break;
	case AST_OP_GT:
		insn.op = FILTER_OP_GT;
		break;
	case AST_OP_LT:
		insn.op = FILTER_OP_LT;
		break;
	case AST_OP_GE:
		insn.op = FILTER_OP_GE;
		break;
	case AST_OP_LE:
		insn.op = FILTER_OP_LE;
		break;
	}
	return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
}

/*
 * A logical op always returns an s64 (1 or 0).
 */
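/*
 * AND/OR short-circuit: emit the left operand, then the logical op with
 * a placeholder skip_offset, then the right operand; finally patch the
 * placeholder with the offset just past the right operand, so the
 * interpreter can skip it when the left operand alone decides the
 * result.
 */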
static
int visit_node_logical(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct logical_op insn;
	uint16_t skip_offset_loc;
	uint16_t target_loc;

	/* Visit left child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
	if (ret)
		return ret;
	/* Cast to s64 if float or field ref */
	if ((node->u.binary.left->data_type == IR_DATA_FIELD_REF
			|| node->u.binary.left->data_type == IR_DATA_GET_CONTEXT_REF)
			|| node->u.binary.left->data_type == IR_DATA_FLOAT) {
		struct cast_op cast_insn;

		if (node->u.binary.left->data_type == IR_DATA_FIELD_REF
				|| node->u.binary.left->data_type == IR_DATA_GET_CONTEXT_REF) {
			cast_insn.op = FILTER_OP_CAST_TO_S64;
		} else {
			cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
		}
		ret = bytecode_push(&ctx->bytecode, &cast_insn,
				1, sizeof(cast_insn));
		if (ret)
			return ret;
	}
	switch (node->u.logical.type) {
	default:
		fprintf(stderr, "[error] Unknown node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_AND:
		insn.op = FILTER_OP_AND;
		break;
	case AST_OP_OR:
		insn.op = FILTER_OP_OR;
		break;
	}
	insn.skip_offset = (uint16_t) -1UL;	/* Temporary */
	ret = bytecode_push_logical(&ctx->bytecode, &insn, 1, sizeof(insn),
			&skip_offset_loc);
	if (ret)
		return ret;
	/* Visit right child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
	if (ret)
		return ret;
	/* Cast to s64 if float or field ref */
	if ((node->u.binary.right->data_type == IR_DATA_FIELD_REF
			|| node->u.binary.right->data_type == IR_DATA_GET_CONTEXT_REF)
			|| node->u.binary.right->data_type == IR_DATA_FLOAT) {
		struct cast_op cast_insn;

		if (node->u.binary.right->data_type == IR_DATA_FIELD_REF
				|| node->u.binary.right->data_type == IR_DATA_GET_CONTEXT_REF) {
			cast_insn.op = FILTER_OP_CAST_TO_S64;
		} else {
			cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
		}
		ret = bytecode_push(&ctx->bytecode, &cast_insn,
				1, sizeof(cast_insn));
		if (ret)
			return ret;
	}
	/* We now know where the logical op can skip. */
	target_loc = (uint16_t) bytecode_get_len(&ctx->bytecode->b);
	ret = bytecode_patch(&ctx->bytecode,
			&target_loc,		/* Offset to jump to */
			skip_offset_loc,	/* Where to patch */
			sizeof(uint16_t));
	return ret;
}

/*
 * Postorder traversal of the tree. We need the children's results
 * before we can evaluate the parent.
 */
static
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
		struct ir_op *node)
{
	switch (node->op) {
	case IR_OP_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown node type in %s\n",
			__func__);
		return -EINVAL;

	case IR_OP_ROOT:
		return visit_node_root(ctx, node);
	case IR_OP_LOAD:
		return visit_node_load(ctx, node);
	case IR_OP_UNARY:
		return visit_node_unary(ctx, node);
	case IR_OP_BINARY:
		return visit_node_binary(ctx, node);
	case IR_OP_LOGICAL:
		return visit_node_logical(ctx, node);
	}
}

LTTNG_HIDDEN
void filter_bytecode_free(struct filter_parser_ctx *ctx)
{
	if (!ctx) {
		return;
	}

	if (ctx->bytecode) {
		free(ctx->bytecode);
		ctx->bytecode = NULL;
	}

	if (ctx->bytecode_reloc) {
		free(ctx->bytecode_reloc);
		ctx->bytecode_reloc = NULL;
	}
}

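/*
 * Entry point: generate bytecode from the IR tree rooted at
 * ctx->ir_root, then append the relocation (symbol) table at the end of
 * the program and record its offset in reloc_table_offset.
 */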
LTTNG_HIDDEN
int filter_visitor_bytecode_generate(struct filter_parser_ctx *ctx)
{
	int ret;

	ret = bytecode_init(&ctx->bytecode);
	if (ret)
		return ret;
	ret = bytecode_init(&ctx->bytecode_reloc);
	if (ret)
		goto error;
	ret = recursive_visit_gen_bytecode(ctx, ctx->ir_root);
	if (ret)
		goto error;

	/* Finally, append the symbol table to the bytecode. */
	ctx->bytecode->b.reloc_table_offset = bytecode_get_len(&ctx->bytecode->b);
	return bytecode_push(&ctx->bytecode, ctx->bytecode_reloc->b.data,
			1, bytecode_get_len(&ctx->bytecode_reloc->b));

error:
	filter_bytecode_free(ctx);
	return ret;
}
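
/*
 * Minimal usage sketch (hypothetical caller; how the parser-side
 * context is obtained is assumed, not part of this file):
 *
 *	struct filter_parser_ctx *ctx = ...;	// built by the filter parser,
 *						// with ctx->ir_root set by the
 *						// IR generation pass
 *	if (filter_visitor_bytecode_generate(ctx) == 0) {
 *		// ctx->bytecode now holds the program followed by the
 *		// relocation table at b.reloc_table_offset; ship it to the
 *		// tracer, then release it:
 *		filter_bytecode_free(ctx);
 *	}
 *	// On error, filter_visitor_bytecode_generate() has already freed
 *	// both buffers.
 */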