762d60464cc257e97c642bc7eb38c307e914937b
[lttng-tools.git] / src / lib / lttng-ctl / filter / filter-visitor-generate-bytecode.c
1 /*
2 * filter-visitor-generate-bytecode.c
3 *
4 * LTTng filter bytecode generation
5 *
6 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU Lesser General Public License, version 2.1 only,
10 * as published by the Free Software Foundation.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public License
18 * along with this library; if not, write to the Free Software Foundation,
19 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#include "align.h"
#include "filter-bytecode.h"
#include "filter-ir.h"
#include "filter-ast.h"

#include <common/macros.h>
31
32 #ifndef max_t
33 #define max_t(type, a, b) ((type) ((a) > (b) ? (a) : (b)))
34 #endif
35
36 //#define INIT_ALLOC_SIZE PAGE_SIZE
37 #define INIT_ALLOC_SIZE 4
38
39 static
40 int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
41 struct ir_op *node);
42
/*
 * Find last (most significant) bit set in x, 1-based: fls(0) == 0,
 * fls(1) == 1, fls(0x80000000) == 32.
 */
static inline int fls(unsigned int x)
{
	int bit;

	if (x == 0)
		return 0;
	/* Shift left until the top bit is set, counting down from 32. */
	for (bit = 32; !(x & 0x80000000U); bit--)
		x <<= 1;
	return bit;
}
71
/*
 * Order (log2, rounded up) of count: the smallest n such that
 * (1 << n) >= count. get_count_order(0) yields -1 (callers must not
 * shift by the result in that case).
 */
static inline int get_count_order(unsigned int count)
{
	unsigned int v = count;
	int order = -1;

	/* Position of the highest set bit, 0-based (-1 when count == 0). */
	while (v) {
		v >>= 1;
		order++;
	}
	/* Non-powers of two round up to the next order. */
	if (count & (count - 1))
		order++;
	return order;
}
81
82 static
83 int bytecode_init(struct lttng_filter_bytecode_alloc **fb)
84 {
85 uint32_t alloc_len;
86
87 alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + INIT_ALLOC_SIZE;
88 *fb = calloc(alloc_len, 1);
89 if (!*fb) {
90 return -ENOMEM;
91 } else {
92 (*fb)->alloc_len = alloc_len;
93 return 0;
94 }
95 }
96
/*
 * Reserve "len" bytes in the bytecode buffer, padding the current
 * length up to "align" first, growing the allocation if needed.
 * Returns the (aligned) offset of the reserved region on success, or a
 * negative errno value (-EINVAL if the bytecode would exceed
 * LTTNG_FILTER_MAX_LEN, -ENOMEM on allocation failure). May reallocate
 * and update *fb; callers must not keep pointers into the old buffer.
 */
static
int32_t bytecode_reserve(struct lttng_filter_bytecode_alloc **fb, uint32_t align, uint32_t len)
{
	int32_t ret;
	uint32_t padding = offset_align((*fb)->b.len, align);
	uint32_t new_len = (*fb)->b.len + padding + len;
	uint32_t new_alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + new_len;
	uint32_t old_alloc_len = (*fb)->alloc_len;

	if (new_len > LTTNG_FILTER_MAX_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		struct lttng_filter_bytecode_alloc *newptr;

		/*
		 * Grow geometrically: at least the next power of two
		 * above the requested size, and at least double the
		 * previous allocation (amortized O(1) appends).
		 */
		new_alloc_len =
			max_t(uint32_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = realloc(*fb, new_alloc_len);
		if (!newptr)
			return -ENOMEM;
		*fb = newptr;
		/* We zero directly the memory from start of allocation. */
		memset(&((char *) *fb)[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		(*fb)->alloc_len = new_alloc_len;
	}
	/* Commit the padding, then hand out the aligned offset. */
	(*fb)->b.len += padding;
	ret = (*fb)->b.len;
	(*fb)->b.len += len;
	return ret;
}
127
128 static
129 int bytecode_push(struct lttng_filter_bytecode_alloc **fb, const void *data,
130 uint32_t align, uint32_t len)
131 {
132 int32_t offset;
133
134 offset = bytecode_reserve(fb, align, len);
135 if (offset < 0)
136 return offset;
137 memcpy(&(*fb)->b.data[offset], data, len);
138 return 0;
139 }
140
141 static
142 int bytecode_push_logical(struct lttng_filter_bytecode_alloc **fb,
143 struct logical_op *data,
144 uint32_t align, uint32_t len,
145 uint16_t *skip_offset)
146 {
147 int32_t offset;
148
149 offset = bytecode_reserve(fb, align, len);
150 if (offset < 0)
151 return offset;
152 memcpy(&(*fb)->b.data[offset], data, len);
153 *skip_offset =
154 (void *) &((struct logical_op *) &(*fb)->b.data[offset])->skip_offset
155 - (void *) &(*fb)->b.data[0];
156 return 0;
157 }
158
159 static
160 int bytecode_patch(struct lttng_filter_bytecode_alloc **fb,
161 const void *data,
162 uint16_t offset,
163 uint32_t len)
164 {
165 if (offset >= (*fb)->b.len) {
166 return -EINVAL;
167 }
168 memcpy(&(*fb)->b.data[offset], data, len);
169 return 0;
170 }
171
172 static
173 int visit_node_root(struct filter_parser_ctx *ctx, struct ir_op *node)
174 {
175 int ret;
176 struct return_op insn;
177
178 /* Visit child */
179 ret = recursive_visit_gen_bytecode(ctx, node->u.root.child);
180 if (ret)
181 return ret;
182
183 /* Generate end of bytecode instruction */
184 insn.op = FILTER_OP_RETURN;
185 return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
186 }
187
188 static
189 int visit_node_load(struct filter_parser_ctx *ctx, struct ir_op *node)
190 {
191 int ret;
192
193 switch (node->data_type) {
194 case IR_DATA_UNKNOWN:
195 default:
196 fprintf(stderr, "[error] Unknown data type in %s\n",
197 __func__);
198 return -EINVAL;
199
200 case IR_DATA_STRING:
201 {
202 struct load_op *insn;
203 uint32_t insn_len = sizeof(struct load_op)
204 + strlen(node->u.load.u.string) + 1;
205
206 insn = calloc(insn_len, 1);
207 if (!insn)
208 return -ENOMEM;
209 insn->op = FILTER_OP_LOAD_STRING;
210 strcpy(insn->data, node->u.load.u.string);
211 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
212 free(insn);
213 return ret;
214 }
215 case IR_DATA_NUMERIC:
216 {
217 struct load_op *insn;
218 uint32_t insn_len = sizeof(struct load_op)
219 + sizeof(struct literal_numeric);
220
221 insn = calloc(insn_len, 1);
222 if (!insn)
223 return -ENOMEM;
224 insn->op = FILTER_OP_LOAD_S64;
225 *(int64_t *) insn->data = node->u.load.u.num;
226 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
227 free(insn);
228 return ret;
229 }
230 case IR_DATA_FLOAT:
231 {
232 struct load_op *insn;
233 uint32_t insn_len = sizeof(struct load_op)
234 + sizeof(struct literal_double);
235
236 insn = calloc(insn_len, 1);
237 if (!insn)
238 return -ENOMEM;
239 insn->op = FILTER_OP_LOAD_DOUBLE;
240 *(double *) insn->data = node->u.load.u.flt;
241 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
242 free(insn);
243 return ret;
244 }
245 case IR_DATA_FIELD_REF: /* fall-through */
246 case IR_DATA_GET_CONTEXT_REF:
247 {
248 struct load_op *insn;
249 uint32_t insn_len = sizeof(struct load_op)
250 + sizeof(struct field_ref);
251 struct field_ref ref_offset;
252 uint32_t reloc_offset_u32;
253 uint16_t reloc_offset;
254
255 insn = calloc(insn_len, 1);
256 if (!insn)
257 return -ENOMEM;
258 switch(node->data_type) {
259 case IR_DATA_FIELD_REF:
260 insn->op = FILTER_OP_LOAD_FIELD_REF;
261 break;
262 case IR_DATA_GET_CONTEXT_REF:
263 insn->op = FILTER_OP_GET_CONTEXT_REF;
264 break;
265 default:
266 free(insn);
267 return -EINVAL;
268 }
269 ref_offset.offset = (uint16_t) -1U;
270 memcpy(insn->data, &ref_offset, sizeof(ref_offset));
271 /* reloc_offset points to struct load_op */
272 reloc_offset_u32 = bytecode_get_len(&ctx->bytecode->b);
273 if (reloc_offset_u32 > LTTNG_FILTER_MAX_LEN - 1) {
274 free(insn);
275 return -EINVAL;
276 }
277 reloc_offset = (uint16_t) reloc_offset_u32;
278 ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
279 if (ret) {
280 free(insn);
281 return ret;
282 }
283 /* append reloc */
284 ret = bytecode_push(&ctx->bytecode_reloc, &reloc_offset,
285 1, sizeof(reloc_offset));
286 if (ret) {
287 free(insn);
288 return ret;
289 }
290 ret = bytecode_push(&ctx->bytecode_reloc, node->u.load.u.ref,
291 1, strlen(node->u.load.u.ref) + 1);
292 free(insn);
293 return ret;
294 }
295 }
296 }
297
298 static
299 int visit_node_unary(struct filter_parser_ctx *ctx, struct ir_op *node)
300 {
301 int ret;
302 struct unary_op insn;
303
304 /* Visit child */
305 ret = recursive_visit_gen_bytecode(ctx, node->u.unary.child);
306 if (ret)
307 return ret;
308
309 /* Generate end of bytecode instruction */
310 switch (node->u.unary.type) {
311 case AST_UNARY_UNKNOWN:
312 default:
313 fprintf(stderr, "[error] Unknown unary node type in %s\n",
314 __func__);
315 return -EINVAL;
316 case AST_UNARY_PLUS:
317 /* Nothing to do. */
318 return 0;
319 case AST_UNARY_MINUS:
320 insn.op = FILTER_OP_UNARY_MINUS;
321 return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
322 case AST_UNARY_NOT:
323 insn.op = FILTER_OP_UNARY_NOT;
324 return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
325 }
326 }
327
328 /*
329 * Binary comparator nesting is disallowed. This allows fitting into
330 * only 2 registers.
331 */
332 static
333 int visit_node_binary(struct filter_parser_ctx *ctx, struct ir_op *node)
334 {
335 int ret;
336 struct binary_op insn;
337
338 /* Visit child */
339 ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
340 if (ret)
341 return ret;
342 ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
343 if (ret)
344 return ret;
345
346 switch (node->u.binary.type) {
347 case AST_OP_UNKNOWN:
348 default:
349 fprintf(stderr, "[error] Unknown unary node type in %s\n",
350 __func__);
351 return -EINVAL;
352
353 case AST_OP_AND:
354 case AST_OP_OR:
355 fprintf(stderr, "[error] Unexpected logical node type in %s\n",
356 __func__);
357 return -EINVAL;
358
359 case AST_OP_MUL:
360 insn.op = FILTER_OP_MUL;
361 break;
362 case AST_OP_DIV:
363 insn.op = FILTER_OP_DIV;
364 break;
365 case AST_OP_MOD:
366 insn.op = FILTER_OP_MOD;
367 break;
368 case AST_OP_PLUS:
369 insn.op = FILTER_OP_PLUS;
370 break;
371 case AST_OP_MINUS:
372 insn.op = FILTER_OP_MINUS;
373 break;
374 case AST_OP_RSHIFT:
375 insn.op = FILTER_OP_RSHIFT;
376 break;
377 case AST_OP_LSHIFT:
378 insn.op = FILTER_OP_LSHIFT;
379 break;
380 case AST_OP_BIN_AND:
381 insn.op = FILTER_OP_BIN_AND;
382 break;
383 case AST_OP_BIN_OR:
384 insn.op = FILTER_OP_BIN_OR;
385 break;
386 case AST_OP_BIN_XOR:
387 insn.op = FILTER_OP_BIN_XOR;
388 break;
389
390 case AST_OP_EQ:
391 insn.op = FILTER_OP_EQ;
392 break;
393 case AST_OP_NE:
394 insn.op = FILTER_OP_NE;
395 break;
396 case AST_OP_GT:
397 insn.op = FILTER_OP_GT;
398 break;
399 case AST_OP_LT:
400 insn.op = FILTER_OP_LT;
401 break;
402 case AST_OP_GE:
403 insn.op = FILTER_OP_GE;
404 break;
405 case AST_OP_LE:
406 insn.op = FILTER_OP_LE;
407 break;
408 }
409 return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
410 }
411
412 /*
413 * A logical op always return a s64 (1 or 0).
414 */
415 static
416 int visit_node_logical(struct filter_parser_ctx *ctx, struct ir_op *node)
417 {
418 int ret;
419 struct logical_op insn;
420 uint16_t skip_offset_loc;
421 uint16_t target_loc;
422
423 /* Visit left child */
424 ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
425 if (ret)
426 return ret;
427 /* Cast to s64 if float or field ref */
428 if ((node->u.binary.left->data_type == IR_DATA_FIELD_REF
429 || node->u.binary.left->data_type == IR_DATA_GET_CONTEXT_REF)
430 || node->u.binary.left->data_type == IR_DATA_FLOAT) {
431 struct cast_op cast_insn;
432
433 if (node->u.binary.left->data_type == IR_DATA_FIELD_REF
434 || node->u.binary.left->data_type == IR_DATA_GET_CONTEXT_REF) {
435 cast_insn.op = FILTER_OP_CAST_TO_S64;
436 } else {
437 cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
438 }
439 ret = bytecode_push(&ctx->bytecode, &cast_insn,
440 1, sizeof(cast_insn));
441 if (ret)
442 return ret;
443 }
444 switch (node->u.logical.type) {
445 default:
446 fprintf(stderr, "[error] Unknown node type in %s\n",
447 __func__);
448 return -EINVAL;
449
450 case AST_OP_AND:
451 insn.op = FILTER_OP_AND;
452 break;
453 case AST_OP_OR:
454 insn.op = FILTER_OP_OR;
455 break;
456 }
457 insn.skip_offset = (uint16_t) -1UL; /* Temporary */
458 ret = bytecode_push_logical(&ctx->bytecode, &insn, 1, sizeof(insn),
459 &skip_offset_loc);
460 if (ret)
461 return ret;
462 /* Visit right child */
463 ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
464 if (ret)
465 return ret;
466 /* Cast to s64 if float or field ref */
467 if ((node->u.binary.right->data_type == IR_DATA_FIELD_REF
468 || node->u.binary.right->data_type == IR_DATA_GET_CONTEXT_REF)
469 || node->u.binary.right->data_type == IR_DATA_FLOAT) {
470 struct cast_op cast_insn;
471
472 if (node->u.binary.right->data_type == IR_DATA_FIELD_REF
473 || node->u.binary.right->data_type == IR_DATA_GET_CONTEXT_REF) {
474 cast_insn.op = FILTER_OP_CAST_TO_S64;
475 } else {
476 cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
477 }
478 ret = bytecode_push(&ctx->bytecode, &cast_insn,
479 1, sizeof(cast_insn));
480 if (ret)
481 return ret;
482 }
483 /* We now know where the logical op can skip. */
484 target_loc = (uint16_t) bytecode_get_len(&ctx->bytecode->b);
485 ret = bytecode_patch(&ctx->bytecode,
486 &target_loc, /* Offset to jump to */
487 skip_offset_loc, /* Where to patch */
488 sizeof(uint16_t));
489 return ret;
490 }
491
492 /*
493 * Postorder traversal of the tree. We need the children result before
494 * we can evaluate the parent.
495 */
496 static
497 int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
498 struct ir_op *node)
499 {
500 switch (node->op) {
501 case IR_OP_UNKNOWN:
502 default:
503 fprintf(stderr, "[error] Unknown node type in %s\n",
504 __func__);
505 return -EINVAL;
506
507 case IR_OP_ROOT:
508 return visit_node_root(ctx, node);
509 case IR_OP_LOAD:
510 return visit_node_load(ctx, node);
511 case IR_OP_UNARY:
512 return visit_node_unary(ctx, node);
513 case IR_OP_BINARY:
514 return visit_node_binary(ctx, node);
515 case IR_OP_LOGICAL:
516 return visit_node_logical(ctx, node);
517 }
518 }
519
520 LTTNG_HIDDEN
521 void filter_bytecode_free(struct filter_parser_ctx *ctx)
522 {
523 free(ctx->bytecode);
524 ctx->bytecode = NULL;
525 free(ctx->bytecode_reloc);
526 ctx->bytecode_reloc = NULL;
527 }
528
/*
 * Generate the filter bytecode from the IR tree rooted at ctx->ir_root,
 * then append the relocation table (symbol names + offsets) after the
 * instructions, recording its start in reloc_table_offset. Returns 0
 * on success, a negative errno value on failure; on the goto-error
 * paths both bytecode buffers are freed and reset.
 *
 * NOTE(review): if the final bytecode_push() fails, the buffers are
 * NOT freed here — presumably the caller cleans up via
 * filter_bytecode_free(); verify against callers.
 */
LTTNG_HIDDEN
int filter_visitor_bytecode_generate(struct filter_parser_ctx *ctx)
{
	int ret;

	ret = bytecode_init(&ctx->bytecode);
	if (ret)
		return ret;
	ret = bytecode_init(&ctx->bytecode_reloc);
	if (ret)
		goto error;
	ret = recursive_visit_gen_bytecode(ctx, ctx->ir_root);
	if (ret)
		goto error;

	/* Finally, append symbol table to bytecode */
	ctx->bytecode->b.reloc_table_offset = bytecode_get_len(&ctx->bytecode->b);
	return bytecode_push(&ctx->bytecode, ctx->bytecode_reloc->b.data,
			1, bytecode_get_len(&ctx->bytecode_reloc->b));

error:
	filter_bytecode_free(ctx);
	return ret;
}
This page took 0.063844 seconds and 4 git commands to generate.