/*
 * filter-visitor-generate-bytecode.c
 *
 * LTTng filter bytecode generation
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License, version 2.1 only,
 * as published by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "align.h"
#include "filter-bytecode.h"
#include "filter-ir.h"
#include "filter-ast.h"

#ifndef max_t
#define max_t(type, a, b)	((type) ((a) > (b) ? (a) : (b)))
#endif

//#define INIT_ALLOC_SIZE	PAGE_SIZE
#define INIT_ALLOC_SIZE		4

static
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
		struct ir_op *node);

static inline int fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
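
/*
 * Illustrative values: fls() returns the 1-based position of the most
 * significant set bit, or 0 when no bit is set. For example:
 *
 *	fls(0)  == 0
 *	fls(1)  == 1	(binary 1)
 *	fls(8)  == 4	(binary 1000)
 *	fls(17) == 5	(binary 10001)
 */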

static inline int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
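
/*
 * Illustrative values: get_count_order() returns the base-2 logarithm
 * of the smallest power of two >= count. For example:
 *
 *	get_count_order(1) == 0	(2^0 == 1)
 *	get_count_order(4) == 2	(2^2 == 4)
 *	get_count_order(5) == 3	(next power of two is 2^3 == 8)
 */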

static
int bytecode_init(struct lttng_filter_bytecode_alloc **fb)
{
	*fb = calloc(sizeof(struct lttng_filter_bytecode_alloc) + INIT_ALLOC_SIZE, 1);
	if (!*fb) {
		return -ENOMEM;
	} else {
		(*fb)->alloc_len = INIT_ALLOC_SIZE;
		return 0;
	}
}

static
int32_t bytecode_reserve(struct lttng_filter_bytecode_alloc **fb, uint32_t align, uint32_t len)
{
	int32_t ret;
	uint32_t padding = offset_align((*fb)->b.len, align);

	if ((*fb)->b.len + padding + len > (*fb)->alloc_len) {
		uint32_t new_len =
			max_t(uint32_t, 1U << get_count_order((*fb)->b.len + padding + len),
				(*fb)->alloc_len << 1);
		uint32_t old_len = (*fb)->alloc_len;
		struct lttng_filter_bytecode_alloc *newptr;

		if (new_len > 0xFFFF)
			return -EINVAL;
		/*
		 * Use a temporary pointer: assigning the realloc() result
		 * directly to *fb would leak the original buffer on failure.
		 */
		newptr = realloc(*fb, sizeof(struct lttng_filter_bytecode_alloc) + new_len);
		if (!newptr)
			return -ENOMEM;
		*fb = newptr;
		memset(&(*fb)->b.data[old_len], 0, new_len - old_len);
		(*fb)->alloc_len = new_len;
	}
	(*fb)->b.len += padding;
	ret = (*fb)->b.len;
	(*fb)->b.len += len;
	return ret;
}
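
/*
 * Worked example (hypothetical state): with b.len == 4 and
 * alloc_len == 4, reserving 8 bytes at 8-byte alignment adds 4 bytes
 * of padding, grows the buffer to 16 bytes (the next power of two
 * holding b.len + padding + len == 16), and returns offset 8.
 */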

static
int bytecode_push(struct lttng_filter_bytecode_alloc **fb, const void *data,
		uint32_t align, uint32_t len)
{
	int32_t offset;

	offset = bytecode_reserve(fb, align, len);
	if (offset < 0)
		return offset;
	memcpy(&(*fb)->b.data[offset], data, len);
	return 0;
}

static
int bytecode_push_logical(struct lttng_filter_bytecode_alloc **fb,
		struct logical_op *data,
		uint32_t align, uint32_t len,
		uint16_t *skip_offset)
{
	int32_t offset;

	offset = bytecode_reserve(fb, align, len);
	if (offset < 0)
		return offset;
	memcpy(&(*fb)->b.data[offset], data, len);
	*skip_offset =
		(void *) &((struct logical_op *) &(*fb)->b.data[offset])->skip_offset
		- (void *) &(*fb)->b.data[0];
	return 0;
}
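
/*
 * In addition to pushing the instruction, *skip_offset receives
 * offset + offsetof(struct logical_op, skip_offset), i.e. the location
 * within the bytecode stream of the skip_offset field that
 * visit_node_logical() patches once the jump target is known.
 */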

static
int bytecode_patch(struct lttng_filter_bytecode_alloc **fb,
		const void *data,
		uint16_t offset,
		uint32_t len)
{
	if (offset >= (*fb)->b.len) {
		return -EINVAL;
	}
	memcpy(&(*fb)->b.data[offset], data, len);
	return 0;
}
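
/*
 * bytecode_patch() overwrites len bytes at a previously recorded
 * offset; it is how the (uint16_t) -1 skip_offset placeholder pushed
 * by bytecode_push_logical() is later filled in.
 */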

static
int visit_node_root(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct return_op insn;

	/* Visit child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.root.child);
	if (ret)
		return ret;

	/* Generate end of bytecode instruction */
	insn.op = FILTER_OP_RETURN;
	return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
}

static
int visit_node_load(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;

	switch (node->data_type) {
	case IR_DATA_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown data type in %s\n",
				__func__);
		return -EINVAL;

	case IR_DATA_STRING:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ strlen(node->u.load.u.string) + 1;

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_STRING;
		strcpy(insn->data, node->u.load.u.string);
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_NUMERIC:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct literal_numeric);

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_S64;
		*(int64_t *) insn->data = node->u.load.u.num;
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_FLOAT:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct literal_double);

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_DOUBLE;
		*(double *) insn->data = node->u.load.u.flt;
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_FIELD_REF:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct field_ref);
		struct field_ref ref_offset;
		uint16_t reloc_offset;

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_FIELD_REF;
		ref_offset.offset = (uint16_t) -1U;
		memcpy(insn->data, &ref_offset, sizeof(ref_offset));
		/* reloc_offset points to struct load_op */
		reloc_offset = bytecode_get_len(&ctx->bytecode->b);
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		if (ret) {
			free(insn);
			return ret;
		}
		/* append reloc */
		ret = bytecode_push(&ctx->bytecode_reloc, &reloc_offset,
				1, sizeof(reloc_offset));
		if (ret) {
			free(insn);
			return ret;
		}
		ret = bytecode_push(&ctx->bytecode_reloc, node->u.load.u.ref,
				1, strlen(node->u.load.u.ref) + 1);
		free(insn);
		return ret;
	}
	}
}
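
/*
 * Illustrative relocation layout: each entry appended to
 * ctx->bytecode_reloc is a u16 offset (position of the load_op within
 * the bytecode) followed by the NUL-terminated field name. For a
 * hypothetical field "intfield" whose load_op sits at offset 0, the
 * entry on a little-endian host would be:
 *
 *	0x00 0x00 'i' 'n' 't' 'f' 'i' 'e' 'l' 'd' '\0'
 */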

static
int visit_node_unary(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct unary_op insn;

	/* Visit child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.unary.child);
	if (ret)
		return ret;

	/* Generate unary bytecode instruction */
	switch (node->u.unary.type) {
	case AST_UNARY_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown unary node type in %s\n",
				__func__);
		return -EINVAL;
	case AST_UNARY_PLUS:
		/* Nothing to do. */
		return 0;
	case AST_UNARY_MINUS:
		insn.op = FILTER_OP_UNARY_MINUS;
		return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
	case AST_UNARY_NOT:
		insn.op = FILTER_OP_UNARY_NOT;
		return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
	}
}

/*
 * Binary comparator nesting is disallowed. This allows the expression
 * to be evaluated with only 2 registers.
 */
static
int visit_node_binary(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct binary_op insn;

	/* Visit children */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
	if (ret)
		return ret;
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
	if (ret)
		return ret;

	switch (node->u.binary.type) {
	case AST_OP_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown binary node type in %s\n",
				__func__);
		return -EINVAL;

	case AST_OP_AND:
	case AST_OP_OR:
		fprintf(stderr, "[error] Unexpected logical node type in %s\n",
				__func__);
		return -EINVAL;

	case AST_OP_MUL:
		insn.op = FILTER_OP_MUL;
		break;
	case AST_OP_DIV:
		insn.op = FILTER_OP_DIV;
		break;
	case AST_OP_MOD:
		insn.op = FILTER_OP_MOD;
		break;
	case AST_OP_PLUS:
		insn.op = FILTER_OP_PLUS;
		break;
	case AST_OP_MINUS:
		insn.op = FILTER_OP_MINUS;
		break;
	case AST_OP_RSHIFT:
		insn.op = FILTER_OP_RSHIFT;
		break;
	case AST_OP_LSHIFT:
		insn.op = FILTER_OP_LSHIFT;
		break;
	case AST_OP_BIN_AND:
		insn.op = FILTER_OP_BIN_AND;
		break;
	case AST_OP_BIN_OR:
		insn.op = FILTER_OP_BIN_OR;
		break;
	case AST_OP_BIN_XOR:
		insn.op = FILTER_OP_BIN_XOR;
		break;

	case AST_OP_EQ:
		insn.op = FILTER_OP_EQ;
		break;
	case AST_OP_NE:
		insn.op = FILTER_OP_NE;
		break;
	case AST_OP_GT:
		insn.op = FILTER_OP_GT;
		break;
	case AST_OP_LT:
		insn.op = FILTER_OP_LT;
		break;
	case AST_OP_GE:
		insn.op = FILTER_OP_GE;
		break;
	case AST_OP_LE:
		insn.op = FILTER_OP_LE;
		break;
	}
	return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
}
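
/*
 * Illustrative example: for the filter expression "intfield == 2", the
 * postorder visit emits, in order:
 *
 *	FILTER_OP_LOAD_FIELD_REF "intfield"
 *	FILTER_OP_LOAD_S64 2
 *	FILTER_OP_EQ
 *	FILTER_OP_RETURN	(appended by visit_node_root)
 */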

/*
 * A logical op always returns a s64 (1 or 0).
 */
static
int visit_node_logical(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct logical_op insn;
	uint16_t skip_offset_loc;
	uint16_t target_loc;

	/* Visit left child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
	if (ret)
		return ret;
	/* Cast to s64 if float or field ref */
	if (node->u.binary.left->data_type == IR_DATA_FIELD_REF
			|| node->u.binary.left->data_type == IR_DATA_FLOAT) {
		struct cast_op cast_insn;

		if (node->u.binary.left->data_type == IR_DATA_FIELD_REF) {
			cast_insn.op = FILTER_OP_CAST_TO_S64;
		} else {
			cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
		}
		ret = bytecode_push(&ctx->bytecode, &cast_insn,
				1, sizeof(cast_insn));
		if (ret)
			return ret;
	}
	switch (node->u.logical.type) {
	default:
		fprintf(stderr, "[error] Unknown node type in %s\n",
				__func__);
		return -EINVAL;

	case AST_OP_AND:
		insn.op = FILTER_OP_AND;
		break;
	case AST_OP_OR:
		insn.op = FILTER_OP_OR;
		break;
	}
	insn.skip_offset = (uint16_t) -1UL;	/* Temporary */
	ret = bytecode_push_logical(&ctx->bytecode, &insn, 1, sizeof(insn),
			&skip_offset_loc);
	if (ret)
		return ret;
	/* Visit right child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
	if (ret)
		return ret;
	/* Cast to s64 if float or field ref */
	if (node->u.binary.right->data_type == IR_DATA_FIELD_REF
			|| node->u.binary.right->data_type == IR_DATA_FLOAT) {
		struct cast_op cast_insn;

		if (node->u.binary.right->data_type == IR_DATA_FIELD_REF) {
			cast_insn.op = FILTER_OP_CAST_TO_S64;
		} else {
			cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
		}
		ret = bytecode_push(&ctx->bytecode, &cast_insn,
				1, sizeof(cast_insn));
		if (ret)
			return ret;
	}
	/* We now know where the logical op can skip. */
	target_loc = (uint16_t) bytecode_get_len(&ctx->bytecode->b);
	ret = bytecode_patch(&ctx->bytecode,
			&target_loc,		/* Offset to jump to */
			skip_offset_loc,	/* Where to patch */
			sizeof(uint16_t));
	return ret;
}
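
/*
 * Illustrative short-circuit layout for "A && B" (offsets hypothetical):
 *
 *	[bytecode for A]
 *	FILTER_OP_AND skip_offset=T	<- patched once T is known
 *	[bytecode for B]
 *	T:				<- first offset after B
 *
 * If A evaluates to false, the interpreter can jump directly to T,
 * keeping A's result (0) as the value of the whole expression.
 */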

/*
 * Postorder traversal of the tree. We need the children's results
 * before we can evaluate the parent.
 */
static
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
		struct ir_op *node)
{
	switch (node->op) {
	case IR_OP_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown node type in %s\n",
				__func__);
		return -EINVAL;

	case IR_OP_ROOT:
		return visit_node_root(ctx, node);
	case IR_OP_LOAD:
		return visit_node_load(ctx, node);
	case IR_OP_UNARY:
		return visit_node_unary(ctx, node);
	case IR_OP_BINARY:
		return visit_node_binary(ctx, node);
	case IR_OP_LOGICAL:
		return visit_node_logical(ctx, node);
	}
}

__attribute__((visibility("hidden")))
void filter_bytecode_free(struct filter_parser_ctx *ctx)
{
	free(ctx->bytecode);
	ctx->bytecode = NULL;
	free(ctx->bytecode_reloc);
	ctx->bytecode_reloc = NULL;
}

__attribute__((visibility("hidden")))
int filter_visitor_bytecode_generate(struct filter_parser_ctx *ctx)
{
	int ret;

	ret = bytecode_init(&ctx->bytecode);
	if (ret)
		return ret;
	ret = bytecode_init(&ctx->bytecode_reloc);
	if (ret)
		goto error;
	ret = recursive_visit_gen_bytecode(ctx, ctx->ir_root);
	if (ret)
		goto error;

	/* Finally, append symbol table to bytecode */
	ctx->bytecode->b.reloc_table_offset = bytecode_get_len(&ctx->bytecode->b);
	return bytecode_push(&ctx->bytecode, ctx->bytecode_reloc->b.data,
			1, bytecode_get_len(&ctx->bytecode_reloc->b));

error:
	filter_bytecode_free(ctx);
	return ret;
}
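
/*
 * Typical call sequence (sketch; assumes the AST has already been
 * lowered into ctx->ir_root by the earlier visitor passes):
 *
 *	int ret = filter_visitor_bytecode_generate(ctx);
 *	if (!ret) {
 *		// ctx->bytecode->b now holds the instructions, followed
 *		// by the relocation table at b.reloc_table_offset.
 *	}
 *	filter_bytecode_free(ctx);	// when the bytecode is no longer needed
 */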