Filter: ensure logical operator merge is always s64
src/lib/lttng-ctl/filter-visitor-generate-bytecode.c
/*
 * filter-visitor-generate-bytecode.c
 *
 * LTTng filter bytecode generation
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License, version 2.1 only,
 * as published by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "align.h"
#include "filter-bytecode.h"
#include "filter-ir.h"
#include "filter-ast.h"

#ifndef max_t
#define max_t(type, a, b)	((type) ((a) > (b) ? (a) : (b)))
#endif

//#define INIT_ALLOC_SIZE	PAGE_SIZE
#define INIT_ALLOC_SIZE		4

static
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
		struct ir_op *node);

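/*
 * Allocate the initial bytecode buffer. The lttng_filter_bytecode_alloc
 * header and INIT_ALLOC_SIZE bytes of payload are zero-initialized in a
 * single allocation; alloc_len tracks the payload capacity.
 */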
static
int bytecode_init(struct lttng_filter_bytecode_alloc **fb)
{
	*fb = calloc(sizeof(struct lttng_filter_bytecode_alloc) + INIT_ALLOC_SIZE, 1);
	if (!*fb) {
		return -ENOMEM;
	} else {
		(*fb)->alloc_len = INIT_ALLOC_SIZE;
		return 0;
	}
}

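/*
 * Reserve "len" bytes at "align" alignment in the bytecode buffer,
 * doubling the allocation when needed (capped at 0xFFFF, since bytecode
 * offsets fit in a u16). Returns the offset of the reserved region, or
 * a negative errno value on error. May move *fb, so callers must not
 * keep pointers into the buffer across calls.
 */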
static
int32_t bytecode_reserve(struct lttng_filter_bytecode_alloc **fb, uint32_t align, uint32_t len)
{
	int32_t ret;
	uint32_t padding = offset_align((*fb)->b.len, align);

	if ((*fb)->b.len + padding + len > (*fb)->alloc_len) {
		uint32_t new_len =
			max_t(uint32_t, (*fb)->b.len + padding + len,
				(*fb)->alloc_len << 1);
		uint32_t old_len = (*fb)->alloc_len;
		struct lttng_filter_bytecode_alloc *newptr;

		if (new_len > 0xFFFF)
			return -EINVAL;
		/* Use a temporary so the old buffer is not leaked on realloc failure. */
		newptr = realloc(*fb, sizeof(struct lttng_filter_bytecode_alloc) + new_len);
		if (!newptr)
			return -ENOMEM;
		*fb = newptr;
		memset(&(*fb)->b.data[old_len], 0, new_len - old_len);
		(*fb)->alloc_len = new_len;
	}
	(*fb)->b.len += padding;
	ret = (*fb)->b.len;
	(*fb)->b.len += len;
	return ret;
}

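/*
 * Append "len" bytes of "data" at "align" alignment. Returns 0 on
 * success, or a negative errno value on error.
 */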
static
int bytecode_push(struct lttng_filter_bytecode_alloc **fb, const void *data,
		uint32_t align, uint32_t len)
{
	int32_t offset;

	offset = bytecode_reserve(fb, align, len);
	if (offset < 0)
		return offset;
	memcpy(&(*fb)->b.data[offset], data, len);
	return 0;
}

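/*
 * Append a logical (and/or) instruction. Also report, through
 * *skip_offset, the position of the instruction's skip_offset field
 * within the buffer, so it can be patched once the short-circuit
 * target is known.
 */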
static
int bytecode_push_logical(struct lttng_filter_bytecode_alloc **fb,
		struct logical_op *data,
		uint32_t align, uint32_t len,
		uint16_t *skip_offset)
{
	int32_t offset;

	offset = bytecode_reserve(fb, align, len);
	if (offset < 0)
		return offset;
	memcpy(&(*fb)->b.data[offset], data, len);
	*skip_offset =
		(void *) &((struct logical_op *) &(*fb)->b.data[offset])->skip_offset
			- (void *) &(*fb)->b.data[0];
	return 0;
}

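/*
 * Overwrite "len" bytes at "offset" with "data". Only previously
 * written bytecode may be patched.
 */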
static
int bytecode_patch(struct lttng_filter_bytecode_alloc **fb,
		const void *data,
		uint16_t offset,
		uint32_t len)
{
	if (offset >= (*fb)->b.len) {
		return -EINVAL;
	}
	memcpy(&(*fb)->b.data[offset], data, len);
	return 0;
}

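/*
 * Generate bytecode for the root node: emit the child's code, then a
 * RETURN instruction terminating the bytecode.
 */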
static
int visit_node_root(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct return_op insn;

	/* Visit child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.root.child);
	if (ret)
		return ret;

	/* Generate end of bytecode instruction */
	insn.op = FILTER_OP_RETURN;
	return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
}

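/*
 * Map an IR node's side to its target register: left operands load
 * into R0, right operands into R1.
 */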
static
enum filter_register reg_sel(struct ir_op *node)
{
	switch (node->side) {
	case IR_SIDE_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown node side in %s\n",
			__func__);
		return REG_ERROR;
	case IR_LEFT:
		return REG_R0;
	case IR_RIGHT:
		return REG_R1;
	}
}

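/*
 * Emit a load instruction for a literal (string, s64, double) or a
 * field reference. Field offsets are unknown at generation time, so a
 * placeholder offset (-1) is emitted and a relocation entry
 * (instruction offset plus field name) is appended to the relocation
 * table for later resolution.
 */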
static
int visit_node_load(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;

	switch (node->data_type) {
	case IR_DATA_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown data type in %s\n",
			__func__);
		return -EINVAL;

	case IR_DATA_STRING:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ strlen(node->u.load.u.string) + 1;

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_STRING;
		insn->reg = reg_sel(node);
		if (insn->reg == REG_ERROR) {
			free(insn);	/* Do not leak insn on error. */
			return -EINVAL;
		}
		strcpy(insn->data, node->u.load.u.string);
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_NUMERIC:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct literal_numeric);

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_S64;
		insn->reg = reg_sel(node);
		if (insn->reg == REG_ERROR) {
			free(insn);
			return -EINVAL;
		}
		*(int64_t *) insn->data = node->u.load.u.num;
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_FLOAT:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct literal_double);

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_DOUBLE;
		insn->reg = reg_sel(node);
		if (insn->reg == REG_ERROR) {
			free(insn);
			return -EINVAL;
		}
		*(double *) insn->data = node->u.load.u.flt;
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_FIELD_REF:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct field_ref);
		struct field_ref ref_offset;
		uint16_t reloc_offset;

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_FIELD_REF;
		insn->reg = reg_sel(node);
		ref_offset.offset = (uint16_t) -1U;	/* Placeholder, relocated later. */
		memcpy(insn->data, &ref_offset, sizeof(ref_offset));
		if (insn->reg == REG_ERROR) {
			free(insn);
			return -EINVAL;
		}
		/* reloc_offset points to struct load_op */
		reloc_offset = bytecode_get_len(&ctx->bytecode->b);
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		if (ret) {
			free(insn);
			return ret;
		}
		/* append reloc */
		ret = bytecode_push(&ctx->bytecode_reloc, &reloc_offset,
					1, sizeof(reloc_offset));
		if (ret) {
			free(insn);
			return ret;
		}
		ret = bytecode_push(&ctx->bytecode_reloc, node->u.load.u.ref,
					1, strlen(node->u.load.u.ref) + 1);
		free(insn);
		return ret;
	}
	}
}

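/*
 * Emit the child's code, then the unary instruction operating on the
 * node's register. Unary plus is a no-op and emits nothing.
 */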
static
int visit_node_unary(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct unary_op insn;

	/* Visit child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.unary.child);
	if (ret)
		return ret;

	/* Generate unary bytecode instruction */
	switch (node->u.unary.type) {
	case AST_UNARY_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown unary node type in %s\n",
			__func__);
		return -EINVAL;
	case AST_UNARY_PLUS:
		/* Nothing to do. */
		return 0;
	case AST_UNARY_MINUS:
		insn.op = FILTER_OP_UNARY_MINUS;
		insn.reg = reg_sel(node);
		if (insn.reg == REG_ERROR)
			return -EINVAL;
		return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
	case AST_UNARY_NOT:
		insn.op = FILTER_OP_UNARY_NOT;
		insn.reg = reg_sel(node);
		if (insn.reg == REG_ERROR)
			return -EINVAL;
		return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
	}
}

/*
 * Binary comparator nesting is disallowed. This allows fitting into
 * only 2 registers.
 */
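/*
 * Both operands are evaluated first (left into R0, right into R1, per
 * reg_sel()); the binary instruction then operates on both registers.
 */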
static
int visit_node_binary(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct binary_op insn;

	/* Visit children */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
	if (ret)
		return ret;
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
	if (ret)
		return ret;

	switch (node->u.binary.type) {
	case AST_OP_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown binary node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_AND:
	case AST_OP_OR:
		fprintf(stderr, "[error] Unexpected logical node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_MUL:
		insn.op = FILTER_OP_MUL;
		break;
	case AST_OP_DIV:
		insn.op = FILTER_OP_DIV;
		break;
	case AST_OP_MOD:
		insn.op = FILTER_OP_MOD;
		break;
	case AST_OP_PLUS:
		insn.op = FILTER_OP_PLUS;
		break;
	case AST_OP_MINUS:
		insn.op = FILTER_OP_MINUS;
		break;
	case AST_OP_RSHIFT:
		insn.op = FILTER_OP_RSHIFT;
		break;
	case AST_OP_LSHIFT:
		insn.op = FILTER_OP_LSHIFT;
		break;
	case AST_OP_BIN_AND:
		insn.op = FILTER_OP_BIN_AND;
		break;
	case AST_OP_BIN_OR:
		insn.op = FILTER_OP_BIN_OR;
		break;
	case AST_OP_BIN_XOR:
		insn.op = FILTER_OP_BIN_XOR;
		break;

	case AST_OP_EQ:
		insn.op = FILTER_OP_EQ;
		break;
	case AST_OP_NE:
		insn.op = FILTER_OP_NE;
		break;
	case AST_OP_GT:
		insn.op = FILTER_OP_GT;
		break;
	case AST_OP_LT:
		insn.op = FILTER_OP_LT;
		break;
	case AST_OP_GE:
		insn.op = FILTER_OP_GE;
		break;
	case AST_OP_LE:
		insn.op = FILTER_OP_LE;
		break;
	}
	return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
}

/*
 * A logical op always returns a s64 (1 or 0).
 */
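/*
 * Generated layout for e.g. "a && b" (a sketch; instruction boundaries
 * are illustrative only):
 *
 *   [ code evaluating a into R0          ]
 *   [ CAST_TO_S64 R0 if float/field ref  ]
 *   [ FILTER_OP_AND, skip_offset --------]--+  patched below
 *   [ code evaluating b into R0          ]  |
 *   [ CAST_TO_S64 R0 if float/field ref  ]  |
 *   [ next instruction                   ]<-+
 *
 * When the left operand already decides the result, evaluation can
 * short-circuit to skip_offset with the merged s64 value in R0.
 */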
static
int visit_node_logical(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct logical_op insn;
	uint16_t skip_offset_loc;
	uint16_t target_loc;

	/* Visit left child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
	if (ret)
		return ret;
	/* Cast to s64 if float or field ref */
	if (node->u.binary.left->data_type == IR_DATA_FIELD_REF
			|| node->u.binary.left->data_type == IR_DATA_FLOAT) {
		struct cast_op cast_insn;

		cast_insn.op = FILTER_OP_CAST_TO_S64;
		cast_insn.reg = REG_R0;
		ret = bytecode_push(&ctx->bytecode, &cast_insn,
					1, sizeof(cast_insn));
		if (ret)
			return ret;
	}
	switch (node->u.logical.type) {
	default:
		fprintf(stderr, "[error] Unknown node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_AND:
		insn.op = FILTER_OP_AND;
		break;
	case AST_OP_OR:
		insn.op = FILTER_OP_OR;
		break;
	}
	insn.skip_offset = (uint16_t) -1UL;	/* Temporary */
	ret = bytecode_push_logical(&ctx->bytecode, &insn, 1, sizeof(insn),
			&skip_offset_loc);
	if (ret)
		return ret;
	/* Visit right child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
	if (ret)
		return ret;
	/* Cast to s64 if float or field ref */
	if (node->u.binary.right->data_type == IR_DATA_FIELD_REF
			|| node->u.binary.right->data_type == IR_DATA_FLOAT) {
		struct cast_op cast_insn;

		cast_insn.op = FILTER_OP_CAST_TO_S64;
		cast_insn.reg = REG_R0;
		ret = bytecode_push(&ctx->bytecode, &cast_insn,
					1, sizeof(cast_insn));
		if (ret)
			return ret;
	}
	/* We now know where the logical op can skip. */
	target_loc = (uint16_t) bytecode_get_len(&ctx->bytecode->b);
	ret = bytecode_patch(&ctx->bytecode,
			&target_loc,		/* Offset to jump to */
			skip_offset_loc,	/* Where to patch */
			sizeof(uint16_t));
	return ret;
}

/*
 * Postorder traversal of the tree. We need the children's results
 * before we can evaluate the parent.
 */
static
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
		struct ir_op *node)
{
	switch (node->op) {
	case IR_OP_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown node type in %s\n",
			__func__);
		return -EINVAL;

	case IR_OP_ROOT:
		return visit_node_root(ctx, node);
	case IR_OP_LOAD:
		return visit_node_load(ctx, node);
	case IR_OP_UNARY:
		return visit_node_unary(ctx, node);
	case IR_OP_BINARY:
		return visit_node_binary(ctx, node);
	case IR_OP_LOGICAL:
		return visit_node_logical(ctx, node);
	}
}

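/*
 * Free both bytecode buffers and reset the context pointers, so the
 * function is safe to call more than once.
 */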
void filter_bytecode_free(struct filter_parser_ctx *ctx)
{
	free(ctx->bytecode);
	ctx->bytecode = NULL;
	free(ctx->bytecode_reloc);
	ctx->bytecode_reloc = NULL;
}

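/*
 * Entry point: generate bytecode from the IR tree rooted at
 * ctx->ir_root, then append the relocation table after the
 * instructions and record its start in reloc_table_offset. On error,
 * both buffers are freed.
 */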
int filter_visitor_bytecode_generate(struct filter_parser_ctx *ctx)
{
	int ret;

	ret = bytecode_init(&ctx->bytecode);
	if (ret)
		return ret;
	ret = bytecode_init(&ctx->bytecode_reloc);
	if (ret)
		goto error;
	ret = recursive_visit_gen_bytecode(ctx, ctx->ir_root);
	if (ret)
		goto error;

	/* Finally, append symbol table to bytecode */
	ctx->bytecode->b.reloc_table_offset = bytecode_get_len(&ctx->bytecode->b);
	return bytecode_push(&ctx->bytecode, ctx->bytecode_reloc->b.data,
			1, bytecode_get_len(&ctx->bytecode_reloc->b));

error:
	filter_bytecode_free(ctx);
	return ret;
}