Filter: specialize 'and' and 'or' ops
lttng-tools.git: src/lib/lttng-ctl/filter-visitor-generate-bytecode.c
/*
 * filter-visitor-generate-bytecode.c
 *
 * LTTng filter bytecode generation
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License, version 2.1 only,
 * as published by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>	/* fprintf, stderr */
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "align.h"
#include "filter-bytecode.h"
#include "filter-ir.h"
#include "filter-ast.h"

#ifndef max_t
#define max_t(type, a, b)	((type) ((a) > (b) ? (a) : (b)))
#endif

//#define INIT_ALLOC_SIZE	PAGE_SIZE
#define INIT_ALLOC_SIZE		4

static
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
		struct ir_op *node);
static
int bytecode_init(struct lttng_filter_bytecode_alloc **fb)
{
	*fb = calloc(sizeof(struct lttng_filter_bytecode_alloc) + INIT_ALLOC_SIZE, 1);
	if (!*fb) {
		return -ENOMEM;
	} else {
		(*fb)->alloc_len = INIT_ALLOC_SIZE;
		return 0;
	}
}

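/*
 * A sketch of the assumed allocation layout (the authoritative definitions
 * live in filter-bytecode.h; only the fields used in this file are shown):
 *
 *	struct lttng_filter_bytecode_alloc {
 *		uint32_t alloc_len;		// capacity of b.data, in bytes
 *		struct lttng_filter_bytecode b;	// reloc_table_offset, len, data[]
 *	};
 *
 * Keeping the capacity right next to the bytecode header lets a single
 * calloc()/realloc() cover both, and INIT_ALLOC_SIZE only needs to be big
 * enough for the first bytecode_reserve() call to have something to double.
 */
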
static
int32_t bytecode_reserve(struct lttng_filter_bytecode_alloc **fb, uint32_t align, uint32_t len)
{
	int32_t ret;
	uint32_t padding = offset_align((*fb)->b.len, align);

	if ((*fb)->b.len + padding + len > (*fb)->alloc_len) {
		uint32_t new_len =
			max_t(uint32_t, (*fb)->b.len + padding + len,
				(*fb)->alloc_len << 1);
		uint32_t old_len = (*fb)->alloc_len;
		struct lttng_filter_bytecode_alloc *newptr;

		if (new_len > 0xFFFF)
			return -EINVAL;
		/* Do not overwrite *fb before checking realloc's result. */
		newptr = realloc(*fb, sizeof(struct lttng_filter_bytecode_alloc) + new_len);
		if (!newptr)
			return -ENOMEM;
		*fb = newptr;
		memset(&(*fb)->b.data[old_len], 0, new_len - old_len);
		(*fb)->alloc_len = new_len;
	}
	(*fb)->b.len += padding;
	ret = (*fb)->b.len;
	(*fb)->b.len += len;
	return ret;
}

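/*
 * A worked example, assuming INIT_ALLOC_SIZE == 4 and a fresh buffer:
 *
 *	bytecode_reserve(fb, 1, 1)  -> returns offset 0, b.len = 1, no growth
 *	bytecode_reserve(fb, 1, 16) -> 1 + 0 + 16 > 4, so the buffer grows to
 *				       max_t(uint32_t, 17, 4 << 1) = 17 bytes,
 *				       returns offset 1, b.len = 17
 *
 * offset_align() yields the padding needed to bring b.len up to the
 * requested alignment; padding bytes stay zeroed.  Total size is capped at
 * 0xFFFF because offsets (skip_offset, relocation offsets) travel in
 * 16-bit fields.
 */
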
static
int bytecode_push(struct lttng_filter_bytecode_alloc **fb, const void *data,
		uint32_t align, uint32_t len)
{
	int32_t offset;

	offset = bytecode_reserve(fb, align, len);
	if (offset < 0)
		return offset;
	memcpy(&(*fb)->b.data[offset], data, len);
	return 0;
}

static
int bytecode_push_logical(struct lttng_filter_bytecode_alloc **fb,
		struct logical_op *data,
		uint32_t align, uint32_t len,
		uint16_t *skip_offset)
{
	int32_t offset;

	offset = bytecode_reserve(fb, align, len);
	if (offset < 0)
		return offset;
	memcpy(&(*fb)->b.data[offset], data, len);
	*skip_offset =
		(void *) &((struct logical_op *) &(*fb)->b.data[offset])->skip_offset
			- (void *) &(*fb)->b.data[0];
	return 0;
}

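/*
 * In addition to appending the instruction, bytecode_push_logical() reports
 * through *skip_offset where the skip_offset field of the freshly written
 * logical_op landed within the bytecode stream.  The intended pattern, as
 * used by visit_node_logical() below, is roughly:
 *
 *	uint16_t loc, target;
 *
 *	bytecode_push_logical(&ctx->bytecode, &insn, 1, sizeof(insn), &loc);
 *	// ... emit the right-hand operand ...
 *	target = bytecode_get_len(&ctx->bytecode->b);
 *	bytecode_patch(&ctx->bytecode, &target, loc, sizeof(uint16_t));
 */
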
static
int bytecode_patch(struct lttng_filter_bytecode_alloc **fb,
		const void *data,
		uint16_t offset,
		uint32_t len)
{
	if (offset >= (*fb)->b.len) {
		return -EINVAL;
	}
	memcpy(&(*fb)->b.data[offset], data, len);
	return 0;
}

static
int visit_node_root(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct return_op insn;

	/* Visit child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.root.child);
	if (ret)
		return ret;

	/* Generate end of bytecode instruction */
	insn.op = FILTER_OP_RETURN;
	return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
}

static
enum filter_register reg_sel(struct ir_op *node)
{
	switch (node->side) {
	case IR_SIDE_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown node side in %s\n",
			__func__);
		return REG_ERROR;
	case IR_LEFT:
		return REG_R0;
	case IR_RIGHT:
		return REG_R1;
	}
}

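/*
 * The evaluator targeted here is a two-register machine: the left-hand
 * operand of a comparison is loaded into R0 and the right-hand operand
 * into R1.  reg_sel() simply maps the IR side annotation onto those
 * registers; REG_ERROR (turned into -EINVAL by the callers) means the IR
 * node was never annotated with a side, which points at a bug in an
 * earlier pass.
 */
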
static
int visit_node_load(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;

	switch (node->data_type) {
	case IR_DATA_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown data type in %s\n",
			__func__);
		return -EINVAL;

	case IR_DATA_STRING:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ strlen(node->u.load.u.string) + 1;

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_STRING;
		insn->reg = reg_sel(node);
		if (insn->reg == REG_ERROR) {
			free(insn);	/* Do not leak the instruction buffer. */
			return -EINVAL;
		}
		strcpy(insn->data, node->u.load.u.string);
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_NUMERIC:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct literal_numeric);

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_S64;
		insn->reg = reg_sel(node);
		if (insn->reg == REG_ERROR) {
			free(insn);
			return -EINVAL;
		}
		*(int64_t *) insn->data = node->u.load.u.num;
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_FLOAT:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct literal_double);

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_DOUBLE;
		insn->reg = reg_sel(node);
		if (insn->reg == REG_ERROR) {
			free(insn);
			return -EINVAL;
		}
		*(double *) insn->data = node->u.load.u.flt;
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
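	/*
	 * The three immediate loads above share one encoding: a fixed struct
	 * load_op header (opcode + destination register) immediately followed
	 * by the operand, inline in the instruction stream.  Schematically:
	 *
	 *	[ load_op | payload ]
	 *	  FILTER_OP_LOAD_STRING -> NUL-terminated string
	 *	  FILTER_OP_LOAD_S64    -> int64_t
	 *	  FILTER_OP_LOAD_DOUBLE -> double
	 *
	 * which is why insn_len is computed per operand type and the
	 * instruction is assembled in a temporary calloc()'d buffer before
	 * being pushed.
	 */
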
	case IR_DATA_FIELD_REF:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct field_ref);
		struct field_ref ref_offset;
		uint16_t reloc_offset;

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_FIELD_REF;
		insn->reg = reg_sel(node);
		ref_offset.offset = (uint16_t) -1U;
		memcpy(insn->data, &ref_offset, sizeof(ref_offset));
		if (insn->reg == REG_ERROR) {
			free(insn);
			return -EINVAL;
		}
		/* reloc_offset points to struct load_op */
		reloc_offset = bytecode_get_len(&ctx->bytecode->b);
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		if (ret) {
			free(insn);
			return ret;
		}
		/* append reloc */
		ret = bytecode_push(&ctx->bytecode_reloc, &reloc_offset,
				1, sizeof(reloc_offset));
		if (ret) {
			free(insn);
			return ret;
		}
		ret = bytecode_push(&ctx->bytecode_reloc, node->u.load.u.ref,
				1, strlen(node->u.load.u.ref) + 1);
		free(insn);
		return ret;
	}
	}
}

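/*
 * Field references cannot be resolved while the bytecode is generated, so
 * the IR_DATA_FIELD_REF case above stores a placeholder offset
 * ((uint16_t) -1U) and records a relocation in a separate stream,
 * ctx->bytecode_reloc.  Each relocation entry, as built here, is:
 *
 *	uint16_t offset;	// position of the load_op in the bytecode
 *	char name[];		// NUL-terminated field name
 *
 * Whoever consumes the bytecode is expected to look the field up by name
 * and patch the field_ref offset in place.
 */
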
static
int visit_node_unary(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct unary_op insn;

	/* Visit child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.unary.child);
	if (ret)
		return ret;

	/* Generate unary instruction */
	switch (node->u.unary.type) {
	case AST_UNARY_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown unary node type in %s\n",
			__func__);
		return -EINVAL;
	case AST_UNARY_PLUS:
		/* Nothing to do. */
		return 0;
	case AST_UNARY_MINUS:
		insn.op = FILTER_OP_UNARY_MINUS;
		insn.reg = reg_sel(node);
		if (insn.reg == REG_ERROR)
			return -EINVAL;
		return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
	case AST_UNARY_NOT:
		insn.op = FILTER_OP_UNARY_NOT;
		insn.reg = reg_sel(node);
		if (insn.reg == REG_ERROR)
			return -EINVAL;
		return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
	}
}

/*
 * Binary comparator nesting is disallowed. This allows fitting into
 * only 2 registers.
 */
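/*
 * For instance, "a == 1 && b == 2" fits this model: each comparison only
 * ever needs R0 and R1, and the logical op then combines the two truth
 * values.  A nested comparator such as "(a == 1) == (b == 2)" would need
 * both comparison results live at the same time; such trees are assumed
 * to have been rejected by an earlier validation pass and are not handled
 * here.
 */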
static
int visit_node_binary(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct binary_op insn;

	/* Visit children */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
	if (ret)
		return ret;
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
	if (ret)
		return ret;

	switch (node->u.binary.type) {
	case AST_OP_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown binary node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_AND:
	case AST_OP_OR:
		fprintf(stderr, "[error] Unexpected logical node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_MUL:
		insn.op = FILTER_OP_MUL;
		break;
	case AST_OP_DIV:
		insn.op = FILTER_OP_DIV;
		break;
	case AST_OP_MOD:
		insn.op = FILTER_OP_MOD;
		break;
	case AST_OP_PLUS:
		insn.op = FILTER_OP_PLUS;
		break;
	case AST_OP_MINUS:
		insn.op = FILTER_OP_MINUS;
		break;
	case AST_OP_RSHIFT:
		insn.op = FILTER_OP_RSHIFT;
		break;
	case AST_OP_LSHIFT:
		insn.op = FILTER_OP_LSHIFT;
		break;
	case AST_OP_BIN_AND:
		insn.op = FILTER_OP_BIN_AND;
		break;
	case AST_OP_BIN_OR:
		insn.op = FILTER_OP_BIN_OR;
		break;
	case AST_OP_BIN_XOR:
		insn.op = FILTER_OP_BIN_XOR;
		break;

	case AST_OP_EQ:
		insn.op = FILTER_OP_EQ;
		break;
	case AST_OP_NE:
		insn.op = FILTER_OP_NE;
		break;
	case AST_OP_GT:
		insn.op = FILTER_OP_GT;
		break;
	case AST_OP_LT:
		insn.op = FILTER_OP_LT;
		break;
	case AST_OP_GE:
		insn.op = FILTER_OP_GE;
		break;
	case AST_OP_LE:
		insn.op = FILTER_OP_LE;
		break;
	}
	return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
}

static
int visit_node_logical(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct logical_op insn;
	uint16_t skip_offset_loc;
	uint16_t target_loc;

	/* Visit left child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
	if (ret)
		return ret;
	switch (node->u.logical.type) {
	default:
		fprintf(stderr, "[error] Unknown logical node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_AND:
		insn.op = FILTER_OP_AND;
		break;
	case AST_OP_OR:
		insn.op = FILTER_OP_OR;
		break;
	}
	insn.skip_offset = (uint16_t) -1UL;	/* Temporary */
	ret = bytecode_push_logical(&ctx->bytecode, &insn, 1, sizeof(insn),
			&skip_offset_loc);
	if (ret)
		return ret;
	/* Visit right child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
	if (ret)
		return ret;
	/* We now know where the logical op can skip. */
	target_loc = (uint16_t) bytecode_get_len(&ctx->bytecode->b);
	ret = bytecode_patch(&ctx->bytecode,
			&target_loc,		/* Offset to jump to */
			skip_offset_loc,	/* Where to patch */
			sizeof(uint16_t));
	return ret;
}

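/*
 * The sequence emitted for "A && B" is, schematically:
 *
 *	<bytecode for A>
 *	FILTER_OP_AND  skip_offset = X   <- patched once B has been emitted
 *	<bytecode for B>
 *   X:	<whatever follows the logical op>
 *
 * which lets the evaluator short-circuit: when A alone decides the result,
 * it can jump straight to X without evaluating B.  FILTER_OP_OR is handled
 * the same way with the opposite polarity.
 */
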
/*
 * Postorder traversal of the tree. We need the children result before
 * we can evaluate the parent.
 */
static
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
		struct ir_op *node)
{
	switch (node->op) {
	case IR_OP_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown node type in %s\n",
			__func__);
		return -EINVAL;

	case IR_OP_ROOT:
		return visit_node_root(ctx, node);
	case IR_OP_LOAD:
		return visit_node_load(ctx, node);
	case IR_OP_UNARY:
		return visit_node_unary(ctx, node);
	case IR_OP_BINARY:
		return visit_node_binary(ctx, node);
	case IR_OP_LOGICAL:
		return visit_node_logical(ctx, node);
	}
}

void filter_bytecode_free(struct filter_parser_ctx *ctx)
{
	free(ctx->bytecode);
	ctx->bytecode = NULL;
	free(ctx->bytecode_reloc);
	ctx->bytecode_reloc = NULL;
}

int filter_visitor_bytecode_generate(struct filter_parser_ctx *ctx)
{
	int ret;

	ret = bytecode_init(&ctx->bytecode);
	if (ret)
		return ret;
	ret = bytecode_init(&ctx->bytecode_reloc);
	if (ret)
		goto error;
	ret = recursive_visit_gen_bytecode(ctx, ctx->ir_root);
	if (ret)
		goto error;

	/* Finally, append symbol table to bytecode */
	ctx->bytecode->b.reloc_table_offset = bytecode_get_len(&ctx->bytecode->b);
	return bytecode_push(&ctx->bytecode, ctx->bytecode_reloc->b.data,
			1, bytecode_get_len(&ctx->bytecode_reloc->b));

error:
	filter_bytecode_free(ctx);
	return ret;
}
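
/*
 * The finished bytecode image therefore looks like:
 *
 *	+---------------------------------------+  offset 0
 *	| instructions (loads, binary/logical   |
 *	| ops), terminated by FILTER_OP_RETURN  |
 *	+---------------------------------------+  b.reloc_table_offset
 *	| relocation table:                     |
 *	| { uint16_t offset, char name[] } ...  |
 *	+---------------------------------------+  b.len
 *
 * A minimal sketch of how a caller might drive this generator, assuming
 * the parse and IR passes have already filled ctx->ir_root (consume() is a
 * placeholder for whatever transports the bytecode to the tracer):
 *
 *	ret = filter_visitor_bytecode_generate(ctx);
 *	if (!ret) {
 *		consume(&ctx->bytecode->b);
 *		filter_bytecode_free(ctx);
 *	}
 */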