Fix: Filter: Fix allocation length error
lttng-tools.git: src/lib/lttng-ctl/filter/filter-visitor-generate-bytecode.c
/*
 * filter-visitor-generate-bytecode.c
 *
 * LTTng filter bytecode generation
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License, version 2.1 only,
 * as published by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdlib.h>
#include <string.h>
#include <stdio.h>	/* fprintf()/stderr are used below. */
#include <errno.h>
#include "align.h"
#include "filter-bytecode.h"
#include "filter-ir.h"
#include "filter-ast.h"

#ifndef max_t
#define max_t(type, a, b)	((type) ((a) > (b) ? (a) : (b)))
#endif

//#define INIT_ALLOC_SIZE		PAGE_SIZE
#define INIT_ALLOC_SIZE		4

static
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
		struct ir_op *node);

static inline int fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}
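
/*
 * fls(): find last (most-significant) set bit, 1-based: fls(0) == 0,
 * fls(1) == 1, fls(0x80000000U) == 32.  Equivalent sketch, assuming a
 * compiler that provides the GCC builtins:
 *
 *	static inline int fls_alt(unsigned int x)
 *	{
 *		return x ? 32 - __builtin_clz(x) : 0;
 *	}
 */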

static inline int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}
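
/*
 * get_count_order(count) computes ceil(log2(count)) for count >= 1:
 * get_count_order(1) == 0, get_count_order(4) == 2,
 * get_count_order(5) == 3, get_count_order(33) == 6.  Callers must not
 * pass 0, for which fls() returns 0 and the result would be -1.
 */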

static
int bytecode_init(struct lttng_filter_bytecode_alloc **fb)
{
	uint32_t alloc_len;

	alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + INIT_ALLOC_SIZE;
	*fb = calloc(alloc_len, 1);
	if (!*fb) {
		return -ENOMEM;
	} else {
		(*fb)->alloc_len = alloc_len;
		return 0;
	}
}

static
int32_t bytecode_reserve(struct lttng_filter_bytecode_alloc **fb, uint32_t align, uint32_t len)
{
	int32_t ret;
	uint32_t padding = offset_align((*fb)->b.len, align);
	uint32_t new_len = (*fb)->b.len + padding + len;
	uint32_t new_alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + new_len;
	uint32_t old_alloc_len = (*fb)->alloc_len;

	if (new_len > LTTNG_FILTER_MAX_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		struct lttng_filter_bytecode_alloc *newptr;

		new_alloc_len =
			max_t(uint32_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		newptr = realloc(*fb, new_alloc_len);
		if (!newptr)
			return -ENOMEM;	/* Keep *fb valid so the caller can free it. */
		*fb = newptr;
		/* Zero the new memory; offsets here are from the start of the allocation. */
		memset(&((char *) *fb)[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		(*fb)->alloc_len = new_alloc_len;
	}
	(*fb)->b.len += padding;
	ret = (*fb)->b.len;
	(*fb)->b.len += len;
	return ret;
}
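
/*
 * Growth policy example (values for illustration only): with
 * old_alloc_len == 32 and a request making new_alloc_len == 33,
 * get_count_order(33) == 6, so the candidates are 1U << 6 == 64 and
 * 32 << 1 == 64; the buffer grows to 64 bytes.  Note that the rounding
 * is applied to new_alloc_len, which already includes
 * sizeof(struct lttng_filter_bytecode_alloc), so the header can never
 * be squeezed out of the allocation.
 */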

static
int bytecode_push(struct lttng_filter_bytecode_alloc **fb, const void *data,
		uint32_t align, uint32_t len)
{
	int32_t offset;

	offset = bytecode_reserve(fb, align, len);
	if (offset < 0)
		return offset;
	memcpy(&(*fb)->b.data[offset], data, len);
	return 0;
}

static
int bytecode_push_logical(struct lttng_filter_bytecode_alloc **fb,
		struct logical_op *data,
		uint32_t align, uint32_t len,
		uint16_t *skip_offset)
{
	int32_t offset;

	offset = bytecode_reserve(fb, align, len);
	if (offset < 0)
		return offset;
	memcpy(&(*fb)->b.data[offset], data, len);
	*skip_offset =
		(void *) &((struct logical_op *) &(*fb)->b.data[offset])->skip_offset
			- (void *) &(*fb)->b.data[0];
	return 0;
}
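
/*
 * bytecode_push_logical() hands back, through *skip_offset, the byte
 * offset of the skip_offset field of the instruction it just pushed.
 * An offset rather than a pointer is kept because the buffer may move
 * on realloc; visit_node_logical() later patches that field through
 * bytecode_patch() once the short-circuit target is known.
 */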

static
int bytecode_patch(struct lttng_filter_bytecode_alloc **fb,
		const void *data,
		uint16_t offset,
		uint32_t len)
{
	if (offset >= (*fb)->b.len) {
		return -EINVAL;
	}
	memcpy(&(*fb)->b.data[offset], data, len);
	return 0;
}

static
int visit_node_root(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct return_op insn;

	/* Visit child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.root.child);
	if (ret)
		return ret;

	/* Generate end of bytecode instruction */
	insn.op = FILTER_OP_RETURN;
	return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
}

static
int visit_node_load(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;

	switch (node->data_type) {
	case IR_DATA_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown data type in %s\n",
			__func__);
		return -EINVAL;

	case IR_DATA_STRING:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ strlen(node->u.load.u.string) + 1;

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_STRING;
		strcpy(insn->data, node->u.load.u.string);
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_NUMERIC:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct literal_numeric);

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_S64;
		*(int64_t *) insn->data = node->u.load.u.num;
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_FLOAT:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct literal_double);

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_DOUBLE;
		*(double *) insn->data = node->u.load.u.flt;
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_FIELD_REF:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct field_ref);
		struct field_ref ref_offset;
		uint32_t reloc_offset_u32;
		uint16_t reloc_offset;

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_FIELD_REF;
		ref_offset.offset = (uint16_t) -1U;
		memcpy(insn->data, &ref_offset, sizeof(ref_offset));
		/* reloc_offset points to struct load_op */
		reloc_offset_u32 = bytecode_get_len(&ctx->bytecode->b);
		if (reloc_offset_u32 > LTTNG_FILTER_MAX_LEN - 1) {
			free(insn);
			return -EINVAL;
		}
		reloc_offset = (uint16_t) reloc_offset_u32;
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		if (ret) {
			free(insn);
			return ret;
		}
		/* append reloc */
		ret = bytecode_push(&ctx->bytecode_reloc, &reloc_offset,
					1, sizeof(reloc_offset));
		if (ret) {
			free(insn);
			return ret;
		}
		ret = bytecode_push(&ctx->bytecode_reloc, node->u.load.u.ref,
					1, strlen(node->u.load.u.ref) + 1);
		free(insn);
		return ret;
	}
	}
}
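
/*
 * Relocation entry layout, as emitted above for each
 * FILTER_OP_LOAD_FIELD_REF (illustrative declaration only; the code
 * pushes the two parts separately, without a wrapping struct):
 *
 *	struct reloc_entry {
 *		uint16_t offset;	// offset of the load_op in the bytecode
 *		char name[];		// NUL-terminated field name
 *	};
 */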

static
int visit_node_unary(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct unary_op insn;

	/* Visit child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.unary.child);
	if (ret)
		return ret;

	/* Generate unary op instruction */
	switch (node->u.unary.type) {
	case AST_UNARY_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown unary node type in %s\n",
			__func__);
		return -EINVAL;
	case AST_UNARY_PLUS:
		/* Nothing to do. */
		return 0;
	case AST_UNARY_MINUS:
		insn.op = FILTER_OP_UNARY_MINUS;
		return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
	case AST_UNARY_NOT:
		insn.op = FILTER_OP_UNARY_NOT;
		return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
	}
}

/*
 * Binary comparator nesting is disallowed. This allows fitting into
 * only 2 registers.
 */
static
int visit_node_binary(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct binary_op insn;

	/* Visit children */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
	if (ret)
		return ret;
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
	if (ret)
		return ret;

	switch (node->u.binary.type) {
	case AST_OP_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown binary node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_AND:
	case AST_OP_OR:
		fprintf(stderr, "[error] Unexpected logical node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_MUL:
		insn.op = FILTER_OP_MUL;
		break;
	case AST_OP_DIV:
		insn.op = FILTER_OP_DIV;
		break;
	case AST_OP_MOD:
		insn.op = FILTER_OP_MOD;
		break;
	case AST_OP_PLUS:
		insn.op = FILTER_OP_PLUS;
		break;
	case AST_OP_MINUS:
		insn.op = FILTER_OP_MINUS;
		break;
	case AST_OP_RSHIFT:
		insn.op = FILTER_OP_RSHIFT;
		break;
	case AST_OP_LSHIFT:
		insn.op = FILTER_OP_LSHIFT;
		break;
	case AST_OP_BIN_AND:
		insn.op = FILTER_OP_BIN_AND;
		break;
	case AST_OP_BIN_OR:
		insn.op = FILTER_OP_BIN_OR;
		break;
	case AST_OP_BIN_XOR:
		insn.op = FILTER_OP_BIN_XOR;
		break;

	case AST_OP_EQ:
		insn.op = FILTER_OP_EQ;
		break;
	case AST_OP_NE:
		insn.op = FILTER_OP_NE;
		break;
	case AST_OP_GT:
		insn.op = FILTER_OP_GT;
		break;
	case AST_OP_LT:
		insn.op = FILTER_OP_LT;
		break;
	case AST_OP_GE:
		insn.op = FILTER_OP_GE;
		break;
	case AST_OP_LE:
		insn.op = FILTER_OP_LE;
		break;
	}
	return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
}
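
/*
 * Emission is postfix (operands, then operator).  Sketch for the
 * hypothetical filter expression "len + 2 > 10":
 *
 *	LOAD_FIELD_REF len
 *	LOAD_S64 2
 *	FILTER_OP_PLUS
 *	LOAD_S64 10
 *	FILTER_OP_GT
 */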

/*
 * A logical op always returns a s64 (1 or 0).
 */
static
int visit_node_logical(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct logical_op insn;
	uint16_t skip_offset_loc;
	uint16_t target_loc;

	/* Visit left child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
	if (ret)
		return ret;
	/* Cast to s64 if float or field ref */
	if (node->u.binary.left->data_type == IR_DATA_FIELD_REF
			|| node->u.binary.left->data_type == IR_DATA_FLOAT) {
		struct cast_op cast_insn;

		if (node->u.binary.left->data_type == IR_DATA_FIELD_REF) {
			cast_insn.op = FILTER_OP_CAST_TO_S64;
		} else {
			cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
		}
		ret = bytecode_push(&ctx->bytecode, &cast_insn,
				1, sizeof(cast_insn));
		if (ret)
			return ret;
	}
	switch (node->u.logical.type) {
	default:
		fprintf(stderr, "[error] Unknown node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_AND:
		insn.op = FILTER_OP_AND;
		break;
	case AST_OP_OR:
		insn.op = FILTER_OP_OR;
		break;
	}
	insn.skip_offset = (uint16_t) -1UL;	/* Temporary */
	ret = bytecode_push_logical(&ctx->bytecode, &insn, 1, sizeof(insn),
			&skip_offset_loc);
	if (ret)
		return ret;
	/* Visit right child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
	if (ret)
		return ret;
	/* Cast to s64 if float or field ref */
	if (node->u.binary.right->data_type == IR_DATA_FIELD_REF
			|| node->u.binary.right->data_type == IR_DATA_FLOAT) {
		struct cast_op cast_insn;

		if (node->u.binary.right->data_type == IR_DATA_FIELD_REF) {
			cast_insn.op = FILTER_OP_CAST_TO_S64;
		} else {
			cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
		}
		ret = bytecode_push(&ctx->bytecode, &cast_insn,
				1, sizeof(cast_insn));
		if (ret)
			return ret;
	}
	/* We now know where the logical op can skip. */
	target_loc = (uint16_t) bytecode_get_len(&ctx->bytecode->b);
	ret = bytecode_patch(&ctx->bytecode,
			&target_loc,		/* Offset to jump to */
			skip_offset_loc,	/* Where to patch */
			sizeof(uint16_t));
	return ret;
}
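
/*
 * Short-circuit layout for "a && b" (sketch; the exact skip semantics
 * live in the interpreter, not in this file):
 *
 *	[ evaluate a ]
 *	[ cast to s64, if needed ]
 *	[ FILTER_OP_AND, skip_offset = end ]	<- patched above
 *	[ evaluate b ]
 *	[ cast to s64, if needed ]
 *	end:
 *
 * When "a" is false, the interpreter can jump straight to "end"
 * without evaluating "b".
 */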

/*
 * Postorder traversal of the tree. We need the children's results
 * before we can evaluate the parent.
 */
static
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
		struct ir_op *node)
{
	switch (node->op) {
	case IR_OP_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown node type in %s\n",
			__func__);
		return -EINVAL;

	case IR_OP_ROOT:
		return visit_node_root(ctx, node);
	case IR_OP_LOAD:
		return visit_node_load(ctx, node);
	case IR_OP_UNARY:
		return visit_node_unary(ctx, node);
	case IR_OP_BINARY:
		return visit_node_binary(ctx, node);
	case IR_OP_LOGICAL:
		return visit_node_logical(ctx, node);
	}
}

__attribute__((visibility("hidden")))
void filter_bytecode_free(struct filter_parser_ctx *ctx)
{
	free(ctx->bytecode);
	ctx->bytecode = NULL;
	free(ctx->bytecode_reloc);
	ctx->bytecode_reloc = NULL;
}

__attribute__((visibility("hidden")))
int filter_visitor_bytecode_generate(struct filter_parser_ctx *ctx)
{
	int ret;

	ret = bytecode_init(&ctx->bytecode);
	if (ret)
		return ret;
	ret = bytecode_init(&ctx->bytecode_reloc);
	if (ret)
		goto error;
	ret = recursive_visit_gen_bytecode(ctx, ctx->ir_root);
	if (ret)
		goto error;

	/* Finally, append the relocation (symbol) table to the bytecode. */
	ctx->bytecode->b.reloc_table_offset = bytecode_get_len(&ctx->bytecode->b);
	return bytecode_push(&ctx->bytecode, ctx->bytecode_reloc->b.data,
			1, bytecode_get_len(&ctx->bytecode_reloc->b));

error:
	filter_bytecode_free(ctx);
	return ret;
}
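
/*
 * Resulting buffer layout (illustrative):
 *
 *	+---------------------------+  <- b.data[0]
 *	| filter instructions       |
 *	+---------------------------+  <- b.reloc_table_offset
 *	| reloc entries             |
 *	|   (uint16_t offset,       |
 *	|    NUL-terminated name)   |
 *	+---------------------------+  <- b.len
 *
 * The receiver uses reloc_table_offset to separate the instructions
 * from the relocations when linking field references.
 */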