Cleanup: use LTTNG_HIDDEN in lttng-ctl filter lib
lttng-tools.git: src/lib/lttng-ctl/filter/filter-visitor-generate-bytecode.c
/*
 * filter-visitor-generate-bytecode.c
 *
 * LTTng filter bytecode generation
 *
 * Copyright 2012 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or modify it
 * under the terms of the GNU Lesser General Public License, version 2.1 only,
 * as published by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
21
22 #include <stdlib.h>
23 #include <string.h>
24 #include <errno.h>
25 #include "align.h"
26 #include "filter-bytecode.h"
27 #include "filter-ir.h"
28 #include "filter-ast.h"
29
30 #include <common/macros.h>
31
#ifndef max_t
#define max_t(type, a, b)	((type) ((a) > (b) ? (a) : (b)))
#endif

//#define INIT_ALLOC_SIZE	PAGE_SIZE
#define INIT_ALLOC_SIZE		4

static
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
		struct ir_op *node);

static inline int fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xFFFF0000U)) {
		x <<= 16;
		r -= 16;
	}
	if (!(x & 0xFF000000U)) {
		x <<= 8;
		r -= 8;
	}
	if (!(x & 0xF0000000U)) {
		x <<= 4;
		r -= 4;
	}
	if (!(x & 0xC0000000U)) {
		x <<= 2;
		r -= 2;
	}
	if (!(x & 0x80000000U)) {
		x <<= 1;
		r -= 1;
	}
	return r;
}

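/*
 * get_count_order: order (base-2 exponent) of the smallest power of two
 * greater than or equal to count, e.g. get_count_order(5) == 3.
 */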
static inline int get_count_order(unsigned int count)
{
	int order;

	order = fls(count) - 1;
	if (count & (count - 1))
		order++;
	return order;
}

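/*
 * Allocate the initial bytecode buffer: header plus INIT_ALLOC_SIZE
 * payload bytes, zero-initialized. Returns 0 or -ENOMEM.
 */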
static
int bytecode_init(struct lttng_filter_bytecode_alloc **fb)
{
	uint32_t alloc_len;

	alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + INIT_ALLOC_SIZE;
	*fb = calloc(alloc_len, 1);
	if (!*fb) {
		return -ENOMEM;
	} else {
		(*fb)->alloc_len = alloc_len;
		return 0;
	}
}

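/*
 * Reserve "len" bytes at an "align"-aligned offset at the end of the
 * bytecode, growing the allocation geometrically (next power of two, at
 * least doubling) as needed. Returns the offset of the reserved region,
 * or a negative errno value on error.
 */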
static
int32_t bytecode_reserve(struct lttng_filter_bytecode_alloc **fb, uint32_t align, uint32_t len)
{
	int32_t ret;
	uint32_t padding = offset_align((*fb)->b.len, align);
	uint32_t new_len = (*fb)->b.len + padding + len;
	uint32_t new_alloc_len = sizeof(struct lttng_filter_bytecode_alloc) + new_len;
	uint32_t old_alloc_len = (*fb)->alloc_len;

	if (new_len > LTTNG_FILTER_MAX_LEN)
		return -EINVAL;

	if (new_alloc_len > old_alloc_len) {
		struct lttng_filter_bytecode_alloc *newptr;

		new_alloc_len =
			max_t(uint32_t, 1U << get_count_order(new_alloc_len), old_alloc_len << 1);
		/* Use a temporary so *fb is not leaked if realloc fails. */
		newptr = realloc(*fb, new_alloc_len);
		if (!newptr)
			return -ENOMEM;
		*fb = newptr;
		/* Zero the newly allocated tail of the buffer. */
		memset(&((char *) *fb)[old_alloc_len], 0, new_alloc_len - old_alloc_len);
		(*fb)->alloc_len = new_alloc_len;
	}
	(*fb)->b.len += padding;
	ret = (*fb)->b.len;
	(*fb)->b.len += len;
	return ret;
}

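/*
 * Append "len" bytes of "data" to the bytecode, aligned on "align".
 * Returns 0 on success, a negative errno value on error.
 */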
static
int bytecode_push(struct lttng_filter_bytecode_alloc **fb, const void *data,
		uint32_t align, uint32_t len)
{
	int32_t offset;

	offset = bytecode_reserve(fb, align, len);
	if (offset < 0)
		return offset;
	memcpy(&(*fb)->b.data[offset], data, len);
	return 0;
}

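/*
 * Same as bytecode_push, but also record in *skip_offset the position,
 * within the bytecode stream, of the logical instruction's skip_offset
 * field, so it can be patched once the jump target is known.
 */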
static
int bytecode_push_logical(struct lttng_filter_bytecode_alloc **fb,
		struct logical_op *data,
		uint32_t align, uint32_t len,
		uint16_t *skip_offset)
{
	int32_t offset;

	offset = bytecode_reserve(fb, align, len);
	if (offset < 0)
		return offset;
	memcpy(&(*fb)->b.data[offset], data, len);
	/* Use char * arithmetic: arithmetic on void * is non-standard. */
	*skip_offset =
		(char *) &((struct logical_op *) &(*fb)->b.data[offset])->skip_offset
			- (char *) &(*fb)->b.data[0];
	return 0;
}

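/*
 * Overwrite "len" bytes at "offset" with "data". Returns -EINVAL if the
 * offset is past the end of the generated bytecode.
 */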
static
int bytecode_patch(struct lttng_filter_bytecode_alloc **fb,
		const void *data,
		uint16_t offset,
		uint32_t len)
{
	if (offset >= (*fb)->b.len) {
		return -EINVAL;
	}
	memcpy(&(*fb)->b.data[offset], data, len);
	return 0;
}

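/*
 * Emit the bytecode for the root node's child, then terminate the
 * program with a FILTER_OP_RETURN instruction.
 */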
static
int visit_node_root(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct return_op insn;

	/* Visit child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.root.child);
	if (ret)
		return ret;

	/* Generate end of bytecode instruction */
	insn.op = FILTER_OP_RETURN;
	return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
}

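/*
 * Emit a load instruction for a literal (string, s64 or double) or a
 * field reference. A field reference is emitted with a placeholder
 * offset; the field name is appended to the relocation table, prefixed
 * by the offset of the load instruction it applies to.
 */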
static
int visit_node_load(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;

	switch (node->data_type) {
	case IR_DATA_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown data type in %s\n",
			__func__);
		return -EINVAL;

	case IR_DATA_STRING:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ strlen(node->u.load.u.string) + 1;

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_STRING;
		strcpy(insn->data, node->u.load.u.string);
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_NUMERIC:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct literal_numeric);

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_S64;
		*(int64_t *) insn->data = node->u.load.u.num;
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_FLOAT:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct literal_double);

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_DOUBLE;
		*(double *) insn->data = node->u.load.u.flt;
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		free(insn);
		return ret;
	}
	case IR_DATA_FIELD_REF:
	{
		struct load_op *insn;
		uint32_t insn_len = sizeof(struct load_op)
			+ sizeof(struct field_ref);
		struct field_ref ref_offset;
		uint32_t reloc_offset_u32;
		uint16_t reloc_offset;

		insn = calloc(insn_len, 1);
		if (!insn)
			return -ENOMEM;
		insn->op = FILTER_OP_LOAD_FIELD_REF;
		/* Placeholder offset, resolved through the relocation table. */
		ref_offset.offset = (uint16_t) -1U;
		memcpy(insn->data, &ref_offset, sizeof(ref_offset));
		/* reloc_offset points to struct load_op */
		reloc_offset_u32 = bytecode_get_len(&ctx->bytecode->b);
		if (reloc_offset_u32 > LTTNG_FILTER_MAX_LEN - 1) {
			free(insn);
			return -EINVAL;
		}
		reloc_offset = (uint16_t) reloc_offset_u32;
		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
		if (ret) {
			free(insn);
			return ret;
		}
		/* Append reloc: instruction offset, then field name. */
		ret = bytecode_push(&ctx->bytecode_reloc, &reloc_offset,
				1, sizeof(reloc_offset));
		if (ret) {
			free(insn);
			return ret;
		}
		ret = bytecode_push(&ctx->bytecode_reloc, node->u.load.u.ref,
				1, strlen(node->u.load.u.ref) + 1);
		free(insn);
		return ret;
	}
	}
}

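/*
 * Emit the bytecode for a unary expression: visit the child, then emit
 * the matching unary instruction. Unary plus is a no-op.
 */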
static
int visit_node_unary(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct unary_op insn;

	/* Visit child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.unary.child);
	if (ret)
		return ret;

	/* Generate unary instruction */
	switch (node->u.unary.type) {
	case AST_UNARY_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown unary node type in %s\n",
			__func__);
		return -EINVAL;
	case AST_UNARY_PLUS:
		/* Nothing to do. */
		return 0;
	case AST_UNARY_MINUS:
		insn.op = FILTER_OP_UNARY_MINUS;
		return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
	case AST_UNARY_NOT:
		insn.op = FILTER_OP_UNARY_NOT;
		return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
	}
}

/*
 * Binary comparator nesting is disallowed. This allows fitting into
 * only 2 registers.
 */
static
int visit_node_binary(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct binary_op insn;

	/* Visit children */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
	if (ret)
		return ret;
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
	if (ret)
		return ret;

	switch (node->u.binary.type) {
	case AST_OP_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown binary node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_AND:
	case AST_OP_OR:
		fprintf(stderr, "[error] Unexpected logical node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_MUL:
		insn.op = FILTER_OP_MUL;
		break;
	case AST_OP_DIV:
		insn.op = FILTER_OP_DIV;
		break;
	case AST_OP_MOD:
		insn.op = FILTER_OP_MOD;
		break;
	case AST_OP_PLUS:
		insn.op = FILTER_OP_PLUS;
		break;
	case AST_OP_MINUS:
		insn.op = FILTER_OP_MINUS;
		break;
	case AST_OP_RSHIFT:
		insn.op = FILTER_OP_RSHIFT;
		break;
	case AST_OP_LSHIFT:
		insn.op = FILTER_OP_LSHIFT;
		break;
	case AST_OP_BIN_AND:
		insn.op = FILTER_OP_BIN_AND;
		break;
	case AST_OP_BIN_OR:
		insn.op = FILTER_OP_BIN_OR;
		break;
	case AST_OP_BIN_XOR:
		insn.op = FILTER_OP_BIN_XOR;
		break;

	case AST_OP_EQ:
		insn.op = FILTER_OP_EQ;
		break;
	case AST_OP_NE:
		insn.op = FILTER_OP_NE;
		break;
	case AST_OP_GT:
		insn.op = FILTER_OP_GT;
		break;
	case AST_OP_LT:
		insn.op = FILTER_OP_LT;
		break;
	case AST_OP_GE:
		insn.op = FILTER_OP_GE;
		break;
	case AST_OP_LE:
		insn.op = FILTER_OP_LE;
		break;
	}
	return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
}

/*
 * A logical op always returns a s64 (1 or 0). Its skip_offset field
 * implements short-circuit evaluation: it is patched, once the
 * right-hand side has been emitted, to point past it.
 */
static
int visit_node_logical(struct filter_parser_ctx *ctx, struct ir_op *node)
{
	int ret;
	struct logical_op insn;
	uint16_t skip_offset_loc;
	uint16_t target_loc;

	/* Visit left child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
	if (ret)
		return ret;
	/* Cast to s64 if float or field ref */
	if (node->u.binary.left->data_type == IR_DATA_FIELD_REF
			|| node->u.binary.left->data_type == IR_DATA_FLOAT) {
		struct cast_op cast_insn;

		if (node->u.binary.left->data_type == IR_DATA_FIELD_REF) {
			cast_insn.op = FILTER_OP_CAST_TO_S64;
		} else {
			cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
		}
		ret = bytecode_push(&ctx->bytecode, &cast_insn,
				1, sizeof(cast_insn));
		if (ret)
			return ret;
	}
	switch (node->u.logical.type) {
	default:
		fprintf(stderr, "[error] Unknown node type in %s\n",
			__func__);
		return -EINVAL;

	case AST_OP_AND:
		insn.op = FILTER_OP_AND;
		break;
	case AST_OP_OR:
		insn.op = FILTER_OP_OR;
		break;
	}
	insn.skip_offset = (uint16_t) -1UL;	/* Temporary */
	ret = bytecode_push_logical(&ctx->bytecode, &insn, 1, sizeof(insn),
			&skip_offset_loc);
	if (ret)
		return ret;
	/* Visit right child */
	ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
	if (ret)
		return ret;
	/* Cast to s64 if float or field ref */
	if (node->u.binary.right->data_type == IR_DATA_FIELD_REF
			|| node->u.binary.right->data_type == IR_DATA_FLOAT) {
		struct cast_op cast_insn;

		if (node->u.binary.right->data_type == IR_DATA_FIELD_REF) {
			cast_insn.op = FILTER_OP_CAST_TO_S64;
		} else {
			cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
		}
		ret = bytecode_push(&ctx->bytecode, &cast_insn,
				1, sizeof(cast_insn));
		if (ret)
			return ret;
	}
	/* We now know where the logical op can skip. */
	target_loc = (uint16_t) bytecode_get_len(&ctx->bytecode->b);
	ret = bytecode_patch(&ctx->bytecode,
			&target_loc,		/* Offset to jump to */
			skip_offset_loc,	/* Where to patch */
			sizeof(uint16_t));
	return ret;
}

/*
 * Postorder traversal of the tree. We need the children's results
 * before we can evaluate the parent.
 */
static
int recursive_visit_gen_bytecode(struct filter_parser_ctx *ctx,
		struct ir_op *node)
{
	switch (node->op) {
	case IR_OP_UNKNOWN:
	default:
		fprintf(stderr, "[error] Unknown node type in %s\n",
			__func__);
		return -EINVAL;

	case IR_OP_ROOT:
		return visit_node_root(ctx, node);
	case IR_OP_LOAD:
		return visit_node_load(ctx, node);
	case IR_OP_UNARY:
		return visit_node_unary(ctx, node);
	case IR_OP_BINARY:
		return visit_node_binary(ctx, node);
	case IR_OP_LOGICAL:
		return visit_node_logical(ctx, node);
	}
}

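/*
 * Free both bytecode buffers (instruction stream and relocation table)
 * held by the parser context.
 */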
LTTNG_HIDDEN
void filter_bytecode_free(struct filter_parser_ctx *ctx)
{
	free(ctx->bytecode);
	ctx->bytecode = NULL;
	free(ctx->bytecode_reloc);
	ctx->bytecode_reloc = NULL;
}

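/*
 * Generate the filter bytecode from the IR tree rooted at ctx->ir_root,
 * then append the relocation table to the instruction stream, recording
 * its start offset in the bytecode header. On error, both bytecode
 * buffers are freed.
 */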
LTTNG_HIDDEN
int filter_visitor_bytecode_generate(struct filter_parser_ctx *ctx)
{
	int ret;

	ret = bytecode_init(&ctx->bytecode);
	if (ret)
		return ret;
	ret = bytecode_init(&ctx->bytecode_reloc);
	if (ret)
		goto error;
	ret = recursive_visit_gen_bytecode(ctx, ctx->ir_root);
	if (ret)
		goto error;

	/* Finally, append symbol table to bytecode */
	ctx->bytecode->b.reloc_table_offset = bytecode_get_len(&ctx->bytecode->b);
	return bytecode_push(&ctx->bytecode, ctx->bytecode_reloc->b.data,
			1, bytecode_get_len(&ctx->bytecode_reloc->b));

error:
	filter_bytecode_free(ctx);
	return ret;
}