struct ir_op *node);
static
-int bytecode_init(struct filter_bytecode_alloc **fb)
+int bytecode_init(struct lttng_filter_bytecode_alloc **fb)
{
- *fb = calloc(sizeof(struct filter_bytecode_alloc) + INIT_ALLOC_SIZE, 1);
+ *fb = calloc(sizeof(struct lttng_filter_bytecode_alloc) + INIT_ALLOC_SIZE, 1);
if (!*fb) {
return -ENOMEM;
} else {
}
static
-int32_t bytecode_reserve(struct filter_bytecode_alloc **fb, uint32_t align, uint32_t len)
+int32_t bytecode_reserve(struct lttng_filter_bytecode_alloc **fb, uint32_t align, uint32_t len)
{
int32_t ret;
uint32_t padding = offset_align((*fb)->b.len, align);
if (new_len > 0xFFFF)
return -EINVAL;
- *fb = realloc(*fb, sizeof(struct filter_bytecode_alloc) + new_len);
+ *fb = realloc(*fb, sizeof(struct lttng_filter_bytecode_alloc) + new_len);
if (!*fb)
return -ENOMEM;
memset(&(*fb)->b.data[old_len], 0, new_len - old_len);
}
static
-int bytecode_push(struct filter_bytecode_alloc **fb, const void *data,
+int bytecode_push(struct lttng_filter_bytecode_alloc **fb, const void *data,
uint32_t align, uint32_t len)
{
int32_t offset;
}
static
-int bytecode_push_logical(struct filter_bytecode_alloc **fb,
+int bytecode_push_logical(struct lttng_filter_bytecode_alloc **fb,
struct logical_op *data,
uint32_t align, uint32_t len,
uint16_t *skip_offset)
}
static
-int bytecode_patch(struct filter_bytecode_alloc **fb,
+int bytecode_patch(struct lttng_filter_bytecode_alloc **fb,
const void *data,
uint16_t offset,
uint32_t len)
free(insn);
return ret;
}
+	case IR_DATA_FLOAT:
+	{
+		struct load_op *insn;
+		uint32_t insn_len = sizeof(struct load_op)
+			+ sizeof(struct literal_double);
+
+		insn = calloc(insn_len, 1);
+		if (!insn)
+			return -ENOMEM;
+		insn->op = FILTER_OP_LOAD_DOUBLE;
+		insn->reg = reg_sel(node);
+		if (insn->reg == REG_ERROR) {
+			/* Don't leak insn on the error path. */
+			free(insn);
+			return -EINVAL;
+		}
+		/*
+		 * Copy the double literal into the instruction payload with
+		 * memcpy: insn->data is a raw byte array, so a direct
+		 * (double *) store could be misaligned / aliasing-unsafe.
+		 */
+		memcpy(insn->data, &node->u.load.u.flt, sizeof(double));
+		ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
+		free(insn);
+		return ret;
+	}
case IR_DATA_FIELD_REF:
{
struct load_op *insn;
memcpy(insn->data, &ref_offset, sizeof(ref_offset));
if (insn->reg == REG_ERROR)
return -EINVAL;
- /* reloc_offset points to struct field_ref */
+ /* reloc_offset points to struct load_op */
reloc_offset = bytecode_get_len(&ctx->bytecode->b);
- reloc_offset += sizeof(struct load_op);
ret = bytecode_push(&ctx->bytecode, insn, 1, insn_len);
if (ret) {
free(insn);
return bytecode_push(&ctx->bytecode, &insn, 1, sizeof(insn));
}
+/*
+ * A logical op always returns a s64 (1 or 0).
+ */
static
int visit_node_logical(struct filter_parser_ctx *ctx, struct ir_op *node)
{
ret = recursive_visit_gen_bytecode(ctx, node->u.binary.left);
if (ret)
return ret;
+ /* Cast to s64 if float or field ref */
+ if (node->u.binary.left->data_type == IR_DATA_FIELD_REF
+ || node->u.binary.left->data_type == IR_DATA_FLOAT) {
+ struct cast_op cast_insn;
+
+ if (node->u.binary.left->data_type == IR_DATA_FIELD_REF) {
+ cast_insn.op = FILTER_OP_CAST_TO_S64;
+ } else {
+ cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
+ }
+ cast_insn.reg = REG_R0;
+ ret = bytecode_push(&ctx->bytecode, &cast_insn,
+ 1, sizeof(cast_insn));
+ if (ret)
+ return ret;
+ }
switch (node->u.logical.type) {
default:
fprintf(stderr, "[error] Unknown node type in %s\n",
ret = recursive_visit_gen_bytecode(ctx, node->u.binary.right);
if (ret)
return ret;
+ /* Cast to s64 if float or field ref */
+ if (node->u.binary.right->data_type == IR_DATA_FIELD_REF
+ || node->u.binary.right->data_type == IR_DATA_FLOAT) {
+ struct cast_op cast_insn;
+
+ if (node->u.binary.right->data_type == IR_DATA_FIELD_REF) {
+ cast_insn.op = FILTER_OP_CAST_TO_S64;
+ } else {
+ cast_insn.op = FILTER_OP_CAST_DOUBLE_TO_S64;
+ }
+ cast_insn.reg = REG_R0;
+ ret = bytecode_push(&ctx->bytecode, &cast_insn,
+ 1, sizeof(cast_insn));
+ if (ret)
+ return ret;
+ }
/* We now know where the logical op can skip. */
target_loc = (uint16_t) bytecode_get_len(&ctx->bytecode->b);
ret = bytecode_patch(&ctx->bytecode,