*
* LTTng UST filter bytecode validator.
*
- * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; only
- * version 2.1 of the License.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
*
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
*/
#define _LGPL_SOURCE
#include <urcu/rculfhash.h>
#include "lttng-hash-helper.h"
+#include "string-utils.h"
/*
* Number of merge points for hash table size. Hash table initialized to
const struct vstack *stack)
{
struct lfht_mp_node *node;
- unsigned long hash = lttng_hash_mix((const void *) target_pc,
+ unsigned long hash = lttng_hash_mix((const char *) target_pc,
sizeof(target_pc),
lttng_hash_seed);
struct cds_lfht_node *ret;
node->target_pc = target_pc;
memcpy(&node->stack, stack, sizeof(node->stack));
ret = cds_lfht_add_unique(ht, hash, lttng_hash_match,
- (const void *) target_pc, &node->node);
+ (const char *) target_pc, &node->node);
if (ret != &node->node) {
struct lfht_mp_node *ret_mp =
caa_container_of(ret, struct lfht_mp_node, node);
* (unknown), negative error value on error.
*/
static
-int bin_op_compare_check(struct vstack *stack, const char *str)
+int bin_op_compare_check(struct vstack *stack, filter_opcode_t opcode,
+ const char *str)
{
if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
goto error_empty;
goto unknown;
case REG_STRING:
break;
+ case REG_STAR_GLOB_STRING:
+ if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
+ goto error_mismatch;
+ }
+ break;
+ case REG_S64:
+ case REG_DOUBLE:
+ goto error_mismatch;
+ }
+ break;
+ case REG_STAR_GLOB_STRING:
+ switch (vstack_bx(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_STRING:
+ if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
+ goto error_mismatch;
+ }
+ break;
+ case REG_STAR_GLOB_STRING:
case REG_S64:
case REG_DOUBLE:
goto error_mismatch;
case REG_UNKNOWN:
goto unknown;
case REG_STRING:
+ case REG_STAR_GLOB_STRING:
goto error_mismatch;
case REG_S64:
case REG_DOUBLE:
return -EINVAL;
}
+/*
+ * Binary bitwise operators use top of stack and top of stack -1.
+ * Return 0 if typing is known to match, 1 if typing is dynamic
+ * (unknown), negative error value on error.
+ *
+ * Only REG_S64 operands (or operands whose type is still unknown at
+ * validation time) are accepted: bitwise ops are undefined on strings,
+ * globbing patterns and doubles, which fall through to error_type.
+ *
+ * NOTE(review): the "opcode" parameter is not read in this body;
+ * presumably kept for signature symmetry with bin_op_compare_check —
+ * confirm before removing.
+ */
+static
+int bin_op_bitwise_check(struct vstack *stack, filter_opcode_t opcode,
+ const char *str)
+{
+ /* Both operands (ax = top of stack, bx = top of stack - 1) must exist. */
+ if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
+ goto error_empty;
+
+ switch (vstack_ax(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_S64:
+ switch (vstack_bx(stack)->type) {
+ default:
+ goto error_type;
+
+ case REG_UNKNOWN:
+ goto unknown;
+ case REG_S64:
+ break;
+ }
+ break;
+ }
+ return 0;
+
+unknown:
+ /* Typing is dynamic: defer the check to runtime. */
+ return 1;
+
+error_empty:
+ ERR("empty stack for '%s' binary operator\n", str);
+ return -EINVAL;
+
+error_type:
+ ERR("unknown type for '%s' binary operator\n", str);
+ return -EINVAL;
+}
+
+/*
+ * Validate a get_symbol operand: the symbol's offset must land inside
+ * the bytecode's relocation/data area, and the string starting there
+ * must be NUL-terminated before the end of the bytecode buffer.
+ *
+ * Returns 0 on success, -EINVAL on out-of-range offset or missing
+ * terminator.
+ *
+ * NOTE(review): the range check computes len - reloc_offset with
+ * unsigned arithmetic; assumes reloc_offset <= len — confirm callers
+ * guarantee this, otherwise the subtraction wraps around.
+ */
+static
+int validate_get_symbol(struct bytecode_runtime *bytecode,
+ const struct get_symbol *sym)
+{
+ const char *str, *str_limit;
+ size_t len_limit;
+
+ if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
+ return -EINVAL;
+
+ /* sym->offset is relative to the start of the relocation area. */
+ str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
+ str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
+ len_limit = str_limit - str;
+ /* strnlen() == len_limit means no NUL byte before the buffer end. */
+ if (strnlen(str, len_limit) == len_limit)
+ return -EINVAL;
+ return 0;
+}
+
/*
* Validate bytecode range overflow within the validation pass.
* Called for each instruction encountered.
*/
static
int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
- void *start_pc, void *pc)
+ char *start_pc, char *pc)
{
int ret = 0;
}
case FILTER_OP_RETURN:
+ case FILTER_OP_RETURN_S64:
{
if (unlikely(pc + sizeof(struct return_op)
> start_pc + bytecode->len)) {
case FILTER_OP_MOD:
case FILTER_OP_PLUS:
case FILTER_OP_MINUS:
- case FILTER_OP_RSHIFT:
- case FILTER_OP_LSHIFT:
- case FILTER_OP_BIN_AND:
- case FILTER_OP_BIN_OR:
- case FILTER_OP_BIN_XOR:
{
ERR("unsupported bytecode op %u\n",
(unsigned int) *(filter_opcode_t *) pc);
case FILTER_OP_LT_STRING:
case FILTER_OP_GE_STRING:
case FILTER_OP_LE_STRING:
+ case FILTER_OP_EQ_STAR_GLOB_STRING:
+ case FILTER_OP_NE_STAR_GLOB_STRING:
case FILTER_OP_EQ_S64:
case FILTER_OP_NE_S64:
case FILTER_OP_GT_S64:
case FILTER_OP_LT_S64_DOUBLE:
case FILTER_OP_GE_S64_DOUBLE:
case FILTER_OP_LE_S64_DOUBLE:
+ case FILTER_OP_BIT_RSHIFT:
+ case FILTER_OP_BIT_LSHIFT:
+ case FILTER_OP_BIT_AND:
+ case FILTER_OP_BIT_OR:
+ case FILTER_OP_BIT_XOR:
{
if (unlikely(pc + sizeof(struct binary_op)
> start_pc + bytecode->len)) {
case FILTER_OP_UNARY_PLUS_DOUBLE:
case FILTER_OP_UNARY_MINUS_DOUBLE:
case FILTER_OP_UNARY_NOT_DOUBLE:
+ case FILTER_OP_UNARY_BIT_NOT:
{
if (unlikely(pc + sizeof(struct unary_op)
> start_pc + bytecode->len)) {
ret = -EINVAL;
break;
}
+
/* get context ref */
case FILTER_OP_GET_CONTEXT_REF:
case FILTER_OP_LOAD_FIELD_REF_STRING:
/* load from immediate operand */
case FILTER_OP_LOAD_STRING:
+ case FILTER_OP_LOAD_STAR_GLOB_STRING:
{
struct load_op *insn = (struct load_op *) pc;
uint32_t str_len, maxlen;
break;
}
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case FILTER_OP_GET_CONTEXT_ROOT:
+ case FILTER_OP_GET_APP_CONTEXT_ROOT:
+ case FILTER_OP_GET_PAYLOAD_ROOT:
+ case FILTER_OP_LOAD_FIELD:
+ case FILTER_OP_LOAD_FIELD_S8:
+ case FILTER_OP_LOAD_FIELD_S16:
+ case FILTER_OP_LOAD_FIELD_S32:
+ case FILTER_OP_LOAD_FIELD_S64:
+ case FILTER_OP_LOAD_FIELD_U8:
+ case FILTER_OP_LOAD_FIELD_U16:
+ case FILTER_OP_LOAD_FIELD_U32:
+ case FILTER_OP_LOAD_FIELD_U64:
+ case FILTER_OP_LOAD_FIELD_STRING:
+ case FILTER_OP_LOAD_FIELD_SEQUENCE:
+ case FILTER_OP_LOAD_FIELD_DOUBLE:
+ if (unlikely(pc + sizeof(struct load_op)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+
+ case FILTER_OP_GET_SYMBOL:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ break;
+ }
+ ret = validate_get_symbol(bytecode, sym);
+ break;
+ }
+
+ case FILTER_OP_GET_SYMBOL_FIELD:
+ ERR("Unexpected get symbol field");
+ ret = -EINVAL;
+ break;
+
+ case FILTER_OP_GET_INDEX_U16:
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
+
+ case FILTER_OP_GET_INDEX_U64:
+ if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
+ > start_pc + bytecode->len)) {
+ ret = -ERANGE;
+ }
+ break;
}
return ret;
static
int validate_instruction_context(struct bytecode_runtime *bytecode,
struct vstack *stack,
- void *start_pc,
- void *pc)
+ char *start_pc,
+ char *pc)
{
int ret = 0;
+ const filter_opcode_t opcode = *(filter_opcode_t *) pc;
- switch (*(filter_opcode_t *) pc) {
+ switch (opcode) {
case FILTER_OP_UNKNOWN:
default:
{
}
case FILTER_OP_RETURN:
+ case FILTER_OP_RETURN_S64:
{
goto end;
}
case FILTER_OP_MOD:
case FILTER_OP_PLUS:
case FILTER_OP_MINUS:
- case FILTER_OP_RSHIFT:
- case FILTER_OP_LSHIFT:
- case FILTER_OP_BIN_AND:
- case FILTER_OP_BIN_OR:
- case FILTER_OP_BIN_XOR:
{
ERR("unsupported bytecode op %u\n",
- (unsigned int) *(filter_opcode_t *) pc);
+ (unsigned int) opcode);
ret = -EINVAL;
goto end;
}
case FILTER_OP_EQ:
{
- ret = bin_op_compare_check(stack, "==");
+ ret = bin_op_compare_check(stack, opcode, "==");
if (ret < 0)
goto end;
break;
}
case FILTER_OP_NE:
{
- ret = bin_op_compare_check(stack, "!=");
+ ret = bin_op_compare_check(stack, opcode, "!=");
if (ret < 0)
goto end;
break;
}
case FILTER_OP_GT:
{
- ret = bin_op_compare_check(stack, ">");
+ ret = bin_op_compare_check(stack, opcode, ">");
if (ret < 0)
goto end;
break;
}
case FILTER_OP_LT:
{
- ret = bin_op_compare_check(stack, "<");
+ ret = bin_op_compare_check(stack, opcode, "<");
if (ret < 0)
goto end;
break;
}
case FILTER_OP_GE:
{
- ret = bin_op_compare_check(stack, ">=");
+ ret = bin_op_compare_check(stack, opcode, ">=");
if (ret < 0)
goto end;
break;
}
case FILTER_OP_LE:
{
- ret = bin_op_compare_check(stack, "<=");
+ ret = bin_op_compare_check(stack, opcode, "<=");
if (ret < 0)
goto end;
break;
break;
}
+ case FILTER_OP_EQ_STAR_GLOB_STRING:
+ case FILTER_OP_NE_STAR_GLOB_STRING:
+ {
+ if (!vstack_ax(stack) || !vstack_bx(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
+ && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
+ ERR("Unexpected register type for globbing pattern comparator\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ break;
+ }
+
case FILTER_OP_EQ_S64:
case FILTER_OP_NE_S64:
case FILTER_OP_GT_S64:
break;
}
+ case FILTER_OP_BIT_RSHIFT:
+ ret = bin_op_bitwise_check(stack, opcode, ">>");
+ if (ret < 0)
+ goto end;
+ break;
+ case FILTER_OP_BIT_LSHIFT:
+ ret = bin_op_bitwise_check(stack, opcode, "<<");
+ if (ret < 0)
+ goto end;
+ break;
+ case FILTER_OP_BIT_AND:
+ ret = bin_op_bitwise_check(stack, opcode, "&");
+ if (ret < 0)
+ goto end;
+ break;
+ case FILTER_OP_BIT_OR:
+ ret = bin_op_bitwise_check(stack, opcode, "|");
+ if (ret < 0)
+ goto end;
+ break;
+ case FILTER_OP_BIT_XOR:
+ ret = bin_op_bitwise_check(stack, opcode, "^");
+ if (ret < 0)
+ goto end;
+ break;
+
/* unary */
case FILTER_OP_UNARY_PLUS:
case FILTER_OP_UNARY_MINUS:
goto end;
case REG_STRING:
+ case REG_STAR_GLOB_STRING:
ERR("Unary op can only be applied to numeric or floating point registers\n");
ret = -EINVAL;
goto end;
}
break;
}
+ case FILTER_OP_UNARY_BIT_NOT:
+ {
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ default:
+ ERR("unknown register type\n");
+ ret = -EINVAL;
+ goto end;
+
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ case REG_DOUBLE:
+ ERR("Unary bitwise op can only be applied to numeric registers\n");
+ ret = -EINVAL;
+ goto end;
+ case REG_S64:
+ break;
+ case REG_UNKNOWN:
+ break;
+ }
+ break;
+ }
case FILTER_OP_UNARY_PLUS_S64:
case FILTER_OP_UNARY_MINUS_S64:
/* load from immediate operand */
case FILTER_OP_LOAD_STRING:
+ case FILTER_OP_LOAD_STAR_GLOB_STRING:
{
break;
}
goto end;
case REG_STRING:
+ case REG_STAR_GLOB_STRING:
ERR("Cast op can only be applied to numeric or floating point registers\n");
ret = -EINVAL;
goto end;
break;
}
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case FILTER_OP_GET_CONTEXT_ROOT:
+ {
+ dbg_printf("Validate get context root\n");
+ break;
+ }
+ case FILTER_OP_GET_APP_CONTEXT_ROOT:
+ {
+ dbg_printf("Validate get app context root\n");
+ break;
+ }
+ case FILTER_OP_GET_PAYLOAD_ROOT:
+ {
+ dbg_printf("Validate get payload root\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD:
+ {
+ /*
+ * We tolerate that field type is unknown at validation,
+ * because we are performing the load specialization in
+ * a phase after validation.
+ */
+ dbg_printf("Validate load field\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_S8:
+ {
+ dbg_printf("Validate load field s8\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_S16:
+ {
+ dbg_printf("Validate load field s16\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_S32:
+ {
+ dbg_printf("Validate load field s32\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_S64:
+ {
+ dbg_printf("Validate load field s64\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_U8:
+ {
+ dbg_printf("Validate load field u8\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_U16:
+ {
+ dbg_printf("Validate load field u16\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_U32:
+ {
+ dbg_printf("Validate load field u32\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_U64:
+ {
+ dbg_printf("Validate load field u64\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_STRING:
+ {
+ dbg_printf("Validate load field string\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_SEQUENCE:
+ {
+ dbg_printf("Validate load field sequence\n");
+ break;
+ }
+ case FILTER_OP_LOAD_FIELD_DOUBLE:
+ {
+ dbg_printf("Validate load field double\n");
+ break;
+ }
+
+ case FILTER_OP_GET_SYMBOL:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ dbg_printf("Validate get symbol offset %u\n", sym->offset);
+ break;
+ }
+
+ case FILTER_OP_GET_SYMBOL_FIELD:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_symbol *sym = (struct get_symbol *) insn->data;
+
+ dbg_printf("Validate get symbol field offset %u\n", sym->offset);
+ break;
+ }
+
+ case FILTER_OP_GET_INDEX_U16:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;
+
+ dbg_printf("Validate get index u16 index %u\n", get_index->index);
+ break;
+ }
+
+ case FILTER_OP_GET_INDEX_U64:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+ struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;
+
+ dbg_printf("Validate get index u64 index %" PRIu64 "\n", get_index->index);
+ break;
+ }
}
end:
return ret;
int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
struct cds_lfht *merge_points,
struct vstack *stack,
- void *start_pc,
- void *pc)
+ char *start_pc,
+ char *pc)
{
int ret;
unsigned long target_pc = pc - start_pc;
return ret;
/* Validate merge points */
- hash = lttng_hash_mix((const void *) target_pc, sizeof(target_pc),
+ hash = lttng_hash_mix((const char *) target_pc, sizeof(target_pc),
lttng_hash_seed);
cds_lfht_lookup(merge_points, hash, lttng_hash_match,
- (const void *) target_pc, &iter);
+ (const char *) target_pc, &iter);
node = cds_lfht_iter_get_node(&iter);
if (node) {
mp_node = caa_container_of(node, struct lfht_mp_node, node);
-
+
dbg_printf("Filter: validate merge point at offset %lu\n",
target_pc);
if (merge_points_compare(stack, &mp_node->stack)) {
int exec_insn(struct bytecode_runtime *bytecode,
struct cds_lfht *merge_points,
struct vstack *stack,
- void **_next_pc,
- void *pc)
+ char **_next_pc,
+ char *pc)
{
int ret = 1;
- void *next_pc = *_next_pc;
+ char *next_pc = *_next_pc;
switch (*(filter_opcode_t *) pc) {
case FILTER_OP_UNKNOWN:
ret = -EINVAL;
goto end;
}
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_UNKNOWN:
+ break;
+ default:
+ ERR("Unexpected register type %d at end of bytecode\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ ret = 0;
+ goto end;
+ }
+ case FILTER_OP_RETURN_S64:
+ {
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ break;
+ default:
+ case REG_UNKNOWN:
+ ERR("Unexpected register type %d at end of bytecode\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
ret = 0;
goto end;
}
case FILTER_OP_MOD:
case FILTER_OP_PLUS:
case FILTER_OP_MINUS:
- case FILTER_OP_RSHIFT:
- case FILTER_OP_LSHIFT:
- case FILTER_OP_BIN_AND:
- case FILTER_OP_BIN_OR:
- case FILTER_OP_BIN_XOR:
{
ERR("unsupported bytecode op %u\n",
(unsigned int) *(filter_opcode_t *) pc);
case FILTER_OP_LT_STRING:
case FILTER_OP_GE_STRING:
case FILTER_OP_LE_STRING:
+ case FILTER_OP_EQ_STAR_GLOB_STRING:
+ case FILTER_OP_NE_STAR_GLOB_STRING:
case FILTER_OP_EQ_S64:
case FILTER_OP_NE_S64:
case FILTER_OP_GT_S64:
case FILTER_OP_LT_S64_DOUBLE:
case FILTER_OP_GE_S64_DOUBLE:
case FILTER_OP_LE_S64_DOUBLE:
+ case FILTER_OP_BIT_RSHIFT:
+ case FILTER_OP_BIT_LSHIFT:
+ case FILTER_OP_BIT_AND:
+ case FILTER_OP_BIT_OR:
+ case FILTER_OP_BIT_XOR:
{
/* Pop 2, push 1 */
if (vstack_pop(stack)) {
ret = -EINVAL;
goto end;
}
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_DOUBLE:
+ case REG_STRING:
+ case REG_STAR_GLOB_STRING:
+ case REG_UNKNOWN:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
vstack_ax(stack)->type = REG_S64;
next_pc += sizeof(struct binary_op);
break;
ret = -EINVAL;
goto end;
}
+ switch (vstack_ax(stack)->type) {
+ case REG_UNKNOWN:
+ case REG_DOUBLE:
+ case REG_S64:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
vstack_ax(stack)->type = REG_UNKNOWN;
next_pc += sizeof(struct unary_op);
break;
case FILTER_OP_UNARY_PLUS_S64:
case FILTER_OP_UNARY_MINUS_S64:
- case FILTER_OP_UNARY_NOT:
case FILTER_OP_UNARY_NOT_S64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case FILTER_OP_UNARY_NOT:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_UNKNOWN:
+ case REG_DOUBLE:
+ case REG_S64:
+ break;
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
+ case FILTER_OP_UNARY_BIT_NOT:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ switch (vstack_ax(stack)->type) {
+ case REG_UNKNOWN:
+ case REG_S64:
+ break;
+ case REG_DOUBLE:
+ default:
+ ERR("Unexpected register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct unary_op);
+ break;
+ }
+
case FILTER_OP_UNARY_NOT_DOUBLE:
{
/* Pop 1, push 1 */
ret = -EINVAL;
goto end;
}
+ switch (vstack_ax(stack)->type) {
+ case REG_DOUBLE:
+ break;
+ default:
+ ERR("Incorrect register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
vstack_ax(stack)->type = REG_S64;
next_pc += sizeof(struct unary_op);
break;
ret = -EINVAL;
goto end;
}
+ switch (vstack_ax(stack)->type) {
+ case REG_DOUBLE:
+ break;
+ default:
+ ERR("Incorrect register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
vstack_ax(stack)->type = REG_DOUBLE;
next_pc += sizeof(struct unary_op);
break;
ret = merge_ret;
goto end;
}
+
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ /* There is always a cast-to-s64 operation before a or/and op. */
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ break;
+ default:
+ ERR("Incorrect register type %d for operation\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
+
/* Continue to next instruction */
/* Pop 1 when jump not taken */
if (vstack_pop(stack)) {
break;
}
+ case FILTER_OP_LOAD_STAR_GLOB_STRING:
+ {
+ struct load_op *insn = (struct load_op *) pc;
+
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
+ next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+ break;
+ }
+
case FILTER_OP_LOAD_S64:
{
if (vstack_push(stack)) {
ret = -EINVAL;
goto end;
}
+ switch (vstack_ax(stack)->type) {
+ case REG_S64:
+ case REG_DOUBLE:
+ case REG_UNKNOWN:
+ break;
+ default:
+ ERR("Incorrect register type %d for cast\n",
+ (int) vstack_ax(stack)->type);
+ ret = -EINVAL;
+ goto end;
+ }
vstack_ax(stack)->type = REG_S64;
next_pc += sizeof(struct cast_op);
break;
break;
}
+ /*
+ * Instructions for recursive traversal through composed types.
+ */
+ case FILTER_OP_GET_CONTEXT_ROOT:
+ case FILTER_OP_GET_APP_CONTEXT_ROOT:
+ case FILTER_OP_GET_PAYLOAD_ROOT:
+ {
+ if (vstack_push(stack)) {
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_PTR;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_UNKNOWN;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD_S8:
+ case FILTER_OP_LOAD_FIELD_S16:
+ case FILTER_OP_LOAD_FIELD_S32:
+ case FILTER_OP_LOAD_FIELD_S64:
+ case FILTER_OP_LOAD_FIELD_U8:
+ case FILTER_OP_LOAD_FIELD_U16:
+ case FILTER_OP_LOAD_FIELD_U32:
+ case FILTER_OP_LOAD_FIELD_U64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_S64;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD_STRING:
+ case FILTER_OP_LOAD_FIELD_SEQUENCE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_STRING;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_LOAD_FIELD_DOUBLE:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ vstack_ax(stack)->type = REG_DOUBLE;
+ next_pc += sizeof(struct load_op);
+ break;
+ }
+
+ case FILTER_OP_GET_SYMBOL:
+ case FILTER_OP_GET_SYMBOL_FIELD:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+ break;
+ }
+
+ case FILTER_OP_GET_INDEX_U16:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+ break;
+ }
+
+ case FILTER_OP_GET_INDEX_U64:
+ {
+ /* Pop 1, push 1 */
+ if (!vstack_ax(stack)) {
+ ERR("Empty stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ if (vstack_ax(stack)->type != REG_PTR) {
+ ERR("Expecting pointer on top of stack\n");
+ ret = -EINVAL;
+ goto end;
+ }
+ next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+ break;
+ }
+
}
end:
*_next_pc = next_pc;
int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
{
struct cds_lfht *merge_points;
- void *pc, *next_pc, *start_pc;
+ char *pc, *next_pc, *start_pc;
int ret = -EINVAL;
struct vstack stack;
ERR("Error allocating hash table for bytecode validation\n");
return -ENOMEM;
}
- start_pc = &bytecode->data[0];
+ start_pc = &bytecode->code[0];
for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
pc = next_pc) {
ret = bytecode_validate_overflow(bytecode, start_pc, pc);