X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=liblttng-ust%2Flttng-filter-validator.c;h=e6982369bdc1d8c8e90c4c9d8ff29c6f9b20c16a;hb=ab249ecfea7ddc352e1fb5c3b97a4f0fbb62f3ca;hp=dc50774de5b6deb6c20efdcd9c7f3cc905b3bb07;hpb=53569322d40ed45abe0368ddb08eb4a2738afc37;p=lttng-ust.git
diff --git a/liblttng-ust/lttng-filter-validator.c b/liblttng-ust/lttng-filter-validator.c
index dc50774d..e6982369 100644
--- a/liblttng-ust/lttng-filter-validator.c
+++ b/liblttng-ust/lttng-filter-validator.c
@@ -3,30 +3,39 @@
  *
  * LTTng UST filter bytecode validator.
  *
- * Copyright (C) 2010-2012 Mathieu Desnoyers
+ * Copyright (C) 2010-2016 Mathieu Desnoyers
  *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; only
- * version 2.1 of the License.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
  *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
  *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
  */
 #define _LGPL_SOURCE
-#include
+#include
+#include
 #include
-#include "lttng-filter.h"
+#include
 #include
+
+#include "lttng-filter.h"
 #include "lttng-hash-helper.h"
+#include "string-utils.h"
+#include "ust-events-internal.h"
 /*
  * Number of merge points for hash table size.
Hash table initialized to @@ -87,7 +96,7 @@ int merge_point_add_check(struct cds_lfht *ht, unsigned long target_pc, const struct vstack *stack) { struct lfht_mp_node *node; - unsigned long hash = lttng_hash_mix((const void *) target_pc, + unsigned long hash = lttng_hash_mix((const char *) target_pc, sizeof(target_pc), lttng_hash_seed); struct cds_lfht_node *ret; @@ -100,7 +109,7 @@ int merge_point_add_check(struct cds_lfht *ht, unsigned long target_pc, node->target_pc = target_pc; memcpy(&node->stack, stack, sizeof(node->stack)); ret = cds_lfht_add_unique(ht, hash, lttng_hash_match, - (const void *) target_pc, &node->node); + (const char *) target_pc, &node->node); if (ret != &node->node) { struct lfht_mp_node *ret_mp = caa_container_of(ret, struct lfht_mp_node, node); @@ -124,7 +133,8 @@ int merge_point_add_check(struct cds_lfht *ht, unsigned long target_pc, * (unknown), negative error value on error. */ static -int bin_op_compare_check(struct vstack *stack, const char *str) +int bin_op_compare_check(struct vstack *stack, filter_opcode_t opcode, + const char *str) { if (unlikely(!vstack_ax(stack) || !vstack_bx(stack))) goto error_empty; @@ -144,12 +154,38 @@ int bin_op_compare_check(struct vstack *stack, const char *str) goto unknown; case REG_STRING: break; + case REG_STAR_GLOB_STRING: + if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) { + goto error_mismatch; + } + break; + case REG_S64: + case REG_U64: + case REG_DOUBLE: + goto error_mismatch; + } + break; + case REG_STAR_GLOB_STRING: + switch (vstack_bx(stack)->type) { + default: + goto error_type; + + case REG_UNKNOWN: + goto unknown; + case REG_STRING: + if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) { + goto error_mismatch; + } + break; + case REG_STAR_GLOB_STRING: case REG_S64: + case REG_U64: case REG_DOUBLE: goto error_mismatch; } break; case REG_S64: + case REG_U64: case REG_DOUBLE: switch (vstack_bx(stack)->type) { default: @@ -158,8 +194,10 @@ int bin_op_compare_check(struct vstack *stack, const char *str) case REG_UNKNOWN: goto unknown; case REG_STRING: + case REG_STAR_GLOB_STRING: goto error_mismatch; case REG_S64: + case REG_U64: case REG_DOUBLE: break; } @@ -183,13 +221,77 @@ error_type: return -EINVAL; } +/* + * Binary bitwise operators use top of stack and top of stack -1. + * Return 0 if typing is known to match, 1 if typing is dynamic + * (unknown), negative error value on error. 
+ */ +static +int bin_op_bitwise_check(struct vstack *stack, filter_opcode_t opcode, + const char *str) +{ + if (unlikely(!vstack_ax(stack) || !vstack_bx(stack))) + goto error_empty; + + switch (vstack_ax(stack)->type) { + default: + goto error_type; + + case REG_UNKNOWN: + goto unknown; + case REG_S64: + case REG_U64: + switch (vstack_bx(stack)->type) { + default: + goto error_type; + + case REG_UNKNOWN: + goto unknown; + case REG_S64: + case REG_U64: + break; + } + break; + } + return 0; + +unknown: + return 1; + +error_empty: + ERR("empty stack for '%s' binary operator\n", str); + return -EINVAL; + +error_type: + ERR("unknown type for '%s' binary operator\n", str); + return -EINVAL; +} + +static +int validate_get_symbol(struct bytecode_runtime *bytecode, + const struct get_symbol *sym) +{ + const char *str, *str_limit; + size_t len_limit; + + if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset) + return -EINVAL; + + str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset; + str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len; + len_limit = str_limit - str; + if (strnlen(str, len_limit) == len_limit) + return -EINVAL; + return 0; +} + /* * Validate bytecode range overflow within the validation pass. * Called for each instruction encountered. */ static int bytecode_validate_overflow(struct bytecode_runtime *bytecode, - void *start_pc, void *pc) + char *start_pc, char *pc) { int ret = 0; @@ -204,6 +306,7 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode, } case FILTER_OP_RETURN: + case FILTER_OP_RETURN_S64: { if (unlikely(pc + sizeof(struct return_op) > start_pc + bytecode->len)) { @@ -218,11 +321,6 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode, case FILTER_OP_MOD: case FILTER_OP_PLUS: case FILTER_OP_MINUS: - case FILTER_OP_RSHIFT: - case FILTER_OP_LSHIFT: - case FILTER_OP_BIN_AND: - case FILTER_OP_BIN_OR: - case FILTER_OP_BIN_XOR: { ERR("unsupported bytecode op %u\n", (unsigned int) *(filter_opcode_t *) pc); @@ -242,6 +340,8 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode, case FILTER_OP_LT_STRING: case FILTER_OP_GE_STRING: case FILTER_OP_LE_STRING: + case FILTER_OP_EQ_STAR_GLOB_STRING: + case FILTER_OP_NE_STAR_GLOB_STRING: case FILTER_OP_EQ_S64: case FILTER_OP_NE_S64: case FILTER_OP_GT_S64: @@ -266,6 +366,11 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode, case FILTER_OP_LT_S64_DOUBLE: case FILTER_OP_GE_S64_DOUBLE: case FILTER_OP_LE_S64_DOUBLE: + case FILTER_OP_BIT_RSHIFT: + case FILTER_OP_BIT_LSHIFT: + case FILTER_OP_BIT_AND: + case FILTER_OP_BIT_OR: + case FILTER_OP_BIT_XOR: { if (unlikely(pc + sizeof(struct binary_op) > start_pc + bytecode->len)) { @@ -284,6 +389,7 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode, case FILTER_OP_UNARY_PLUS_DOUBLE: case FILTER_OP_UNARY_MINUS_DOUBLE: case FILTER_OP_UNARY_NOT_DOUBLE: + case FILTER_OP_UNARY_BIT_NOT: { if (unlikely(pc + sizeof(struct unary_op) > start_pc + bytecode->len)) { @@ -310,6 +416,7 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode, ret = -EINVAL; break; } + /* get context ref */ case FILTER_OP_GET_CONTEXT_REF: case FILTER_OP_LOAD_FIELD_REF_STRING: @@ -329,6 +436,7 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode, /* load from immediate operand */ case FILTER_OP_LOAD_STRING: + case FILTER_OP_LOAD_STAR_GLOB_STRING: { struct load_op *insn = (struct load_op *) pc; uint32_t str_len, maxlen; @@ -377,6 +485,62 @@ int bytecode_validate_overflow(struct 
bytecode_runtime *bytecode, break; } + /* + * Instructions for recursive traversal through composed types. + */ + case FILTER_OP_GET_CONTEXT_ROOT: + case FILTER_OP_GET_APP_CONTEXT_ROOT: + case FILTER_OP_GET_PAYLOAD_ROOT: + case FILTER_OP_LOAD_FIELD: + case FILTER_OP_LOAD_FIELD_S8: + case FILTER_OP_LOAD_FIELD_S16: + case FILTER_OP_LOAD_FIELD_S32: + case FILTER_OP_LOAD_FIELD_S64: + case FILTER_OP_LOAD_FIELD_U8: + case FILTER_OP_LOAD_FIELD_U16: + case FILTER_OP_LOAD_FIELD_U32: + case FILTER_OP_LOAD_FIELD_U64: + case FILTER_OP_LOAD_FIELD_STRING: + case FILTER_OP_LOAD_FIELD_SEQUENCE: + case FILTER_OP_LOAD_FIELD_DOUBLE: + if (unlikely(pc + sizeof(struct load_op) + > start_pc + bytecode->len)) { + ret = -ERANGE; + } + break; + + case FILTER_OP_GET_SYMBOL: + { + struct load_op *insn = (struct load_op *) pc; + struct get_symbol *sym = (struct get_symbol *) insn->data; + + if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol) + > start_pc + bytecode->len)) { + ret = -ERANGE; + break; + } + ret = validate_get_symbol(bytecode, sym); + break; + } + + case FILTER_OP_GET_SYMBOL_FIELD: + ERR("Unexpected get symbol field"); + ret = -EINVAL; + break; + + case FILTER_OP_GET_INDEX_U16: + if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16) + > start_pc + bytecode->len)) { + ret = -ERANGE; + } + break; + + case FILTER_OP_GET_INDEX_U64: + if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64) + > start_pc + bytecode->len)) { + ret = -ERANGE; + } + break; } return ret; @@ -409,12 +573,13 @@ unsigned long delete_all_nodes(struct cds_lfht *ht) static int validate_instruction_context(struct bytecode_runtime *bytecode, struct vstack *stack, - void *start_pc, - void *pc) + char *start_pc, + char *pc) { int ret = 0; + const filter_opcode_t opcode = *(filter_opcode_t *) pc; - switch (*(filter_opcode_t *) pc) { + switch (opcode) { case FILTER_OP_UNKNOWN: default: { @@ -425,6 +590,7 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, } case FILTER_OP_RETURN: + case FILTER_OP_RETURN_S64: { goto end; } @@ -435,56 +601,51 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, case FILTER_OP_MOD: case FILTER_OP_PLUS: case FILTER_OP_MINUS: - case FILTER_OP_RSHIFT: - case FILTER_OP_LSHIFT: - case FILTER_OP_BIN_AND: - case FILTER_OP_BIN_OR: - case FILTER_OP_BIN_XOR: { ERR("unsupported bytecode op %u\n", - (unsigned int) *(filter_opcode_t *) pc); + (unsigned int) opcode); ret = -EINVAL; goto end; } case FILTER_OP_EQ: { - ret = bin_op_compare_check(stack, "=="); + ret = bin_op_compare_check(stack, opcode, "=="); if (ret < 0) goto end; break; } case FILTER_OP_NE: { - ret = bin_op_compare_check(stack, "!="); + ret = bin_op_compare_check(stack, opcode, "!="); if (ret < 0) goto end; break; } case FILTER_OP_GT: { - ret = bin_op_compare_check(stack, ">"); + ret = bin_op_compare_check(stack, opcode, ">"); if (ret < 0) goto end; break; } case FILTER_OP_LT: { - ret = bin_op_compare_check(stack, "<"); + ret = bin_op_compare_check(stack, opcode, "<"); if (ret < 0) goto end; break; } case FILTER_OP_GE: { - ret = bin_op_compare_check(stack, ">="); + ret = bin_op_compare_check(stack, opcode, ">="); if (ret < 0) goto end; break; } case FILTER_OP_LE: { - ret = bin_op_compare_check(stack, "<="); + ret = bin_op_compare_check(stack, opcode, "<="); if (ret < 0) goto end; break; @@ -511,6 +672,23 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, break; } + case FILTER_OP_EQ_STAR_GLOB_STRING: + case FILTER_OP_NE_STAR_GLOB_STRING: + { + if 
(!vstack_ax(stack) || !vstack_bx(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING + && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) { + ERR("Unexpected register type for globbing pattern comparator\n"); + ret = -EINVAL; + goto end; + } + break; + } + case FILTER_OP_EQ_S64: case FILTER_OP_NE_S64: case FILTER_OP_GT_S64: @@ -523,8 +701,20 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, ret = -EINVAL; goto end; } - if (vstack_ax(stack)->type != REG_S64 - || vstack_bx(stack)->type != REG_S64) { + switch (vstack_ax(stack)->type) { + case REG_S64: + case REG_U64: + break; + default: + ERR("Unexpected register type for s64 comparator\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_bx(stack)->type) { + case REG_S64: + case REG_U64: + break; + default: ERR("Unexpected register type for s64 comparator\n"); ret = -EINVAL; goto end; @@ -564,7 +754,19 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, ret = -EINVAL; goto end; } - if (vstack_ax(stack)->type != REG_S64 && vstack_bx(stack)->type != REG_DOUBLE) { + switch (vstack_ax(stack)->type) { + case REG_S64: + case REG_U64: + break; + default: + ERR("Double-S64 operator has unexpected register types\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_bx(stack)->type) { + case REG_DOUBLE: + break; + default: ERR("Double-S64 operator has unexpected register types\n"); ret = -EINVAL; goto end; @@ -584,7 +786,19 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, ret = -EINVAL; goto end; } - if (vstack_ax(stack)->type != REG_DOUBLE && vstack_bx(stack)->type != REG_S64) { + switch (vstack_ax(stack)->type) { + case REG_DOUBLE: + break; + default: + ERR("S64-Double operator has unexpected register types\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_bx(stack)->type) { + case REG_S64: + case REG_U64: + break; + default: ERR("S64-Double operator has unexpected register types\n"); ret = -EINVAL; goto end; @@ -592,6 +806,32 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, break; } + case FILTER_OP_BIT_RSHIFT: + ret = bin_op_bitwise_check(stack, opcode, ">>"); + if (ret < 0) + goto end; + break; + case FILTER_OP_BIT_LSHIFT: + ret = bin_op_bitwise_check(stack, opcode, "<<"); + if (ret < 0) + goto end; + break; + case FILTER_OP_BIT_AND: + ret = bin_op_bitwise_check(stack, opcode, "&"); + if (ret < 0) + goto end; + break; + case FILTER_OP_BIT_OR: + ret = bin_op_bitwise_check(stack, opcode, "|"); + if (ret < 0) + goto end; + break; + case FILTER_OP_BIT_XOR: + ret = bin_op_bitwise_check(stack, opcode, "^"); + if (ret < 0) + goto end; + break; + /* unary */ case FILTER_OP_UNARY_PLUS: case FILTER_OP_UNARY_MINUS: @@ -609,12 +849,43 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, goto end; case REG_STRING: + case REG_STAR_GLOB_STRING: ERR("Unary op can only be applied to numeric or floating point registers\n"); ret = -EINVAL; goto end; case REG_S64: break; + case REG_U64: + break; + case REG_DOUBLE: + break; + case REG_UNKNOWN: + break; + } + break; + } + case FILTER_OP_UNARY_BIT_NOT: + { + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + default: + ERR("unknown register type\n"); + ret = -EINVAL; + goto end; + + case REG_STRING: + case REG_STAR_GLOB_STRING: case REG_DOUBLE: + ERR("Unary bitwise op can only be applied to numeric registers\n"); + ret = -EINVAL; + goto end; + case REG_S64: + break; + case 
REG_U64: break; case REG_UNKNOWN: break; @@ -631,7 +902,8 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, ret = -EINVAL; goto end; } - if (vstack_ax(stack)->type != REG_S64) { + if (vstack_ax(stack)->type != REG_S64 && + vstack_ax(stack)->type != REG_U64) { ERR("Invalid register type\n"); ret = -EINVAL; goto end; @@ -668,8 +940,9 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, goto end; } if (vstack_ax(stack)->type != REG_S64 + && vstack_ax(stack)->type != REG_U64 && vstack_ax(stack)->type != REG_UNKNOWN) { - ERR("Logical comparator expects S64 or dynamic register\n"); + ERR("Logical comparator expects S64, U64 or dynamic register\n"); ret = -EINVAL; goto end; } @@ -722,6 +995,7 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, /* load from immediate operand */ case FILTER_OP_LOAD_STRING: + case FILTER_OP_LOAD_STAR_GLOB_STRING: { break; } @@ -753,11 +1027,14 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, goto end; case REG_STRING: + case REG_STAR_GLOB_STRING: ERR("Cast op can only be applied to numeric or floating point registers\n"); ret = -EINVAL; goto end; case REG_S64: break; + case REG_U64: + break; case REG_DOUBLE: break; case REG_UNKNOWN: @@ -815,75 +1092,194 @@ int validate_instruction_context(struct bytecode_runtime *bytecode, break; } + /* + * Instructions for recursive traversal through composed types. + */ + case FILTER_OP_GET_CONTEXT_ROOT: + { + dbg_printf("Validate get context root\n"); + break; } -end: - return ret; -} - -/* - * Return value: - * 0: success - * <0: error - */ -static -int validate_instruction_all_contexts(struct bytecode_runtime *bytecode, - struct cds_lfht *merge_points, - struct vstack *stack, - void *start_pc, - void *pc) -{ - int ret; - unsigned long target_pc = pc - start_pc; - struct cds_lfht_iter iter; - struct cds_lfht_node *node; - struct lfht_mp_node *mp_node; - unsigned long hash; - - /* Validate the context resulting from the previous instruction */ - ret = validate_instruction_context(bytecode, stack, start_pc, pc); - if (ret < 0) - return ret; - - /* Validate merge points */ - hash = lttng_hash_mix((const void *) target_pc, sizeof(target_pc), - lttng_hash_seed); - cds_lfht_lookup(merge_points, hash, lttng_hash_match, - (const void *) target_pc, &iter); - node = cds_lfht_iter_get_node(&iter); - if (node) { - mp_node = caa_container_of(node, struct lfht_mp_node, node); - - dbg_printf("Filter: validate merge point at offset %lu\n", - target_pc); - if (merge_points_compare(stack, &mp_node->stack)) { - ERR("Merge points differ for offset %lu\n", - target_pc); - return -EINVAL; - } - /* Once validated, we can remove the merge point */ - dbg_printf("Filter: remove merge point at offset %lu\n", - target_pc); - ret = cds_lfht_del(merge_points, node); - assert(!ret); + case FILTER_OP_GET_APP_CONTEXT_ROOT: + { + dbg_printf("Validate get app context root\n"); + break; } - return 0; -} - -/* - * Return value: - * >0: going to next insn. - * 0: success, stop iteration. - * <0: error - */ -static -int exec_insn(struct bytecode_runtime *bytecode, - struct cds_lfht *merge_points, - struct vstack *stack, - void **_next_pc, - void *pc) + case FILTER_OP_GET_PAYLOAD_ROOT: + { + dbg_printf("Validate get payload root\n"); + break; + } + case FILTER_OP_LOAD_FIELD: + { + /* + * We tolerate that field type is unknown at validation, + * because we are performing the load specialization in + * a phase after validation. 
+ */ + dbg_printf("Validate load field\n"); + break; + } + case FILTER_OP_LOAD_FIELD_S8: + { + dbg_printf("Validate load field s8\n"); + break; + } + case FILTER_OP_LOAD_FIELD_S16: + { + dbg_printf("Validate load field s16\n"); + break; + } + case FILTER_OP_LOAD_FIELD_S32: + { + dbg_printf("Validate load field s32\n"); + break; + } + case FILTER_OP_LOAD_FIELD_S64: + { + dbg_printf("Validate load field s64\n"); + break; + } + case FILTER_OP_LOAD_FIELD_U8: + { + dbg_printf("Validate load field u8\n"); + break; + } + case FILTER_OP_LOAD_FIELD_U16: + { + dbg_printf("Validate load field u16\n"); + break; + } + case FILTER_OP_LOAD_FIELD_U32: + { + dbg_printf("Validate load field u32\n"); + break; + } + case FILTER_OP_LOAD_FIELD_U64: + { + dbg_printf("Validate load field u64\n"); + break; + } + case FILTER_OP_LOAD_FIELD_STRING: + { + dbg_printf("Validate load field string\n"); + break; + } + case FILTER_OP_LOAD_FIELD_SEQUENCE: + { + dbg_printf("Validate load field sequence\n"); + break; + } + case FILTER_OP_LOAD_FIELD_DOUBLE: + { + dbg_printf("Validate load field double\n"); + break; + } + + case FILTER_OP_GET_SYMBOL: + { + struct load_op *insn = (struct load_op *) pc; + struct get_symbol *sym = (struct get_symbol *) insn->data; + + dbg_printf("Validate get symbol offset %u\n", sym->offset); + break; + } + + case FILTER_OP_GET_SYMBOL_FIELD: + { + struct load_op *insn = (struct load_op *) pc; + struct get_symbol *sym = (struct get_symbol *) insn->data; + + dbg_printf("Validate get symbol field offset %u\n", sym->offset); + break; + } + + case FILTER_OP_GET_INDEX_U16: + { + struct load_op *insn = (struct load_op *) pc; + struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data; + + dbg_printf("Validate get index u16 index %u\n", get_index->index); + break; + } + + case FILTER_OP_GET_INDEX_U64: + { + struct load_op *insn = (struct load_op *) pc; + struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data; + + dbg_printf("Validate get index u64 index %" PRIu64 "\n", get_index->index); + break; + } + } +end: + return ret; +} + +/* + * Return value: + * 0: success + * <0: error + */ +static +int validate_instruction_all_contexts(struct bytecode_runtime *bytecode, + struct cds_lfht *merge_points, + struct vstack *stack, + char *start_pc, + char *pc) +{ + int ret; + unsigned long target_pc = pc - start_pc; + struct cds_lfht_iter iter; + struct cds_lfht_node *node; + struct lfht_mp_node *mp_node; + unsigned long hash; + + /* Validate the context resulting from the previous instruction */ + ret = validate_instruction_context(bytecode, stack, start_pc, pc); + if (ret < 0) + return ret; + + /* Validate merge points */ + hash = lttng_hash_mix((const char *) target_pc, sizeof(target_pc), + lttng_hash_seed); + cds_lfht_lookup(merge_points, hash, lttng_hash_match, + (const char *) target_pc, &iter); + node = cds_lfht_iter_get_node(&iter); + if (node) { + mp_node = caa_container_of(node, struct lfht_mp_node, node); + + dbg_printf("Filter: validate merge point at offset %lu\n", + target_pc); + if (merge_points_compare(stack, &mp_node->stack)) { + ERR("Merge points differ for offset %lu\n", + target_pc); + return -EINVAL; + } + /* Once validated, we can remove the merge point */ + dbg_printf("Filter: remove merge point at offset %lu\n", + target_pc); + ret = cds_lfht_del(merge_points, node); + assert(!ret); + } + return 0; +} + +/* + * Return value: + * >0: going to next insn. + * 0: success, stop iteration. 
+ * <0: error + */ +static +int exec_insn(struct bytecode_runtime *bytecode, + struct cds_lfht *merge_points, + struct vstack *stack, + char **_next_pc, + char *pc) { int ret = 1; - void *next_pc = *_next_pc; + char *next_pc = *_next_pc; switch (*(filter_opcode_t *) pc) { case FILTER_OP_UNKNOWN: @@ -902,6 +1298,43 @@ int exec_insn(struct bytecode_runtime *bytecode, ret = -EINVAL; goto end; } + switch (vstack_ax(stack)->type) { + case REG_S64: + case REG_U64: + case REG_DOUBLE: + case REG_STRING: + case REG_PTR: + case REG_UNKNOWN: + break; + default: + ERR("Unexpected register type %d at end of bytecode\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + + ret = 0; + goto end; + } + case FILTER_OP_RETURN_S64: + { + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + case REG_S64: + case REG_U64: + break; + default: + case REG_UNKNOWN: + ERR("Unexpected register type %d at end of bytecode\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + ret = 0; goto end; } @@ -912,11 +1345,6 @@ int exec_insn(struct bytecode_runtime *bytecode, case FILTER_OP_MOD: case FILTER_OP_PLUS: case FILTER_OP_MINUS: - case FILTER_OP_RSHIFT: - case FILTER_OP_LSHIFT: - case FILTER_OP_BIN_AND: - case FILTER_OP_BIN_OR: - case FILTER_OP_BIN_XOR: { ERR("unsupported bytecode op %u\n", (unsigned int) *(filter_opcode_t *) pc); @@ -936,6 +1364,8 @@ int exec_insn(struct bytecode_runtime *bytecode, case FILTER_OP_LT_STRING: case FILTER_OP_GE_STRING: case FILTER_OP_LE_STRING: + case FILTER_OP_EQ_STAR_GLOB_STRING: + case FILTER_OP_NE_STAR_GLOB_STRING: case FILTER_OP_EQ_S64: case FILTER_OP_NE_S64: case FILTER_OP_GT_S64: @@ -971,11 +1401,62 @@ int exec_insn(struct bytecode_runtime *bytecode, ret = -EINVAL; goto end; } + switch (vstack_ax(stack)->type) { + case REG_S64: + case REG_U64: + case REG_DOUBLE: + case REG_STRING: + case REG_STAR_GLOB_STRING: + case REG_UNKNOWN: + break; + default: + ERR("Unexpected register type %d for operation\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_S64; next_pc += sizeof(struct binary_op); break; } + case FILTER_OP_BIT_RSHIFT: + case FILTER_OP_BIT_LSHIFT: + case FILTER_OP_BIT_AND: + case FILTER_OP_BIT_OR: + case FILTER_OP_BIT_XOR: + { + /* Pop 2, push 1 */ + if (vstack_pop(stack)) { + ret = -EINVAL; + goto end; + } + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + case REG_S64: + case REG_U64: + case REG_DOUBLE: + case REG_STRING: + case REG_STAR_GLOB_STRING: + case REG_UNKNOWN: + break; + default: + ERR("Unexpected register type %d for operation\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + + vstack_ax(stack)->type = REG_U64; + next_pc += sizeof(struct binary_op); + break; + } + /* unary */ case FILTER_OP_UNARY_PLUS: case FILTER_OP_UNARY_MINUS: @@ -986,6 +1467,18 @@ int exec_insn(struct bytecode_runtime *bytecode, ret = -EINVAL; goto end; } + switch (vstack_ax(stack)->type) { + case REG_UNKNOWN: + case REG_DOUBLE: + case REG_S64: + case REG_U64: + break; + default: + ERR("Unexpected register type %d for operation\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } vstack_ax(stack)->type = REG_UNKNOWN; next_pc += sizeof(struct unary_op); break; @@ -993,8 +1486,80 @@ int exec_insn(struct bytecode_runtime *bytecode, case FILTER_OP_UNARY_PLUS_S64: case FILTER_OP_UNARY_MINUS_S64: - case FILTER_OP_UNARY_NOT: case 
FILTER_OP_UNARY_NOT_S64: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + case REG_S64: + case REG_U64: + break; + default: + ERR("Unexpected register type %d for operation\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + + next_pc += sizeof(struct unary_op); + break; + } + + case FILTER_OP_UNARY_NOT: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + case REG_UNKNOWN: + case REG_DOUBLE: + case REG_S64: + case REG_U64: + break; + default: + ERR("Unexpected register type %d for operation\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + + next_pc += sizeof(struct unary_op); + break; + } + + case FILTER_OP_UNARY_BIT_NOT: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + switch (vstack_ax(stack)->type) { + case REG_UNKNOWN: + case REG_S64: + case REG_U64: + break; + case REG_DOUBLE: + default: + ERR("Unexpected register type %d for operation\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + + vstack_ax(stack)->type = REG_U64; + next_pc += sizeof(struct unary_op); + break; + } + case FILTER_OP_UNARY_NOT_DOUBLE: { /* Pop 1, push 1 */ @@ -1003,6 +1568,16 @@ int exec_insn(struct bytecode_runtime *bytecode, ret = -EINVAL; goto end; } + switch (vstack_ax(stack)->type) { + case REG_DOUBLE: + break; + default: + ERR("Incorrect register type %d for operation\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_S64; next_pc += sizeof(struct unary_op); break; @@ -1017,6 +1592,16 @@ int exec_insn(struct bytecode_runtime *bytecode, ret = -EINVAL; goto end; } + switch (vstack_ax(stack)->type) { + case REG_DOUBLE: + break; + default: + ERR("Incorrect register type %d for operation\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_DOUBLE; next_pc += sizeof(struct unary_op); break; @@ -1036,6 +1621,24 @@ int exec_insn(struct bytecode_runtime *bytecode, ret = merge_ret; goto end; } + + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + /* There is always a cast-to-s64 operation before a or/and op. 
*/ + switch (vstack_ax(stack)->type) { + case REG_S64: + case REG_U64: + break; + default: + ERR("Incorrect register type %d for operation\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } + /* Continue to next instruction */ /* Pop 1 when jump not taken */ if (vstack_pop(stack)) { @@ -1113,6 +1716,19 @@ int exec_insn(struct bytecode_runtime *bytecode, break; } + case FILTER_OP_LOAD_STAR_GLOB_STRING: + { + struct load_op *insn = (struct load_op *) pc; + + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_STAR_GLOB_STRING; + next_pc += sizeof(struct load_op) + strlen(insn->data) + 1; + break; + } + case FILTER_OP_LOAD_S64: { if (vstack_push(stack)) { @@ -1146,6 +1762,18 @@ int exec_insn(struct bytecode_runtime *bytecode, ret = -EINVAL; goto end; } + switch (vstack_ax(stack)->type) { + case REG_S64: + case REG_U64: + case REG_DOUBLE: + case REG_UNKNOWN: + break; + default: + ERR("Incorrect register type %d for cast\n", + (int) vstack_ax(stack)->type); + ret = -EINVAL; + goto end; + } vstack_ax(stack)->type = REG_S64; next_pc += sizeof(struct cast_op); break; @@ -1156,6 +1784,171 @@ int exec_insn(struct bytecode_runtime *bytecode, break; } + /* + * Instructions for recursive traversal through composed types. + */ + case FILTER_OP_GET_CONTEXT_ROOT: + case FILTER_OP_GET_APP_CONTEXT_ROOT: + case FILTER_OP_GET_PAYLOAD_ROOT: + { + if (vstack_push(stack)) { + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_PTR; + next_pc += sizeof(struct load_op); + break; + } + + case FILTER_OP_LOAD_FIELD: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_PTR) { + ERR("Expecting pointer on top of stack\n"); + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_UNKNOWN; + next_pc += sizeof(struct load_op); + break; + } + + case FILTER_OP_LOAD_FIELD_S8: + case FILTER_OP_LOAD_FIELD_S16: + case FILTER_OP_LOAD_FIELD_S32: + case FILTER_OP_LOAD_FIELD_S64: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_PTR) { + ERR("Expecting pointer on top of stack\n"); + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_S64; + next_pc += sizeof(struct load_op); + break; + } + + case FILTER_OP_LOAD_FIELD_U8: + case FILTER_OP_LOAD_FIELD_U16: + case FILTER_OP_LOAD_FIELD_U32: + case FILTER_OP_LOAD_FIELD_U64: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_PTR) { + ERR("Expecting pointer on top of stack\n"); + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_U64; + next_pc += sizeof(struct load_op); + break; + } + + case FILTER_OP_LOAD_FIELD_STRING: + case FILTER_OP_LOAD_FIELD_SEQUENCE: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_PTR) { + ERR("Expecting pointer on top of stack\n"); + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_STRING; + next_pc += sizeof(struct load_op); + break; + } + + case FILTER_OP_LOAD_FIELD_DOUBLE: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_PTR) { + ERR("Expecting pointer on top of stack\n"); + ret = -EINVAL; + goto end; + } + vstack_ax(stack)->type = REG_DOUBLE; + next_pc += sizeof(struct load_op); + 
break; + } + + case FILTER_OP_GET_SYMBOL: + case FILTER_OP_GET_SYMBOL_FIELD: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_PTR) { + ERR("Expecting pointer on top of stack\n"); + ret = -EINVAL; + goto end; + } + next_pc += sizeof(struct load_op) + sizeof(struct get_symbol); + break; + } + + case FILTER_OP_GET_INDEX_U16: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_PTR) { + ERR("Expecting pointer on top of stack\n"); + ret = -EINVAL; + goto end; + } + next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16); + break; + } + + case FILTER_OP_GET_INDEX_U64: + { + /* Pop 1, push 1 */ + if (!vstack_ax(stack)) { + ERR("Empty stack\n"); + ret = -EINVAL; + goto end; + } + if (vstack_ax(stack)->type != REG_PTR) { + ERR("Expecting pointer on top of stack\n"); + ret = -EINVAL; + goto end; + } + next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64); + break; + } + } end: *_next_pc = next_pc; @@ -1168,7 +1961,7 @@ end: int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode) { struct cds_lfht *merge_points; - void *pc, *next_pc, *start_pc; + char *pc, *next_pc, *start_pc; int ret = -EINVAL; struct vstack stack; @@ -1191,7 +1984,7 @@ int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode) ERR("Error allocating hash table for bytecode validation\n"); return -ENOMEM; } - start_pc = &bytecode->data[0]; + start_pc = &bytecode->code[0]; for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; pc = next_pc) { ret = bytecode_validate_overflow(bytecode, start_pc, pc);
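
[Editor's note — illustrative sketch, not part of the patch above.] The new FILTER_OP_BIT_RSHIFT/LSHIFT/AND/OR/XOR opcodes are checked in two places in this patch: bin_op_bitwise_check() (called from validate_instruction_context()) verifies that both stack operands are numeric (REG_S64/REG_U64) or dynamically typed, and exec_insn() then pops two entries and pushes a REG_U64 result, since the bitwise operators are defined on the unsigned 64-bit representation. The self-contained C sketch below models that typing rule with a simplified stack; the names (reg_type, struct vstack here, check_bitwise, STACK_DEPTH) are invented stand-ins for the REG_* enums and vstack helpers in lttng-ust, not the real API.

    #include <stdio.h>

    enum reg_type { REG_S64, REG_U64, REG_DOUBLE, REG_STRING, REG_UNKNOWN };

    #define STACK_DEPTH 8

    struct vstack {
            int top;                        /* index of ax; -1 when empty */
            enum reg_type e[STACK_DEPTH];
    };

    /*
     * Model of the bitwise-operator rule: both operands must be numeric
     * (or dynamically typed), and the result is typed as unsigned 64-bit.
     */
    static int check_bitwise(struct vstack *s)
    {
            int i;

            if (s->top < 1)
                    return -1;              /* need both ax and bx */
            for (i = 0; i < 2; i++) {
                    switch (s->e[s->top - i]) {
                    case REG_S64:
                    case REG_U64:
                    case REG_UNKNOWN:
                            break;          /* accepted operand type */
                    default:
                            return -1;      /* string/double operand: reject */
                    }
            }
            s->top--;                       /* pop 2, push 1 */
            s->e[s->top] = REG_U64;         /* bitwise result is unsigned */
            return 0;
    }

    int main(void)
    {
            struct vstack s = { .top = 1, .e = { REG_S64, REG_U64 } };

            printf("s64 & u64    -> %s\n", check_bitwise(&s) ? "reject" : "ok");

            s.top = 1;
            s.e[0] = REG_S64;
            s.e[1] = REG_STRING;
            printf("s64 & string -> %s\n", check_bitwise(&s) ? "reject" : "ok");
            return 0;
    }

Compiled standalone (for example, gcc -o bitcheck bitcheck.c, a hypothetical file name), the first call accepts the S64/U64 operand pair and retypes the top of stack to U64, while the second rejects the string operand, mirroring the error paths the validator takes in the hunks above.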