Fix: bytecode validator: reject specialized load field/context ref instructions
authorMathieu Desnoyers <mathieu.desnoyers@efficios.com>
Fri, 30 Sep 2022 15:38:57 +0000 (11:38 -0400)
committerMathieu Desnoyers <mathieu.desnoyers@efficios.com>
Fri, 30 Sep 2022 16:01:03 +0000 (12:01 -0400)
Reject specialized load ref and get context ref instructions so a
bytecode crafted with nefarious intent cannot read a memory area larger
than the memory targeted by the instrumentation.

This prevents bytecode received from the session daemon from performing
out of bound memory accesses and from disclosing the content of
application memory beyond what has been targeted by the instrumentation.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Change-Id: I9bb027e58312c125aa4a9cba5d8f4b5ceb31f4f6

liblttng-ust/lttng-filter-validator.c
liblttng-ust/lttng-filter.c
liblttng-ust/lttng-filter.h

index 4e99dc50f983c6b07bbc500291203e5a09ce46d2..acc91ba97a17fd9eda9def96e38b2e3f62eb9ea6 100644 (file)
@@ -399,15 +399,8 @@ int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
                break;
        }
 
-       /* load field ref */
+       /* load field and get context ref */
        case FILTER_OP_LOAD_FIELD_REF:
-       {
-               ERR("Unknown field ref type\n");
-               ret = -EINVAL;
-               break;
-       }
-
-       /* get context ref */
        case FILTER_OP_GET_CONTEXT_REF:
        case FILTER_OP_LOAD_FIELD_REF_STRING:
        case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
@@ -1179,6 +1172,236 @@ int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
        return 0;
 }
 
+/*
+ * Validate load instructions: specialized instructions not accepted as input.
+ *
+ * Return value:
+ * 0: success, *_next_pc is updated to the following instruction.
+ * <0: error.
+ *
+ */
+static
+int validate_load(char **_next_pc,
+               char *pc)
+{
+       int ret = 0;
+       char *next_pc = *_next_pc;
+
+       switch (*(filter_opcode_t *) pc) {
+       case FILTER_OP_UNKNOWN:
+       default:
+       {
+               ERR("Unknown bytecode op %u\n",
+                       (unsigned int) *(filter_opcode_t *) pc);
+               ret = -EINVAL;
+               goto end;
+       }
+
+       case FILTER_OP_RETURN:
+       {
+               next_pc += sizeof(struct return_op);
+               break;
+       }
+
+       case FILTER_OP_RETURN_S64:
+       {
+               next_pc += sizeof(struct return_op);
+               break;
+       }
+
+       /* binary */
+       case FILTER_OP_MUL:
+       case FILTER_OP_DIV:
+       case FILTER_OP_MOD:
+       case FILTER_OP_PLUS:
+       case FILTER_OP_MINUS:
+       {
+               ERR("Unsupported bytecode op %u\n",
+                       (unsigned int) *(filter_opcode_t *) pc);
+               ret = -EINVAL;
+               goto end;
+       }
+
+       case FILTER_OP_EQ:
+       case FILTER_OP_NE:
+       case FILTER_OP_GT:
+       case FILTER_OP_LT:
+       case FILTER_OP_GE:
+       case FILTER_OP_LE:
+       case FILTER_OP_EQ_STRING:
+       case FILTER_OP_NE_STRING:
+       case FILTER_OP_GT_STRING:
+       case FILTER_OP_LT_STRING:
+       case FILTER_OP_GE_STRING:
+       case FILTER_OP_LE_STRING:
+       case FILTER_OP_EQ_STAR_GLOB_STRING:
+       case FILTER_OP_NE_STAR_GLOB_STRING:
+       case FILTER_OP_EQ_S64:
+       case FILTER_OP_NE_S64:
+       case FILTER_OP_GT_S64:
+       case FILTER_OP_LT_S64:
+       case FILTER_OP_GE_S64:
+       case FILTER_OP_LE_S64:
+       case FILTER_OP_EQ_DOUBLE:
+       case FILTER_OP_NE_DOUBLE:
+       case FILTER_OP_GT_DOUBLE:
+       case FILTER_OP_LT_DOUBLE:
+       case FILTER_OP_GE_DOUBLE:
+       case FILTER_OP_LE_DOUBLE:
+       case FILTER_OP_EQ_DOUBLE_S64:
+       case FILTER_OP_NE_DOUBLE_S64:
+       case FILTER_OP_GT_DOUBLE_S64:
+       case FILTER_OP_LT_DOUBLE_S64:
+       case FILTER_OP_GE_DOUBLE_S64:
+       case FILTER_OP_LE_DOUBLE_S64:
+       case FILTER_OP_EQ_S64_DOUBLE:
+       case FILTER_OP_NE_S64_DOUBLE:
+       case FILTER_OP_GT_S64_DOUBLE:
+       case FILTER_OP_LT_S64_DOUBLE:
+       case FILTER_OP_GE_S64_DOUBLE:
+       case FILTER_OP_LE_S64_DOUBLE:
+       case FILTER_OP_BIT_RSHIFT:
+       case FILTER_OP_BIT_LSHIFT:
+       case FILTER_OP_BIT_AND:
+       case FILTER_OP_BIT_OR:
+       case FILTER_OP_BIT_XOR:
+       {
+               next_pc += sizeof(struct binary_op);
+               break;
+       }
+
+       /* unary */
+       case FILTER_OP_UNARY_PLUS:
+       case FILTER_OP_UNARY_MINUS:
+       case FILTER_OP_UNARY_PLUS_S64:
+       case FILTER_OP_UNARY_MINUS_S64:
+       case FILTER_OP_UNARY_NOT_S64:
+       case FILTER_OP_UNARY_NOT:
+       case FILTER_OP_UNARY_BIT_NOT:
+       case FILTER_OP_UNARY_PLUS_DOUBLE:
+       case FILTER_OP_UNARY_MINUS_DOUBLE:
+       case FILTER_OP_UNARY_NOT_DOUBLE:
+       {
+               next_pc += sizeof(struct unary_op);
+               break;
+       }
+
+       /* logical */
+       case FILTER_OP_AND:
+       case FILTER_OP_OR:
+       {
+               next_pc += sizeof(struct logical_op);
+               break;
+       }
+
+       /* load field ref */
+       case FILTER_OP_LOAD_FIELD_REF:
+       /* get context ref */
+       case FILTER_OP_GET_CONTEXT_REF:
+       {
+               next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
+               break;
+       }
+       case FILTER_OP_LOAD_FIELD_REF_STRING:
+       case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
+       case FILTER_OP_GET_CONTEXT_REF_STRING:
+       case FILTER_OP_LOAD_FIELD_REF_S64:
+       case FILTER_OP_GET_CONTEXT_REF_S64:
+       case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
+       case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
+       {
+               /*
+                * Reject specialized load field ref instructions.
+                */
+               ret = -EINVAL;
+               goto end;
+       }
+
+       /* load from immediate operand */
+       case FILTER_OP_LOAD_STRING:
+       case FILTER_OP_LOAD_STAR_GLOB_STRING:
+       {
+               struct load_op *insn = (struct load_op *) pc;
+
+               next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
+               break;
+       }
+
+       case FILTER_OP_LOAD_S64:
+       {
+               next_pc += sizeof(struct load_op) + sizeof(struct literal_numeric);
+               break;
+       }
+       case FILTER_OP_LOAD_DOUBLE:
+       {
+               next_pc += sizeof(struct load_op) + sizeof(struct literal_double);
+               break;
+       }
+
+       case FILTER_OP_CAST_DOUBLE_TO_S64:
+       case FILTER_OP_CAST_TO_S64:
+       case FILTER_OP_CAST_NOP:
+       {
+               next_pc += sizeof(struct cast_op);
+               break;
+       }
+
+       /*
+        * Instructions for recursive traversal through composed types.
+        */
+       case FILTER_OP_GET_CONTEXT_ROOT:
+       case FILTER_OP_GET_APP_CONTEXT_ROOT:
+       case FILTER_OP_GET_PAYLOAD_ROOT:
+       case FILTER_OP_LOAD_FIELD:
+       {
+               next_pc += sizeof(struct load_op);
+               break;
+       }
+
+       case FILTER_OP_LOAD_FIELD_S8:
+       case FILTER_OP_LOAD_FIELD_S16:
+       case FILTER_OP_LOAD_FIELD_S32:
+       case FILTER_OP_LOAD_FIELD_S64:
+       case FILTER_OP_LOAD_FIELD_U8:
+       case FILTER_OP_LOAD_FIELD_U16:
+       case FILTER_OP_LOAD_FIELD_U32:
+       case FILTER_OP_LOAD_FIELD_U64:
+       case FILTER_OP_LOAD_FIELD_STRING:
+       case FILTER_OP_LOAD_FIELD_SEQUENCE:
+       case FILTER_OP_LOAD_FIELD_DOUBLE:
+       {
+               /*
+                * Reject specialized load field instructions.
+                */
+               ret = -EINVAL;
+               goto end;
+       }
+
+       case FILTER_OP_GET_SYMBOL:
+       case FILTER_OP_GET_SYMBOL_FIELD:
+       {
+               next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
+               break;
+       }
+
+       case FILTER_OP_GET_INDEX_U16:
+       {
+               next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
+               break;
+       }
+
+       case FILTER_OP_GET_INDEX_U64:
+       {
+               next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
+               break;
+       }
+
+       }
+end:
+       *_next_pc = next_pc;
+       return ret;
+}
+
 /*
  * Return value:
  * >0: going to next insn.
@@ -1811,6 +2034,32 @@ end:
        return ret;
 }
 
+int lttng_filter_validate_bytecode_load(struct bytecode_runtime *bytecode)
+{
+       char *pc, *next_pc, *start_pc;
+       int ret = -EINVAL;
+
+       start_pc = &bytecode->code[0];
+       for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
+                       pc = next_pc) {
+               ret = bytecode_validate_overflow(bytecode, start_pc, pc);
+               if (ret != 0) {
+                       if (ret == -ERANGE)
+                               ERR("filter bytecode overflow\n");
+                       goto end;
+               }
+               dbg_printf("Validating loads: op %s (%u)\n",
+                       print_op((unsigned int) *(filter_opcode_t *) pc),
+                       (unsigned int) *(filter_opcode_t *) pc);
+
+               ret = validate_load(&next_pc, pc);
+               if (ret)
+                       goto end;
+       }
+end:
+       return ret;
+}
+
 /*
  * Never called concurrently (hash seed is shared).
  */
index 947ed843917579fcee828f6e9aba95a88504e238..c5a2db1e9cc0e346e45a95d2e81667b9440ea4d4 100644 (file)
@@ -468,6 +468,11 @@ int _lttng_filter_event_link_bytecode(struct lttng_event *event,
        runtime->len = filter_bytecode->bc.reloc_offset;
        /* copy original bytecode */
        memcpy(runtime->code, filter_bytecode->bc.data, runtime->len);
+       /* Validate bytecode load instructions before relocs. */
+       ret = lttng_filter_validate_bytecode_load(runtime);
+       if (ret) {
+               goto link_error;
+       }
        /*
         * apply relocs. Those are a uint16_t (offset in bytecode)
         * followed by a string (field name).
index cc15c1546fa469ae1f32871d8a74092662f990bf..09446507cb6fa79160d3d9412d6264d2611116a0 100644 (file)
@@ -291,6 +291,7 @@ struct estack {
 const char *print_op(enum filter_op op);
 
 int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode);
+int lttng_filter_validate_bytecode_load(struct bytecode_runtime *bytecode);
 int lttng_filter_specialize_bytecode(struct lttng_event *event,
                struct bytecode_runtime *bytecode);
 
This page took 0.029675 seconds and 4 git commands to generate.