cc8f45962c73f51c7a0c944579df6675a447aba0
[lttng-modules.git] / lttng-filter-validator.c
1 /*
2 * lttng-filter-validator.c
3 *
4 * LTTng modules filter bytecode validator.
5 *
6 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 * SOFTWARE.
25 */
26
27 #include <linux/types.h>
28 #include <linux/jhash.h>
29 #include <linux/slab.h>
30
31 #include <wrapper/list.h>
32 #include <lttng-filter.h>
33
34 #define MERGE_POINT_TABLE_BITS 7
35 #define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)
36
/* merge point table node */
struct mp_node {
	struct hlist_node node;		/* linkage in mp_table hash bucket */

	/* Context at merge point */
	struct vstack stack;		/* expected type stack when reaching target_pc */
	unsigned long target_pc;	/* bytecode offset of the merge point (hash key) */
};
45
/* Hash table of merge points, indexed by jhash of the target bytecode offset. */
struct mp_table {
	struct hlist_head mp_head[MERGE_POINT_TABLE_SIZE];
};
49
50 static
51 int lttng_hash_match(struct mp_node *mp_node, unsigned long key_pc)
52 {
53 if (mp_node->target_pc == key_pc)
54 return 1;
55 else
56 return 0;
57 }
58
59 static
60 int merge_points_compare(const struct vstack *stacka,
61 const struct vstack *stackb)
62 {
63 int i, len;
64
65 if (stacka->top != stackb->top)
66 return 1;
67 len = stacka->top + 1;
68 WARN_ON_ONCE(len < 0);
69 for (i = 0; i < len; i++) {
70 if (stacka->e[i].type != stackb->e[i].type)
71 return 1;
72 }
73 return 0;
74 }
75
76 static
77 int merge_point_add_check(struct mp_table *mp_table, unsigned long target_pc,
78 const struct vstack *stack)
79 {
80 struct mp_node *mp_node;
81 unsigned long hash = jhash_1word(target_pc, 0);
82 struct hlist_head *head;
83 struct mp_node *lookup_node;
84 int found = 0;
85
86 dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
87 target_pc, hash);
88 mp_node = kzalloc(sizeof(struct mp_node), GFP_KERNEL);
89 if (!mp_node)
90 return -ENOMEM;
91 mp_node->target_pc = target_pc;
92 memcpy(&mp_node->stack, stack, sizeof(mp_node->stack));
93
94 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
95 lttng_hlist_for_each_entry(lookup_node, head, node) {
96 if (lttng_hash_match(lookup_node, target_pc)) {
97 found = 1;
98 break;
99 }
100 }
101 if (found) {
102 /* Key already present */
103 dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
104 target_pc, hash);
105 kfree(mp_node);
106 if (merge_points_compare(stack, &lookup_node->stack)) {
107 printk(KERN_WARNING "Merge points differ for offset %lu\n",
108 target_pc);
109 return -EINVAL;
110 }
111 } else {
112 hlist_add_head(&mp_node->node, head);
113 }
114 return 0;
115 }
116
/*
 * Binary comparators use top of stack (ax) and top of stack -1 (bx).
 *
 * Return 0 if typing is known to match, 1 if typing is dynamic (at least
 * one operand is REG_TYPE_UNKNOWN, resolved at runtime), negative error
 * value on empty stack, type mismatch, or unknown/unsupported type.
 */
static
int bin_op_compare_check(struct vstack *stack, const filter_opcode_t opcode,
		const char *str)
{
	if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
		goto error_empty;

	switch (vstack_ax(stack)->type) {
	default:
	case REG_DOUBLE:	/* doubles are unsupported in the kernel validator */
		goto error_type;

	case REG_STRING:
		switch (vstack_bx(stack)->type) {
		default:
		case REG_DOUBLE:
			goto error_type;
		case REG_TYPE_UNKNOWN:
			goto unknown;
		case REG_STRING:
			break;
		case REG_STAR_GLOB_STRING:
			/* A glob pattern can only be equality-compared with a string. */
			if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
				goto error_mismatch;
			}
			break;
		case REG_S64:
			goto error_mismatch;
		}
		break;
	case REG_STAR_GLOB_STRING:
		switch (vstack_bx(stack)->type) {
		default:
		case REG_DOUBLE:
			goto error_type;
		case REG_TYPE_UNKNOWN:
			goto unknown;
		case REG_STRING:
			/* Same restriction as above, operands swapped. */
			if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
				goto error_mismatch;
			}
			break;
		case REG_STAR_GLOB_STRING:
		case REG_S64:
			/* Glob vs glob, or glob vs number, is never valid. */
			goto error_mismatch;
		}
		break;
	case REG_S64:
		switch (vstack_bx(stack)->type) {
		default:
		case REG_DOUBLE:
			goto error_type;
		case REG_TYPE_UNKNOWN:
			goto unknown;
		case REG_STRING:
		case REG_STAR_GLOB_STRING:
			goto error_mismatch;
		case REG_S64:
			break;
		}
		break;
	case REG_TYPE_UNKNOWN:
		switch (vstack_bx(stack)->type) {
		default:
		case REG_DOUBLE:
			goto error_type;
		case REG_TYPE_UNKNOWN:
		case REG_STRING:
		case REG_STAR_GLOB_STRING:
		case REG_S64:
			/* Typing is only resolvable at runtime. */
			goto unknown;
		}
		break;
	}
	return 0;

unknown:
	return 1;

error_empty:
	printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
	return -EINVAL;

error_mismatch:
	printk(KERN_WARNING "type mismatch for '%s' binary operator\n", str);
	return -EINVAL;

error_type:
	printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
	return -EINVAL;
}
211
/*
 * Binary bitwise operators use top of stack (ax) and top of stack -1 (bx).
 * Only two s64 operands are acceptable; any other combination is either
 * deferred to runtime (unknown) or rejected.
 *
 * Return 0 if typing is known to match, 1 if typing is dynamic
 * (unknown), negative error value on error.
 */
static
int bin_op_bitwise_check(struct vstack *stack, filter_opcode_t opcode,
		const char *str)
{
	if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
		goto error_empty;

	switch (vstack_ax(stack)->type) {
	default:
	case REG_DOUBLE:	/* doubles are unsupported in the kernel validator */
		goto error_type;

	case REG_TYPE_UNKNOWN:
		switch (vstack_bx(stack)->type) {
		default:
		case REG_DOUBLE:
			goto error_type;
		case REG_TYPE_UNKNOWN:
		case REG_STRING:
		case REG_STAR_GLOB_STRING:
		case REG_S64:
			/* Typing is only resolvable at runtime. */
			goto unknown;
		}
		break;
	case REG_S64:
		switch (vstack_bx(stack)->type) {
		default:
		case REG_DOUBLE:
			goto error_type;
		case REG_TYPE_UNKNOWN:
			goto unknown;
		case REG_S64:
			break;
		}
		break;
	}
	return 0;

unknown:
	return 1;

error_empty:
	printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
	return -EINVAL;

error_type:
	printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
	return -EINVAL;
}
266
267 static
268 int validate_get_symbol(struct bytecode_runtime *bytecode,
269 const struct get_symbol *sym)
270 {
271 const char *str, *str_limit;
272 size_t len_limit;
273
274 if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
275 return -EINVAL;
276
277 str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
278 str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
279 len_limit = str_limit - str;
280 if (strnlen(str, len_limit) == len_limit)
281 return -EINVAL;
282 return 0;
283 }
284
285 /*
286 * Validate bytecode range overflow within the validation pass.
287 * Called for each instruction encountered.
288 */
289 static
290 int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
291 char *start_pc, char *pc)
292 {
293 int ret = 0;
294
295 switch (*(filter_opcode_t *) pc) {
296 case FILTER_OP_UNKNOWN:
297 default:
298 {
299 printk(KERN_WARNING "unknown bytecode op %u\n",
300 (unsigned int) *(filter_opcode_t *) pc);
301 ret = -EINVAL;
302 break;
303 }
304
305 case FILTER_OP_RETURN:
306 {
307 if (unlikely(pc + sizeof(struct return_op)
308 > start_pc + bytecode->len)) {
309 ret = -ERANGE;
310 }
311 break;
312 }
313
314 /* binary */
315 case FILTER_OP_MUL:
316 case FILTER_OP_DIV:
317 case FILTER_OP_MOD:
318 case FILTER_OP_PLUS:
319 case FILTER_OP_MINUS:
320 case FILTER_OP_RSHIFT:
321 case FILTER_OP_LSHIFT:
322 case FILTER_OP_EQ_DOUBLE:
323 case FILTER_OP_NE_DOUBLE:
324 case FILTER_OP_GT_DOUBLE:
325 case FILTER_OP_LT_DOUBLE:
326 case FILTER_OP_GE_DOUBLE:
327 case FILTER_OP_LE_DOUBLE:
328 /* Floating point */
329 case FILTER_OP_EQ_DOUBLE_S64:
330 case FILTER_OP_NE_DOUBLE_S64:
331 case FILTER_OP_GT_DOUBLE_S64:
332 case FILTER_OP_LT_DOUBLE_S64:
333 case FILTER_OP_GE_DOUBLE_S64:
334 case FILTER_OP_LE_DOUBLE_S64:
335 case FILTER_OP_EQ_S64_DOUBLE:
336 case FILTER_OP_NE_S64_DOUBLE:
337 case FILTER_OP_GT_S64_DOUBLE:
338 case FILTER_OP_LT_S64_DOUBLE:
339 case FILTER_OP_GE_S64_DOUBLE:
340 case FILTER_OP_LE_S64_DOUBLE:
341 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
342 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
343 case FILTER_OP_LOAD_DOUBLE:
344 case FILTER_OP_CAST_DOUBLE_TO_S64:
345 case FILTER_OP_UNARY_PLUS_DOUBLE:
346 case FILTER_OP_UNARY_MINUS_DOUBLE:
347 case FILTER_OP_UNARY_NOT_DOUBLE:
348 {
349 printk(KERN_WARNING "unsupported bytecode op %u\n",
350 (unsigned int) *(filter_opcode_t *) pc);
351 ret = -EINVAL;
352 break;
353 }
354
355 case FILTER_OP_EQ:
356 case FILTER_OP_NE:
357 case FILTER_OP_GT:
358 case FILTER_OP_LT:
359 case FILTER_OP_GE:
360 case FILTER_OP_LE:
361 case FILTER_OP_EQ_STRING:
362 case FILTER_OP_NE_STRING:
363 case FILTER_OP_GT_STRING:
364 case FILTER_OP_LT_STRING:
365 case FILTER_OP_GE_STRING:
366 case FILTER_OP_LE_STRING:
367 case FILTER_OP_EQ_STAR_GLOB_STRING:
368 case FILTER_OP_NE_STAR_GLOB_STRING:
369 case FILTER_OP_EQ_S64:
370 case FILTER_OP_NE_S64:
371 case FILTER_OP_GT_S64:
372 case FILTER_OP_LT_S64:
373 case FILTER_OP_GE_S64:
374 case FILTER_OP_LE_S64:
375 case FILTER_OP_BIT_AND:
376 case FILTER_OP_BIT_OR:
377 case FILTER_OP_BIT_XOR:
378 {
379 if (unlikely(pc + sizeof(struct binary_op)
380 > start_pc + bytecode->len)) {
381 ret = -ERANGE;
382 }
383 break;
384 }
385
386 /* unary */
387 case FILTER_OP_UNARY_PLUS:
388 case FILTER_OP_UNARY_MINUS:
389 case FILTER_OP_UNARY_NOT:
390 case FILTER_OP_UNARY_PLUS_S64:
391 case FILTER_OP_UNARY_MINUS_S64:
392 case FILTER_OP_UNARY_NOT_S64:
393 {
394 if (unlikely(pc + sizeof(struct unary_op)
395 > start_pc + bytecode->len)) {
396 ret = -ERANGE;
397 }
398 break;
399 }
400
401 /* logical */
402 case FILTER_OP_AND:
403 case FILTER_OP_OR:
404 {
405 if (unlikely(pc + sizeof(struct logical_op)
406 > start_pc + bytecode->len)) {
407 ret = -ERANGE;
408 }
409 break;
410 }
411
412 /* load field ref */
413 case FILTER_OP_LOAD_FIELD_REF:
414 {
415 printk(KERN_WARNING "Unknown field ref type\n");
416 ret = -EINVAL;
417 break;
418 }
419
420 /* get context ref */
421 case FILTER_OP_GET_CONTEXT_REF:
422 {
423 printk(KERN_WARNING "Unknown field ref type\n");
424 ret = -EINVAL;
425 break;
426 }
427 case FILTER_OP_LOAD_FIELD_REF_STRING:
428 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
429 case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
430 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
431 case FILTER_OP_LOAD_FIELD_REF_S64:
432 case FILTER_OP_GET_CONTEXT_REF_STRING:
433 case FILTER_OP_GET_CONTEXT_REF_S64:
434 {
435 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
436 > start_pc + bytecode->len)) {
437 ret = -ERANGE;
438 }
439 break;
440 }
441
442 /* load from immediate operand */
443 case FILTER_OP_LOAD_STRING:
444 case FILTER_OP_LOAD_STAR_GLOB_STRING:
445 {
446 struct load_op *insn = (struct load_op *) pc;
447 uint32_t str_len, maxlen;
448
449 if (unlikely(pc + sizeof(struct load_op)
450 > start_pc + bytecode->len)) {
451 ret = -ERANGE;
452 break;
453 }
454
455 maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
456 str_len = strnlen(insn->data, maxlen);
457 if (unlikely(str_len >= maxlen)) {
458 /* Final '\0' not found within range */
459 ret = -ERANGE;
460 }
461 break;
462 }
463
464 case FILTER_OP_LOAD_S64:
465 {
466 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
467 > start_pc + bytecode->len)) {
468 ret = -ERANGE;
469 }
470 break;
471 }
472
473 case FILTER_OP_CAST_TO_S64:
474 case FILTER_OP_CAST_NOP:
475 {
476 if (unlikely(pc + sizeof(struct cast_op)
477 > start_pc + bytecode->len)) {
478 ret = -ERANGE;
479 }
480 break;
481 }
482
483 /*
484 * Instructions for recursive traversal through composed types.
485 */
486 case FILTER_OP_GET_CONTEXT_ROOT:
487 case FILTER_OP_GET_APP_CONTEXT_ROOT:
488 case FILTER_OP_GET_PAYLOAD_ROOT:
489 case FILTER_OP_LOAD_FIELD:
490 case FILTER_OP_LOAD_FIELD_S8:
491 case FILTER_OP_LOAD_FIELD_S16:
492 case FILTER_OP_LOAD_FIELD_S32:
493 case FILTER_OP_LOAD_FIELD_S64:
494 case FILTER_OP_LOAD_FIELD_U8:
495 case FILTER_OP_LOAD_FIELD_U16:
496 case FILTER_OP_LOAD_FIELD_U32:
497 case FILTER_OP_LOAD_FIELD_U64:
498 case FILTER_OP_LOAD_FIELD_STRING:
499 case FILTER_OP_LOAD_FIELD_SEQUENCE:
500 case FILTER_OP_LOAD_FIELD_DOUBLE:
501 if (unlikely(pc + sizeof(struct load_op)
502 > start_pc + bytecode->len)) {
503 ret = -ERANGE;
504 }
505 break;
506
507 case FILTER_OP_GET_SYMBOL:
508 {
509 struct load_op *insn = (struct load_op *) pc;
510 struct get_symbol *sym = (struct get_symbol *) insn->data;
511
512 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
513 > start_pc + bytecode->len)) {
514 ret = -ERANGE;
515 }
516 ret = validate_get_symbol(bytecode, sym);
517 break;
518 }
519
520 case FILTER_OP_GET_SYMBOL_FIELD:
521 printk(KERN_WARNING "Unexpected get symbol field\n");
522 ret = -EINVAL;
523 break;
524
525 case FILTER_OP_GET_INDEX_U16:
526 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
527 > start_pc + bytecode->len)) {
528 ret = -ERANGE;
529 }
530 break;
531
532 case FILTER_OP_GET_INDEX_U64:
533 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
534 > start_pc + bytecode->len)) {
535 ret = -ERANGE;
536 }
537 break;
538 }
539
540 return ret;
541 }
542
543 static
544 unsigned long delete_all_nodes(struct mp_table *mp_table)
545 {
546 struct mp_node *mp_node;
547 struct hlist_node *tmp;
548 unsigned long nr_nodes = 0;
549 int i;
550
551 for (i = 0; i < MERGE_POINT_TABLE_SIZE; i++) {
552 struct hlist_head *head;
553
554 head = &mp_table->mp_head[i];
555 lttng_hlist_for_each_entry_safe(mp_node, tmp, head, node) {
556 kfree(mp_node);
557 nr_nodes++;
558 }
559 }
560 return nr_nodes;
561 }
562
/*
 * Validate the type state of the virtual stack against the instruction at
 * pc, before it is (abstractly) executed by exec_insn(). Only typing is
 * checked here; range overflow is verified separately by
 * bytecode_validate_overflow().
 *
 * Return value:
 * >=0: success
 * <0: error
 */
static
int validate_instruction_context(struct bytecode_runtime *bytecode,
		struct vstack *stack,
		char *start_pc,
		char *pc)
{
	int ret = 0;
	const filter_opcode_t opcode = *(filter_opcode_t *) pc;

	switch (opcode) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		printk(KERN_WARNING "unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_RETURN:
	{
		goto end;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	/* Floating point */
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_LOAD_DOUBLE:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	{
		/* Arithmetic, shifts and floating point are unsupported. */
		printk(KERN_WARNING "unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	/* Generic comparators: operand typing resolved by the checker. */
	case FILTER_OP_EQ:
	{
		ret = bin_op_compare_check(stack, opcode, "==");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_NE:
	{
		ret = bin_op_compare_check(stack, opcode, "!=");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_GT:
	{
		ret = bin_op_compare_check(stack, opcode, ">");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_LT:
	{
		ret = bin_op_compare_check(stack, opcode, "<");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_GE:
	{
		ret = bin_op_compare_check(stack, opcode, ">=");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_LE:
	{
		ret = bin_op_compare_check(stack, opcode, "<=");
		if (ret < 0)
			goto end;
		break;
	}

	/* Specialized string comparators: both operands must be strings. */
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_STRING
				|| vstack_bx(stack)->type != REG_STRING) {
			printk(KERN_WARNING "Unexpected register type for string comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}


	/* Glob comparators: at least one operand must be a glob pattern. */
	case FILTER_OP_EQ_STAR_GLOB_STRING:
	case FILTER_OP_NE_STAR_GLOB_STRING:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
				&& vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
			printk(KERN_WARNING "Unexpected register type for globbing pattern comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* Specialized s64 comparators: both operands must be s64. */
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64
				|| vstack_bx(stack)->type != REG_S64) {
			printk(KERN_WARNING "Unexpected register type for s64 comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	case FILTER_OP_BIT_AND:
		ret = bin_op_bitwise_check(stack, opcode, "&");
		if (ret < 0)
			goto end;
		break;
	case FILTER_OP_BIT_OR:
		ret = bin_op_bitwise_check(stack, opcode, "|");
		if (ret < 0)
			goto end;
		break;
	case FILTER_OP_BIT_XOR:
		ret = bin_op_bitwise_check(stack, opcode, "^");
		if (ret < 0)
			goto end;
		break;

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	{
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
		case REG_DOUBLE:
			printk(KERN_WARNING "unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
		case REG_STAR_GLOB_STRING:
			printk(KERN_WARNING "Unary op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
		case REG_TYPE_UNKNOWN:
			break;
		}
		break;
	}

	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64) {
			printk(KERN_WARNING "Invalid register type\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		struct logical_op *insn = (struct logical_op *) pc;

		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64) {
			printk(KERN_WARNING "Logical comparator expects S64 register\n");
			ret = -EINVAL;
			goto end;
		}

		dbg_printk("Validate jumping to bytecode offset %u\n",
			(unsigned int) insn->skip_offset);
		/* Only forward jumps are allowed: guarantees termination. */
		if (unlikely(start_pc + insn->skip_offset <= pc)) {
			printk(KERN_WARNING "Loops are not allowed in bytecode\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* load field ref */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		/* Generic (untyped) field ref must have been specialized. */
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate load field ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate load field ref offset %u type s64\n",
			ref->offset);
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	case FILTER_OP_LOAD_STAR_GLOB_STRING:
	{
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	{
		struct cast_op *insn = (struct cast_op *) pc;

		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
		case REG_DOUBLE:
			printk(KERN_WARNING "unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
		case REG_STAR_GLOB_STRING:
			printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		}
		/*
		 * NOTE(review): unreachable in practice — REG_DOUBLE was
		 * rejected by the switch above; kept for defensive symmetry.
		 */
		if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) {
			if (vstack_ax(stack)->type != REG_DOUBLE) {
				printk(KERN_WARNING "Cast expects double\n");
				ret = -EINVAL;
				goto end;
			}
		}
		break;
	}
	case FILTER_OP_CAST_NOP:
	{
		break;
	}

	/* get context ref */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		printk(KERN_WARNING "Unknown get context ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate get context ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate get context ref offset %u type s64\n",
			ref->offset);
		break;
	}

	/*
	 * Instructions for recursive traversal through composed types.
	 */
	case FILTER_OP_GET_CONTEXT_ROOT:
	{
		dbg_printk("Validate get context root\n");
		break;
	}
	case FILTER_OP_GET_APP_CONTEXT_ROOT:
	{
		dbg_printk("Validate get app context root\n");
		break;
	}
	case FILTER_OP_GET_PAYLOAD_ROOT:
	{
		dbg_printk("Validate get payload root\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD:
	{
		/*
		 * We tolerate that field type is unknown at validation,
		 * because we are performing the load specialization in
		 * a phase after validation.
		 */
		dbg_printk("Validate load field\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_S8:
	{
		dbg_printk("Validate load field s8\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_S16:
	{
		dbg_printk("Validate load field s16\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_S32:
	{
		dbg_printk("Validate load field s32\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_S64:
	{
		dbg_printk("Validate load field s64\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_U8:
	{
		dbg_printk("Validate load field u8\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_U16:
	{
		dbg_printk("Validate load field u16\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_U32:
	{
		dbg_printk("Validate load field u32\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_U64:
	{
		dbg_printk("Validate load field u64\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_STRING:
	{
		dbg_printk("Validate load field string\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_SEQUENCE:
	{
		dbg_printk("Validate load field sequence\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_DOUBLE:
	{
		dbg_printk("Validate load field double\n");
		break;
	}

	case FILTER_OP_GET_SYMBOL:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct get_symbol *sym = (struct get_symbol *) insn->data;

		dbg_printk("Validate get symbol offset %u\n", sym->offset);
		break;
	}

	case FILTER_OP_GET_SYMBOL_FIELD:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct get_symbol *sym = (struct get_symbol *) insn->data;

		dbg_printk("Validate get symbol field offset %u\n", sym->offset);
		break;
	}

	case FILTER_OP_GET_INDEX_U16:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;

		dbg_printk("Validate get index u16 index %u\n", get_index->index);
		break;
	}

	case FILTER_OP_GET_INDEX_U64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;

		dbg_printk("Validate get index u64 index %llu\n",
			(unsigned long long) get_index->index);
		break;
	}
	}
end:
	return ret;
}
1054
1055 /*
1056 * Return value:
1057 * 0: success
1058 * <0: error
1059 */
1060 static
1061 int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
1062 struct mp_table *mp_table,
1063 struct vstack *stack,
1064 char *start_pc,
1065 char *pc)
1066 {
1067 int ret, found = 0;
1068 unsigned long target_pc = pc - start_pc;
1069 unsigned long hash;
1070 struct hlist_head *head;
1071 struct mp_node *mp_node;
1072
1073 /* Validate the context resulting from the previous instruction */
1074 ret = validate_instruction_context(bytecode, stack, start_pc, pc);
1075 if (ret < 0)
1076 return ret;
1077
1078 /* Validate merge points */
1079 hash = jhash_1word(target_pc, 0);
1080 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
1081 lttng_hlist_for_each_entry(mp_node, head, node) {
1082 if (lttng_hash_match(mp_node, target_pc)) {
1083 found = 1;
1084 break;
1085 }
1086 }
1087 if (found) {
1088 dbg_printk("Filter: validate merge point at offset %lu\n",
1089 target_pc);
1090 if (merge_points_compare(stack, &mp_node->stack)) {
1091 printk(KERN_WARNING "Merge points differ for offset %lu\n",
1092 target_pc);
1093 return -EINVAL;
1094 }
1095 /* Once validated, we can remove the merge point */
1096 dbg_printk("Filter: remove merge point at offset %lu\n",
1097 target_pc);
1098 hlist_del(&mp_node->node);
1099 }
1100 return 0;
1101 }
1102
1103 /*
1104 * Return value:
1105 * >0: going to next insn.
1106 * 0: success, stop iteration.
1107 * <0: error
1108 */
1109 static
1110 int exec_insn(struct bytecode_runtime *bytecode,
1111 struct mp_table *mp_table,
1112 struct vstack *stack,
1113 char **_next_pc,
1114 char *pc)
1115 {
1116 int ret = 1;
1117 char *next_pc = *_next_pc;
1118
1119 switch (*(filter_opcode_t *) pc) {
1120 case FILTER_OP_UNKNOWN:
1121 default:
1122 {
1123 printk(KERN_WARNING "unknown bytecode op %u\n",
1124 (unsigned int) *(filter_opcode_t *) pc);
1125 ret = -EINVAL;
1126 goto end;
1127 }
1128
1129 case FILTER_OP_RETURN:
1130 {
1131 if (!vstack_ax(stack)) {
1132 printk(KERN_WARNING "Empty stack\n");
1133 ret = -EINVAL;
1134 goto end;
1135 }
1136 switch (vstack_ax(stack)->type) {
1137 case REG_S64:
1138 case REG_TYPE_UNKNOWN:
1139 break;
1140 default:
1141 printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
1142 (int) vstack_ax(stack)->type);
1143 ret = -EINVAL;
1144 goto end;
1145 }
1146
1147 ret = 0;
1148 goto end;
1149 }
1150
1151 /* binary */
1152 case FILTER_OP_MUL:
1153 case FILTER_OP_DIV:
1154 case FILTER_OP_MOD:
1155 case FILTER_OP_PLUS:
1156 case FILTER_OP_MINUS:
1157 case FILTER_OP_RSHIFT:
1158 case FILTER_OP_LSHIFT:
1159 /* Floating point */
1160 case FILTER_OP_EQ_DOUBLE:
1161 case FILTER_OP_NE_DOUBLE:
1162 case FILTER_OP_GT_DOUBLE:
1163 case FILTER_OP_LT_DOUBLE:
1164 case FILTER_OP_GE_DOUBLE:
1165 case FILTER_OP_LE_DOUBLE:
1166 case FILTER_OP_EQ_DOUBLE_S64:
1167 case FILTER_OP_NE_DOUBLE_S64:
1168 case FILTER_OP_GT_DOUBLE_S64:
1169 case FILTER_OP_LT_DOUBLE_S64:
1170 case FILTER_OP_GE_DOUBLE_S64:
1171 case FILTER_OP_LE_DOUBLE_S64:
1172 case FILTER_OP_EQ_S64_DOUBLE:
1173 case FILTER_OP_NE_S64_DOUBLE:
1174 case FILTER_OP_GT_S64_DOUBLE:
1175 case FILTER_OP_LT_S64_DOUBLE:
1176 case FILTER_OP_GE_S64_DOUBLE:
1177 case FILTER_OP_LE_S64_DOUBLE:
1178 case FILTER_OP_UNARY_PLUS_DOUBLE:
1179 case FILTER_OP_UNARY_MINUS_DOUBLE:
1180 case FILTER_OP_UNARY_NOT_DOUBLE:
1181 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
1182 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
1183 case FILTER_OP_LOAD_DOUBLE:
1184 case FILTER_OP_CAST_DOUBLE_TO_S64:
1185 {
1186 printk(KERN_WARNING "unsupported bytecode op %u\n",
1187 (unsigned int) *(filter_opcode_t *) pc);
1188 ret = -EINVAL;
1189 goto end;
1190 }
1191
1192 case FILTER_OP_EQ:
1193 case FILTER_OP_NE:
1194 case FILTER_OP_GT:
1195 case FILTER_OP_LT:
1196 case FILTER_OP_GE:
1197 case FILTER_OP_LE:
1198 case FILTER_OP_EQ_STRING:
1199 case FILTER_OP_NE_STRING:
1200 case FILTER_OP_GT_STRING:
1201 case FILTER_OP_LT_STRING:
1202 case FILTER_OP_GE_STRING:
1203 case FILTER_OP_LE_STRING:
1204 case FILTER_OP_EQ_STAR_GLOB_STRING:
1205 case FILTER_OP_NE_STAR_GLOB_STRING:
1206 case FILTER_OP_EQ_S64:
1207 case FILTER_OP_NE_S64:
1208 case FILTER_OP_GT_S64:
1209 case FILTER_OP_LT_S64:
1210 case FILTER_OP_GE_S64:
1211 case FILTER_OP_LE_S64:
1212 case FILTER_OP_BIT_AND:
1213 case FILTER_OP_BIT_OR:
1214 case FILTER_OP_BIT_XOR:
1215 {
1216 /* Pop 2, push 1 */
1217 if (vstack_pop(stack)) {
1218 ret = -EINVAL;
1219 goto end;
1220 }
1221 if (!vstack_ax(stack)) {
1222 printk(KERN_WARNING "Empty stack\n");
1223 ret = -EINVAL;
1224 goto end;
1225 }
1226 switch (vstack_ax(stack)->type) {
1227 case REG_S64:
1228 case REG_DOUBLE:
1229 case REG_STRING:
1230 case REG_STAR_GLOB_STRING:
1231 case REG_TYPE_UNKNOWN:
1232 break;
1233 default:
1234 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1235 (int) vstack_ax(stack)->type);
1236 ret = -EINVAL;
1237 goto end;
1238 }
1239
1240 vstack_ax(stack)->type = REG_S64;
1241 next_pc += sizeof(struct binary_op);
1242 break;
1243 }
1244
1245 /* unary */
1246 case FILTER_OP_UNARY_PLUS:
1247 case FILTER_OP_UNARY_MINUS:
1248 {
1249 /* Pop 1, push 1 */
1250 if (!vstack_ax(stack)) {
1251 printk(KERN_WARNING "Empty stack\n\n");
1252 ret = -EINVAL;
1253 goto end;
1254 }
1255 switch (vstack_ax(stack)->type) {
1256 case REG_S64:
1257 case REG_TYPE_UNKNOWN:
1258 break;
1259 default:
1260 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1261 (int) vstack_ax(stack)->type);
1262 ret = -EINVAL;
1263 goto end;
1264 }
1265
1266 vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
1267 next_pc += sizeof(struct unary_op);
1268 break;
1269 }
1270
1271 case FILTER_OP_UNARY_PLUS_S64:
1272 case FILTER_OP_UNARY_MINUS_S64:
1273 case FILTER_OP_UNARY_NOT_S64:
1274 {
1275 /* Pop 1, push 1 */
1276 if (!vstack_ax(stack)) {
1277 printk(KERN_WARNING "Empty stack\n\n");
1278 ret = -EINVAL;
1279 goto end;
1280 }
1281 switch (vstack_ax(stack)->type) {
1282 case REG_S64:
1283 break;
1284 default:
1285 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1286 (int) vstack_ax(stack)->type);
1287 ret = -EINVAL;
1288 goto end;
1289 }
1290
1291 vstack_ax(stack)->type = REG_S64;
1292 next_pc += sizeof(struct unary_op);
1293 break;
1294 }
1295
1296 case FILTER_OP_UNARY_NOT:
1297 {
1298 /* Pop 1, push 1 */
1299 if (!vstack_ax(stack)) {
1300 printk(KERN_WARNING "Empty stack\n\n");
1301 ret = -EINVAL;
1302 goto end;
1303 }
1304 switch (vstack_ax(stack)->type) {
1305 case REG_S64:
1306 case REG_TYPE_UNKNOWN:
1307 break;
1308 default:
1309 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1310 (int) vstack_ax(stack)->type);
1311 ret = -EINVAL;
1312 goto end;
1313 }
1314
1315 vstack_ax(stack)->type = REG_S64;
1316 next_pc += sizeof(struct unary_op);
1317 break;
1318 }
1319
1320 /* logical */
1321 case FILTER_OP_AND:
1322 case FILTER_OP_OR:
1323 {
1324 struct logical_op *insn = (struct logical_op *) pc;
1325 int merge_ret;
1326
1327 /* Add merge point to table */
1328 merge_ret = merge_point_add_check(mp_table,
1329 insn->skip_offset, stack);
1330 if (merge_ret) {
1331 ret = merge_ret;
1332 goto end;
1333 }
1334
1335 if (!vstack_ax(stack)) {
1336 printk(KERN_WARNING "Empty stack\n\n");
1337 ret = -EINVAL;
1338 goto end;
1339 }
1340 /* There is always a cast-to-s64 operation before a or/and op. */
1341 switch (vstack_ax(stack)->type) {
1342 case REG_S64:
1343 break;
1344 default:
1345 printk(KERN_WARNING "Incorrect register type %d for operation\n",
1346 (int) vstack_ax(stack)->type);
1347 ret = -EINVAL;
1348 goto end;
1349 }
1350
1351 /* Continue to next instruction */
1352 /* Pop 1 when jump not taken */
1353 if (vstack_pop(stack)) {
1354 ret = -EINVAL;
1355 goto end;
1356 }
1357 next_pc += sizeof(struct logical_op);
1358 break;
1359 }
1360
1361 /* load field ref */
1362 case FILTER_OP_LOAD_FIELD_REF:
1363 {
1364 printk(KERN_WARNING "Unknown field ref type\n");
1365 ret = -EINVAL;
1366 goto end;
1367 }
1368 /* get context ref */
1369 case FILTER_OP_GET_CONTEXT_REF:
1370 {
1371 printk(KERN_WARNING "Unknown get context ref type\n");
1372 ret = -EINVAL;
1373 goto end;
1374 }
1375 case FILTER_OP_LOAD_FIELD_REF_STRING:
1376 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
1377 case FILTER_OP_GET_CONTEXT_REF_STRING:
1378 case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
1379 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
1380 {
1381 if (vstack_push(stack)) {
1382 ret = -EINVAL;
1383 goto end;
1384 }
1385 vstack_ax(stack)->type = REG_STRING;
1386 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1387 break;
1388 }
1389 case FILTER_OP_LOAD_FIELD_REF_S64:
1390 case FILTER_OP_GET_CONTEXT_REF_S64:
1391 {
1392 if (vstack_push(stack)) {
1393 ret = -EINVAL;
1394 goto end;
1395 }
1396 vstack_ax(stack)->type = REG_S64;
1397 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1398 break;
1399 }
1400
1401 /* load from immediate operand */
1402 case FILTER_OP_LOAD_STRING:
1403 {
1404 struct load_op *insn = (struct load_op *) pc;
1405
1406 if (vstack_push(stack)) {
1407 ret = -EINVAL;
1408 goto end;
1409 }
1410 vstack_ax(stack)->type = REG_STRING;
1411 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1412 break;
1413 }
1414
1415 case FILTER_OP_LOAD_STAR_GLOB_STRING:
1416 {
1417 struct load_op *insn = (struct load_op *) pc;
1418
1419 if (vstack_push(stack)) {
1420 ret = -EINVAL;
1421 goto end;
1422 }
1423 vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
1424 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1425 break;
1426 }
1427
1428 case FILTER_OP_LOAD_S64:
1429 {
1430 if (vstack_push(stack)) {
1431 ret = -EINVAL;
1432 goto end;
1433 }
1434 vstack_ax(stack)->type = REG_S64;
1435 next_pc += sizeof(struct load_op)
1436 + sizeof(struct literal_numeric);
1437 break;
1438 }
1439
1440 case FILTER_OP_CAST_TO_S64:
1441 {
1442 /* Pop 1, push 1 */
1443 if (!vstack_ax(stack)) {
1444 printk(KERN_WARNING "Empty stack\n");
1445 ret = -EINVAL;
1446 goto end;
1447 }
1448 switch (vstack_ax(stack)->type) {
1449 case REG_S64:
1450 case REG_DOUBLE:
1451 case REG_TYPE_UNKNOWN:
1452 break;
1453 default:
1454 printk(KERN_WARNING "Incorrect register type %d for cast\n",
1455 (int) vstack_ax(stack)->type);
1456 ret = -EINVAL;
1457 goto end;
1458 }
1459 vstack_ax(stack)->type = REG_S64;
1460 next_pc += sizeof(struct cast_op);
1461 break;
1462 }
1463 case FILTER_OP_CAST_NOP:
1464 {
1465 next_pc += sizeof(struct cast_op);
1466 break;
1467 }
1468
1469 /*
1470 * Instructions for recursive traversal through composed types.
1471 */
1472 case FILTER_OP_GET_CONTEXT_ROOT:
1473 case FILTER_OP_GET_APP_CONTEXT_ROOT:
1474 case FILTER_OP_GET_PAYLOAD_ROOT:
1475 {
1476 if (vstack_push(stack)) {
1477 ret = -EINVAL;
1478 goto end;
1479 }
1480 vstack_ax(stack)->type = REG_PTR;
1481 next_pc += sizeof(struct load_op);
1482 break;
1483 }
1484
1485 case FILTER_OP_LOAD_FIELD:
1486 {
1487 /* Pop 1, push 1 */
1488 if (!vstack_ax(stack)) {
1489 printk(KERN_WARNING "Empty stack\n\n");
1490 ret = -EINVAL;
1491 goto end;
1492 }
1493 if (vstack_ax(stack)->type != REG_PTR) {
1494 printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
1495 ret = -EINVAL;
1496 goto end;
1497 }
1498 vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
1499 next_pc += sizeof(struct load_op);
1500 break;
1501 }
1502
1503 case FILTER_OP_LOAD_FIELD_S8:
1504 case FILTER_OP_LOAD_FIELD_S16:
1505 case FILTER_OP_LOAD_FIELD_S32:
1506 case FILTER_OP_LOAD_FIELD_S64:
1507 case FILTER_OP_LOAD_FIELD_U8:
1508 case FILTER_OP_LOAD_FIELD_U16:
1509 case FILTER_OP_LOAD_FIELD_U32:
1510 case FILTER_OP_LOAD_FIELD_U64:
1511 {
1512 /* Pop 1, push 1 */
1513 if (!vstack_ax(stack)) {
1514 printk(KERN_WARNING "Empty stack\n\n");
1515 ret = -EINVAL;
1516 goto end;
1517 }
1518 if (vstack_ax(stack)->type != REG_PTR) {
1519 printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
1520 ret = -EINVAL;
1521 goto end;
1522 }
1523 vstack_ax(stack)->type = REG_S64;
1524 next_pc += sizeof(struct load_op);
1525 break;
1526 }
1527
1528 case FILTER_OP_LOAD_FIELD_STRING:
1529 case FILTER_OP_LOAD_FIELD_SEQUENCE:
1530 {
1531 /* Pop 1, push 1 */
1532 if (!vstack_ax(stack)) {
1533 printk(KERN_WARNING "Empty stack\n\n");
1534 ret = -EINVAL;
1535 goto end;
1536 }
1537 if (vstack_ax(stack)->type != REG_PTR) {
1538 printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
1539 ret = -EINVAL;
1540 goto end;
1541 }
1542 vstack_ax(stack)->type = REG_STRING;
1543 next_pc += sizeof(struct load_op);
1544 break;
1545 }
1546
1547 case FILTER_OP_LOAD_FIELD_DOUBLE:
1548 {
1549 /* Pop 1, push 1 */
1550 if (!vstack_ax(stack)) {
1551 printk(KERN_WARNING "Empty stack\n\n");
1552 ret = -EINVAL;
1553 goto end;
1554 }
1555 if (vstack_ax(stack)->type != REG_PTR) {
1556 printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
1557 ret = -EINVAL;
1558 goto end;
1559 }
1560 vstack_ax(stack)->type = REG_DOUBLE;
1561 next_pc += sizeof(struct load_op);
1562 break;
1563 }
1564
1565 case FILTER_OP_GET_SYMBOL:
1566 case FILTER_OP_GET_SYMBOL_FIELD:
1567 {
1568 /* Pop 1, push 1 */
1569 if (!vstack_ax(stack)) {
1570 printk(KERN_WARNING "Empty stack\n\n");
1571 ret = -EINVAL;
1572 goto end;
1573 }
1574 if (vstack_ax(stack)->type != REG_PTR) {
1575 printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
1576 ret = -EINVAL;
1577 goto end;
1578 }
1579 next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
1580 break;
1581 }
1582
1583 case FILTER_OP_GET_INDEX_U16:
1584 {
1585 /* Pop 1, push 1 */
1586 if (!vstack_ax(stack)) {
1587 printk(KERN_WARNING "Empty stack\n\n");
1588 ret = -EINVAL;
1589 goto end;
1590 }
1591 if (vstack_ax(stack)->type != REG_PTR) {
1592 printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
1593 ret = -EINVAL;
1594 goto end;
1595 }
1596 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
1597 break;
1598 }
1599
1600 case FILTER_OP_GET_INDEX_U64:
1601 {
1602 /* Pop 1, push 1 */
1603 if (!vstack_ax(stack)) {
1604 printk(KERN_WARNING "Empty stack\n\n");
1605 ret = -EINVAL;
1606 goto end;
1607 }
1608 if (vstack_ax(stack)->type != REG_PTR) {
1609 printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
1610 ret = -EINVAL;
1611 goto end;
1612 }
1613 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
1614 break;
1615 }
1616
1617 }
1618 end:
1619 *_next_pc = next_pc;
1620 return ret;
1621 }
1622
1623 /*
1624 * Never called concurrently (hash seed is shared).
1625 */
1626 int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
1627 {
1628 struct mp_table *mp_table;
1629 char *pc, *next_pc, *start_pc;
1630 int ret = -EINVAL;
1631 struct vstack stack;
1632
1633 vstack_init(&stack);
1634
1635 mp_table = kzalloc(sizeof(*mp_table), GFP_KERNEL);
1636 if (!mp_table) {
1637 printk(KERN_WARNING "Error allocating hash table for bytecode validation\n");
1638 return -ENOMEM;
1639 }
1640 start_pc = &bytecode->code[0];
1641 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
1642 pc = next_pc) {
1643 ret = bytecode_validate_overflow(bytecode, start_pc, pc);
1644 if (ret != 0) {
1645 if (ret == -ERANGE)
1646 printk(KERN_WARNING "filter bytecode overflow\n");
1647 goto end;
1648 }
1649 dbg_printk("Validating op %s (%u)\n",
1650 lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc),
1651 (unsigned int) *(filter_opcode_t *) pc);
1652
1653 /*
1654 * For each instruction, validate the current context
1655 * (traversal of entire execution flow), and validate
1656 * all merge points targeting this instruction.
1657 */
1658 ret = validate_instruction_all_contexts(bytecode, mp_table,
1659 &stack, start_pc, pc);
1660 if (ret)
1661 goto end;
1662 ret = exec_insn(bytecode, mp_table, &stack, &next_pc, pc);
1663 if (ret <= 0)
1664 goto end;
1665 }
1666 end:
1667 if (delete_all_nodes(mp_table)) {
1668 if (!ret) {
1669 printk(KERN_WARNING "Unexpected merge points\n");
1670 ret = -EINVAL;
1671 }
1672 }
1673 kfree(mp_table);
1674 return ret;
1675 }
This page took 0.065409 seconds and 3 git commands to generate.