Filter: add FILTER_OP_RETURN_S64 instruction
lttng-modules.git / lttng-filter-validator.c
1 /*
2 * lttng-filter-validator.c
3 *
4 * LTTng modules filter bytecode validator.
5 *
6 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 * SOFTWARE.
25 */
26
27 #include <linux/types.h>
28 #include <linux/jhash.h>
29 #include <linux/slab.h>
30
31 #include <wrapper/list.h>
32 #include <lttng-filter.h>
33
34 #define MERGE_POINT_TABLE_BITS 7
35 #define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)
36
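/*
 * Validation approach: each instruction's effect is simulated on a
 * virtual type stack (struct vstack, from lttng-filter.h), where
 * vstack_ax() is the top of stack and vstack_bx() the element below
 * it.  The forward jumps emitted for the short-circuit "and"/"or"
 * operators create merge points: the expected stack typing is recorded
 * for the jump target and compared against the typing seen when the
 * validator reaches that target linearly.
 */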
37 /* merge point table node */
38 struct mp_node {
39 struct hlist_node node;
40
41 /* Context at merge point */
42 struct vstack stack;
43 unsigned long target_pc;
44 };
45
46 struct mp_table {
47 struct hlist_head mp_head[MERGE_POINT_TABLE_SIZE];
48 };
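/*
 * Merge points are stored in a fixed-size hash table of
 * MERGE_POINT_TABLE_SIZE (128) buckets keyed by the jhash of the
 * target program counter.  Collisions chain on the per-bucket hlist
 * and are resolved by an exact target_pc comparison in
 * lttng_hash_match().
 */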
49
50 static
51 int lttng_hash_match(struct mp_node *mp_node, unsigned long key_pc)
52 {
53 if (mp_node->target_pc == key_pc)
54 return 1;
55 else
56 return 0;
57 }
58
59 static
60 int merge_points_compare(const struct vstack *stacka,
61 const struct vstack *stackb)
62 {
63 int i, len;
64
65 if (stacka->top != stackb->top)
66 return 1;
67 len = stacka->top + 1;
68 WARN_ON_ONCE(len < 0);
69 for (i = 0; i < len; i++) {
70 if (stacka->e[i].type != stackb->e[i].type)
71 return 1;
72 }
73 return 0;
74 }
75
76 static
77 int merge_point_add_check(struct mp_table *mp_table, unsigned long target_pc,
78 const struct vstack *stack)
79 {
80 struct mp_node *mp_node;
81 unsigned long hash = jhash_1word(target_pc, 0);
82 struct hlist_head *head;
83 struct mp_node *lookup_node;
84 int found = 0;
85
86 dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
87 target_pc, hash);
88 mp_node = kzalloc(sizeof(struct mp_node), GFP_KERNEL);
89 if (!mp_node)
90 return -ENOMEM;
91 mp_node->target_pc = target_pc;
92 memcpy(&mp_node->stack, stack, sizeof(mp_node->stack));
93
94 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
95 lttng_hlist_for_each_entry(lookup_node, head, node) {
96 if (lttng_hash_match(lookup_node, target_pc)) {
97 found = 1;
98 break;
99 }
100 }
101 if (found) {
102 /* Key already present */
103 dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
104 target_pc, hash);
105 kfree(mp_node);
106 if (merge_points_compare(stack, &lookup_node->stack)) {
107 printk(KERN_WARNING "Merge points differ for offset %lu\n",
108 target_pc);
109 return -EINVAL;
110 }
111 } else {
112 hlist_add_head(&mp_node->node, head);
113 }
114 return 0;
115 }
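/*
 * Illustrative sketch (not part of the original file): exec_insn()
 * below registers a merge point when it walks over a logical "and" or
 * "or" instruction, using the operator's short-circuit target as key:
 *
 *	struct logical_op *insn = (struct logical_op *) pc;
 *
 *	ret = merge_point_add_check(mp_table, insn->skip_offset, stack);
 *	if (ret)
 *		goto end;
 */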
116
/*
 * Binary comparators use top of stack and top of stack -1.
 * Return 0 if typing is known to match, 1 if typing is dynamic
 * (unknown), negative error value on error.
 */
120 static
121 int bin_op_compare_check(struct vstack *stack, const filter_opcode_t opcode,
122 const char *str)
123 {
124 if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
125 goto error_empty;
126
127 switch (vstack_ax(stack)->type) {
128 default:
129 case REG_DOUBLE:
130 goto error_type;
131
132 case REG_STRING:
133 switch (vstack_bx(stack)->type) {
134 default:
135 case REG_DOUBLE:
136 goto error_type;
137 case REG_TYPE_UNKNOWN:
138 goto unknown;
139 case REG_STRING:
140 break;
141 case REG_STAR_GLOB_STRING:
142 if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
143 goto error_mismatch;
144 }
145 break;
146 case REG_S64:
147 goto error_mismatch;
148 }
149 break;
150 case REG_STAR_GLOB_STRING:
151 switch (vstack_bx(stack)->type) {
152 default:
153 case REG_DOUBLE:
154 goto error_type;
155 case REG_TYPE_UNKNOWN:
156 goto unknown;
157 case REG_STRING:
158 if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
159 goto error_mismatch;
160 }
161 break;
162 case REG_STAR_GLOB_STRING:
163 case REG_S64:
164 goto error_mismatch;
165 }
166 break;
167 case REG_S64:
168 switch (vstack_bx(stack)->type) {
169 default:
170 case REG_DOUBLE:
171 goto error_type;
172 case REG_TYPE_UNKNOWN:
173 goto unknown;
174 case REG_STRING:
175 case REG_STAR_GLOB_STRING:
176 goto error_mismatch;
177 case REG_S64:
178 break;
179 }
180 break;
181 case REG_TYPE_UNKNOWN:
182 switch (vstack_bx(stack)->type) {
183 default:
184 case REG_DOUBLE:
185 goto error_type;
186 case REG_TYPE_UNKNOWN:
187 case REG_STRING:
188 case REG_STAR_GLOB_STRING:
189 case REG_S64:
190 goto unknown;
191 }
192 break;
193 }
194 return 0;
195
196 unknown:
197 return 1;
198
199 error_empty:
200 printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
201 return -EINVAL;
202
203 error_mismatch:
204 printk(KERN_WARNING "type mismatch for '%s' binary operator\n", str);
205 return -EINVAL;
206
207 error_type:
208 printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
209 return -EINVAL;
210 }
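/*
 * Summary of the typing rules enforced above: REG_STRING compares
 * against REG_STRING with any comparator, and against
 * REG_STAR_GLOB_STRING only with == or !=; two star-glob patterns
 * never compare; REG_S64 only compares against REG_S64; an operand of
 * unknown (dynamic) type defers the check to runtime (return value 1).
 */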
211
212 /*
213 * Binary bitwise operators use top of stack and top of stack -1.
214 * Return 0 if typing is known to match, 1 if typing is dynamic
215 * (unknown), negative error value on error.
216 */
217 static
218 int bin_op_bitwise_check(struct vstack *stack, filter_opcode_t opcode,
219 const char *str)
220 {
221 if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
222 goto error_empty;
223
224 switch (vstack_ax(stack)->type) {
225 default:
226 case REG_DOUBLE:
227 goto error_type;
228
229 case REG_TYPE_UNKNOWN:
230 switch (vstack_bx(stack)->type) {
231 default:
232 case REG_DOUBLE:
233 goto error_type;
234 case REG_TYPE_UNKNOWN:
235 case REG_STRING:
236 case REG_STAR_GLOB_STRING:
237 case REG_S64:
238 goto unknown;
239 }
240 break;
241 case REG_S64:
242 switch (vstack_bx(stack)->type) {
243 default:
244 case REG_DOUBLE:
245 goto error_type;
246 case REG_TYPE_UNKNOWN:
247 goto unknown;
248 case REG_S64:
249 break;
250 }
251 break;
252 }
253 return 0;
254
255 unknown:
256 return 1;
257
258 error_empty:
259 printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
260 return -EINVAL;
261
262 error_type:
263 printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
264 return -EINVAL;
265 }
266
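/*
 * A get_symbol operand holds an offset relative to the bytecode's
 * reloc_offset.  Check that the offset stays within the data following
 * reloc_offset and that the referenced name is NUL-terminated before
 * the end of the bytecode.
 */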
267 static
268 int validate_get_symbol(struct bytecode_runtime *bytecode,
269 const struct get_symbol *sym)
270 {
271 const char *str, *str_limit;
272 size_t len_limit;
273
274 if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
275 return -EINVAL;
276
277 str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
278 str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
279 len_limit = str_limit - str;
280 if (strnlen(str, len_limit) == len_limit)
281 return -EINVAL;
282 return 0;
283 }
284
285 /*
286 * Validate bytecode range overflow within the validation pass.
287 * Called for each instruction encountered.
288 */
289 static
290 int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
291 char *start_pc, char *pc)
292 {
293 int ret = 0;
294
295 switch (*(filter_opcode_t *) pc) {
296 case FILTER_OP_UNKNOWN:
297 default:
298 {
299 printk(KERN_WARNING "unknown bytecode op %u\n",
300 (unsigned int) *(filter_opcode_t *) pc);
301 ret = -EINVAL;
302 break;
303 }
304
305 case FILTER_OP_RETURN:
306 case FILTER_OP_RETURN_S64:
307 {
308 if (unlikely(pc + sizeof(struct return_op)
309 > start_pc + bytecode->len)) {
310 ret = -ERANGE;
311 }
312 break;
313 }
314
315 /* binary */
316 case FILTER_OP_MUL:
317 case FILTER_OP_DIV:
318 case FILTER_OP_MOD:
319 case FILTER_OP_PLUS:
320 case FILTER_OP_MINUS:
321 case FILTER_OP_EQ_DOUBLE:
322 case FILTER_OP_NE_DOUBLE:
323 case FILTER_OP_GT_DOUBLE:
324 case FILTER_OP_LT_DOUBLE:
325 case FILTER_OP_GE_DOUBLE:
326 case FILTER_OP_LE_DOUBLE:
327 /* Floating point */
328 case FILTER_OP_EQ_DOUBLE_S64:
329 case FILTER_OP_NE_DOUBLE_S64:
330 case FILTER_OP_GT_DOUBLE_S64:
331 case FILTER_OP_LT_DOUBLE_S64:
332 case FILTER_OP_GE_DOUBLE_S64:
333 case FILTER_OP_LE_DOUBLE_S64:
334 case FILTER_OP_EQ_S64_DOUBLE:
335 case FILTER_OP_NE_S64_DOUBLE:
336 case FILTER_OP_GT_S64_DOUBLE:
337 case FILTER_OP_LT_S64_DOUBLE:
338 case FILTER_OP_GE_S64_DOUBLE:
339 case FILTER_OP_LE_S64_DOUBLE:
340 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
341 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
342 case FILTER_OP_LOAD_DOUBLE:
343 case FILTER_OP_CAST_DOUBLE_TO_S64:
344 case FILTER_OP_UNARY_PLUS_DOUBLE:
345 case FILTER_OP_UNARY_MINUS_DOUBLE:
346 case FILTER_OP_UNARY_NOT_DOUBLE:
347 {
348 printk(KERN_WARNING "unsupported bytecode op %u\n",
349 (unsigned int) *(filter_opcode_t *) pc);
350 ret = -EINVAL;
351 break;
352 }
353
354 case FILTER_OP_EQ:
355 case FILTER_OP_NE:
356 case FILTER_OP_GT:
357 case FILTER_OP_LT:
358 case FILTER_OP_GE:
359 case FILTER_OP_LE:
360 case FILTER_OP_EQ_STRING:
361 case FILTER_OP_NE_STRING:
362 case FILTER_OP_GT_STRING:
363 case FILTER_OP_LT_STRING:
364 case FILTER_OP_GE_STRING:
365 case FILTER_OP_LE_STRING:
366 case FILTER_OP_EQ_STAR_GLOB_STRING:
367 case FILTER_OP_NE_STAR_GLOB_STRING:
368 case FILTER_OP_EQ_S64:
369 case FILTER_OP_NE_S64:
370 case FILTER_OP_GT_S64:
371 case FILTER_OP_LT_S64:
372 case FILTER_OP_GE_S64:
373 case FILTER_OP_LE_S64:
374 case FILTER_OP_BIT_RSHIFT:
375 case FILTER_OP_BIT_LSHIFT:
376 case FILTER_OP_BIT_AND:
377 case FILTER_OP_BIT_OR:
378 case FILTER_OP_BIT_XOR:
379 {
380 if (unlikely(pc + sizeof(struct binary_op)
381 > start_pc + bytecode->len)) {
382 ret = -ERANGE;
383 }
384 break;
385 }
386
387 /* unary */
388 case FILTER_OP_UNARY_PLUS:
389 case FILTER_OP_UNARY_MINUS:
390 case FILTER_OP_UNARY_NOT:
391 case FILTER_OP_UNARY_PLUS_S64:
392 case FILTER_OP_UNARY_MINUS_S64:
393 case FILTER_OP_UNARY_NOT_S64:
394 case FILTER_OP_UNARY_BIT_NOT:
395 {
396 if (unlikely(pc + sizeof(struct unary_op)
397 > start_pc + bytecode->len)) {
398 ret = -ERANGE;
399 }
400 break;
401 }
402
403 /* logical */
404 case FILTER_OP_AND:
405 case FILTER_OP_OR:
406 {
407 if (unlikely(pc + sizeof(struct logical_op)
408 > start_pc + bytecode->len)) {
409 ret = -ERANGE;
410 }
411 break;
412 }
413
414 /* load field ref */
415 case FILTER_OP_LOAD_FIELD_REF:
416 {
417 printk(KERN_WARNING "Unknown field ref type\n");
418 ret = -EINVAL;
419 break;
420 }
421
422 /* get context ref */
423 case FILTER_OP_GET_CONTEXT_REF:
424 {
425 printk(KERN_WARNING "Unknown get context ref type\n");
426 ret = -EINVAL;
427 break;
428 }
429 case FILTER_OP_LOAD_FIELD_REF_STRING:
430 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
431 case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
432 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
433 case FILTER_OP_LOAD_FIELD_REF_S64:
434 case FILTER_OP_GET_CONTEXT_REF_STRING:
435 case FILTER_OP_GET_CONTEXT_REF_S64:
436 {
437 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
438 > start_pc + bytecode->len)) {
439 ret = -ERANGE;
440 }
441 break;
442 }
443
444 /* load from immediate operand */
445 case FILTER_OP_LOAD_STRING:
446 case FILTER_OP_LOAD_STAR_GLOB_STRING:
447 {
448 struct load_op *insn = (struct load_op *) pc;
449 uint32_t str_len, maxlen;
450
451 if (unlikely(pc + sizeof(struct load_op)
452 > start_pc + bytecode->len)) {
453 ret = -ERANGE;
454 break;
455 }
456
457 maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
458 str_len = strnlen(insn->data, maxlen);
459 if (unlikely(str_len >= maxlen)) {
460 /* Final '\0' not found within range */
461 ret = -ERANGE;
462 }
463 break;
464 }
465
466 case FILTER_OP_LOAD_S64:
467 {
468 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
469 > start_pc + bytecode->len)) {
470 ret = -ERANGE;
471 }
472 break;
473 }
474
475 case FILTER_OP_CAST_TO_S64:
476 case FILTER_OP_CAST_NOP:
477 {
478 if (unlikely(pc + sizeof(struct cast_op)
479 > start_pc + bytecode->len)) {
480 ret = -ERANGE;
481 }
482 break;
483 }
484
485 /*
486 * Instructions for recursive traversal through composed types.
487 */
488 case FILTER_OP_GET_CONTEXT_ROOT:
489 case FILTER_OP_GET_APP_CONTEXT_ROOT:
490 case FILTER_OP_GET_PAYLOAD_ROOT:
491 case FILTER_OP_LOAD_FIELD:
492 case FILTER_OP_LOAD_FIELD_S8:
493 case FILTER_OP_LOAD_FIELD_S16:
494 case FILTER_OP_LOAD_FIELD_S32:
495 case FILTER_OP_LOAD_FIELD_S64:
496 case FILTER_OP_LOAD_FIELD_U8:
497 case FILTER_OP_LOAD_FIELD_U16:
498 case FILTER_OP_LOAD_FIELD_U32:
499 case FILTER_OP_LOAD_FIELD_U64:
500 case FILTER_OP_LOAD_FIELD_STRING:
501 case FILTER_OP_LOAD_FIELD_SEQUENCE:
502 case FILTER_OP_LOAD_FIELD_DOUBLE:
503 if (unlikely(pc + sizeof(struct load_op)
504 > start_pc + bytecode->len)) {
505 ret = -ERANGE;
506 }
507 break;
508
509 case FILTER_OP_GET_SYMBOL:
510 {
511 struct load_op *insn = (struct load_op *) pc;
512 struct get_symbol *sym = (struct get_symbol *) insn->data;
513
514 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
515 > start_pc + bytecode->len)) {
516 ret = -ERANGE;
break;	/* don't read *sym past the end of the bytecode */
517 }
518 ret = validate_get_symbol(bytecode, sym);
519 break;
520 }
521
522 case FILTER_OP_GET_SYMBOL_FIELD:
523 printk(KERN_WARNING "Unexpected get symbol field\n");
524 ret = -EINVAL;
525 break;
526
527 case FILTER_OP_GET_INDEX_U16:
528 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
529 > start_pc + bytecode->len)) {
530 ret = -ERANGE;
531 }
532 break;
533
534 case FILTER_OP_GET_INDEX_U64:
535 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
536 > start_pc + bytecode->len)) {
537 ret = -ERANGE;
538 }
539 break;
540 }
541
542 return ret;
543 }
544
545 static
546 unsigned long delete_all_nodes(struct mp_table *mp_table)
547 {
548 struct mp_node *mp_node;
549 struct hlist_node *tmp;
550 unsigned long nr_nodes = 0;
551 int i;
552
553 for (i = 0; i < MERGE_POINT_TABLE_SIZE; i++) {
554 struct hlist_head *head;
555
556 head = &mp_table->mp_head[i];
557 lttng_hlist_for_each_entry_safe(mp_node, tmp, head, node) {
558 kfree(mp_node);
559 nr_nodes++;
560 }
561 }
562 return nr_nodes;
563 }
564
565 /*
566 * Return value:
567 * >=0: success
568 * <0: error
569 */
570 static
571 int validate_instruction_context(struct bytecode_runtime *bytecode,
572 struct vstack *stack,
573 char *start_pc,
574 char *pc)
575 {
576 int ret = 0;
577 const filter_opcode_t opcode = *(filter_opcode_t *) pc;
578
579 switch (opcode) {
580 case FILTER_OP_UNKNOWN:
581 default:
582 {
583 printk(KERN_WARNING "unknown bytecode op %u\n",
584 (unsigned int) *(filter_opcode_t *) pc);
585 ret = -EINVAL;
586 goto end;
587 }
588
589 case FILTER_OP_RETURN:
590 case FILTER_OP_RETURN_S64:
591 {
592 goto end;
593 }
594
595 /* binary */
596 case FILTER_OP_MUL:
597 case FILTER_OP_DIV:
598 case FILTER_OP_MOD:
599 case FILTER_OP_PLUS:
600 case FILTER_OP_MINUS:
601 /* Floating point */
602 case FILTER_OP_EQ_DOUBLE:
603 case FILTER_OP_NE_DOUBLE:
604 case FILTER_OP_GT_DOUBLE:
605 case FILTER_OP_LT_DOUBLE:
606 case FILTER_OP_GE_DOUBLE:
607 case FILTER_OP_LE_DOUBLE:
608 case FILTER_OP_EQ_DOUBLE_S64:
609 case FILTER_OP_NE_DOUBLE_S64:
610 case FILTER_OP_GT_DOUBLE_S64:
611 case FILTER_OP_LT_DOUBLE_S64:
612 case FILTER_OP_GE_DOUBLE_S64:
613 case FILTER_OP_LE_DOUBLE_S64:
614 case FILTER_OP_EQ_S64_DOUBLE:
615 case FILTER_OP_NE_S64_DOUBLE:
616 case FILTER_OP_GT_S64_DOUBLE:
617 case FILTER_OP_LT_S64_DOUBLE:
618 case FILTER_OP_GE_S64_DOUBLE:
619 case FILTER_OP_LE_S64_DOUBLE:
620 case FILTER_OP_UNARY_PLUS_DOUBLE:
621 case FILTER_OP_UNARY_MINUS_DOUBLE:
622 case FILTER_OP_UNARY_NOT_DOUBLE:
623 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
624 case FILTER_OP_LOAD_DOUBLE:
625 case FILTER_OP_CAST_DOUBLE_TO_S64:
626 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
627 {
628 printk(KERN_WARNING "unsupported bytecode op %u\n",
629 (unsigned int) *(filter_opcode_t *) pc);
630 ret = -EINVAL;
631 goto end;
632 }
633
634 case FILTER_OP_EQ:
635 {
636 ret = bin_op_compare_check(stack, opcode, "==");
637 if (ret < 0)
638 goto end;
639 break;
640 }
641 case FILTER_OP_NE:
642 {
643 ret = bin_op_compare_check(stack, opcode, "!=");
644 if (ret < 0)
645 goto end;
646 break;
647 }
648 case FILTER_OP_GT:
649 {
650 ret = bin_op_compare_check(stack, opcode, ">");
651 if (ret < 0)
652 goto end;
653 break;
654 }
655 case FILTER_OP_LT:
656 {
657 ret = bin_op_compare_check(stack, opcode, "<");
658 if (ret < 0)
659 goto end;
660 break;
661 }
662 case FILTER_OP_GE:
663 {
664 ret = bin_op_compare_check(stack, opcode, ">=");
665 if (ret < 0)
666 goto end;
667 break;
668 }
669 case FILTER_OP_LE:
670 {
671 ret = bin_op_compare_check(stack, opcode, "<=");
672 if (ret < 0)
673 goto end;
674 break;
675 }
676
677 case FILTER_OP_EQ_STRING:
678 case FILTER_OP_NE_STRING:
679 case FILTER_OP_GT_STRING:
680 case FILTER_OP_LT_STRING:
681 case FILTER_OP_GE_STRING:
682 case FILTER_OP_LE_STRING:
683 {
684 if (!vstack_ax(stack) || !vstack_bx(stack)) {
685 printk(KERN_WARNING "Empty stack\n");
686 ret = -EINVAL;
687 goto end;
688 }
689 if (vstack_ax(stack)->type != REG_STRING
690 || vstack_bx(stack)->type != REG_STRING) {
691 printk(KERN_WARNING "Unexpected register type for string comparator\n");
692 ret = -EINVAL;
693 goto end;
694 }
695 break;
696 }
697
699 case FILTER_OP_EQ_STAR_GLOB_STRING:
700 case FILTER_OP_NE_STAR_GLOB_STRING:
701 {
702 if (!vstack_ax(stack) || !vstack_bx(stack)) {
703 printk(KERN_WARNING "Empty stack\n");
704 ret = -EINVAL;
705 goto end;
706 }
707 if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
708 && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
709 printk(KERN_WARNING "Unexpected register type for globbing pattern comparator\n");
710 ret = -EINVAL;
711 goto end;
712 }
713 break;
714 }
715
716 case FILTER_OP_EQ_S64:
717 case FILTER_OP_NE_S64:
718 case FILTER_OP_GT_S64:
719 case FILTER_OP_LT_S64:
720 case FILTER_OP_GE_S64:
721 case FILTER_OP_LE_S64:
722 {
723 if (!vstack_ax(stack) || !vstack_bx(stack)) {
724 printk(KERN_WARNING "Empty stack\n");
725 ret = -EINVAL;
726 goto end;
727 }
728 if (vstack_ax(stack)->type != REG_S64
729 || vstack_bx(stack)->type != REG_S64) {
730 printk(KERN_WARNING "Unexpected register type for s64 comparator\n");
731 ret = -EINVAL;
732 goto end;
733 }
734 break;
735 }
736
737 case FILTER_OP_BIT_RSHIFT:
738 ret = bin_op_bitwise_check(stack, opcode, ">>");
739 if (ret < 0)
740 goto end;
741 break;
742 case FILTER_OP_BIT_LSHIFT:
743 ret = bin_op_bitwise_check(stack, opcode, "<<");
744 if (ret < 0)
745 goto end;
746 break;
747 case FILTER_OP_BIT_AND:
748 ret = bin_op_bitwise_check(stack, opcode, "&");
749 if (ret < 0)
750 goto end;
751 break;
752 case FILTER_OP_BIT_OR:
753 ret = bin_op_bitwise_check(stack, opcode, "|");
754 if (ret < 0)
755 goto end;
756 break;
757 case FILTER_OP_BIT_XOR:
758 ret = bin_op_bitwise_check(stack, opcode, "^");
759 if (ret < 0)
760 goto end;
761 break;
762
763 /* unary */
764 case FILTER_OP_UNARY_PLUS:
765 case FILTER_OP_UNARY_MINUS:
766 case FILTER_OP_UNARY_NOT:
767 {
768 if (!vstack_ax(stack)) {
769 printk(KERN_WARNING "Empty stack\n");
770 ret = -EINVAL;
771 goto end;
772 }
773 switch (vstack_ax(stack)->type) {
774 default:
775 case REG_DOUBLE:
776 printk(KERN_WARNING "unknown register type\n");
777 ret = -EINVAL;
778 goto end;
779
780 case REG_STRING:
781 case REG_STAR_GLOB_STRING:
782 printk(KERN_WARNING "Unary op can only be applied to numeric or floating point registers\n");
783 ret = -EINVAL;
784 goto end;
785 case REG_S64:
786 case REG_TYPE_UNKNOWN:
787 break;
788 }
789 break;
790 }
791 case FILTER_OP_UNARY_BIT_NOT:
792 {
793 if (!vstack_ax(stack)) {
794 printk(KERN_WARNING "Empty stack\n");
795 ret = -EINVAL;
796 goto end;
797 }
798 switch (vstack_ax(stack)->type) {
799 default:
800 printk(KERN_WARNING "unknown register type\n");
801 ret = -EINVAL;
802 goto end;
803
804 case REG_STRING:
805 case REG_STAR_GLOB_STRING:
806 case REG_DOUBLE:
807 printk(KERN_WARNING "Unary bitwise op can only be applied to numeric registers\n");
808 ret = -EINVAL;
809 goto end;
810 case REG_S64:
811 break;
812 case REG_TYPE_UNKNOWN:
813 break;
814 }
815 break;
816 }
817
818 case FILTER_OP_UNARY_PLUS_S64:
819 case FILTER_OP_UNARY_MINUS_S64:
820 case FILTER_OP_UNARY_NOT_S64:
821 {
822 if (!vstack_ax(stack)) {
823 printk(KERN_WARNING "Empty stack\n");
824 ret = -EINVAL;
825 goto end;
826 }
827 if (vstack_ax(stack)->type != REG_S64) {
828 printk(KERN_WARNING "Invalid register type\n");
829 ret = -EINVAL;
830 goto end;
831 }
832 break;
833 }
834
835 /* logical */
836 case FILTER_OP_AND:
837 case FILTER_OP_OR:
838 {
839 struct logical_op *insn = (struct logical_op *) pc;
840
841 if (!vstack_ax(stack)) {
842 printk(KERN_WARNING "Empty stack\n");
843 ret = -EINVAL;
844 goto end;
845 }
846 if (vstack_ax(stack)->type != REG_S64) {
847 printk(KERN_WARNING "Logical comparator expects S64 register\n");
848 ret = -EINVAL;
849 goto end;
850 }
851
852 dbg_printk("Validate jumping to bytecode offset %u\n",
853 (unsigned int) insn->skip_offset);
854 if (unlikely(start_pc + insn->skip_offset <= pc)) {
855 printk(KERN_WARNING "Loops are not allowed in bytecode\n");
856 ret = -EINVAL;
857 goto end;
858 }
859 break;
860 }
861
862 /* load field ref */
863 case FILTER_OP_LOAD_FIELD_REF:
864 {
865 printk(KERN_WARNING "Unknown field ref type\n");
866 ret = -EINVAL;
867 goto end;
868 }
869 case FILTER_OP_LOAD_FIELD_REF_STRING:
870 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
871 case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
872 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
873 {
874 struct load_op *insn = (struct load_op *) pc;
875 struct field_ref *ref = (struct field_ref *) insn->data;
876
877 dbg_printk("Validate load field ref offset %u type string\n",
878 ref->offset);
879 break;
880 }
881 case FILTER_OP_LOAD_FIELD_REF_S64:
882 {
883 struct load_op *insn = (struct load_op *) pc;
884 struct field_ref *ref = (struct field_ref *) insn->data;
885
886 dbg_printk("Validate load field ref offset %u type s64\n",
887 ref->offset);
888 break;
889 }
890
891 /* load from immediate operand */
892 case FILTER_OP_LOAD_STRING:
893 case FILTER_OP_LOAD_STAR_GLOB_STRING:
894 {
895 break;
896 }
897
898 case FILTER_OP_LOAD_S64:
899 {
900 break;
901 }
902
903 case FILTER_OP_CAST_TO_S64:
904 {
905 struct cast_op *insn = (struct cast_op *) pc;
906
907 if (!vstack_ax(stack)) {
908 printk(KERN_WARNING "Empty stack\n");
909 ret = -EINVAL;
910 goto end;
911 }
912 switch (vstack_ax(stack)->type) {
913 default:
914 case REG_DOUBLE:
915 printk(KERN_WARNING "unknown register type\n");
916 ret = -EINVAL;
917 goto end;
918
919 case REG_STRING:
920 case REG_STAR_GLOB_STRING:
921 printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
922 ret = -EINVAL;
923 goto end;
924 case REG_S64:
925 break;
926 }
927 if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) {
928 if (vstack_ax(stack)->type != REG_DOUBLE) {
929 printk(KERN_WARNING "Cast expects double\n");
930 ret = -EINVAL;
931 goto end;
932 }
933 }
934 break;
935 }
936 case FILTER_OP_CAST_NOP:
937 {
938 break;
939 }
940
941 /* get context ref */
942 case FILTER_OP_GET_CONTEXT_REF:
943 {
944 printk(KERN_WARNING "Unknown get context ref type\n");
945 ret = -EINVAL;
946 goto end;
947 }
948 case FILTER_OP_GET_CONTEXT_REF_STRING:
949 {
950 struct load_op *insn = (struct load_op *) pc;
951 struct field_ref *ref = (struct field_ref *) insn->data;
952
953 dbg_printk("Validate get context ref offset %u type string\n",
954 ref->offset);
955 break;
956 }
957 case FILTER_OP_GET_CONTEXT_REF_S64:
958 {
959 struct load_op *insn = (struct load_op *) pc;
960 struct field_ref *ref = (struct field_ref *) insn->data;
961
962 dbg_printk("Validate get context ref offset %u type s64\n",
963 ref->offset);
964 break;
965 }
966
967 /*
968 * Instructions for recursive traversal through composed types.
969 */
970 case FILTER_OP_GET_CONTEXT_ROOT:
971 {
972 dbg_printk("Validate get context root\n");
973 break;
974 }
975 case FILTER_OP_GET_APP_CONTEXT_ROOT:
976 {
977 dbg_printk("Validate get app context root\n");
978 break;
979 }
980 case FILTER_OP_GET_PAYLOAD_ROOT:
981 {
982 dbg_printk("Validate get payload root\n");
983 break;
984 }
985 case FILTER_OP_LOAD_FIELD:
986 {
987 /*
988 * We tolerate that field type is unknown at validation,
989 * because we are performing the load specialization in
990 * a phase after validation.
991 */
992 dbg_printk("Validate load field\n");
993 break;
994 }
995 case FILTER_OP_LOAD_FIELD_S8:
996 {
997 dbg_printk("Validate load field s8\n");
998 break;
999 }
1000 case FILTER_OP_LOAD_FIELD_S16:
1001 {
1002 dbg_printk("Validate load field s16\n");
1003 break;
1004 }
1005 case FILTER_OP_LOAD_FIELD_S32:
1006 {
1007 dbg_printk("Validate load field s32\n");
1008 break;
1009 }
1010 case FILTER_OP_LOAD_FIELD_S64:
1011 {
1012 dbg_printk("Validate load field s64\n");
1013 break;
1014 }
1015 case FILTER_OP_LOAD_FIELD_U8:
1016 {
1017 dbg_printk("Validate load field u8\n");
1018 break;
1019 }
1020 case FILTER_OP_LOAD_FIELD_U16:
1021 {
1022 dbg_printk("Validate load field u16\n");
1023 break;
1024 }
1025 case FILTER_OP_LOAD_FIELD_U32:
1026 {
1027 dbg_printk("Validate load field u32\n");
1028 break;
1029 }
1030 case FILTER_OP_LOAD_FIELD_U64:
1031 {
1032 dbg_printk("Validate load field u64\n");
1033 break;
1034 }
1035 case FILTER_OP_LOAD_FIELD_STRING:
1036 {
1037 dbg_printk("Validate load field string\n");
1038 break;
1039 }
1040 case FILTER_OP_LOAD_FIELD_SEQUENCE:
1041 {
1042 dbg_printk("Validate load field sequence\n");
1043 break;
1044 }
1045 case FILTER_OP_LOAD_FIELD_DOUBLE:
1046 {
1047 dbg_printk("Validate load field double\n");
1048 break;
1049 }
1050
1051 case FILTER_OP_GET_SYMBOL:
1052 {
1053 struct load_op *insn = (struct load_op *) pc;
1054 struct get_symbol *sym = (struct get_symbol *) insn->data;
1055
1056 dbg_printk("Validate get symbol offset %u\n", sym->offset);
1057 break;
1058 }
1059
1060 case FILTER_OP_GET_SYMBOL_FIELD:
1061 {
1062 struct load_op *insn = (struct load_op *) pc;
1063 struct get_symbol *sym = (struct get_symbol *) insn->data;
1064
1065 dbg_printk("Validate get symbol field offset %u\n", sym->offset);
1066 break;
1067 }
1068
1069 case FILTER_OP_GET_INDEX_U16:
1070 {
1071 struct load_op *insn = (struct load_op *) pc;
1072 struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;
1073
1074 dbg_printk("Validate get index u16 index %u\n", get_index->index);
1075 break;
1076 }
1077
1078 case FILTER_OP_GET_INDEX_U64:
1079 {
1080 struct load_op *insn = (struct load_op *) pc;
1081 struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;
1082
1083 dbg_printk("Validate get index u64 index %llu\n",
1084 (unsigned long long) get_index->index);
1085 break;
1086 }
1087 }
1088 end:
1089 return ret;
1090 }
1091
1092 /*
1093 * Return value:
1094 * 0: success
1095 * <0: error
1096 */
1097 static
1098 int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
1099 struct mp_table *mp_table,
1100 struct vstack *stack,
1101 char *start_pc,
1102 char *pc)
1103 {
1104 int ret, found = 0;
1105 unsigned long target_pc = pc - start_pc;
1106 unsigned long hash;
1107 struct hlist_head *head;
1108 struct mp_node *mp_node;
1109
1110 /* Validate the context resulting from the previous instruction */
1111 ret = validate_instruction_context(bytecode, stack, start_pc, pc);
1112 if (ret < 0)
1113 return ret;
1114
1115 /* Validate merge points */
1116 hash = jhash_1word(target_pc, 0);
1117 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
1118 lttng_hlist_for_each_entry(mp_node, head, node) {
1119 if (lttng_hash_match(mp_node, target_pc)) {
1120 found = 1;
1121 break;
1122 }
1123 }
1124 if (found) {
1125 dbg_printk("Filter: validate merge point at offset %lu\n",
1126 target_pc);
1127 if (merge_points_compare(stack, &mp_node->stack)) {
1128 printk(KERN_WARNING "Merge points differ for offset %lu\n",
1129 target_pc);
1130 return -EINVAL;
1131 }
1132 /* Once validated, we can remove the merge point */
1133 dbg_printk("Filter: remove merge point at offset %lu\n",
1134 target_pc);
1135 hlist_del(&mp_node->node);
1136 }
1137 return 0;
1138 }
1139
1140 /*
1141 * Return value:
1142 * >0: going to next insn.
1143 * 0: success, stop iteration.
1144 * <0: error
1145 */
1146 static
1147 int exec_insn(struct bytecode_runtime *bytecode,
1148 struct mp_table *mp_table,
1149 struct vstack *stack,
1150 char **_next_pc,
1151 char *pc)
1152 {
1153 int ret = 1;
1154 char *next_pc = *_next_pc;
1155
1156 switch (*(filter_opcode_t *) pc) {
1157 case FILTER_OP_UNKNOWN:
1158 default:
1159 {
1160 printk(KERN_WARNING "unknown bytecode op %u\n",
1161 (unsigned int) *(filter_opcode_t *) pc);
1162 ret = -EINVAL;
1163 goto end;
1164 }
1165
1166 case FILTER_OP_RETURN:
1167 {
1168 if (!vstack_ax(stack)) {
1169 printk(KERN_WARNING "Empty stack\n");
1170 ret = -EINVAL;
1171 goto end;
1172 }
1173 switch (vstack_ax(stack)->type) {
1174 case REG_S64:
1175 case REG_TYPE_UNKNOWN:
1176 break;
1177 default:
1178 printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
1179 (int) vstack_ax(stack)->type);
1180 ret = -EINVAL;
1181 goto end;
1182 }
1183
1184 ret = 0;
1185 goto end;
1186 }
1187
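/*
 * FILTER_OP_RETURN_S64 requires a statically known S64 on top of the
 * stack: unlike the generic FILTER_OP_RETURN above, a dynamically
 * typed (REG_TYPE_UNKNOWN) result is rejected.
 */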
1188 case FILTER_OP_RETURN_S64:
1189 {
1190 if (!vstack_ax(stack)) {
1191 printk(KERN_WARNING "Empty stack\n");
1192 ret = -EINVAL;
1193 goto end;
1194 }
1195 switch (vstack_ax(stack)->type) {
1196 case REG_S64:
1197 break;
1198 default:
1199 case REG_TYPE_UNKNOWN:
1200 printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
1201 (int) vstack_ax(stack)->type);
1202 ret = -EINVAL;
1203 goto end;
1204 }
1205
1206 ret = 0;
1207 goto end;
1208 }
1209
1210 /* binary */
1211 case FILTER_OP_MUL:
1212 case FILTER_OP_DIV:
1213 case FILTER_OP_MOD:
1214 case FILTER_OP_PLUS:
1215 case FILTER_OP_MINUS:
1216 /* Floating point */
1217 case FILTER_OP_EQ_DOUBLE:
1218 case FILTER_OP_NE_DOUBLE:
1219 case FILTER_OP_GT_DOUBLE:
1220 case FILTER_OP_LT_DOUBLE:
1221 case FILTER_OP_GE_DOUBLE:
1222 case FILTER_OP_LE_DOUBLE:
1223 case FILTER_OP_EQ_DOUBLE_S64:
1224 case FILTER_OP_NE_DOUBLE_S64:
1225 case FILTER_OP_GT_DOUBLE_S64:
1226 case FILTER_OP_LT_DOUBLE_S64:
1227 case FILTER_OP_GE_DOUBLE_S64:
1228 case FILTER_OP_LE_DOUBLE_S64:
1229 case FILTER_OP_EQ_S64_DOUBLE:
1230 case FILTER_OP_NE_S64_DOUBLE:
1231 case FILTER_OP_GT_S64_DOUBLE:
1232 case FILTER_OP_LT_S64_DOUBLE:
1233 case FILTER_OP_GE_S64_DOUBLE:
1234 case FILTER_OP_LE_S64_DOUBLE:
1235 case FILTER_OP_UNARY_PLUS_DOUBLE:
1236 case FILTER_OP_UNARY_MINUS_DOUBLE:
1237 case FILTER_OP_UNARY_NOT_DOUBLE:
1238 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
1239 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
1240 case FILTER_OP_LOAD_DOUBLE:
1241 case FILTER_OP_CAST_DOUBLE_TO_S64:
1242 {
1243 printk(KERN_WARNING "unsupported bytecode op %u\n",
1244 (unsigned int) *(filter_opcode_t *) pc);
1245 ret = -EINVAL;
1246 goto end;
1247 }
1248
1249 case FILTER_OP_EQ:
1250 case FILTER_OP_NE:
1251 case FILTER_OP_GT:
1252 case FILTER_OP_LT:
1253 case FILTER_OP_GE:
1254 case FILTER_OP_LE:
1255 case FILTER_OP_EQ_STRING:
1256 case FILTER_OP_NE_STRING:
1257 case FILTER_OP_GT_STRING:
1258 case FILTER_OP_LT_STRING:
1259 case FILTER_OP_GE_STRING:
1260 case FILTER_OP_LE_STRING:
1261 case FILTER_OP_EQ_STAR_GLOB_STRING:
1262 case FILTER_OP_NE_STAR_GLOB_STRING:
1263 case FILTER_OP_EQ_S64:
1264 case FILTER_OP_NE_S64:
1265 case FILTER_OP_GT_S64:
1266 case FILTER_OP_LT_S64:
1267 case FILTER_OP_GE_S64:
1268 case FILTER_OP_LE_S64:
1269 case FILTER_OP_BIT_RSHIFT:
1270 case FILTER_OP_BIT_LSHIFT:
1271 case FILTER_OP_BIT_AND:
1272 case FILTER_OP_BIT_OR:
1273 case FILTER_OP_BIT_XOR:
1274 {
1275 /* Pop 2, push 1 */
1276 if (vstack_pop(stack)) {
1277 ret = -EINVAL;
1278 goto end;
1279 }
1280 if (!vstack_ax(stack)) {
1281 printk(KERN_WARNING "Empty stack\n");
1282 ret = -EINVAL;
1283 goto end;
1284 }
1285 switch (vstack_ax(stack)->type) {
1286 case REG_S64:
1287 case REG_DOUBLE:
1288 case REG_STRING:
1289 case REG_STAR_GLOB_STRING:
1290 case REG_TYPE_UNKNOWN:
1291 break;
1292 default:
1293 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1294 (int) vstack_ax(stack)->type);
1295 ret = -EINVAL;
1296 goto end;
1297 }
1298
1299 vstack_ax(stack)->type = REG_S64;
1300 next_pc += sizeof(struct binary_op);
1301 break;
1302 }
1303
1304 /* unary */
1305 case FILTER_OP_UNARY_PLUS:
1306 case FILTER_OP_UNARY_MINUS:
1307 {
1308 /* Pop 1, push 1 */
1309 if (!vstack_ax(stack)) {
1310 printk(KERN_WARNING "Empty stack\n");
1311 ret = -EINVAL;
1312 goto end;
1313 }
1314 switch (vstack_ax(stack)->type) {
1315 case REG_S64:
1316 case REG_TYPE_UNKNOWN:
1317 break;
1318 default:
1319 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1320 (int) vstack_ax(stack)->type);
1321 ret = -EINVAL;
1322 goto end;
1323 }
1324
1325 vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
1326 next_pc += sizeof(struct unary_op);
1327 break;
1328 }
1329
1330 case FILTER_OP_UNARY_PLUS_S64:
1331 case FILTER_OP_UNARY_MINUS_S64:
1332 case FILTER_OP_UNARY_NOT_S64:
1333 {
1334 /* Pop 1, push 1 */
1335 if (!vstack_ax(stack)) {
1336 printk(KERN_WARNING "Empty stack\n");
1337 ret = -EINVAL;
1338 goto end;
1339 }
1340 switch (vstack_ax(stack)->type) {
1341 case REG_S64:
1342 break;
1343 default:
1344 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1345 (int) vstack_ax(stack)->type);
1346 ret = -EINVAL;
1347 goto end;
1348 }
1349
1350 vstack_ax(stack)->type = REG_S64;
1351 next_pc += sizeof(struct unary_op);
1352 break;
1353 }
1354
1355 case FILTER_OP_UNARY_NOT:
1356 {
1357 /* Pop 1, push 1 */
1358 if (!vstack_ax(stack)) {
1359 printk(KERN_WARNING "Empty stack\n");
1360 ret = -EINVAL;
1361 goto end;
1362 }
1363 switch (vstack_ax(stack)->type) {
1364 case REG_S64:
1365 case REG_TYPE_UNKNOWN:
1366 break;
1367 default:
1368 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1369 (int) vstack_ax(stack)->type);
1370 ret = -EINVAL;
1371 goto end;
1372 }
1373
1374 vstack_ax(stack)->type = REG_S64;
1375 next_pc += sizeof(struct unary_op);
1376 break;
1377 }
1378
1379 case FILTER_OP_UNARY_BIT_NOT:
1380 {
1381 /* Pop 1, push 1 */
1382 if (!vstack_ax(stack)) {
1383 printk(KERN_WARNING "Empty stack\n");
1384 ret = -EINVAL;
1385 goto end;
1386 }
1387 switch (vstack_ax(stack)->type) {
1388 case REG_S64:
1389 case REG_TYPE_UNKNOWN:
1390 break;
1391 case REG_DOUBLE:
1392 default:
1393 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1394 (int) vstack_ax(stack)->type);
1395 ret = -EINVAL;
1396 goto end;
1397 }
1398
1399 vstack_ax(stack)->type = REG_S64;
1400 next_pc += sizeof(struct unary_op);
1401 break;
1402 }
1403
1404 /* logical */
1405 case FILTER_OP_AND:
1406 case FILTER_OP_OR:
1407 {
1408 struct logical_op *insn = (struct logical_op *) pc;
1409 int merge_ret;
1410
1411 /* Add merge point to table */
1412 merge_ret = merge_point_add_check(mp_table,
1413 insn->skip_offset, stack);
1414 if (merge_ret) {
1415 ret = merge_ret;
1416 goto end;
1417 }
1418
1419 if (!vstack_ax(stack)) {
1420 printk(KERN_WARNING "Empty stack\n");
1421 ret = -EINVAL;
1422 goto end;
1423 }
1424 /* There is always a cast-to-s64 operation before an and/or op. */
1425 switch (vstack_ax(stack)->type) {
1426 case REG_S64:
1427 break;
1428 default:
1429 printk(KERN_WARNING "Incorrect register type %d for operation\n",
1430 (int) vstack_ax(stack)->type);
1431 ret = -EINVAL;
1432 goto end;
1433 }
1434
1435 /* Continue to next instruction */
1436 /* Pop 1 when jump not taken */
1437 if (vstack_pop(stack)) {
1438 ret = -EINVAL;
1439 goto end;
1440 }
1441 next_pc += sizeof(struct logical_op);
1442 break;
1443 }
1444
1445 /* load field ref */
1446 case FILTER_OP_LOAD_FIELD_REF:
1447 {
1448 printk(KERN_WARNING "Unknown field ref type\n");
1449 ret = -EINVAL;
1450 goto end;
1451 }
1452 /* get context ref */
1453 case FILTER_OP_GET_CONTEXT_REF:
1454 {
1455 printk(KERN_WARNING "Unknown get context ref type\n");
1456 ret = -EINVAL;
1457 goto end;
1458 }
1459 case FILTER_OP_LOAD_FIELD_REF_STRING:
1460 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
1461 case FILTER_OP_GET_CONTEXT_REF_STRING:
1462 case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
1463 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
1464 {
1465 if (vstack_push(stack)) {
1466 ret = -EINVAL;
1467 goto end;
1468 }
1469 vstack_ax(stack)->type = REG_STRING;
1470 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1471 break;
1472 }
1473 case FILTER_OP_LOAD_FIELD_REF_S64:
1474 case FILTER_OP_GET_CONTEXT_REF_S64:
1475 {
1476 if (vstack_push(stack)) {
1477 ret = -EINVAL;
1478 goto end;
1479 }
1480 vstack_ax(stack)->type = REG_S64;
1481 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1482 break;
1483 }
1484
1485 /* load from immediate operand */
1486 case FILTER_OP_LOAD_STRING:
1487 {
1488 struct load_op *insn = (struct load_op *) pc;
1489
1490 if (vstack_push(stack)) {
1491 ret = -EINVAL;
1492 goto end;
1493 }
1494 vstack_ax(stack)->type = REG_STRING;
1495 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1496 break;
1497 }
1498
1499 case FILTER_OP_LOAD_STAR_GLOB_STRING:
1500 {
1501 struct load_op *insn = (struct load_op *) pc;
1502
1503 if (vstack_push(stack)) {
1504 ret = -EINVAL;
1505 goto end;
1506 }
1507 vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
1508 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1509 break;
1510 }
1511
1512 case FILTER_OP_LOAD_S64:
1513 {
1514 if (vstack_push(stack)) {
1515 ret = -EINVAL;
1516 goto end;
1517 }
1518 vstack_ax(stack)->type = REG_S64;
1519 next_pc += sizeof(struct load_op)
1520 + sizeof(struct literal_numeric);
1521 break;
1522 }
1523
1524 case FILTER_OP_CAST_TO_S64:
1525 {
1526 /* Pop 1, push 1 */
1527 if (!vstack_ax(stack)) {
1528 printk(KERN_WARNING "Empty stack\n");
1529 ret = -EINVAL;
1530 goto end;
1531 }
1532 switch (vstack_ax(stack)->type) {
1533 case REG_S64:
1534 case REG_DOUBLE:
1535 case REG_TYPE_UNKNOWN:
1536 break;
1537 default:
1538 printk(KERN_WARNING "Incorrect register type %d for cast\n",
1539 (int) vstack_ax(stack)->type);
1540 ret = -EINVAL;
1541 goto end;
1542 }
1543 vstack_ax(stack)->type = REG_S64;
1544 next_pc += sizeof(struct cast_op);
1545 break;
1546 }
1547 case FILTER_OP_CAST_NOP:
1548 {
1549 next_pc += sizeof(struct cast_op);
1550 break;
1551 }
1552
1553 /*
1554 * Instructions for recursive traversal through composed types.
1555 */
1556 case FILTER_OP_GET_CONTEXT_ROOT:
1557 case FILTER_OP_GET_APP_CONTEXT_ROOT:
1558 case FILTER_OP_GET_PAYLOAD_ROOT:
1559 {
1560 if (vstack_push(stack)) {
1561 ret = -EINVAL;
1562 goto end;
1563 }
1564 vstack_ax(stack)->type = REG_PTR;
1565 next_pc += sizeof(struct load_op);
1566 break;
1567 }
1568
1569 case FILTER_OP_LOAD_FIELD:
1570 {
1571 /* Pop 1, push 1 */
1572 if (!vstack_ax(stack)) {
1573 printk(KERN_WARNING "Empty stack\n");
1574 ret = -EINVAL;
1575 goto end;
1576 }
1577 if (vstack_ax(stack)->type != REG_PTR) {
1578 printk(KERN_WARNING "Expecting pointer on top of stack\n");
1579 ret = -EINVAL;
1580 goto end;
1581 }
1582 vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
1583 next_pc += sizeof(struct load_op);
1584 break;
1585 }
1586
1587 case FILTER_OP_LOAD_FIELD_S8:
1588 case FILTER_OP_LOAD_FIELD_S16:
1589 case FILTER_OP_LOAD_FIELD_S32:
1590 case FILTER_OP_LOAD_FIELD_S64:
1591 case FILTER_OP_LOAD_FIELD_U8:
1592 case FILTER_OP_LOAD_FIELD_U16:
1593 case FILTER_OP_LOAD_FIELD_U32:
1594 case FILTER_OP_LOAD_FIELD_U64:
1595 {
1596 /* Pop 1, push 1 */
1597 if (!vstack_ax(stack)) {
1598 printk(KERN_WARNING "Empty stack\n");
1599 ret = -EINVAL;
1600 goto end;
1601 }
1602 if (vstack_ax(stack)->type != REG_PTR) {
1603 printk(KERN_WARNING "Expecting pointer on top of stack\n");
1604 ret = -EINVAL;
1605 goto end;
1606 }
1607 vstack_ax(stack)->type = REG_S64;
1608 next_pc += sizeof(struct load_op);
1609 break;
1610 }
1611
1612 case FILTER_OP_LOAD_FIELD_STRING:
1613 case FILTER_OP_LOAD_FIELD_SEQUENCE:
1614 {
1615 /* Pop 1, push 1 */
1616 if (!vstack_ax(stack)) {
1617 printk(KERN_WARNING "Empty stack\n");
1618 ret = -EINVAL;
1619 goto end;
1620 }
1621 if (vstack_ax(stack)->type != REG_PTR) {
1622 printk(KERN_WARNING "Expecting pointer on top of stack\n");
1623 ret = -EINVAL;
1624 goto end;
1625 }
1626 vstack_ax(stack)->type = REG_STRING;
1627 next_pc += sizeof(struct load_op);
1628 break;
1629 }
1630
1631 case FILTER_OP_LOAD_FIELD_DOUBLE:
1632 {
1633 /* Pop 1, push 1 */
1634 if (!vstack_ax(stack)) {
1635 printk(KERN_WARNING "Empty stack\n");
1636 ret = -EINVAL;
1637 goto end;
1638 }
1639 if (vstack_ax(stack)->type != REG_PTR) {
1640 printk(KERN_WARNING "Expecting pointer on top of stack\n");
1641 ret = -EINVAL;
1642 goto end;
1643 }
1644 vstack_ax(stack)->type = REG_DOUBLE;
1645 next_pc += sizeof(struct load_op);
1646 break;
1647 }
1648
1649 case FILTER_OP_GET_SYMBOL:
1650 case FILTER_OP_GET_SYMBOL_FIELD:
1651 {
1652 /* Pop 1, push 1 */
1653 if (!vstack_ax(stack)) {
1654 printk(KERN_WARNING "Empty stack\n");
1655 ret = -EINVAL;
1656 goto end;
1657 }
1658 if (vstack_ax(stack)->type != REG_PTR) {
1659 printk(KERN_WARNING "Expecting pointer on top of stack\n");
1660 ret = -EINVAL;
1661 goto end;
1662 }
1663 next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
1664 break;
1665 }
1666
1667 case FILTER_OP_GET_INDEX_U16:
1668 {
1669 /* Pop 1, push 1 */
1670 if (!vstack_ax(stack)) {
1671 printk(KERN_WARNING "Empty stack\n");
1672 ret = -EINVAL;
1673 goto end;
1674 }
1675 if (vstack_ax(stack)->type != REG_PTR) {
1676 printk(KERN_WARNING "Expecting pointer on top of stack\n");
1677 ret = -EINVAL;
1678 goto end;
1679 }
1680 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
1681 break;
1682 }
1683
1684 case FILTER_OP_GET_INDEX_U64:
1685 {
1686 /* Pop 1, push 1 */
1687 if (!vstack_ax(stack)) {
1688 printk(KERN_WARNING "Empty stack\n");
1689 ret = -EINVAL;
1690 goto end;
1691 }
1692 if (vstack_ax(stack)->type != REG_PTR) {
1693 printk(KERN_WARNING "Expecting pointer on top of stack\n");
1694 ret = -EINVAL;
1695 goto end;
1696 }
1697 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
1698 break;
1699 }
1700
1701 }
1702 end:
1703 *_next_pc = next_pc;
1704 return ret;
1705 }
1706
1707 /*
1708 * Never called concurrently (hash seed is shared).
1709 */
1710 int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
1711 {
1712 struct mp_table *mp_table;
1713 char *pc, *next_pc, *start_pc;
1714 int ret = -EINVAL;
1715 struct vstack stack;
1716
1717 vstack_init(&stack);
1718
1719 mp_table = kzalloc(sizeof(*mp_table), GFP_KERNEL);
1720 if (!mp_table) {
1721 printk(KERN_WARNING "Error allocating hash table for bytecode validation\n");
1722 return -ENOMEM;
1723 }
1724 start_pc = &bytecode->code[0];
1725 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
1726 pc = next_pc) {
1727 ret = bytecode_validate_overflow(bytecode, start_pc, pc);
1728 if (ret != 0) {
1729 if (ret == -ERANGE)
1730 printk(KERN_WARNING "filter bytecode overflow\n");
1731 goto end;
1732 }
1733 dbg_printk("Validating op %s (%u)\n",
1734 lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc),
1735 (unsigned int) *(filter_opcode_t *) pc);
1736
1737 /*
1738 * For each instruction, validate the current context
1739 * (traversal of entire execution flow), and validate
1740 * all merge points targeting this instruction.
1741 */
1742 ret = validate_instruction_all_contexts(bytecode, mp_table,
1743 &stack, start_pc, pc);
1744 if (ret)
1745 goto end;
1746 ret = exec_insn(bytecode, mp_table, &stack, &next_pc, pc);
1747 if (ret <= 0)
1748 goto end;
1749 }
1750 end:
1751 if (delete_all_nodes(mp_table)) {
1752 if (!ret) {
1753 printk(KERN_WARNING "Unexpected merge points\n");
1754 ret = -EINVAL;
1755 }
1756 }
1757 kfree(mp_table);
1758 return ret;
1759 }
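/*
 * Illustrative sketch (assumption, not part of this file): how the
 * bytecode linking path is expected to use the validator, running it
 * on the freshly linked runtime before the load specialization pass.
 * Only lttng_filter_validate_bytecode() comes from this file; the
 * surrounding names are hypothetical.
 *
 *	ret = lttng_filter_validate_bytecode(runtime);
 *	if (ret)
 *		goto link_error;
 *	(load specialization then runs only on validated bytecode)
 */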