Filter: add lshift, rshift, bit not ops
[lttng-modules.git] / lttng-filter-validator.c
1 /*
2 * lttng-filter-validator.c
3 *
4 * LTTng modules filter bytecode validator.
5 *
6 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 * SOFTWARE.
25 */
26
27 #include <linux/types.h>
28 #include <linux/jhash.h>
29 #include <linux/slab.h>
30
31 #include <wrapper/list.h>
32 #include <lttng-filter.h>
33
34 #define MERGE_POINT_TABLE_BITS 7
35 #define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)
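/* 128 (1U << 7) buckets for the pending merge point hash table. */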
36
37 /* merge point table node */
38 struct mp_node {
39 struct hlist_node node;
40
41 /* Context at merge point */
42 struct vstack stack;
43 unsigned long target_pc;
44 };
45
46 struct mp_table {
47 struct hlist_head mp_head[MERGE_POINT_TABLE_SIZE];
48 };
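
/*
 * A merge point records the virtual stack expected at the target pc of a
 * logical and/or short-circuit jump.  Pending merge points are kept in this
 * hash table, keyed by target pc, until the target instruction is validated.
 */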
49
50 static
51 int lttng_hash_match(struct mp_node *mp_node, unsigned long key_pc)
52 {
53 if (mp_node->target_pc == key_pc)
54 return 1;
55 else
56 return 0;
57 }
58
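/*
 * Return 0 if both stacks have the same depth and the same register type in
 * every slot, nonzero otherwise.
 */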
59 static
60 int merge_points_compare(const struct vstack *stacka,
61 const struct vstack *stackb)
62 {
63 int i, len;
64
65 if (stacka->top != stackb->top)
66 return 1;
67 len = stacka->top + 1;
68 WARN_ON_ONCE(len < 0);
69 for (i = 0; i < len; i++) {
70 if (stacka->e[i].type != stackb->e[i].type)
71 return 1;
72 }
73 return 0;
74 }
75
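/*
 * Register a merge point for target_pc.  If one already exists for that pc,
 * the previously recorded stack must match the current one (otherwise
 * -EINVAL); a new entry is added to the hash table only when none exists.
 */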
76 static
77 int merge_point_add_check(struct mp_table *mp_table, unsigned long target_pc,
78 const struct vstack *stack)
79 {
80 struct mp_node *mp_node;
81 unsigned long hash = jhash_1word(target_pc, 0);
82 struct hlist_head *head;
83 struct mp_node *lookup_node;
84 int found = 0;
85
86 dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
87 target_pc, hash);
88 mp_node = kzalloc(sizeof(struct mp_node), GFP_KERNEL);
89 if (!mp_node)
90 return -ENOMEM;
91 mp_node->target_pc = target_pc;
92 memcpy(&mp_node->stack, stack, sizeof(mp_node->stack));
93
94 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
95 lttng_hlist_for_each_entry(lookup_node, head, node) {
96 if (lttng_hash_match(lookup_node, target_pc)) {
97 found = 1;
98 break;
99 }
100 }
101 if (found) {
102 /* Key already present */
103 dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
104 target_pc, hash);
105 kfree(mp_node);
106 if (merge_points_compare(stack, &lookup_node->stack)) {
107 printk(KERN_WARNING "Merge points differ for offset %lu\n",
108 target_pc);
109 return -EINVAL;
110 }
111 } else {
112 hlist_add_head(&mp_node->node, head);
113 }
114 return 0;
115 }
116
117 /*
118 * Binary comparators use top of stack and top of stack -1.
119 */
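/*
 * Accepted operand pairs, as encoded in the switch below: string/string for
 * every comparator, string vs. star-glob string only for == and !=,
 * s64/s64, and any pair involving a still-unknown type, which is reported
 * as dynamic typing (return 1) rather than rejected.
 */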
120 static
121 int bin_op_compare_check(struct vstack *stack, const filter_opcode_t opcode,
122 const char *str)
123 {
124 if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
125 goto error_empty;
126
127 switch (vstack_ax(stack)->type) {
128 default:
129 case REG_DOUBLE:
130 goto error_type;
131
132 case REG_STRING:
133 switch (vstack_bx(stack)->type) {
134 default:
135 case REG_DOUBLE:
136 goto error_type;
137 case REG_TYPE_UNKNOWN:
138 goto unknown;
139 case REG_STRING:
140 break;
141 case REG_STAR_GLOB_STRING:
142 if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
143 goto error_mismatch;
144 }
145 break;
146 case REG_S64:
147 goto error_mismatch;
148 }
149 break;
150 case REG_STAR_GLOB_STRING:
151 switch (vstack_bx(stack)->type) {
152 default:
153 case REG_DOUBLE:
154 goto error_type;
155 case REG_TYPE_UNKNOWN:
156 goto unknown;
157 case REG_STRING:
158 if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
159 goto error_mismatch;
160 }
161 break;
162 case REG_STAR_GLOB_STRING:
163 case REG_S64:
164 goto error_mismatch;
165 }
166 break;
167 case REG_S64:
168 switch (vstack_bx(stack)->type) {
169 default:
170 case REG_DOUBLE:
171 goto error_type;
172 case REG_TYPE_UNKNOWN:
173 goto unknown;
174 case REG_STRING:
175 case REG_STAR_GLOB_STRING:
176 goto error_mismatch;
177 case REG_S64:
178 break;
179 }
180 break;
181 case REG_TYPE_UNKNOWN:
182 switch (vstack_bx(stack)->type) {
183 default:
184 case REG_DOUBLE:
185 goto error_type;
186 case REG_TYPE_UNKNOWN:
187 case REG_STRING:
188 case REG_STAR_GLOB_STRING:
189 case REG_S64:
190 goto unknown;
191 }
192 break;
193 }
194 return 0;
195
196 unknown:
197 return 1;
198
199 error_empty:
200 printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
201 return -EINVAL;
202
203 error_mismatch:
204 printk(KERN_WARNING "type mismatch for '%s' binary operator\n", str);
205 return -EINVAL;
206
207 error_type:
208 printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
209 return -EINVAL;
210 }
211
212 /*
213 * Binary bitwise operators use top of stack and top of stack -1.
214 * Return 0 if typing is known to match, 1 if typing is dynamic
215 * (unknown), negative error value on error.
216 */
217 static
218 int bin_op_bitwise_check(struct vstack *stack, filter_opcode_t opcode,
219 const char *str)
220 {
221 if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
222 goto error_empty;
223
224 switch (vstack_ax(stack)->type) {
225 default:
226 case REG_DOUBLE:
227 goto error_type;
228
229 case REG_TYPE_UNKNOWN:
230 switch (vstack_bx(stack)->type) {
231 default:
232 case REG_DOUBLE:
233 goto error_type;
234 case REG_TYPE_UNKNOWN:
235 case REG_STRING:
236 case REG_STAR_GLOB_STRING:
237 case REG_S64:
238 goto unknown;
239 }
240 break;
241 case REG_S64:
242 switch (vstack_bx(stack)->type) {
243 default:
244 case REG_DOUBLE:
245 goto error_type;
246 case REG_TYPE_UNKNOWN:
247 goto unknown;
248 case REG_S64:
249 break;
250 }
251 break;
252 }
253 return 0;
254
255 unknown:
256 return 1;
257
258 error_empty:
259 printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
260 return -EINVAL;
261
262 error_type:
263 printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
264 return -EINVAL;
265 }
266
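/*
 * Check that a get_symbol operand falls within the data that follows the
 * bytecode's reloc_offset and that the symbol name it designates is
 * NUL-terminated before the end of the bytecode.
 */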
267 static
268 int validate_get_symbol(struct bytecode_runtime *bytecode,
269 const struct get_symbol *sym)
270 {
271 const char *str, *str_limit;
272 size_t len_limit;
273
274 if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
275 return -EINVAL;
276
277 str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
278 str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
279 len_limit = str_limit - str;
280 if (strnlen(str, len_limit) == len_limit)
281 return -EINVAL;
282 return 0;
283 }
284
285 /*
286 * Validate bytecode range overflow within the validation pass.
287 * Called for each instruction encountered.
288 */
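/*
 * Each opcode must leave room for its fixed-size operand structure (and, for
 * immediate strings, for the terminating NUL) within bytecode->len.
 */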
289 static
290 int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
291 char *start_pc, char *pc)
292 {
293 int ret = 0;
294
295 switch (*(filter_opcode_t *) pc) {
296 case FILTER_OP_UNKNOWN:
297 default:
298 {
299 printk(KERN_WARNING "unknown bytecode op %u\n",
300 (unsigned int) *(filter_opcode_t *) pc);
301 ret = -EINVAL;
302 break;
303 }
304
305 case FILTER_OP_RETURN:
306 {
307 if (unlikely(pc + sizeof(struct return_op)
308 > start_pc + bytecode->len)) {
309 ret = -ERANGE;
310 }
311 break;
312 }
313
314 /* binary */
315 case FILTER_OP_MUL:
316 case FILTER_OP_DIV:
317 case FILTER_OP_MOD:
318 case FILTER_OP_PLUS:
319 case FILTER_OP_MINUS:
320 case FILTER_OP_EQ_DOUBLE:
321 case FILTER_OP_NE_DOUBLE:
322 case FILTER_OP_GT_DOUBLE:
323 case FILTER_OP_LT_DOUBLE:
324 case FILTER_OP_GE_DOUBLE:
325 case FILTER_OP_LE_DOUBLE:
326 /* Floating point */
327 case FILTER_OP_EQ_DOUBLE_S64:
328 case FILTER_OP_NE_DOUBLE_S64:
329 case FILTER_OP_GT_DOUBLE_S64:
330 case FILTER_OP_LT_DOUBLE_S64:
331 case FILTER_OP_GE_DOUBLE_S64:
332 case FILTER_OP_LE_DOUBLE_S64:
333 case FILTER_OP_EQ_S64_DOUBLE:
334 case FILTER_OP_NE_S64_DOUBLE:
335 case FILTER_OP_GT_S64_DOUBLE:
336 case FILTER_OP_LT_S64_DOUBLE:
337 case FILTER_OP_GE_S64_DOUBLE:
338 case FILTER_OP_LE_S64_DOUBLE:
339 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
340 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
341 case FILTER_OP_LOAD_DOUBLE:
342 case FILTER_OP_CAST_DOUBLE_TO_S64:
343 case FILTER_OP_UNARY_PLUS_DOUBLE:
344 case FILTER_OP_UNARY_MINUS_DOUBLE:
345 case FILTER_OP_UNARY_NOT_DOUBLE:
346 {
347 printk(KERN_WARNING "unsupported bytecode op %u\n",
348 (unsigned int) *(filter_opcode_t *) pc);
349 ret = -EINVAL;
350 break;
351 }
352
353 case FILTER_OP_EQ:
354 case FILTER_OP_NE:
355 case FILTER_OP_GT:
356 case FILTER_OP_LT:
357 case FILTER_OP_GE:
358 case FILTER_OP_LE:
359 case FILTER_OP_EQ_STRING:
360 case FILTER_OP_NE_STRING:
361 case FILTER_OP_GT_STRING:
362 case FILTER_OP_LT_STRING:
363 case FILTER_OP_GE_STRING:
364 case FILTER_OP_LE_STRING:
365 case FILTER_OP_EQ_STAR_GLOB_STRING:
366 case FILTER_OP_NE_STAR_GLOB_STRING:
367 case FILTER_OP_EQ_S64:
368 case FILTER_OP_NE_S64:
369 case FILTER_OP_GT_S64:
370 case FILTER_OP_LT_S64:
371 case FILTER_OP_GE_S64:
372 case FILTER_OP_LE_S64:
373 case FILTER_OP_BIT_RSHIFT:
374 case FILTER_OP_BIT_LSHIFT:
375 case FILTER_OP_BIT_AND:
376 case FILTER_OP_BIT_OR:
377 case FILTER_OP_BIT_XOR:
378 {
379 if (unlikely(pc + sizeof(struct binary_op)
380 > start_pc + bytecode->len)) {
381 ret = -ERANGE;
382 }
383 break;
384 }
385
386 /* unary */
387 case FILTER_OP_UNARY_PLUS:
388 case FILTER_OP_UNARY_MINUS:
389 case FILTER_OP_UNARY_NOT:
390 case FILTER_OP_UNARY_PLUS_S64:
391 case FILTER_OP_UNARY_MINUS_S64:
392 case FILTER_OP_UNARY_NOT_S64:
393 case FILTER_OP_UNARY_BIT_NOT:
394 {
395 if (unlikely(pc + sizeof(struct unary_op)
396 > start_pc + bytecode->len)) {
397 ret = -ERANGE;
398 }
399 break;
400 }
401
402 /* logical */
403 case FILTER_OP_AND:
404 case FILTER_OP_OR:
405 {
406 if (unlikely(pc + sizeof(struct logical_op)
407 > start_pc + bytecode->len)) {
408 ret = -ERANGE;
409 }
410 break;
411 }
412
413 /* load field ref */
414 case FILTER_OP_LOAD_FIELD_REF:
415 {
416 printk(KERN_WARNING "Unknown field ref type\n");
417 ret = -EINVAL;
418 break;
419 }
420
421 /* get context ref */
422 case FILTER_OP_GET_CONTEXT_REF:
423 {
424 printk(KERN_WARNING "Unknown field ref type\n");
425 ret = -EINVAL;
426 break;
427 }
428 case FILTER_OP_LOAD_FIELD_REF_STRING:
429 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
430 case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
431 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
432 case FILTER_OP_LOAD_FIELD_REF_S64:
433 case FILTER_OP_GET_CONTEXT_REF_STRING:
434 case FILTER_OP_GET_CONTEXT_REF_S64:
435 {
436 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
437 > start_pc + bytecode->len)) {
438 ret = -ERANGE;
439 }
440 break;
441 }
442
443 /* load from immediate operand */
444 case FILTER_OP_LOAD_STRING:
445 case FILTER_OP_LOAD_STAR_GLOB_STRING:
446 {
447 struct load_op *insn = (struct load_op *) pc;
448 uint32_t str_len, maxlen;
449
450 if (unlikely(pc + sizeof(struct load_op)
451 > start_pc + bytecode->len)) {
452 ret = -ERANGE;
453 break;
454 }
455
456 maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
457 str_len = strnlen(insn->data, maxlen);
458 if (unlikely(str_len >= maxlen)) {
459 /* Final '\0' not found within range */
460 ret = -ERANGE;
461 }
462 break;
463 }
464
465 case FILTER_OP_LOAD_S64:
466 {
467 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
468 > start_pc + bytecode->len)) {
469 ret = -ERANGE;
470 }
471 break;
472 }
473
474 case FILTER_OP_CAST_TO_S64:
475 case FILTER_OP_CAST_NOP:
476 {
477 if (unlikely(pc + sizeof(struct cast_op)
478 > start_pc + bytecode->len)) {
479 ret = -ERANGE;
480 }
481 break;
482 }
483
484 /*
485 * Instructions for recursive traversal through composed types.
486 */
487 case FILTER_OP_GET_CONTEXT_ROOT:
488 case FILTER_OP_GET_APP_CONTEXT_ROOT:
489 case FILTER_OP_GET_PAYLOAD_ROOT:
490 case FILTER_OP_LOAD_FIELD:
491 case FILTER_OP_LOAD_FIELD_S8:
492 case FILTER_OP_LOAD_FIELD_S16:
493 case FILTER_OP_LOAD_FIELD_S32:
494 case FILTER_OP_LOAD_FIELD_S64:
495 case FILTER_OP_LOAD_FIELD_U8:
496 case FILTER_OP_LOAD_FIELD_U16:
497 case FILTER_OP_LOAD_FIELD_U32:
498 case FILTER_OP_LOAD_FIELD_U64:
499 case FILTER_OP_LOAD_FIELD_STRING:
500 case FILTER_OP_LOAD_FIELD_SEQUENCE:
501 case FILTER_OP_LOAD_FIELD_DOUBLE:
502 if (unlikely(pc + sizeof(struct load_op)
503 > start_pc + bytecode->len)) {
504 ret = -ERANGE;
505 }
506 break;
507
508 case FILTER_OP_GET_SYMBOL:
509 {
510 struct load_op *insn = (struct load_op *) pc;
511 struct get_symbol *sym = (struct get_symbol *) insn->data;
512
513 		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
514 				> start_pc + bytecode->len)) {
515 			ret = -ERANGE;
			break;
516 		}
517 		ret = validate_get_symbol(bytecode, sym);
518 break;
519 }
520
521 case FILTER_OP_GET_SYMBOL_FIELD:
522 printk(KERN_WARNING "Unexpected get symbol field\n");
523 ret = -EINVAL;
524 break;
525
526 case FILTER_OP_GET_INDEX_U16:
527 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
528 > start_pc + bytecode->len)) {
529 ret = -ERANGE;
530 }
531 break;
532
533 case FILTER_OP_GET_INDEX_U64:
534 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
535 > start_pc + bytecode->len)) {
536 ret = -ERANGE;
537 }
538 break;
539 }
540
541 return ret;
542 }
543
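/*
 * Free every merge point left in the table and return how many were freed.
 * A nonzero count after a successful validation pass means a merge point was
 * never reached; the caller treats that as an error.
 */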
544 static
545 unsigned long delete_all_nodes(struct mp_table *mp_table)
546 {
547 struct mp_node *mp_node;
548 struct hlist_node *tmp;
549 unsigned long nr_nodes = 0;
550 int i;
551
552 for (i = 0; i < MERGE_POINT_TABLE_SIZE; i++) {
553 struct hlist_head *head;
554
555 head = &mp_table->mp_head[i];
556 lttng_hlist_for_each_entry_safe(mp_node, tmp, head, node) {
557 kfree(mp_node);
558 nr_nodes++;
559 }
560 }
561 return nr_nodes;
562 }
563
564 /*
565 * Return value:
566 * >=0: success
567 * <0: error
568 */
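/*
 * Type-check one instruction against the current virtual stack without
 * modifying it.  Operands whose type is still unknown at validation time are
 * accepted here; their typing remains dynamic.
 */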
569 static
570 int validate_instruction_context(struct bytecode_runtime *bytecode,
571 struct vstack *stack,
572 char *start_pc,
573 char *pc)
574 {
575 int ret = 0;
576 const filter_opcode_t opcode = *(filter_opcode_t *) pc;
577
578 switch (opcode) {
579 case FILTER_OP_UNKNOWN:
580 default:
581 {
582 printk(KERN_WARNING "unknown bytecode op %u\n",
583 (unsigned int) *(filter_opcode_t *) pc);
584 ret = -EINVAL;
585 goto end;
586 }
587
588 case FILTER_OP_RETURN:
589 {
590 goto end;
591 }
592
593 /* binary */
594 case FILTER_OP_MUL:
595 case FILTER_OP_DIV:
596 case FILTER_OP_MOD:
597 case FILTER_OP_PLUS:
598 case FILTER_OP_MINUS:
599 /* Floating point */
600 case FILTER_OP_EQ_DOUBLE:
601 case FILTER_OP_NE_DOUBLE:
602 case FILTER_OP_GT_DOUBLE:
603 case FILTER_OP_LT_DOUBLE:
604 case FILTER_OP_GE_DOUBLE:
605 case FILTER_OP_LE_DOUBLE:
606 case FILTER_OP_EQ_DOUBLE_S64:
607 case FILTER_OP_NE_DOUBLE_S64:
608 case FILTER_OP_GT_DOUBLE_S64:
609 case FILTER_OP_LT_DOUBLE_S64:
610 case FILTER_OP_GE_DOUBLE_S64:
611 case FILTER_OP_LE_DOUBLE_S64:
612 case FILTER_OP_EQ_S64_DOUBLE:
613 case FILTER_OP_NE_S64_DOUBLE:
614 case FILTER_OP_GT_S64_DOUBLE:
615 case FILTER_OP_LT_S64_DOUBLE:
616 case FILTER_OP_GE_S64_DOUBLE:
617 case FILTER_OP_LE_S64_DOUBLE:
618 case FILTER_OP_UNARY_PLUS_DOUBLE:
619 case FILTER_OP_UNARY_MINUS_DOUBLE:
620 case FILTER_OP_UNARY_NOT_DOUBLE:
621 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
622 case FILTER_OP_LOAD_DOUBLE:
623 case FILTER_OP_CAST_DOUBLE_TO_S64:
624 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
625 {
626 printk(KERN_WARNING "unsupported bytecode op %u\n",
627 (unsigned int) *(filter_opcode_t *) pc);
628 ret = -EINVAL;
629 goto end;
630 }
631
632 case FILTER_OP_EQ:
633 {
634 ret = bin_op_compare_check(stack, opcode, "==");
635 if (ret < 0)
636 goto end;
637 break;
638 }
639 case FILTER_OP_NE:
640 {
641 ret = bin_op_compare_check(stack, opcode, "!=");
642 if (ret < 0)
643 goto end;
644 break;
645 }
646 case FILTER_OP_GT:
647 {
648 ret = bin_op_compare_check(stack, opcode, ">");
649 if (ret < 0)
650 goto end;
651 break;
652 }
653 case FILTER_OP_LT:
654 {
655 ret = bin_op_compare_check(stack, opcode, "<");
656 if (ret < 0)
657 goto end;
658 break;
659 }
660 case FILTER_OP_GE:
661 {
662 ret = bin_op_compare_check(stack, opcode, ">=");
663 if (ret < 0)
664 goto end;
665 break;
666 }
667 case FILTER_OP_LE:
668 {
669 ret = bin_op_compare_check(stack, opcode, "<=");
670 if (ret < 0)
671 goto end;
672 break;
673 }
674
675 case FILTER_OP_EQ_STRING:
676 case FILTER_OP_NE_STRING:
677 case FILTER_OP_GT_STRING:
678 case FILTER_OP_LT_STRING:
679 case FILTER_OP_GE_STRING:
680 case FILTER_OP_LE_STRING:
681 {
682 if (!vstack_ax(stack) || !vstack_bx(stack)) {
683 printk(KERN_WARNING "Empty stack\n");
684 ret = -EINVAL;
685 goto end;
686 }
687 if (vstack_ax(stack)->type != REG_STRING
688 || vstack_bx(stack)->type != REG_STRING) {
689 printk(KERN_WARNING "Unexpected register type for string comparator\n");
690 ret = -EINVAL;
691 goto end;
692 }
693 break;
694 }
695
696
697 case FILTER_OP_EQ_STAR_GLOB_STRING:
698 case FILTER_OP_NE_STAR_GLOB_STRING:
699 {
700 if (!vstack_ax(stack) || !vstack_bx(stack)) {
701 printk(KERN_WARNING "Empty stack\n");
702 ret = -EINVAL;
703 goto end;
704 }
705 if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
706 && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
707 printk(KERN_WARNING "Unexpected register type for globbing pattern comparator\n");
708 ret = -EINVAL;
709 goto end;
710 }
711 break;
712 }
713
714 case FILTER_OP_EQ_S64:
715 case FILTER_OP_NE_S64:
716 case FILTER_OP_GT_S64:
717 case FILTER_OP_LT_S64:
718 case FILTER_OP_GE_S64:
719 case FILTER_OP_LE_S64:
720 {
721 if (!vstack_ax(stack) || !vstack_bx(stack)) {
722 printk(KERN_WARNING "Empty stack\n");
723 ret = -EINVAL;
724 goto end;
725 }
726 if (vstack_ax(stack)->type != REG_S64
727 || vstack_bx(stack)->type != REG_S64) {
728 printk(KERN_WARNING "Unexpected register type for s64 comparator\n");
729 ret = -EINVAL;
730 goto end;
731 }
732 break;
733 }
734
735 case FILTER_OP_BIT_RSHIFT:
736 ret = bin_op_bitwise_check(stack, opcode, ">>");
737 if (ret < 0)
738 goto end;
739 break;
740 case FILTER_OP_BIT_LSHIFT:
741 ret = bin_op_bitwise_check(stack, opcode, "<<");
742 if (ret < 0)
743 goto end;
744 break;
745 case FILTER_OP_BIT_AND:
746 ret = bin_op_bitwise_check(stack, opcode, "&");
747 if (ret < 0)
748 goto end;
749 break;
750 case FILTER_OP_BIT_OR:
751 ret = bin_op_bitwise_check(stack, opcode, "|");
752 if (ret < 0)
753 goto end;
754 break;
755 case FILTER_OP_BIT_XOR:
756 ret = bin_op_bitwise_check(stack, opcode, "^");
757 if (ret < 0)
758 goto end;
759 break;
760
761 /* unary */
762 case FILTER_OP_UNARY_PLUS:
763 case FILTER_OP_UNARY_MINUS:
764 case FILTER_OP_UNARY_NOT:
765 {
766 if (!vstack_ax(stack)) {
767 printk(KERN_WARNING "Empty stack\n");
768 ret = -EINVAL;
769 goto end;
770 }
771 switch (vstack_ax(stack)->type) {
772 default:
773 case REG_DOUBLE:
774 printk(KERN_WARNING "unknown register type\n");
775 ret = -EINVAL;
776 goto end;
777
778 case REG_STRING:
779 case REG_STAR_GLOB_STRING:
780 printk(KERN_WARNING "Unary op can only be applied to numeric or floating point registers\n");
781 ret = -EINVAL;
782 goto end;
783 case REG_S64:
784 case REG_TYPE_UNKNOWN:
785 break;
786 }
787 break;
788 }
789 case FILTER_OP_UNARY_BIT_NOT:
790 {
791 if (!vstack_ax(stack)) {
792 printk(KERN_WARNING "Empty stack\n");
793 ret = -EINVAL;
794 goto end;
795 }
796 switch (vstack_ax(stack)->type) {
797 default:
798 printk(KERN_WARNING "unknown register type\n");
799 ret = -EINVAL;
800 goto end;
801
802 case REG_STRING:
803 case REG_STAR_GLOB_STRING:
804 case REG_DOUBLE:
805 printk(KERN_WARNING "Unary bitwise op can only be applied to numeric registers\n");
806 ret = -EINVAL;
807 goto end;
808 case REG_S64:
809 break;
810 case REG_TYPE_UNKNOWN:
811 break;
812 }
813 break;
814 }
815
816 case FILTER_OP_UNARY_PLUS_S64:
817 case FILTER_OP_UNARY_MINUS_S64:
818 case FILTER_OP_UNARY_NOT_S64:
819 {
820 if (!vstack_ax(stack)) {
821 printk(KERN_WARNING "Empty stack\n");
822 ret = -EINVAL;
823 goto end;
824 }
825 if (vstack_ax(stack)->type != REG_S64) {
826 printk(KERN_WARNING "Invalid register type\n");
827 ret = -EINVAL;
828 goto end;
829 }
830 break;
831 }
832
833 /* logical */
834 case FILTER_OP_AND:
835 case FILTER_OP_OR:
836 {
837 struct logical_op *insn = (struct logical_op *) pc;
838
839 if (!vstack_ax(stack)) {
840 printk(KERN_WARNING "Empty stack\n");
841 ret = -EINVAL;
842 goto end;
843 }
844 if (vstack_ax(stack)->type != REG_S64) {
845 printk(KERN_WARNING "Logical comparator expects S64 register\n");
846 ret = -EINVAL;
847 goto end;
848 }
849
850 dbg_printk("Validate jumping to bytecode offset %u\n",
851 (unsigned int) insn->skip_offset);
852 if (unlikely(start_pc + insn->skip_offset <= pc)) {
853 printk(KERN_WARNING "Loops are not allowed in bytecode\n");
854 ret = -EINVAL;
855 goto end;
856 }
857 break;
858 }
859
860 /* load field ref */
861 case FILTER_OP_LOAD_FIELD_REF:
862 {
863 printk(KERN_WARNING "Unknown field ref type\n");
864 ret = -EINVAL;
865 goto end;
866 }
867 case FILTER_OP_LOAD_FIELD_REF_STRING:
868 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
869 case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
870 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
871 {
872 struct load_op *insn = (struct load_op *) pc;
873 struct field_ref *ref = (struct field_ref *) insn->data;
874
875 dbg_printk("Validate load field ref offset %u type string\n",
876 ref->offset);
877 break;
878 }
879 case FILTER_OP_LOAD_FIELD_REF_S64:
880 {
881 struct load_op *insn = (struct load_op *) pc;
882 struct field_ref *ref = (struct field_ref *) insn->data;
883
884 dbg_printk("Validate load field ref offset %u type s64\n",
885 ref->offset);
886 break;
887 }
888
889 /* load from immediate operand */
890 case FILTER_OP_LOAD_STRING:
891 case FILTER_OP_LOAD_STAR_GLOB_STRING:
892 {
893 break;
894 }
895
896 case FILTER_OP_LOAD_S64:
897 {
898 break;
899 }
900
901 case FILTER_OP_CAST_TO_S64:
902 {
903 struct cast_op *insn = (struct cast_op *) pc;
904
905 if (!vstack_ax(stack)) {
906 printk(KERN_WARNING "Empty stack\n");
907 ret = -EINVAL;
908 goto end;
909 }
910 switch (vstack_ax(stack)->type) {
911 default:
912 case REG_DOUBLE:
913 printk(KERN_WARNING "unknown register type\n");
914 ret = -EINVAL;
915 goto end;
916
917 case REG_STRING:
918 case REG_STAR_GLOB_STRING:
919 printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
920 ret = -EINVAL;
921 goto end;
922 case REG_S64:
923 break;
924 }
925 if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) {
926 if (vstack_ax(stack)->type != REG_DOUBLE) {
927 printk(KERN_WARNING "Cast expects double\n");
928 ret = -EINVAL;
929 goto end;
930 }
931 }
932 break;
933 }
934 case FILTER_OP_CAST_NOP:
935 {
936 break;
937 }
938
939 /* get context ref */
940 case FILTER_OP_GET_CONTEXT_REF:
941 {
942 printk(KERN_WARNING "Unknown get context ref type\n");
943 ret = -EINVAL;
944 goto end;
945 }
946 case FILTER_OP_GET_CONTEXT_REF_STRING:
947 {
948 struct load_op *insn = (struct load_op *) pc;
949 struct field_ref *ref = (struct field_ref *) insn->data;
950
951 dbg_printk("Validate get context ref offset %u type string\n",
952 ref->offset);
953 break;
954 }
955 case FILTER_OP_GET_CONTEXT_REF_S64:
956 {
957 struct load_op *insn = (struct load_op *) pc;
958 struct field_ref *ref = (struct field_ref *) insn->data;
959
960 dbg_printk("Validate get context ref offset %u type s64\n",
961 ref->offset);
962 break;
963 }
964
965 /*
966 * Instructions for recursive traversal through composed types.
967 */
968 case FILTER_OP_GET_CONTEXT_ROOT:
969 {
970 dbg_printk("Validate get context root\n");
971 break;
972 }
973 case FILTER_OP_GET_APP_CONTEXT_ROOT:
974 {
975 dbg_printk("Validate get app context root\n");
976 break;
977 }
978 case FILTER_OP_GET_PAYLOAD_ROOT:
979 {
980 dbg_printk("Validate get payload root\n");
981 break;
982 }
983 case FILTER_OP_LOAD_FIELD:
984 {
985 /*
986 * We tolerate that field type is unknown at validation,
987 * because we are performing the load specialization in
988 * a phase after validation.
989 */
990 dbg_printk("Validate load field\n");
991 break;
992 }
993 case FILTER_OP_LOAD_FIELD_S8:
994 {
995 dbg_printk("Validate load field s8\n");
996 break;
997 }
998 case FILTER_OP_LOAD_FIELD_S16:
999 {
1000 dbg_printk("Validate load field s16\n");
1001 break;
1002 }
1003 case FILTER_OP_LOAD_FIELD_S32:
1004 {
1005 dbg_printk("Validate load field s32\n");
1006 break;
1007 }
1008 case FILTER_OP_LOAD_FIELD_S64:
1009 {
1010 dbg_printk("Validate load field s64\n");
1011 break;
1012 }
1013 case FILTER_OP_LOAD_FIELD_U8:
1014 {
1015 dbg_printk("Validate load field u8\n");
1016 break;
1017 }
1018 case FILTER_OP_LOAD_FIELD_U16:
1019 {
1020 dbg_printk("Validate load field u16\n");
1021 break;
1022 }
1023 case FILTER_OP_LOAD_FIELD_U32:
1024 {
1025 dbg_printk("Validate load field u32\n");
1026 break;
1027 }
1028 case FILTER_OP_LOAD_FIELD_U64:
1029 {
1030 dbg_printk("Validate load field u64\n");
1031 break;
1032 }
1033 case FILTER_OP_LOAD_FIELD_STRING:
1034 {
1035 dbg_printk("Validate load field string\n");
1036 break;
1037 }
1038 case FILTER_OP_LOAD_FIELD_SEQUENCE:
1039 {
1040 dbg_printk("Validate load field sequence\n");
1041 break;
1042 }
1043 case FILTER_OP_LOAD_FIELD_DOUBLE:
1044 {
1045 dbg_printk("Validate load field double\n");
1046 break;
1047 }
1048
1049 case FILTER_OP_GET_SYMBOL:
1050 {
1051 struct load_op *insn = (struct load_op *) pc;
1052 struct get_symbol *sym = (struct get_symbol *) insn->data;
1053
1054 dbg_printk("Validate get symbol offset %u\n", sym->offset);
1055 break;
1056 }
1057
1058 case FILTER_OP_GET_SYMBOL_FIELD:
1059 {
1060 struct load_op *insn = (struct load_op *) pc;
1061 struct get_symbol *sym = (struct get_symbol *) insn->data;
1062
1063 dbg_printk("Validate get symbol field offset %u\n", sym->offset);
1064 break;
1065 }
1066
1067 case FILTER_OP_GET_INDEX_U16:
1068 {
1069 struct load_op *insn = (struct load_op *) pc;
1070 struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;
1071
1072 dbg_printk("Validate get index u16 index %u\n", get_index->index);
1073 break;
1074 }
1075
1076 case FILTER_OP_GET_INDEX_U64:
1077 {
1078 struct load_op *insn = (struct load_op *) pc;
1079 struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;
1080
1081 dbg_printk("Validate get index u64 index %llu\n",
1082 (unsigned long long) get_index->index);
1083 break;
1084 }
1085 }
1086 end:
1087 return ret;
1088 }
1089
1090 /*
1091 * Return value:
1092 * 0: success
1093 * <0: error
1094 */
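/*
 * Validate the instruction against the current stack, then look up any merge
 * point registered for this pc: the stack recorded when the jump was
 * encountered must match the fall-through stack, after which the merge point
 * is removed.
 */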
1095 static
1096 int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
1097 struct mp_table *mp_table,
1098 struct vstack *stack,
1099 char *start_pc,
1100 char *pc)
1101 {
1102 int ret, found = 0;
1103 unsigned long target_pc = pc - start_pc;
1104 unsigned long hash;
1105 struct hlist_head *head;
1106 struct mp_node *mp_node;
1107
1108 /* Validate the context resulting from the previous instruction */
1109 ret = validate_instruction_context(bytecode, stack, start_pc, pc);
1110 if (ret < 0)
1111 return ret;
1112
1113 /* Validate merge points */
1114 hash = jhash_1word(target_pc, 0);
1115 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
1116 lttng_hlist_for_each_entry(mp_node, head, node) {
1117 if (lttng_hash_match(mp_node, target_pc)) {
1118 found = 1;
1119 break;
1120 }
1121 }
1122 if (found) {
1123 dbg_printk("Filter: validate merge point at offset %lu\n",
1124 target_pc);
1125 if (merge_points_compare(stack, &mp_node->stack)) {
1126 printk(KERN_WARNING "Merge points differ for offset %lu\n",
1127 target_pc);
1128 return -EINVAL;
1129 }
1130 /* Once validated, we can remove the merge point */
1131 dbg_printk("Filter: remove merge point at offset %lu\n",
1132 target_pc);
1133 hlist_del(&mp_node->node);
1134 }
1135 return 0;
1136 }
1137
1138 /*
1139 * Return value:
1140 * >0: going to next insn.
1141 * 0: success, stop iteration.
1142 * <0: error
1143 */
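/*
 * Symbolically execute one instruction on the virtual type stack: push and
 * pop register types without evaluating data, record a merge point at the
 * skip_offset target of and/or, and compute the next pc.
 */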
1144 static
1145 int exec_insn(struct bytecode_runtime *bytecode,
1146 struct mp_table *mp_table,
1147 struct vstack *stack,
1148 char **_next_pc,
1149 char *pc)
1150 {
1151 int ret = 1;
1152 char *next_pc = *_next_pc;
1153
1154 switch (*(filter_opcode_t *) pc) {
1155 case FILTER_OP_UNKNOWN:
1156 default:
1157 {
1158 printk(KERN_WARNING "unknown bytecode op %u\n",
1159 (unsigned int) *(filter_opcode_t *) pc);
1160 ret = -EINVAL;
1161 goto end;
1162 }
1163
1164 case FILTER_OP_RETURN:
1165 {
1166 if (!vstack_ax(stack)) {
1167 printk(KERN_WARNING "Empty stack\n");
1168 ret = -EINVAL;
1169 goto end;
1170 }
1171 switch (vstack_ax(stack)->type) {
1172 case REG_S64:
1173 case REG_TYPE_UNKNOWN:
1174 break;
1175 default:
1176 printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
1177 (int) vstack_ax(stack)->type);
1178 ret = -EINVAL;
1179 goto end;
1180 }
1181
1182 ret = 0;
1183 goto end;
1184 }
1185
1186 /* binary */
1187 case FILTER_OP_MUL:
1188 case FILTER_OP_DIV:
1189 case FILTER_OP_MOD:
1190 case FILTER_OP_PLUS:
1191 case FILTER_OP_MINUS:
1192 /* Floating point */
1193 case FILTER_OP_EQ_DOUBLE:
1194 case FILTER_OP_NE_DOUBLE:
1195 case FILTER_OP_GT_DOUBLE:
1196 case FILTER_OP_LT_DOUBLE:
1197 case FILTER_OP_GE_DOUBLE:
1198 case FILTER_OP_LE_DOUBLE:
1199 case FILTER_OP_EQ_DOUBLE_S64:
1200 case FILTER_OP_NE_DOUBLE_S64:
1201 case FILTER_OP_GT_DOUBLE_S64:
1202 case FILTER_OP_LT_DOUBLE_S64:
1203 case FILTER_OP_GE_DOUBLE_S64:
1204 case FILTER_OP_LE_DOUBLE_S64:
1205 case FILTER_OP_EQ_S64_DOUBLE:
1206 case FILTER_OP_NE_S64_DOUBLE:
1207 case FILTER_OP_GT_S64_DOUBLE:
1208 case FILTER_OP_LT_S64_DOUBLE:
1209 case FILTER_OP_GE_S64_DOUBLE:
1210 case FILTER_OP_LE_S64_DOUBLE:
1211 case FILTER_OP_UNARY_PLUS_DOUBLE:
1212 case FILTER_OP_UNARY_MINUS_DOUBLE:
1213 case FILTER_OP_UNARY_NOT_DOUBLE:
1214 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
1215 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
1216 case FILTER_OP_LOAD_DOUBLE:
1217 case FILTER_OP_CAST_DOUBLE_TO_S64:
1218 {
1219 printk(KERN_WARNING "unsupported bytecode op %u\n",
1220 (unsigned int) *(filter_opcode_t *) pc);
1221 ret = -EINVAL;
1222 goto end;
1223 }
1224
1225 case FILTER_OP_EQ:
1226 case FILTER_OP_NE:
1227 case FILTER_OP_GT:
1228 case FILTER_OP_LT:
1229 case FILTER_OP_GE:
1230 case FILTER_OP_LE:
1231 case FILTER_OP_EQ_STRING:
1232 case FILTER_OP_NE_STRING:
1233 case FILTER_OP_GT_STRING:
1234 case FILTER_OP_LT_STRING:
1235 case FILTER_OP_GE_STRING:
1236 case FILTER_OP_LE_STRING:
1237 case FILTER_OP_EQ_STAR_GLOB_STRING:
1238 case FILTER_OP_NE_STAR_GLOB_STRING:
1239 case FILTER_OP_EQ_S64:
1240 case FILTER_OP_NE_S64:
1241 case FILTER_OP_GT_S64:
1242 case FILTER_OP_LT_S64:
1243 case FILTER_OP_GE_S64:
1244 case FILTER_OP_LE_S64:
1245 case FILTER_OP_BIT_RSHIFT:
1246 case FILTER_OP_BIT_LSHIFT:
1247 case FILTER_OP_BIT_AND:
1248 case FILTER_OP_BIT_OR:
1249 case FILTER_OP_BIT_XOR:
1250 {
1251 /* Pop 2, push 1 */
1252 if (vstack_pop(stack)) {
1253 ret = -EINVAL;
1254 goto end;
1255 }
1256 if (!vstack_ax(stack)) {
1257 printk(KERN_WARNING "Empty stack\n");
1258 ret = -EINVAL;
1259 goto end;
1260 }
1261 switch (vstack_ax(stack)->type) {
1262 case REG_S64:
1263 case REG_DOUBLE:
1264 case REG_STRING:
1265 case REG_STAR_GLOB_STRING:
1266 case REG_TYPE_UNKNOWN:
1267 break;
1268 default:
1269 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1270 (int) vstack_ax(stack)->type);
1271 ret = -EINVAL;
1272 goto end;
1273 }
1274
1275 vstack_ax(stack)->type = REG_S64;
1276 next_pc += sizeof(struct binary_op);
1277 break;
1278 }
1279
1280 /* unary */
1281 case FILTER_OP_UNARY_PLUS:
1282 case FILTER_OP_UNARY_MINUS:
1283 {
1284 /* Pop 1, push 1 */
1285 if (!vstack_ax(stack)) {
1286 		printk(KERN_WARNING "Empty stack\n");
1287 ret = -EINVAL;
1288 goto end;
1289 }
1290 switch (vstack_ax(stack)->type) {
1291 case REG_S64:
1292 case REG_TYPE_UNKNOWN:
1293 break;
1294 default:
1295 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1296 (int) vstack_ax(stack)->type);
1297 ret = -EINVAL;
1298 goto end;
1299 }
1300
1301 vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
1302 next_pc += sizeof(struct unary_op);
1303 break;
1304 }
1305
1306 case FILTER_OP_UNARY_PLUS_S64:
1307 case FILTER_OP_UNARY_MINUS_S64:
1308 case FILTER_OP_UNARY_NOT_S64:
1309 {
1310 /* Pop 1, push 1 */
1311 if (!vstack_ax(stack)) {
1312 		printk(KERN_WARNING "Empty stack\n");
1313 ret = -EINVAL;
1314 goto end;
1315 }
1316 switch (vstack_ax(stack)->type) {
1317 case REG_S64:
1318 break;
1319 default:
1320 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1321 (int) vstack_ax(stack)->type);
1322 ret = -EINVAL;
1323 goto end;
1324 }
1325
1326 vstack_ax(stack)->type = REG_S64;
1327 next_pc += sizeof(struct unary_op);
1328 break;
1329 }
1330
1331 case FILTER_OP_UNARY_NOT:
1332 {
1333 /* Pop 1, push 1 */
1334 if (!vstack_ax(stack)) {
1335 		printk(KERN_WARNING "Empty stack\n");
1336 ret = -EINVAL;
1337 goto end;
1338 }
1339 switch (vstack_ax(stack)->type) {
1340 case REG_S64:
1341 case REG_TYPE_UNKNOWN:
1342 break;
1343 default:
1344 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1345 (int) vstack_ax(stack)->type);
1346 ret = -EINVAL;
1347 goto end;
1348 }
1349
1350 vstack_ax(stack)->type = REG_S64;
1351 next_pc += sizeof(struct unary_op);
1352 break;
1353 }
1354
1355 case FILTER_OP_UNARY_BIT_NOT:
1356 {
1357 /* Pop 1, push 1 */
1358 if (!vstack_ax(stack)) {
1359 printk(KERN_WARNING "Empty stack\n");
1360 ret = -EINVAL;
1361 goto end;
1362 }
1363 switch (vstack_ax(stack)->type) {
1364 case REG_S64:
1365 case REG_TYPE_UNKNOWN:
1366 break;
1367 case REG_DOUBLE:
1368 default:
1369 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1370 (int) vstack_ax(stack)->type);
1371 ret = -EINVAL;
1372 goto end;
1373 }
1374
1375 vstack_ax(stack)->type = REG_S64;
1376 next_pc += sizeof(struct unary_op);
1377 break;
1378 }
1379
1380 /* logical */
1381 case FILTER_OP_AND:
1382 case FILTER_OP_OR:
1383 {
1384 struct logical_op *insn = (struct logical_op *) pc;
1385 int merge_ret;
1386
1387 /* Add merge point to table */
1388 merge_ret = merge_point_add_check(mp_table,
1389 insn->skip_offset, stack);
1390 if (merge_ret) {
1391 ret = merge_ret;
1392 goto end;
1393 }
1394
1395 if (!vstack_ax(stack)) {
1396 		printk(KERN_WARNING "Empty stack\n");
1397 ret = -EINVAL;
1398 goto end;
1399 }
1400 		/* There is always a cast-to-s64 operation before an or/and op. */
1401 switch (vstack_ax(stack)->type) {
1402 case REG_S64:
1403 break;
1404 default:
1405 printk(KERN_WARNING "Incorrect register type %d for operation\n",
1406 (int) vstack_ax(stack)->type);
1407 ret = -EINVAL;
1408 goto end;
1409 }
1410
1411 /* Continue to next instruction */
1412 /* Pop 1 when jump not taken */
1413 if (vstack_pop(stack)) {
1414 ret = -EINVAL;
1415 goto end;
1416 }
1417 next_pc += sizeof(struct logical_op);
1418 break;
1419 }
1420
1421 /* load field ref */
1422 case FILTER_OP_LOAD_FIELD_REF:
1423 {
1424 printk(KERN_WARNING "Unknown field ref type\n");
1425 ret = -EINVAL;
1426 goto end;
1427 }
1428 /* get context ref */
1429 case FILTER_OP_GET_CONTEXT_REF:
1430 {
1431 printk(KERN_WARNING "Unknown get context ref type\n");
1432 ret = -EINVAL;
1433 goto end;
1434 }
1435 case FILTER_OP_LOAD_FIELD_REF_STRING:
1436 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
1437 case FILTER_OP_GET_CONTEXT_REF_STRING:
1438 case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
1439 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
1440 {
1441 if (vstack_push(stack)) {
1442 ret = -EINVAL;
1443 goto end;
1444 }
1445 vstack_ax(stack)->type = REG_STRING;
1446 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1447 break;
1448 }
1449 case FILTER_OP_LOAD_FIELD_REF_S64:
1450 case FILTER_OP_GET_CONTEXT_REF_S64:
1451 {
1452 if (vstack_push(stack)) {
1453 ret = -EINVAL;
1454 goto end;
1455 }
1456 vstack_ax(stack)->type = REG_S64;
1457 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1458 break;
1459 }
1460
1461 /* load from immediate operand */
1462 case FILTER_OP_LOAD_STRING:
1463 {
1464 struct load_op *insn = (struct load_op *) pc;
1465
1466 if (vstack_push(stack)) {
1467 ret = -EINVAL;
1468 goto end;
1469 }
1470 vstack_ax(stack)->type = REG_STRING;
1471 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1472 break;
1473 }
1474
1475 case FILTER_OP_LOAD_STAR_GLOB_STRING:
1476 {
1477 struct load_op *insn = (struct load_op *) pc;
1478
1479 if (vstack_push(stack)) {
1480 ret = -EINVAL;
1481 goto end;
1482 }
1483 vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
1484 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1485 break;
1486 }
1487
1488 case FILTER_OP_LOAD_S64:
1489 {
1490 if (vstack_push(stack)) {
1491 ret = -EINVAL;
1492 goto end;
1493 }
1494 vstack_ax(stack)->type = REG_S64;
1495 next_pc += sizeof(struct load_op)
1496 + sizeof(struct literal_numeric);
1497 break;
1498 }
1499
1500 case FILTER_OP_CAST_TO_S64:
1501 {
1502 /* Pop 1, push 1 */
1503 if (!vstack_ax(stack)) {
1504 printk(KERN_WARNING "Empty stack\n");
1505 ret = -EINVAL;
1506 goto end;
1507 }
1508 switch (vstack_ax(stack)->type) {
1509 case REG_S64:
1510 case REG_DOUBLE:
1511 case REG_TYPE_UNKNOWN:
1512 break;
1513 default:
1514 printk(KERN_WARNING "Incorrect register type %d for cast\n",
1515 (int) vstack_ax(stack)->type);
1516 ret = -EINVAL;
1517 goto end;
1518 }
1519 vstack_ax(stack)->type = REG_S64;
1520 next_pc += sizeof(struct cast_op);
1521 break;
1522 }
1523 case FILTER_OP_CAST_NOP:
1524 {
1525 next_pc += sizeof(struct cast_op);
1526 break;
1527 }
1528
1529 /*
1530 * Instructions for recursive traversal through composed types.
1531 */
1532 case FILTER_OP_GET_CONTEXT_ROOT:
1533 case FILTER_OP_GET_APP_CONTEXT_ROOT:
1534 case FILTER_OP_GET_PAYLOAD_ROOT:
1535 {
1536 if (vstack_push(stack)) {
1537 ret = -EINVAL;
1538 goto end;
1539 }
1540 vstack_ax(stack)->type = REG_PTR;
1541 next_pc += sizeof(struct load_op);
1542 break;
1543 }
1544
1545 case FILTER_OP_LOAD_FIELD:
1546 {
1547 /* Pop 1, push 1 */
1548 if (!vstack_ax(stack)) {
1549 		printk(KERN_WARNING "Empty stack\n");
1550 ret = -EINVAL;
1551 goto end;
1552 }
1553 if (vstack_ax(stack)->type != REG_PTR) {
1554 		printk(KERN_WARNING "Expecting pointer on top of stack\n");
1555 ret = -EINVAL;
1556 goto end;
1557 }
1558 vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
1559 next_pc += sizeof(struct load_op);
1560 break;
1561 }
1562
1563 case FILTER_OP_LOAD_FIELD_S8:
1564 case FILTER_OP_LOAD_FIELD_S16:
1565 case FILTER_OP_LOAD_FIELD_S32:
1566 case FILTER_OP_LOAD_FIELD_S64:
1567 case FILTER_OP_LOAD_FIELD_U8:
1568 case FILTER_OP_LOAD_FIELD_U16:
1569 case FILTER_OP_LOAD_FIELD_U32:
1570 case FILTER_OP_LOAD_FIELD_U64:
1571 {
1572 /* Pop 1, push 1 */
1573 if (!vstack_ax(stack)) {
1574 		printk(KERN_WARNING "Empty stack\n");
1575 ret = -EINVAL;
1576 goto end;
1577 }
1578 if (vstack_ax(stack)->type != REG_PTR) {
1579 		printk(KERN_WARNING "Expecting pointer on top of stack\n");
1580 ret = -EINVAL;
1581 goto end;
1582 }
1583 vstack_ax(stack)->type = REG_S64;
1584 next_pc += sizeof(struct load_op);
1585 break;
1586 }
1587
1588 case FILTER_OP_LOAD_FIELD_STRING:
1589 case FILTER_OP_LOAD_FIELD_SEQUENCE:
1590 {
1591 /* Pop 1, push 1 */
1592 if (!vstack_ax(stack)) {
1593 		printk(KERN_WARNING "Empty stack\n");
1594 ret = -EINVAL;
1595 goto end;
1596 }
1597 if (vstack_ax(stack)->type != REG_PTR) {
1598 		printk(KERN_WARNING "Expecting pointer on top of stack\n");
1599 ret = -EINVAL;
1600 goto end;
1601 }
1602 vstack_ax(stack)->type = REG_STRING;
1603 next_pc += sizeof(struct load_op);
1604 break;
1605 }
1606
1607 case FILTER_OP_LOAD_FIELD_DOUBLE:
1608 {
1609 /* Pop 1, push 1 */
1610 if (!vstack_ax(stack)) {
1611 		printk(KERN_WARNING "Empty stack\n");
1612 ret = -EINVAL;
1613 goto end;
1614 }
1615 if (vstack_ax(stack)->type != REG_PTR) {
1616 		printk(KERN_WARNING "Expecting pointer on top of stack\n");
1617 ret = -EINVAL;
1618 goto end;
1619 }
1620 vstack_ax(stack)->type = REG_DOUBLE;
1621 next_pc += sizeof(struct load_op);
1622 break;
1623 }
1624
1625 case FILTER_OP_GET_SYMBOL:
1626 case FILTER_OP_GET_SYMBOL_FIELD:
1627 {
1628 /* Pop 1, push 1 */
1629 if (!vstack_ax(stack)) {
1630 		printk(KERN_WARNING "Empty stack\n");
1631 ret = -EINVAL;
1632 goto end;
1633 }
1634 if (vstack_ax(stack)->type != REG_PTR) {
1635 		printk(KERN_WARNING "Expecting pointer on top of stack\n");
1636 ret = -EINVAL;
1637 goto end;
1638 }
1639 next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
1640 break;
1641 }
1642
1643 case FILTER_OP_GET_INDEX_U16:
1644 {
1645 /* Pop 1, push 1 */
1646 if (!vstack_ax(stack)) {
1647 		printk(KERN_WARNING "Empty stack\n");
1648 ret = -EINVAL;
1649 goto end;
1650 }
1651 if (vstack_ax(stack)->type != REG_PTR) {
1652 		printk(KERN_WARNING "Expecting pointer on top of stack\n");
1653 ret = -EINVAL;
1654 goto end;
1655 }
1656 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
1657 break;
1658 }
1659
1660 case FILTER_OP_GET_INDEX_U64:
1661 {
1662 /* Pop 1, push 1 */
1663 if (!vstack_ax(stack)) {
1664 		printk(KERN_WARNING "Empty stack\n");
1665 ret = -EINVAL;
1666 goto end;
1667 }
1668 if (vstack_ax(stack)->type != REG_PTR) {
1669 		printk(KERN_WARNING "Expecting pointer on top of stack\n");
1670 ret = -EINVAL;
1671 goto end;
1672 }
1673 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
1674 break;
1675 }
1676
1677 }
1678 end:
1679 *_next_pc = next_pc;
1680 return ret;
1681 }
1682
1683 /*
1684 * Never called concurrently (hash seed is shared).
1685 */
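/*
 * Because backward jumps are rejected, one forward pass over the bytecode
 * visits every reachable instruction, and any merge point left over at the
 * end is reported as an error.
 *
 * Illustrative sketch (hypothetical bytecode for "a > 3 && b == 7"; the
 * exact opcodes depend on the bytecode compiler): the left comparison leaves
 * an S64 on the stack, FILTER_OP_AND records a merge point at its
 * skip_offset while that S64 is still on the stack and then pops it, the
 * right comparison pushes a new S64, and when validation reaches the
 * skip_offset target both contexts hold a single S64, so
 * merge_points_compare() accepts them.
 */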
1686 int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
1687 {
1688 struct mp_table *mp_table;
1689 char *pc, *next_pc, *start_pc;
1690 int ret = -EINVAL;
1691 struct vstack stack;
1692
1693 vstack_init(&stack);
1694
1695 mp_table = kzalloc(sizeof(*mp_table), GFP_KERNEL);
1696 if (!mp_table) {
1697 printk(KERN_WARNING "Error allocating hash table for bytecode validation\n");
1698 return -ENOMEM;
1699 }
1700 start_pc = &bytecode->code[0];
1701 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
1702 pc = next_pc) {
1703 ret = bytecode_validate_overflow(bytecode, start_pc, pc);
1704 if (ret != 0) {
1705 if (ret == -ERANGE)
1706 printk(KERN_WARNING "filter bytecode overflow\n");
1707 goto end;
1708 }
1709 dbg_printk("Validating op %s (%u)\n",
1710 lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc),
1711 (unsigned int) *(filter_opcode_t *) pc);
1712
1713 /*
1714 * For each instruction, validate the current context
1715 * (traversal of entire execution flow), and validate
1716 * all merge points targeting this instruction.
1717 */
1718 ret = validate_instruction_all_contexts(bytecode, mp_table,
1719 &stack, start_pc, pc);
1720 if (ret)
1721 goto end;
1722 ret = exec_insn(bytecode, mp_table, &stack, &next_pc, pc);
1723 if (ret <= 0)
1724 goto end;
1725 }
1726 end:
1727 if (delete_all_nodes(mp_table)) {
1728 if (!ret) {
1729 printk(KERN_WARNING "Unexpected merge points\n");
1730 ret = -EINVAL;
1731 }
1732 }
1733 kfree(mp_table);
1734 return ret;
1735 }