/*
 * Source: lttng-modules.git — lttng-filter-validator.c
 * (tree state: "Move headers under include/")
 */
1 /* SPDX-License-Identifier: MIT
2 *
3 * lttng-filter-validator.c
4 *
5 * LTTng modules filter bytecode validator.
6 *
7 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 #include <linux/types.h>
11 #include <linux/jhash.h>
12 #include <linux/slab.h>
13
14 #include <lttng/lttng-filter.h>
15
16 #define MERGE_POINT_TABLE_BITS 7
17 #define MERGE_POINT_TABLE_SIZE (1U << MERGE_POINT_TABLE_BITS)
18
19 /* merge point table node */
20 struct mp_node {
21 struct hlist_node node;
22
23 /* Context at merge point */
24 struct vstack stack;
25 unsigned long target_pc;
26 };
27
28 struct mp_table {
29 struct hlist_head mp_head[MERGE_POINT_TABLE_SIZE];
30 };
31
32 static
33 int lttng_hash_match(struct mp_node *mp_node, unsigned long key_pc)
34 {
35 if (mp_node->target_pc == key_pc)
36 return 1;
37 else
38 return 0;
39 }
40
41 static
42 int merge_points_compare(const struct vstack *stacka,
43 const struct vstack *stackb)
44 {
45 int i, len;
46
47 if (stacka->top != stackb->top)
48 return 1;
49 len = stacka->top + 1;
50 WARN_ON_ONCE(len < 0);
51 for (i = 0; i < len; i++) {
52 if (stacka->e[i].type != stackb->e[i].type)
53 return 1;
54 }
55 return 0;
56 }
57
58 static
59 int merge_point_add_check(struct mp_table *mp_table, unsigned long target_pc,
60 const struct vstack *stack)
61 {
62 struct mp_node *mp_node;
63 unsigned long hash = jhash_1word(target_pc, 0);
64 struct hlist_head *head;
65 struct mp_node *lookup_node;
66 int found = 0;
67
68 dbg_printk("Filter: adding merge point at offset %lu, hash %lu\n",
69 target_pc, hash);
70 mp_node = kzalloc(sizeof(struct mp_node), GFP_KERNEL);
71 if (!mp_node)
72 return -ENOMEM;
73 mp_node->target_pc = target_pc;
74 memcpy(&mp_node->stack, stack, sizeof(mp_node->stack));
75
76 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
77 hlist_for_each_entry(lookup_node, head, node) {
78 if (lttng_hash_match(lookup_node, target_pc)) {
79 found = 1;
80 break;
81 }
82 }
83 if (found) {
84 /* Key already present */
85 dbg_printk("Filter: compare merge points for offset %lu, hash %lu\n",
86 target_pc, hash);
87 kfree(mp_node);
88 if (merge_points_compare(stack, &lookup_node->stack)) {
89 printk(KERN_WARNING "Merge points differ for offset %lu\n",
90 target_pc);
91 return -EINVAL;
92 }
93 } else {
94 hlist_add_head(&mp_node->node, head);
95 }
96 return 0;
97 }
98
99 /*
100 * Binary comparators use top of stack and top of stack -1.
101 */
/*
 * Return 0 if typing is known to match, 1 if typing is dynamic
 * (unknown, resolved at runtime), negative error value on error.
 */
static
int bin_op_compare_check(struct vstack *stack, const filter_opcode_t opcode,
		const char *str)
{
	if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
		goto error_empty;

	/* Outer switch: type of top of stack (ax); inner: ax - 1 (bx). */
	switch (vstack_ax(stack)->type) {
	default:
	case REG_DOUBLE:
		goto error_type;

	case REG_STRING:
		switch (vstack_bx(stack)->type) {
		default:
		case REG_DOUBLE:
			goto error_type;
		case REG_TYPE_UNKNOWN:
			goto unknown;
		case REG_STRING:
			break;
		case REG_STAR_GLOB_STRING:
			/* Globbing patterns only support (in)equality comparison. */
			if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
				goto error_mismatch;
			}
			break;
		case REG_S64:
			goto error_mismatch;
		}
		break;
	case REG_STAR_GLOB_STRING:
		switch (vstack_bx(stack)->type) {
		default:
		case REG_DOUBLE:
			goto error_type;
		case REG_TYPE_UNKNOWN:
			goto unknown;
		case REG_STRING:
			/* Globbing patterns only support (in)equality comparison. */
			if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
				goto error_mismatch;
			}
			break;
		case REG_STAR_GLOB_STRING:
		case REG_S64:
			/* Glob vs glob, and glob vs s64, are never comparable. */
			goto error_mismatch;
		}
		break;
	case REG_S64:
		switch (vstack_bx(stack)->type) {
		default:
		case REG_DOUBLE:
			goto error_type;
		case REG_TYPE_UNKNOWN:
			goto unknown;
		case REG_STRING:
		case REG_STAR_GLOB_STRING:
			goto error_mismatch;
		case REG_S64:
			break;
		}
		break;
	case REG_TYPE_UNKNOWN:
		switch (vstack_bx(stack)->type) {
		default:
		case REG_DOUBLE:
			goto error_type;
		case REG_TYPE_UNKNOWN:
		case REG_STRING:
		case REG_STAR_GLOB_STRING:
		case REG_S64:
			/* Dynamic operand: defer type checking to runtime. */
			goto unknown;
		}
		break;
	}
	return 0;

unknown:
	return 1;

error_empty:
	printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
	return -EINVAL;

error_mismatch:
	printk(KERN_WARNING "type mismatch for '%s' binary operator\n", str);
	return -EINVAL;

error_type:
	printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
	return -EINVAL;
}
193
194 /*
195 * Binary bitwise operators use top of stack and top of stack -1.
196 * Return 0 if typing is known to match, 1 if typing is dynamic
197 * (unknown), negative error value on error.
198 */
199 static
200 int bin_op_bitwise_check(struct vstack *stack, filter_opcode_t opcode,
201 const char *str)
202 {
203 if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
204 goto error_empty;
205
206 switch (vstack_ax(stack)->type) {
207 default:
208 case REG_DOUBLE:
209 goto error_type;
210
211 case REG_TYPE_UNKNOWN:
212 switch (vstack_bx(stack)->type) {
213 default:
214 case REG_DOUBLE:
215 goto error_type;
216 case REG_TYPE_UNKNOWN:
217 case REG_STRING:
218 case REG_STAR_GLOB_STRING:
219 case REG_S64:
220 goto unknown;
221 }
222 break;
223 case REG_S64:
224 switch (vstack_bx(stack)->type) {
225 default:
226 case REG_DOUBLE:
227 goto error_type;
228 case REG_TYPE_UNKNOWN:
229 goto unknown;
230 case REG_S64:
231 break;
232 }
233 break;
234 }
235 return 0;
236
237 unknown:
238 return 1;
239
240 error_empty:
241 printk(KERN_WARNING "empty stack for '%s' binary operator\n", str);
242 return -EINVAL;
243
244 error_type:
245 printk(KERN_WARNING "unknown type for '%s' binary operator\n", str);
246 return -EINVAL;
247 }
248
249 static
250 int validate_get_symbol(struct bytecode_runtime *bytecode,
251 const struct get_symbol *sym)
252 {
253 const char *str, *str_limit;
254 size_t len_limit;
255
256 if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
257 return -EINVAL;
258
259 str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
260 str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
261 len_limit = str_limit - str;
262 if (strnlen(str, len_limit) == len_limit)
263 return -EINVAL;
264 return 0;
265 }
266
267 /*
268 * Validate bytecode range overflow within the validation pass.
269 * Called for each instruction encountered.
270 */
static
int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
		char *start_pc, char *pc)
{
	/* 0: instruction in range, -EINVAL: unknown/unsupported op, -ERANGE: overflow. */
	int ret = 0;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		printk(KERN_WARNING "unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		break;
	}

	case FILTER_OP_RETURN:
	case FILTER_OP_RETURN_S64:
	{
		if (unlikely(pc + sizeof(struct return_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	/* Floating point */
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	case FILTER_OP_LOAD_DOUBLE:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	{
		/* Arithmetic and floating point ops are rejected by this validator. */
		printk(KERN_WARNING "unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		break;
	}

	case FILTER_OP_EQ:
	case FILTER_OP_NE:
	case FILTER_OP_GT:
	case FILTER_OP_LT:
	case FILTER_OP_GE:
	case FILTER_OP_LE:
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	case FILTER_OP_EQ_STAR_GLOB_STRING:
	case FILTER_OP_NE_STAR_GLOB_STRING:
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	case FILTER_OP_BIT_RSHIFT:
	case FILTER_OP_BIT_LSHIFT:
	case FILTER_OP_BIT_AND:
	case FILTER_OP_BIT_OR:
	case FILTER_OP_BIT_XOR:
	{
		if (unlikely(pc + sizeof(struct binary_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	case FILTER_OP_UNARY_BIT_NOT:
	{
		if (unlikely(pc + sizeof(struct unary_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		if (unlikely(pc + sizeof(struct logical_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* load field ref */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		/* Generic (untyped) field ref must have been specialized earlier. */
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		break;
	}

	/* get context ref */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_S64:
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	case FILTER_OP_LOAD_STAR_GLOB_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;
		uint32_t str_len, maxlen;

		if (unlikely(pc + sizeof(struct load_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
			break;
		}

		/* The immediate string must be NUL-terminated within the bytecode. */
		maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
		str_len = strnlen(insn->data, maxlen);
		if (unlikely(str_len >= maxlen)) {
			/* Final '\0' not found within range */
			ret = -ERANGE;
		}
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	case FILTER_OP_CAST_NOP:
	{
		if (unlikely(pc + sizeof(struct cast_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/*
	 * Instructions for recursive traversal through composed types.
	 */
	case FILTER_OP_GET_CONTEXT_ROOT:
	case FILTER_OP_GET_APP_CONTEXT_ROOT:
	case FILTER_OP_GET_PAYLOAD_ROOT:
	case FILTER_OP_LOAD_FIELD:
	case FILTER_OP_LOAD_FIELD_S8:
	case FILTER_OP_LOAD_FIELD_S16:
	case FILTER_OP_LOAD_FIELD_S32:
	case FILTER_OP_LOAD_FIELD_S64:
	case FILTER_OP_LOAD_FIELD_U8:
	case FILTER_OP_LOAD_FIELD_U16:
	case FILTER_OP_LOAD_FIELD_U32:
	case FILTER_OP_LOAD_FIELD_U64:
	case FILTER_OP_LOAD_FIELD_STRING:
	case FILTER_OP_LOAD_FIELD_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_DOUBLE:
		if (unlikely(pc + sizeof(struct load_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;

	case FILTER_OP_GET_SYMBOL:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct get_symbol *sym = (struct get_symbol *) insn->data;

		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
			break;
		}
		/* Also validate that the symbol string itself is in range. */
		ret = validate_get_symbol(bytecode, sym);
		break;
	}

	case FILTER_OP_GET_SYMBOL_FIELD:
	{
		printk(KERN_WARNING "Unexpected get symbol field\n");
		ret = -EINVAL;
		break;
	}

	case FILTER_OP_GET_INDEX_U16:
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;

	case FILTER_OP_GET_INDEX_U64:
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	return ret;
}
527
528 static
529 unsigned long delete_all_nodes(struct mp_table *mp_table)
530 {
531 struct mp_node *mp_node;
532 struct hlist_node *tmp;
533 unsigned long nr_nodes = 0;
534 int i;
535
536 for (i = 0; i < MERGE_POINT_TABLE_SIZE; i++) {
537 struct hlist_head *head;
538
539 head = &mp_table->mp_head[i];
540 hlist_for_each_entry_safe(mp_node, tmp, head, node) {
541 kfree(mp_node);
542 nr_nodes++;
543 }
544 }
545 return nr_nodes;
546 }
547
548 /*
549 * Return value:
550 * >=0: success
551 * <0: error
552 */
static
int validate_instruction_context(struct bytecode_runtime *bytecode,
		struct vstack *stack,
		char *start_pc,
		char *pc)
{
	/* Check the instruction at @pc against the current virtual stack state. */
	int ret = 0;
	const filter_opcode_t opcode = *(filter_opcode_t *) pc;

	switch (opcode) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		printk(KERN_WARNING "unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_RETURN:
	case FILTER_OP_RETURN_S64:
	{
		goto end;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	/* Floating point */
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_LOAD_DOUBLE:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	{
		/* Arithmetic and floating point ops are rejected by this validator. */
		printk(KERN_WARNING "unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_EQ:
	{
		ret = bin_op_compare_check(stack, opcode, "==");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_NE:
	{
		ret = bin_op_compare_check(stack, opcode, "!=");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_GT:
	{
		ret = bin_op_compare_check(stack, opcode, ">");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_LT:
	{
		ret = bin_op_compare_check(stack, opcode, "<");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_GE:
	{
		ret = bin_op_compare_check(stack, opcode, ">=");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_LE:
	{
		ret = bin_op_compare_check(stack, opcode, "<=");
		if (ret < 0)
			goto end;
		break;
	}

	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	{
		/* Specialized string comparators require both operands REG_STRING. */
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_STRING
				|| vstack_bx(stack)->type != REG_STRING) {
			printk(KERN_WARNING "Unexpected register type for string comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}


	case FILTER_OP_EQ_STAR_GLOB_STRING:
	case FILTER_OP_NE_STAR_GLOB_STRING:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		/* At least one operand must be a globbing pattern (note the &&). */
		if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
				&& vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
			printk(KERN_WARNING "Unexpected register type for globbing pattern comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64
				|| vstack_bx(stack)->type != REG_S64) {
			printk(KERN_WARNING "Unexpected register type for s64 comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	case FILTER_OP_BIT_RSHIFT:
		ret = bin_op_bitwise_check(stack, opcode, ">>");
		if (ret < 0)
			goto end;
		break;
	case FILTER_OP_BIT_LSHIFT:
		ret = bin_op_bitwise_check(stack, opcode, "<<");
		if (ret < 0)
			goto end;
		break;
	case FILTER_OP_BIT_AND:
		ret = bin_op_bitwise_check(stack, opcode, "&");
		if (ret < 0)
			goto end;
		break;
	case FILTER_OP_BIT_OR:
		ret = bin_op_bitwise_check(stack, opcode, "|");
		if (ret < 0)
			goto end;
		break;
	case FILTER_OP_BIT_XOR:
		ret = bin_op_bitwise_check(stack, opcode, "^");
		if (ret < 0)
			goto end;
		break;

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	{
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
		case REG_DOUBLE:
			printk(KERN_WARNING "unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
		case REG_STAR_GLOB_STRING:
			printk(KERN_WARNING "Unary op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
		case REG_TYPE_UNKNOWN:
			break;
		}
		break;
	}
	case FILTER_OP_UNARY_BIT_NOT:
	{
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
			printk(KERN_WARNING "unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
		case REG_STAR_GLOB_STRING:
		case REG_DOUBLE:
			printk(KERN_WARNING "Unary bitwise op can only be applied to numeric registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		case REG_TYPE_UNKNOWN:
			break;
		}
		break;
	}

	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64) {
			printk(KERN_WARNING "Invalid register type\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		struct logical_op *insn = (struct logical_op *) pc;

		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64) {
			printk(KERN_WARNING "Logical comparator expects S64 register\n");
			ret = -EINVAL;
			goto end;
		}

		dbg_printk("Validate jumping to bytecode offset %u\n",
			(unsigned int) insn->skip_offset);
		/* Only forward jumps are allowed: reject backward skip offsets. */
		if (unlikely(start_pc + insn->skip_offset <= pc)) {
			printk(KERN_WARNING "Loops are not allowed in bytecode\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* load field ref */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		/* Generic (untyped) field ref must have been specialized earlier. */
		printk(KERN_WARNING "Unknown field ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
	case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate load field ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate load field ref offset %u type s64\n",
			ref->offset);
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	case FILTER_OP_LOAD_STAR_GLOB_STRING:
	{
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	{
		struct cast_op *insn = (struct cast_op *) pc;

		if (!vstack_ax(stack)) {
			printk(KERN_WARNING "Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
		case REG_DOUBLE:
			printk(KERN_WARNING "unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
		case REG_STAR_GLOB_STRING:
			printk(KERN_WARNING "Cast op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		}
		/*
		 * NOTE(review): this branch appears unreachable — this case only
		 * handles FILTER_OP_CAST_TO_S64, and the switch above already
		 * rejects non-S64 registers. Kept as-is; confirm upstream intent.
		 */
		if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) {
			if (vstack_ax(stack)->type != REG_DOUBLE) {
				printk(KERN_WARNING "Cast expects double\n");
				ret = -EINVAL;
				goto end;
			}
		}
		break;
	}
	case FILTER_OP_CAST_NOP:
	{
		break;
	}

	/* get context ref */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		printk(KERN_WARNING "Unknown get context ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate get context ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printk("Validate get context ref offset %u type s64\n",
			ref->offset);
		break;
	}

	/*
	 * Instructions for recursive traversal through composed types.
	 */
	case FILTER_OP_GET_CONTEXT_ROOT:
	{
		dbg_printk("Validate get context root\n");
		break;
	}
	case FILTER_OP_GET_APP_CONTEXT_ROOT:
	{
		dbg_printk("Validate get app context root\n");
		break;
	}
	case FILTER_OP_GET_PAYLOAD_ROOT:
	{
		dbg_printk("Validate get payload root\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD:
	{
		/*
		 * We tolerate that field type is unknown at validation,
		 * because we are performing the load specialization in
		 * a phase after validation.
		 */
		dbg_printk("Validate load field\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_S8:
	{
		dbg_printk("Validate load field s8\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_S16:
	{
		dbg_printk("Validate load field s16\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_S32:
	{
		dbg_printk("Validate load field s32\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_S64:
	{
		dbg_printk("Validate load field s64\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_U8:
	{
		dbg_printk("Validate load field u8\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_U16:
	{
		dbg_printk("Validate load field u16\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_U32:
	{
		dbg_printk("Validate load field u32\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_U64:
	{
		dbg_printk("Validate load field u64\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_STRING:
	{
		dbg_printk("Validate load field string\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_SEQUENCE:
	{
		dbg_printk("Validate load field sequence\n");
		break;
	}
	case FILTER_OP_LOAD_FIELD_DOUBLE:
	{
		dbg_printk("Validate load field double\n");
		break;
	}

	case FILTER_OP_GET_SYMBOL:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct get_symbol *sym = (struct get_symbol *) insn->data;

		dbg_printk("Validate get symbol offset %u\n", sym->offset);
		break;
	}

	case FILTER_OP_GET_SYMBOL_FIELD:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct get_symbol *sym = (struct get_symbol *) insn->data;

		dbg_printk("Validate get symbol field offset %u\n", sym->offset);
		break;
	}

	case FILTER_OP_GET_INDEX_U16:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;

		dbg_printk("Validate get index u16 index %u\n", get_index->index);
		break;
	}

	case FILTER_OP_GET_INDEX_U64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;

		dbg_printk("Validate get index u64 index %llu\n",
			(unsigned long long) get_index->index);
		break;
	}
	}
end:
	return ret;
}
1074
1075 /*
1076 * Return value:
1077 * 0: success
1078 * <0: error
1079 */
1080 static
1081 int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
1082 struct mp_table *mp_table,
1083 struct vstack *stack,
1084 char *start_pc,
1085 char *pc)
1086 {
1087 int ret, found = 0;
1088 unsigned long target_pc = pc - start_pc;
1089 unsigned long hash;
1090 struct hlist_head *head;
1091 struct mp_node *mp_node;
1092
1093 /* Validate the context resulting from the previous instruction */
1094 ret = validate_instruction_context(bytecode, stack, start_pc, pc);
1095 if (ret < 0)
1096 return ret;
1097
1098 /* Validate merge points */
1099 hash = jhash_1word(target_pc, 0);
1100 head = &mp_table->mp_head[hash & (MERGE_POINT_TABLE_SIZE - 1)];
1101 hlist_for_each_entry(mp_node, head, node) {
1102 if (lttng_hash_match(mp_node, target_pc)) {
1103 found = 1;
1104 break;
1105 }
1106 }
1107 if (found) {
1108 dbg_printk("Filter: validate merge point at offset %lu\n",
1109 target_pc);
1110 if (merge_points_compare(stack, &mp_node->stack)) {
1111 printk(KERN_WARNING "Merge points differ for offset %lu\n",
1112 target_pc);
1113 return -EINVAL;
1114 }
1115 /* Once validated, we can remove the merge point */
1116 dbg_printk("Filter: remove merge point at offset %lu\n",
1117 target_pc);
1118 hlist_del(&mp_node->node);
1119 }
1120 return 0;
1121 }
1122
1123 /*
1124 * Return value:
1125 * >0: going to next insn.
1126 * 0: success, stop iteration.
1127 * <0: error
1128 */
1129 static
1130 int exec_insn(struct bytecode_runtime *bytecode,
1131 struct mp_table *mp_table,
1132 struct vstack *stack,
1133 char **_next_pc,
1134 char *pc)
1135 {
1136 int ret = 1;
1137 char *next_pc = *_next_pc;
1138
1139 switch (*(filter_opcode_t *) pc) {
1140 case FILTER_OP_UNKNOWN:
1141 default:
1142 {
1143 printk(KERN_WARNING "unknown bytecode op %u\n",
1144 (unsigned int) *(filter_opcode_t *) pc);
1145 ret = -EINVAL;
1146 goto end;
1147 }
1148
1149 case FILTER_OP_RETURN:
1150 {
1151 if (!vstack_ax(stack)) {
1152 printk(KERN_WARNING "Empty stack\n");
1153 ret = -EINVAL;
1154 goto end;
1155 }
1156 switch (vstack_ax(stack)->type) {
1157 case REG_S64:
1158 case REG_TYPE_UNKNOWN:
1159 break;
1160 default:
1161 printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
1162 (int) vstack_ax(stack)->type);
1163 ret = -EINVAL;
1164 goto end;
1165 }
1166
1167 ret = 0;
1168 goto end;
1169 }
1170
1171 case FILTER_OP_RETURN_S64:
1172 {
1173 if (!vstack_ax(stack)) {
1174 printk(KERN_WARNING "Empty stack\n");
1175 ret = -EINVAL;
1176 goto end;
1177 }
1178 switch (vstack_ax(stack)->type) {
1179 case REG_S64:
1180 break;
1181 default:
1182 case REG_TYPE_UNKNOWN:
1183 printk(KERN_WARNING "Unexpected register type %d at end of bytecode\n",
1184 (int) vstack_ax(stack)->type);
1185 ret = -EINVAL;
1186 goto end;
1187 }
1188
1189 ret = 0;
1190 goto end;
1191 }
1192
1193 /* binary */
1194 case FILTER_OP_MUL:
1195 case FILTER_OP_DIV:
1196 case FILTER_OP_MOD:
1197 case FILTER_OP_PLUS:
1198 case FILTER_OP_MINUS:
1199 /* Floating point */
1200 case FILTER_OP_EQ_DOUBLE:
1201 case FILTER_OP_NE_DOUBLE:
1202 case FILTER_OP_GT_DOUBLE:
1203 case FILTER_OP_LT_DOUBLE:
1204 case FILTER_OP_GE_DOUBLE:
1205 case FILTER_OP_LE_DOUBLE:
1206 case FILTER_OP_EQ_DOUBLE_S64:
1207 case FILTER_OP_NE_DOUBLE_S64:
1208 case FILTER_OP_GT_DOUBLE_S64:
1209 case FILTER_OP_LT_DOUBLE_S64:
1210 case FILTER_OP_GE_DOUBLE_S64:
1211 case FILTER_OP_LE_DOUBLE_S64:
1212 case FILTER_OP_EQ_S64_DOUBLE:
1213 case FILTER_OP_NE_S64_DOUBLE:
1214 case FILTER_OP_GT_S64_DOUBLE:
1215 case FILTER_OP_LT_S64_DOUBLE:
1216 case FILTER_OP_GE_S64_DOUBLE:
1217 case FILTER_OP_LE_S64_DOUBLE:
1218 case FILTER_OP_UNARY_PLUS_DOUBLE:
1219 case FILTER_OP_UNARY_MINUS_DOUBLE:
1220 case FILTER_OP_UNARY_NOT_DOUBLE:
1221 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
1222 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
1223 case FILTER_OP_LOAD_DOUBLE:
1224 case FILTER_OP_CAST_DOUBLE_TO_S64:
1225 {
1226 printk(KERN_WARNING "unsupported bytecode op %u\n",
1227 (unsigned int) *(filter_opcode_t *) pc);
1228 ret = -EINVAL;
1229 goto end;
1230 }
1231
1232 case FILTER_OP_EQ:
1233 case FILTER_OP_NE:
1234 case FILTER_OP_GT:
1235 case FILTER_OP_LT:
1236 case FILTER_OP_GE:
1237 case FILTER_OP_LE:
1238 case FILTER_OP_EQ_STRING:
1239 case FILTER_OP_NE_STRING:
1240 case FILTER_OP_GT_STRING:
1241 case FILTER_OP_LT_STRING:
1242 case FILTER_OP_GE_STRING:
1243 case FILTER_OP_LE_STRING:
1244 case FILTER_OP_EQ_STAR_GLOB_STRING:
1245 case FILTER_OP_NE_STAR_GLOB_STRING:
1246 case FILTER_OP_EQ_S64:
1247 case FILTER_OP_NE_S64:
1248 case FILTER_OP_GT_S64:
1249 case FILTER_OP_LT_S64:
1250 case FILTER_OP_GE_S64:
1251 case FILTER_OP_LE_S64:
1252 case FILTER_OP_BIT_RSHIFT:
1253 case FILTER_OP_BIT_LSHIFT:
1254 case FILTER_OP_BIT_AND:
1255 case FILTER_OP_BIT_OR:
1256 case FILTER_OP_BIT_XOR:
1257 {
1258 /* Pop 2, push 1 */
1259 if (vstack_pop(stack)) {
1260 ret = -EINVAL;
1261 goto end;
1262 }
1263 if (!vstack_ax(stack)) {
1264 printk(KERN_WARNING "Empty stack\n");
1265 ret = -EINVAL;
1266 goto end;
1267 }
1268 switch (vstack_ax(stack)->type) {
1269 case REG_S64:
1270 case REG_DOUBLE:
1271 case REG_STRING:
1272 case REG_STAR_GLOB_STRING:
1273 case REG_TYPE_UNKNOWN:
1274 break;
1275 default:
1276 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1277 (int) vstack_ax(stack)->type);
1278 ret = -EINVAL;
1279 goto end;
1280 }
1281
1282 vstack_ax(stack)->type = REG_S64;
1283 next_pc += sizeof(struct binary_op);
1284 break;
1285 }
1286
1287 /* unary */
1288 case FILTER_OP_UNARY_PLUS:
1289 case FILTER_OP_UNARY_MINUS:
1290 {
1291 /* Pop 1, push 1 */
1292 if (!vstack_ax(stack)) {
1293 printk(KERN_WARNING "Empty stack\n\n");
1294 ret = -EINVAL;
1295 goto end;
1296 }
1297 switch (vstack_ax(stack)->type) {
1298 case REG_S64:
1299 case REG_TYPE_UNKNOWN:
1300 break;
1301 default:
1302 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1303 (int) vstack_ax(stack)->type);
1304 ret = -EINVAL;
1305 goto end;
1306 }
1307
1308 vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
1309 next_pc += sizeof(struct unary_op);
1310 break;
1311 }
1312
1313 case FILTER_OP_UNARY_PLUS_S64:
1314 case FILTER_OP_UNARY_MINUS_S64:
1315 case FILTER_OP_UNARY_NOT_S64:
1316 {
1317 /* Pop 1, push 1 */
1318 if (!vstack_ax(stack)) {
1319 printk(KERN_WARNING "Empty stack\n\n");
1320 ret = -EINVAL;
1321 goto end;
1322 }
1323 switch (vstack_ax(stack)->type) {
1324 case REG_S64:
1325 break;
1326 default:
1327 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1328 (int) vstack_ax(stack)->type);
1329 ret = -EINVAL;
1330 goto end;
1331 }
1332
1333 vstack_ax(stack)->type = REG_S64;
1334 next_pc += sizeof(struct unary_op);
1335 break;
1336 }
1337
1338 case FILTER_OP_UNARY_NOT:
1339 {
1340 /* Pop 1, push 1 */
1341 if (!vstack_ax(stack)) {
1342 printk(KERN_WARNING "Empty stack\n\n");
1343 ret = -EINVAL;
1344 goto end;
1345 }
1346 switch (vstack_ax(stack)->type) {
1347 case REG_S64:
1348 case REG_TYPE_UNKNOWN:
1349 break;
1350 default:
1351 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1352 (int) vstack_ax(stack)->type);
1353 ret = -EINVAL;
1354 goto end;
1355 }
1356
1357 vstack_ax(stack)->type = REG_S64;
1358 next_pc += sizeof(struct unary_op);
1359 break;
1360 }
1361
1362 case FILTER_OP_UNARY_BIT_NOT:
1363 {
1364 /* Pop 1, push 1 */
1365 if (!vstack_ax(stack)) {
1366 printk(KERN_WARNING "Empty stack\n");
1367 ret = -EINVAL;
1368 goto end;
1369 }
1370 switch (vstack_ax(stack)->type) {
1371 case REG_S64:
1372 case REG_TYPE_UNKNOWN:
1373 break;
1374 case REG_DOUBLE:
1375 default:
1376 printk(KERN_WARNING "Unexpected register type %d for operation\n",
1377 (int) vstack_ax(stack)->type);
1378 ret = -EINVAL;
1379 goto end;
1380 }
1381
1382 vstack_ax(stack)->type = REG_S64;
1383 next_pc += sizeof(struct unary_op);
1384 break;
1385 }
1386
1387 /* logical */
1388 case FILTER_OP_AND:
1389 case FILTER_OP_OR:
1390 {
1391 struct logical_op *insn = (struct logical_op *) pc;
1392 int merge_ret;
1393
1394 /* Add merge point to table */
1395 merge_ret = merge_point_add_check(mp_table,
1396 insn->skip_offset, stack);
1397 if (merge_ret) {
1398 ret = merge_ret;
1399 goto end;
1400 }
1401
1402 if (!vstack_ax(stack)) {
1403 printk(KERN_WARNING "Empty stack\n\n");
1404 ret = -EINVAL;
1405 goto end;
1406 }
1407 /* There is always a cast-to-s64 operation before a or/and op. */
1408 switch (vstack_ax(stack)->type) {
1409 case REG_S64:
1410 break;
1411 default:
1412 printk(KERN_WARNING "Incorrect register type %d for operation\n",
1413 (int) vstack_ax(stack)->type);
1414 ret = -EINVAL;
1415 goto end;
1416 }
1417
1418 /* Continue to next instruction */
1419 /* Pop 1 when jump not taken */
1420 if (vstack_pop(stack)) {
1421 ret = -EINVAL;
1422 goto end;
1423 }
1424 next_pc += sizeof(struct logical_op);
1425 break;
1426 }
1427
1428 /* load field ref */
1429 case FILTER_OP_LOAD_FIELD_REF:
1430 {
1431 printk(KERN_WARNING "Unknown field ref type\n");
1432 ret = -EINVAL;
1433 goto end;
1434 }
1435 /* get context ref */
1436 case FILTER_OP_GET_CONTEXT_REF:
1437 {
1438 printk(KERN_WARNING "Unknown get context ref type\n");
1439 ret = -EINVAL;
1440 goto end;
1441 }
1442 case FILTER_OP_LOAD_FIELD_REF_STRING:
1443 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
1444 case FILTER_OP_GET_CONTEXT_REF_STRING:
1445 case FILTER_OP_LOAD_FIELD_REF_USER_STRING:
1446 case FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE:
1447 {
1448 if (vstack_push(stack)) {
1449 ret = -EINVAL;
1450 goto end;
1451 }
1452 vstack_ax(stack)->type = REG_STRING;
1453 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1454 break;
1455 }
1456 case FILTER_OP_LOAD_FIELD_REF_S64:
1457 case FILTER_OP_GET_CONTEXT_REF_S64:
1458 {
1459 if (vstack_push(stack)) {
1460 ret = -EINVAL;
1461 goto end;
1462 }
1463 vstack_ax(stack)->type = REG_S64;
1464 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1465 break;
1466 }
1467
1468 /* load from immediate operand */
1469 case FILTER_OP_LOAD_STRING:
1470 {
1471 struct load_op *insn = (struct load_op *) pc;
1472
1473 if (vstack_push(stack)) {
1474 ret = -EINVAL;
1475 goto end;
1476 }
1477 vstack_ax(stack)->type = REG_STRING;
1478 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1479 break;
1480 }
1481
1482 case FILTER_OP_LOAD_STAR_GLOB_STRING:
1483 {
1484 struct load_op *insn = (struct load_op *) pc;
1485
1486 if (vstack_push(stack)) {
1487 ret = -EINVAL;
1488 goto end;
1489 }
1490 vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
1491 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1492 break;
1493 }
1494
1495 case FILTER_OP_LOAD_S64:
1496 {
1497 if (vstack_push(stack)) {
1498 ret = -EINVAL;
1499 goto end;
1500 }
1501 vstack_ax(stack)->type = REG_S64;
1502 next_pc += sizeof(struct load_op)
1503 + sizeof(struct literal_numeric);
1504 break;
1505 }
1506
1507 case FILTER_OP_CAST_TO_S64:
1508 {
1509 /* Pop 1, push 1 */
1510 if (!vstack_ax(stack)) {
1511 printk(KERN_WARNING "Empty stack\n");
1512 ret = -EINVAL;
1513 goto end;
1514 }
1515 switch (vstack_ax(stack)->type) {
1516 case REG_S64:
1517 case REG_DOUBLE:
1518 case REG_TYPE_UNKNOWN:
1519 break;
1520 default:
1521 printk(KERN_WARNING "Incorrect register type %d for cast\n",
1522 (int) vstack_ax(stack)->type);
1523 ret = -EINVAL;
1524 goto end;
1525 }
1526 vstack_ax(stack)->type = REG_S64;
1527 next_pc += sizeof(struct cast_op);
1528 break;
1529 }
1530 case FILTER_OP_CAST_NOP:
1531 {
1532 next_pc += sizeof(struct cast_op);
1533 break;
1534 }
1535
1536 /*
1537 * Instructions for recursive traversal through composed types.
1538 */
1539 case FILTER_OP_GET_CONTEXT_ROOT:
1540 case FILTER_OP_GET_APP_CONTEXT_ROOT:
1541 case FILTER_OP_GET_PAYLOAD_ROOT:
1542 {
1543 if (vstack_push(stack)) {
1544 ret = -EINVAL;
1545 goto end;
1546 }
1547 vstack_ax(stack)->type = REG_PTR;
1548 next_pc += sizeof(struct load_op);
1549 break;
1550 }
1551
1552 case FILTER_OP_LOAD_FIELD:
1553 {
1554 /* Pop 1, push 1 */
1555 if (!vstack_ax(stack)) {
1556 printk(KERN_WARNING "Empty stack\n\n");
1557 ret = -EINVAL;
1558 goto end;
1559 }
1560 if (vstack_ax(stack)->type != REG_PTR) {
1561 printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
1562 ret = -EINVAL;
1563 goto end;
1564 }
1565 vstack_ax(stack)->type = REG_TYPE_UNKNOWN;
1566 next_pc += sizeof(struct load_op);
1567 break;
1568 }
1569
1570 case FILTER_OP_LOAD_FIELD_S8:
1571 case FILTER_OP_LOAD_FIELD_S16:
1572 case FILTER_OP_LOAD_FIELD_S32:
1573 case FILTER_OP_LOAD_FIELD_S64:
1574 case FILTER_OP_LOAD_FIELD_U8:
1575 case FILTER_OP_LOAD_FIELD_U16:
1576 case FILTER_OP_LOAD_FIELD_U32:
1577 case FILTER_OP_LOAD_FIELD_U64:
1578 {
1579 /* Pop 1, push 1 */
1580 if (!vstack_ax(stack)) {
1581 printk(KERN_WARNING "Empty stack\n\n");
1582 ret = -EINVAL;
1583 goto end;
1584 }
1585 if (vstack_ax(stack)->type != REG_PTR) {
1586 printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
1587 ret = -EINVAL;
1588 goto end;
1589 }
1590 vstack_ax(stack)->type = REG_S64;
1591 next_pc += sizeof(struct load_op);
1592 break;
1593 }
1594
1595 case FILTER_OP_LOAD_FIELD_STRING:
1596 case FILTER_OP_LOAD_FIELD_SEQUENCE:
1597 {
1598 /* Pop 1, push 1 */
1599 if (!vstack_ax(stack)) {
1600 printk(KERN_WARNING "Empty stack\n\n");
1601 ret = -EINVAL;
1602 goto end;
1603 }
1604 if (vstack_ax(stack)->type != REG_PTR) {
1605 printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
1606 ret = -EINVAL;
1607 goto end;
1608 }
1609 vstack_ax(stack)->type = REG_STRING;
1610 next_pc += sizeof(struct load_op);
1611 break;
1612 }
1613
1614 case FILTER_OP_LOAD_FIELD_DOUBLE:
1615 {
1616 /* Pop 1, push 1 */
1617 if (!vstack_ax(stack)) {
1618 printk(KERN_WARNING "Empty stack\n\n");
1619 ret = -EINVAL;
1620 goto end;
1621 }
1622 if (vstack_ax(stack)->type != REG_PTR) {
1623 printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
1624 ret = -EINVAL;
1625 goto end;
1626 }
1627 vstack_ax(stack)->type = REG_DOUBLE;
1628 next_pc += sizeof(struct load_op);
1629 break;
1630 }
1631
1632 case FILTER_OP_GET_SYMBOL:
1633 case FILTER_OP_GET_SYMBOL_FIELD:
1634 {
1635 /* Pop 1, push 1 */
1636 if (!vstack_ax(stack)) {
1637 printk(KERN_WARNING "Empty stack\n\n");
1638 ret = -EINVAL;
1639 goto end;
1640 }
1641 if (vstack_ax(stack)->type != REG_PTR) {
1642 printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
1643 ret = -EINVAL;
1644 goto end;
1645 }
1646 next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
1647 break;
1648 }
1649
1650 case FILTER_OP_GET_INDEX_U16:
1651 {
1652 /* Pop 1, push 1 */
1653 if (!vstack_ax(stack)) {
1654 printk(KERN_WARNING "Empty stack\n\n");
1655 ret = -EINVAL;
1656 goto end;
1657 }
1658 if (vstack_ax(stack)->type != REG_PTR) {
1659 printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
1660 ret = -EINVAL;
1661 goto end;
1662 }
1663 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
1664 break;
1665 }
1666
1667 case FILTER_OP_GET_INDEX_U64:
1668 {
1669 /* Pop 1, push 1 */
1670 if (!vstack_ax(stack)) {
1671 printk(KERN_WARNING "Empty stack\n\n");
1672 ret = -EINVAL;
1673 goto end;
1674 }
1675 if (vstack_ax(stack)->type != REG_PTR) {
1676 printk(KERN_WARNING "Expecting pointer on top of stack\n\n");
1677 ret = -EINVAL;
1678 goto end;
1679 }
1680 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
1681 break;
1682 }
1683
1684 }
1685 end:
1686 *_next_pc = next_pc;
1687 return ret;
1688 }
1689
1690 /*
1691 * Never called concurrently (hash seed is shared).
1692 */
1693 int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
1694 {
1695 struct mp_table *mp_table;
1696 char *pc, *next_pc, *start_pc;
1697 int ret = -EINVAL;
1698 struct vstack stack;
1699
1700 vstack_init(&stack);
1701
1702 mp_table = kzalloc(sizeof(*mp_table), GFP_KERNEL);
1703 if (!mp_table) {
1704 printk(KERN_WARNING "Error allocating hash table for bytecode validation\n");
1705 return -ENOMEM;
1706 }
1707 start_pc = &bytecode->code[0];
1708 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
1709 pc = next_pc) {
1710 ret = bytecode_validate_overflow(bytecode, start_pc, pc);
1711 if (ret != 0) {
1712 if (ret == -ERANGE)
1713 printk(KERN_WARNING "filter bytecode overflow\n");
1714 goto end;
1715 }
1716 dbg_printk("Validating op %s (%u)\n",
1717 lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc),
1718 (unsigned int) *(filter_opcode_t *) pc);
1719
1720 /*
1721 * For each instruction, validate the current context
1722 * (traversal of entire execution flow), and validate
1723 * all merge points targeting this instruction.
1724 */
1725 ret = validate_instruction_all_contexts(bytecode, mp_table,
1726 &stack, start_pc, pc);
1727 if (ret)
1728 goto end;
1729 ret = exec_insn(bytecode, mp_table, &stack, &next_pc, pc);
1730 if (ret <= 0)
1731 goto end;
1732 }
1733 end:
1734 if (delete_all_nodes(mp_table)) {
1735 if (!ret) {
1736 printk(KERN_WARNING "Unexpected merge points\n");
1737 ret = -EINVAL;
1738 }
1739 }
1740 kfree(mp_table);
1741 return ret;
1742 }
This page took 0.100545 seconds and 4 git commands to generate.