Fix filter: fix stack leak on taken branch
[lttng-ust.git] / liblttng-ust / lttng-filter-validator.c
/*
 * lttng-filter-validator.c
 *
 * LTTng UST filter bytecode validator.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#define _LGPL_SOURCE
#include <urcu-bp.h>
#include <time.h>
#include "lttng-filter.h"

#include <urcu/rculfhash.h>
#include "lttng-hash-helper.h"

/*
 * Number of merge points used to size the hash table. The table is
 * initialized to that size and never resized, because resizing would
 * trigger RCU worker thread execution: we fall back on linear bucket
 * traversal if the number of merge points exceeds this value.
 */
#define DEFAULT_NR_MERGE_POINTS	128
#define MIN_NR_BUCKETS		128
#define MAX_NR_BUCKETS		128

/* merge point table node */
struct lfht_mp_node {
	struct cds_lfht_node node;

	/* Context at merge point */
	struct vstack stack;
	unsigned long target_pc;
};

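/*
 * Hash seed for the merge point table. It is set lazily, from time(),
 * the first time lttng_filter_validate_bytecode() runs; this is safe
 * because validation is never called concurrently (see below).
 */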
static unsigned long lttng_hash_seed;
static unsigned int lttng_hash_seed_ready;

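/*
 * Hash table match function: a node matches when its target program
 * counter equals the lookup key.
 */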
static
int lttng_hash_match(struct cds_lfht_node *node, const void *key)
{
	struct lfht_mp_node *mp_node =
		caa_container_of(node, struct lfht_mp_node, node);
	unsigned long key_pc = (unsigned long) key;

	if (mp_node->target_pc == key_pc)
		return 1;
	else
		return 0;
}

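/*
 * Compare two virtual stack snapshots. Return 0 when they have the
 * same depth and the same register type in every slot, nonzero
 * otherwise.
 */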
static
int merge_points_compare(const struct vstack *stacka,
		const struct vstack *stackb)
{
	int i, len;

	if (stacka->top != stackb->top)
		return 1;
	len = stacka->top + 1;
	assert(len >= 0);
	for (i = 0; i < len; i++) {
		if (stacka->e[i].type != stackb->e[i].type)
			return 1;
	}
	return 0;
}

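/*
 * Record the stack context expected at a branch target. If a merge
 * point already exists for that target, the new context must match the
 * stored one; otherwise the bytecode is rejected.
 */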
static
int merge_point_add_check(struct cds_lfht *ht, unsigned long target_pc,
		const struct vstack *stack)
{
	struct lfht_mp_node *node;
	unsigned long hash = lttng_hash_mix((const void *) target_pc,
				sizeof(target_pc),
				lttng_hash_seed);
	struct cds_lfht_node *ret;

	dbg_printf("Filter: adding merge point at offset %lu, hash %lu\n",
			target_pc, hash);
	node = zmalloc(sizeof(struct lfht_mp_node));
	if (!node)
		return -ENOMEM;
	node->target_pc = target_pc;
	memcpy(&node->stack, stack, sizeof(node->stack));
	ret = cds_lfht_add_unique(ht, hash, lttng_hash_match,
			(const void *) target_pc, &node->node);
	if (ret != &node->node) {
		struct lfht_mp_node *ret_mp =
			caa_container_of(ret, struct lfht_mp_node, node);

		/* Key already present */
		dbg_printf("Filter: compare merge points for offset %lu, hash %lu\n",
				target_pc, hash);
		free(node);
		if (merge_points_compare(stack, &ret_mp->stack)) {
			ERR("Merge points differ for offset %lu\n",
				target_pc);
			return -EINVAL;
		}
	}
	return 0;
}

/*
 * Binary comparators use top of stack and top of stack -1.
 */
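/*
 * Strings may only be compared with strings; S64 and double operands
 * may be mixed with each other, but not with strings.
 */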
static
int bin_op_compare_check(struct vstack *stack, const char *str)
{
	if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
		goto error_unknown;

	switch (vstack_ax(stack)->type) {
	default:
		goto error_unknown;

	case REG_STRING:
		switch (vstack_bx(stack)->type) {
		default:
			goto error_unknown;

		case REG_STRING:
			break;
		case REG_S64:
		case REG_DOUBLE:
			goto error_mismatch;
		}
		break;
	case REG_S64:
	case REG_DOUBLE:
		switch (vstack_bx(stack)->type) {
		default:
			goto error_unknown;

		case REG_STRING:
			goto error_mismatch;

		case REG_S64:
		case REG_DOUBLE:
			break;
		}
		break;
	}
	return 0;

error_unknown:
	return -EINVAL;

error_mismatch:
	ERR("type mismatch for '%s' binary operator\n", str);
	return -EINVAL;
}

/*
 * Validate bytecode range overflow within the validation pass.
 * Called for each instruction encountered.
 */
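/*
 * Each opcode checks that its fixed-size operand structure fits within
 * bytecode->len; FILTER_OP_LOAD_STRING additionally checks that its
 * string literal is NUL-terminated before the end of the bytecode.
 */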
static
int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
		void *start_pc, void *pc)
{
	int ret = 0;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		ERR("unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		break;
	}

	case FILTER_OP_RETURN:
	{
		if (unlikely(pc + sizeof(struct return_op)
				> start_pc + bytecode->len)) {
			ret = -EINVAL;
		}
		break;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	{
		ERR("unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		break;
	}

	case FILTER_OP_EQ:
	case FILTER_OP_NE:
	case FILTER_OP_GT:
	case FILTER_OP_LT:
	case FILTER_OP_GE:
	case FILTER_OP_LE:
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	{
		if (unlikely(pc + sizeof(struct binary_op)
				> start_pc + bytecode->len)) {
			ret = -EINVAL;
		}
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	{
		if (unlikely(pc + sizeof(struct unary_op)
				> start_pc + bytecode->len)) {
			ret = -EINVAL;
		}
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		if (unlikely(pc + sizeof(struct logical_op)
				> start_pc + bytecode->len)) {
			ret = -EINVAL;
		}
		break;
	}

	/* load */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		ERR("Unknown field ref type\n");
		ret = -EINVAL;
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_S64:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
				> start_pc + bytecode->len)) {
			ret = -EINVAL;
		}
		break;
	}

	case FILTER_OP_LOAD_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;
		uint32_t str_len, maxlen;

		if (unlikely(pc + sizeof(struct load_op)
				> start_pc + bytecode->len)) {
			ret = -EINVAL;
			break;
		}

		maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
		str_len = strnlen(insn->data, maxlen);
		if (unlikely(str_len >= maxlen)) {
			/* Final '\0' not found within range */
			ret = -EINVAL;
		}
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
				> start_pc + bytecode->len)) {
			ret = -EINVAL;
		}
		break;
	}

	case FILTER_OP_LOAD_DOUBLE:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_double)
				> start_pc + bytecode->len)) {
			ret = -EINVAL;
		}
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	case FILTER_OP_CAST_NOP:
	{
		if (unlikely(pc + sizeof(struct cast_op)
				> start_pc + bytecode->len)) {
			ret = -EINVAL;
		}
		break;
	}
	}

	return ret;
}

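/*
 * Empty the merge point table and free all remaining nodes. Return the
 * number of nodes removed: any node left after the validation pass
 * means a merge point was never reached, which the caller reports as
 * an error.
 */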
static
unsigned long delete_all_nodes(struct cds_lfht *ht)
{
	struct cds_lfht_iter iter;
	struct lfht_mp_node *node;
	unsigned long nr_nodes = 0;

	cds_lfht_for_each_entry(ht, &iter, node, node) {
		int ret;

		ret = cds_lfht_del(ht, cds_lfht_iter_get_node(&iter));
		assert(!ret);
		/* note: this hash table is never used concurrently */
		free(node);
		nr_nodes++;
	}
	return nr_nodes;
}

/*
 * Return value:
 * 0: success
 * <0: error
 */
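/*
 * Check that the operand types expected by an instruction match what
 * the virtual stack currently holds. This only inspects the stack; the
 * stack itself is updated later, by exec_insn().
 */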
static
int validate_instruction_context(struct bytecode_runtime *bytecode,
		struct vstack *stack,
		void *start_pc,
		void *pc)
{
	int ret = 0;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		ERR("unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_RETURN:
	{
		goto end;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	{
		ERR("unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_EQ:
	{
		ret = bin_op_compare_check(stack, "==");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_NE:
	{
		ret = bin_op_compare_check(stack, "!=");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_GT:
	{
		ret = bin_op_compare_check(stack, ">");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_LT:
	{
		ret = bin_op_compare_check(stack, "<");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_GE:
	{
		ret = bin_op_compare_check(stack, ">=");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_LE:
	{
		ret = bin_op_compare_check(stack, "<=");
		if (ret)
			goto end;
		break;
	}

	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_STRING
				|| vstack_bx(stack)->type != REG_STRING) {
			ERR("Unexpected register type for string comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64
				|| vstack_bx(stack)->type != REG_S64) {
			ERR("Unexpected register type for s64 comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if ((vstack_ax(stack)->type != REG_DOUBLE && vstack_ax(stack)->type != REG_S64)
				|| (vstack_bx(stack)->type != REG_DOUBLE && vstack_bx(stack)->type != REG_S64)) {
			ERR("Unexpected register type for double comparator\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_DOUBLE && vstack_bx(stack)->type != REG_DOUBLE) {
			ERR("Double operator should have at least one double register\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	{
		if (!vstack_ax(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
			ERR("unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
			ERR("Unary op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		case REG_DOUBLE:
			break;
		}
		break;
	}

	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		if (!vstack_ax(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64) {
			ERR("Invalid register type\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	{
		if (!vstack_ax(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_DOUBLE) {
			ERR("Invalid register type\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		struct logical_op *insn = (struct logical_op *) pc;

		if (!vstack_ax(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64) {
			ERR("Logical comparator expects S64 register\n");
			ret = -EINVAL;
			goto end;
		}

		dbg_printf("Validate jumping to bytecode offset %u\n",
			(unsigned int) insn->skip_offset);
		if (unlikely(start_pc + insn->skip_offset <= pc)) {
			ERR("Loops are not allowed in bytecode\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* load */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		ERR("Unknown field ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printf("Validate load field ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printf("Validate load field ref offset %u type s64\n",
			ref->offset);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printf("Validate load field ref offset %u type double\n",
			ref->offset);
		break;
	}

	case FILTER_OP_LOAD_STRING:
	{
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		break;
	}

	case FILTER_OP_LOAD_DOUBLE:
	{
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	{
		struct cast_op *insn = (struct cast_op *) pc;

		if (!vstack_ax(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
			ERR("unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
			ERR("Cast op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		case REG_DOUBLE:
			break;
		}
		if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) {
			if (vstack_ax(stack)->type != REG_DOUBLE) {
				ERR("Cast expects double\n");
				ret = -EINVAL;
				goto end;
			}
		}
		break;
	}
	case FILTER_OP_CAST_NOP:
	{
		break;
	}

	}
end:
	return ret;
}

/*
 * Return value:
 * 0: success
 * <0: error
 */
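/*
 * On top of per-instruction context validation, check the current
 * stack against any merge point recorded for this program counter.
 * Each reached target is validated once by the linear pass; merge
 * points that are never reached are reported at the end of the pass.
 */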
static
int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
		struct cds_lfht *merge_points,
		struct vstack *stack,
		void *start_pc,
		void *pc)
{
	int ret;
	unsigned long target_pc = pc - start_pc;
	struct cds_lfht_iter iter;
	struct cds_lfht_node *node;
	struct lfht_mp_node *mp_node;
	unsigned long hash;

	/* Validate the context resulting from the previous instruction */
	ret = validate_instruction_context(bytecode, stack, start_pc, pc);
	if (ret)
		return ret;

	/* Validate merge points */
	hash = lttng_hash_mix((const void *) target_pc, sizeof(target_pc),
			lttng_hash_seed);
	cds_lfht_lookup(merge_points, hash, lttng_hash_match,
			(const void *) target_pc, &iter);
	node = cds_lfht_iter_get_node(&iter);
	if (node) {
		mp_node = caa_container_of(node, struct lfht_mp_node, node);

		dbg_printf("Filter: validate merge point at offset %lu\n",
				target_pc);
		if (merge_points_compare(stack, &mp_node->stack)) {
			ERR("Merge points differ for offset %lu\n",
					target_pc);
			return -EINVAL;
		}
		/* Once validated, we can remove the merge point */
		dbg_printf("Filter: remove merge point at offset %lu\n",
				target_pc);
		ret = cds_lfht_del(merge_points, node);
		assert(!ret);
		/*
		 * No reference to this node is left (table never used
		 * concurrently): free it now rather than leaking it.
		 */
		free(mp_node);
	}
	return 0;
}

/*
 * Return value:
 * >0: going to next insn.
 * 0: success, stop iteration.
 * <0: error
 */
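/*
 * Symbolically execute one instruction against the virtual stack and
 * compute the address of the next instruction. Taken branches are not
 * followed directly: for a logical and/or, the stack expected at the
 * branch target is recorded as a merge point, and the validator keeps
 * walking the fall-through path.
 *
 * For example (hypothetical bytecode for "a != 1 && b != 2"): the
 * "and" instruction sees the S64 result of "a != 1" on the stack,
 * records that context for its skip_offset target, then pops it so the
 * fall-through path can evaluate "b != 2". When the linear pass later
 * reaches the target, its stack must match the recorded context.
 */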
static
int exec_insn(struct bytecode_runtime *bytecode,
		struct cds_lfht *merge_points,
		struct vstack *stack,
		void **_next_pc,
		void *pc)
{
	int ret = 1;
	void *next_pc = *_next_pc;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		ERR("unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_RETURN:
	{
		if (!vstack_ax(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		ret = 0;
		goto end;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	{
		ERR("unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_EQ:
	case FILTER_OP_NE:
	case FILTER_OP_GT:
	case FILTER_OP_LT:
	case FILTER_OP_GE:
	case FILTER_OP_LE:
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	{
		/* Pop 2, push 1 */
		if (vstack_pop(stack)) {
			ret = -EINVAL;
			goto end;
		}
		if (!vstack_ax(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct binary_op);
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		/* Pop 1, push 1 */
		if (!vstack_ax(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct unary_op);
		break;
	}

	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	{
		/* Pop 1, push 1 */
		if (!vstack_ax(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_DOUBLE;
		next_pc += sizeof(struct unary_op);
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		struct logical_op *insn = (struct logical_op *) pc;
		int merge_ret;

		/* Add merge point to table */
		merge_ret = merge_point_add_check(merge_points,
				insn->skip_offset, stack);
		if (merge_ret) {
			ret = merge_ret;
			goto end;
		}
		/* Continue to next instruction */
		/* Pop 1 when jump not taken */
		if (vstack_pop(stack)) {
			ret = -EINVAL;
			goto end;
		}
		next_pc += sizeof(struct logical_op);
		break;
	}

	/* load */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		ERR("Unknown field ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	{
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_STRING;
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_S64:
	{
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	{
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_DOUBLE;
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		break;
	}

	case FILTER_OP_LOAD_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;

		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_STRING;
		next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct load_op)
				+ sizeof(struct literal_numeric);
		break;
	}

	case FILTER_OP_LOAD_DOUBLE:
	{
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_DOUBLE;
		next_pc += sizeof(struct load_op)
				+ sizeof(struct literal_double);
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	{
		/* Pop 1, push 1 */
		if (!vstack_ax(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct cast_op);
		break;
	}
	case FILTER_OP_CAST_NOP:
	{
		next_pc += sizeof(struct cast_op);
		break;
	}

	}
end:
	*_next_pc = next_pc;
	return ret;
}

/*
 * Never called concurrently (hash seed is shared).
 */
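/*
 * Single linear pass over the bytecode. For each instruction: check
 * that it fits within the bytecode buffer, validate the virtual stack
 * context (including any merge point targeting it), then symbolically
 * execute it to compute the next stack state and program counter.
 */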
int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
{
	struct cds_lfht *merge_points;
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	struct vstack stack;

	vstack_init(&stack);

	if (!lttng_hash_seed_ready) {
		lttng_hash_seed = time(NULL);
		lttng_hash_seed_ready = 1;
	}
	/*
	 * Note: the merge_points hash table is used by a single thread,
	 * and is never concurrently resized. Therefore, we can use it
	 * without holding the RCU read-side lock and free its nodes
	 * without using call_rcu.
	 */
	merge_points = cds_lfht_new(DEFAULT_NR_MERGE_POINTS,
			MIN_NR_BUCKETS, MAX_NR_BUCKETS,
			0, NULL);
	if (!merge_points) {
		ERR("Error allocating hash table for bytecode validation\n");
		return -ENOMEM;
	}
	start_pc = &bytecode->data[0];
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
			pc = next_pc) {
		if (bytecode_validate_overflow(bytecode, start_pc, pc) != 0) {
			ERR("filter bytecode overflow\n");
			ret = -EINVAL;
			goto end;
		}
		dbg_printf("Validating op %s (%u)\n",
			print_op((unsigned int) *(filter_opcode_t *) pc),
			(unsigned int) *(filter_opcode_t *) pc);

		/*
		 * For each instruction, validate the current context
		 * (traversal of entire execution flow), and validate
		 * all merge points targeting this instruction.
		 */
		ret = validate_instruction_all_contexts(bytecode, merge_points,
				&stack, start_pc, pc);
		if (ret)
			goto end;
		ret = exec_insn(bytecode, merge_points, &stack, &next_pc, pc);
		if (ret <= 0)
			goto end;
	}
end:
	if (delete_all_nodes(merge_points)) {
		if (!ret) {
			ERR("Unexpected merge points\n");
			ret = -EINVAL;
		}
	}
	if (cds_lfht_destroy(merge_points, NULL)) {
		ERR("Error destroying hash table\n");
	}
	return ret;
}