/*
 * lttng-filter-validator.c
 *
 * LTTng UST filter bytecode validator.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define _LGPL_SOURCE
#include <urcu-bp.h>
#include <time.h>
#include "lttng-filter.h"

#include <urcu/rculfhash.h>
#include "lttng-hash-helper.h"

/*
 * Number of merge points for hash table size. The hash table is
 * initialized to that size, and we do not resize it, because we do not
 * want to trigger RCU worker thread execution: fall back on linear
 * traversal if the number of merge points exceeds this value.
 */
#define DEFAULT_NR_MERGE_POINTS 128
#define MIN_NR_BUCKETS 128
#define MAX_NR_BUCKETS 128

/* merge point table node */
struct lfht_mp_node {
	struct cds_lfht_node node;

	/* Context at merge point */
	struct vstack stack;
	unsigned long target_pc;
};

static unsigned long lttng_hash_seed;
static unsigned int lttng_hash_seed_ready;

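/*
 * Merge point table match function: the lookup key is the bytecode
 * offset (target program counter) of the merge point.
 */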
static
int lttng_hash_match(struct cds_lfht_node *node, const void *key)
{
	struct lfht_mp_node *mp_node =
		caa_container_of(node, struct lfht_mp_node, node);
	unsigned long key_pc = (unsigned long) key;

	if (mp_node->target_pc == key_pc)
		return 1;
	else
		return 0;
}

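/*
 * Compare two validation stacks. Return 0 if they are compatible
 * (same depth, and each entry either matches or has at least one side
 * typed REG_UNKNOWN), non-zero otherwise.
 */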
static
int merge_points_compare(const struct vstack *stacka,
		const struct vstack *stackb)
{
	int i, len;

	if (stacka->top != stackb->top)
		return 1;
	len = stacka->top + 1;
	assert(len >= 0);
	for (i = 0; i < len; i++) {
		if (stacka->e[i].type != REG_UNKNOWN
				&& stackb->e[i].type != REG_UNKNOWN
				&& stacka->e[i].type != stackb->e[i].type)
			return 1;
	}
	return 0;
}

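/*
 * Add a merge point for the given target offset, recording the current
 * stack state. If a merge point already exists for that offset, verify
 * that the recorded stack is compatible with the current one.
 */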
static
int merge_point_add_check(struct cds_lfht *ht, unsigned long target_pc,
		const struct vstack *stack)
{
	struct lfht_mp_node *node;
	unsigned long hash = lttng_hash_mix((const char *) target_pc,
				sizeof(target_pc),
				lttng_hash_seed);
	struct cds_lfht_node *ret;

	dbg_printf("Filter: adding merge point at offset %lu, hash %lu\n",
			target_pc, hash);
	node = zmalloc(sizeof(struct lfht_mp_node));
	if (!node)
		return -ENOMEM;
	node->target_pc = target_pc;
	memcpy(&node->stack, stack, sizeof(node->stack));
	ret = cds_lfht_add_unique(ht, hash, lttng_hash_match,
		(const char *) target_pc, &node->node);
	if (ret != &node->node) {
		struct lfht_mp_node *ret_mp =
			caa_container_of(ret, struct lfht_mp_node, node);

		/* Key already present */
		dbg_printf("Filter: compare merge points for offset %lu, hash %lu\n",
				target_pc, hash);
		free(node);
		if (merge_points_compare(stack, &ret_mp->stack)) {
			ERR("Merge points differ for offset %lu\n",
				target_pc);
			return -EINVAL;
		}
	}
	return 0;
}

/*
 * Binary comparators use top of stack and top of stack -1.
 * Return 0 if typing is known to match, 1 if typing is dynamic
 * (unknown), negative error value on error.
 */
static
int bin_op_compare_check(struct vstack *stack, const char *str)
{
	if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
		goto error_empty;

	switch (vstack_ax(stack)->type) {
	default:
		goto error_type;

	case REG_UNKNOWN:
		goto unknown;
	case REG_STRING:
		switch (vstack_bx(stack)->type) {
		default:
			goto error_type;

		case REG_UNKNOWN:
			goto unknown;
		case REG_STRING:
			break;
		case REG_S64:
		case REG_DOUBLE:
			goto error_mismatch;
		}
		break;
	case REG_S64:
	case REG_DOUBLE:
		switch (vstack_bx(stack)->type) {
		default:
			goto error_type;

		case REG_UNKNOWN:
			goto unknown;
		case REG_STRING:
			goto error_mismatch;
		case REG_S64:
		case REG_DOUBLE:
			break;
		}
		break;
	}
	return 0;

unknown:
	return 1;

error_mismatch:
	ERR("type mismatch for '%s' binary operator\n", str);
	return -EINVAL;

error_empty:
	ERR("empty stack for '%s' binary operator\n", str);
	return -EINVAL;

error_type:
	ERR("unknown type for '%s' binary operator\n", str);
	return -EINVAL;
}

/*
 * Validate bytecode range overflow within the validation pass.
 * Called for each instruction encountered.
 */
static
int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
		char *start_pc, char *pc)
{
	int ret = 0;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		ERR("unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		break;
	}

	case FILTER_OP_RETURN:
	{
		if (unlikely(pc + sizeof(struct return_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	{
		ERR("unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		break;
	}

	case FILTER_OP_EQ:
	case FILTER_OP_NE:
	case FILTER_OP_GT:
	case FILTER_OP_LT:
	case FILTER_OP_GE:
	case FILTER_OP_LE:
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	{
		if (unlikely(pc + sizeof(struct binary_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	{
		if (unlikely(pc + sizeof(struct unary_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		if (unlikely(pc + sizeof(struct logical_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* load field ref */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		ERR("Unknown field ref type\n");
		ret = -EINVAL;
		break;
	}
	/* get context ref */
	case FILTER_OP_GET_CONTEXT_REF:
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_S64:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	case FILTER_OP_GET_CONTEXT_REF_S64:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;
		uint32_t str_len, maxlen;

		if (unlikely(pc + sizeof(struct load_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
			break;
		}

		maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
		str_len = strnlen(insn->data, maxlen);
		if (unlikely(str_len >= maxlen)) {
			/* Final '\0' not found within range */
			ret = -ERANGE;
		}
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	case FILTER_OP_LOAD_DOUBLE:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_double)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	case FILTER_OP_CAST_NOP:
	{
		if (unlikely(pc + sizeof(struct cast_op)
				> start_pc + bytecode->len)) {
			ret = -ERANGE;
		}
		break;
	}

	}

	return ret;
}

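/*
 * Remove and free all remaining merge point nodes. Return the number
 * of nodes removed, so the caller can detect merge points that were
 * never reached during validation.
 */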
static
unsigned long delete_all_nodes(struct cds_lfht *ht)
{
	struct cds_lfht_iter iter;
	struct lfht_mp_node *node;
	unsigned long nr_nodes = 0;

	cds_lfht_for_each_entry(ht, &iter, node, node) {
		int ret;

		ret = cds_lfht_del(ht, cds_lfht_iter_get_node(&iter));
		assert(!ret);
		/* note: this hash table is never used concurrently */
		free(node);
		nr_nodes++;
	}
	return nr_nodes;
}

/*
 * Return value:
 * >=0: success
 * <0: error
 */
static
int validate_instruction_context(struct bytecode_runtime *bytecode,
		struct vstack *stack,
		char *start_pc,
		char *pc)
{
	int ret = 0;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		ERR("unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_RETURN:
	{
		goto end;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	{
		ERR("unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_EQ:
	{
		ret = bin_op_compare_check(stack, "==");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_NE:
	{
		ret = bin_op_compare_check(stack, "!=");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_GT:
	{
		ret = bin_op_compare_check(stack, ">");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_LT:
	{
		ret = bin_op_compare_check(stack, "<");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_GE:
	{
		ret = bin_op_compare_check(stack, ">=");
		if (ret < 0)
			goto end;
		break;
	}
	case FILTER_OP_LE:
	{
		ret = bin_op_compare_check(stack, "<=");
		if (ret < 0)
			goto end;
		break;
	}

	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_STRING
				|| vstack_bx(stack)->type != REG_STRING) {
			ERR("Unexpected register type for string comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64
				|| vstack_bx(stack)->type != REG_S64) {
			ERR("Unexpected register type for s64 comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_DOUBLE && vstack_bx(stack)->type != REG_DOUBLE) {
			ERR("Double operator should have two double registers\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64 && vstack_bx(stack)->type != REG_DOUBLE) {
			ERR("Double-S64 operator has unexpected register types\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	{
		if (!vstack_ax(stack) || !vstack_bx(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_DOUBLE && vstack_bx(stack)->type != REG_S64) {
			ERR("S64-Double operator has unexpected register types\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	{
		if (!vstack_ax(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
			ERR("unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
			ERR("Unary op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		case REG_DOUBLE:
			break;
		case REG_UNKNOWN:
			break;
		}
		break;
	}

	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		if (!vstack_ax(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64) {
			ERR("Invalid register type\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	{
		if (!vstack_ax(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_DOUBLE) {
			ERR("Invalid register type\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		struct logical_op *insn = (struct logical_op *) pc;

		if (!vstack_ax(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		if (vstack_ax(stack)->type != REG_S64
				&& vstack_ax(stack)->type != REG_UNKNOWN) {
			ERR("Logical comparator expects S64 or dynamic register\n");
			ret = -EINVAL;
			goto end;
		}

		dbg_printf("Validate jumping to bytecode offset %u\n",
				(unsigned int) insn->skip_offset);
		if (unlikely(start_pc + insn->skip_offset <= pc)) {
			ERR("Loops are not allowed in bytecode\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* load field ref */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		ERR("Unknown field ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printf("Validate load field ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printf("Validate load field ref offset %u type s64\n",
			ref->offset);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printf("Validate load field ref offset %u type double\n",
			ref->offset);
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	{
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		break;
	}

	case FILTER_OP_LOAD_DOUBLE:
	{
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	{
		struct cast_op *insn = (struct cast_op *) pc;

		if (!vstack_ax(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		switch (vstack_ax(stack)->type) {
		default:
			ERR("unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
			ERR("Cast op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		case REG_DOUBLE:
			break;
		case REG_UNKNOWN:
			break;
		}
		if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) {
			if (vstack_ax(stack)->type != REG_DOUBLE) {
				ERR("Cast expects double\n");
				ret = -EINVAL;
				goto end;
			}
		}
		break;
	}
	case FILTER_OP_CAST_NOP:
	{
		break;
	}

	/* get context ref */
	case FILTER_OP_GET_CONTEXT_REF:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printf("Validate get context ref offset %u type dynamic\n",
			ref->offset);
		break;
	}
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printf("Validate get context ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printf("Validate get context ref offset %u type s64\n",
			ref->offset);
		break;
	}
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		dbg_printf("Validate get context ref offset %u type double\n",
			ref->offset);
		break;
	}

	}
end:
	return ret;
}

/*
 * Return value:
 * 0: success
 * <0: error
 */
static
int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
		struct cds_lfht *merge_points,
		struct vstack *stack,
		char *start_pc,
		char *pc)
{
	int ret;
	unsigned long target_pc = pc - start_pc;
	struct cds_lfht_iter iter;
	struct cds_lfht_node *node;
	struct lfht_mp_node *mp_node;
	unsigned long hash;

	/* Validate the context resulting from the previous instruction */
	ret = validate_instruction_context(bytecode, stack, start_pc, pc);
	if (ret < 0)
		return ret;

	/* Validate merge points */
	hash = lttng_hash_mix((const char *) target_pc, sizeof(target_pc),
			lttng_hash_seed);
	cds_lfht_lookup(merge_points, hash, lttng_hash_match,
			(const char *) target_pc, &iter);
	node = cds_lfht_iter_get_node(&iter);
	if (node) {
		mp_node = caa_container_of(node, struct lfht_mp_node, node);

		dbg_printf("Filter: validate merge point at offset %lu\n",
				target_pc);
		if (merge_points_compare(stack, &mp_node->stack)) {
			ERR("Merge points differ for offset %lu\n",
					target_pc);
			return -EINVAL;
		}
		/* Once validated, we can remove the merge point */
		dbg_printf("Filter: remove merge point at offset %lu\n",
				target_pc);
		ret = cds_lfht_del(merge_points, node);
		assert(!ret);
	}
	return 0;
}

/*
 * Return value:
 * >0: going to next insn.
 * 0: success, stop iteration.
 * <0: error
 */
static
int exec_insn(struct bytecode_runtime *bytecode,
		struct cds_lfht *merge_points,
		struct vstack *stack,
		char **_next_pc,
		char *pc)
{
	int ret = 1;
	char *next_pc = *_next_pc;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		ERR("unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_RETURN:
	{
		if (!vstack_ax(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		ret = 0;
		goto end;
	}

	/* binary */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	{
		ERR("unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_EQ:
	case FILTER_OP_NE:
	case FILTER_OP_GT:
	case FILTER_OP_LT:
	case FILTER_OP_GE:
	case FILTER_OP_LE:
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	case FILTER_OP_EQ_DOUBLE_S64:
	case FILTER_OP_NE_DOUBLE_S64:
	case FILTER_OP_GT_DOUBLE_S64:
	case FILTER_OP_LT_DOUBLE_S64:
	case FILTER_OP_GE_DOUBLE_S64:
	case FILTER_OP_LE_DOUBLE_S64:
	case FILTER_OP_EQ_S64_DOUBLE:
	case FILTER_OP_NE_S64_DOUBLE:
	case FILTER_OP_GT_S64_DOUBLE:
	case FILTER_OP_LT_S64_DOUBLE:
	case FILTER_OP_GE_S64_DOUBLE:
	case FILTER_OP_LE_S64_DOUBLE:
	{
		/* Pop 2, push 1 */
		if (vstack_pop(stack)) {
			ret = -EINVAL;
			goto end;
		}
		if (!vstack_ax(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct binary_op);
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	{
		/* Pop 1, push 1 */
		if (!vstack_ax(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_UNKNOWN;
		next_pc += sizeof(struct unary_op);
		break;
	}

	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT:
	case FILTER_OP_UNARY_NOT_S64:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	{
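		/*
		 * Logical not and the s64-typed unary ops all leave an
		 * s64 result on the stack.
		 */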
		/* Pop 1, push 1 */
		if (!vstack_ax(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct unary_op);
		break;
	}

	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	{
		/* Pop 1, push 1 */
		if (!vstack_ax(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_DOUBLE;
		next_pc += sizeof(struct unary_op);
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
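		/*
		 * AND/OR may short-circuit to skip_offset: record the
		 * current stack as the expected state at that merge
		 * point, then keep validating the fall-through path.
		 */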
		struct logical_op *insn = (struct logical_op *) pc;
		int merge_ret;

		/* Add merge point to table */
		merge_ret = merge_point_add_check(merge_points,
					insn->skip_offset, stack);
		if (merge_ret) {
			ret = merge_ret;
			goto end;
		}
		/* Continue to next instruction */
		/* Pop 1 when jump not taken */
		if (vstack_pop(stack)) {
			ret = -EINVAL;
			goto end;
		}
		next_pc += sizeof(struct logical_op);
		break;
	}

	/* load field ref */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		ERR("Unknown field ref type\n");
		ret = -EINVAL;
		goto end;
	}
	/* get context ref */
	case FILTER_OP_GET_CONTEXT_REF:
	{
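		/*
		 * The type of a context field loaded through the
		 * generic opcode is only known at runtime: push a
		 * dynamic (unknown) register.
		 */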
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_UNKNOWN;
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_GET_CONTEXT_REF_STRING:
	{
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_STRING;
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_S64:
	case FILTER_OP_GET_CONTEXT_REF_S64:
	{
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
	{
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_DOUBLE;
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		break;
	}

	/* load from immediate operand */
	case FILTER_OP_LOAD_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;

		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_STRING;
		next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct load_op)
				+ sizeof(struct literal_numeric);
		break;
	}

	case FILTER_OP_LOAD_DOUBLE:
	{
		if (vstack_push(stack)) {
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_DOUBLE;
		next_pc += sizeof(struct load_op)
				+ sizeof(struct literal_double);
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	{
		/* Pop 1, push 1 */
		if (!vstack_ax(stack)) {
			ERR("Empty stack\n");
			ret = -EINVAL;
			goto end;
		}
		vstack_ax(stack)->type = REG_S64;
		next_pc += sizeof(struct cast_op);
		break;
	}
	case FILTER_OP_CAST_NOP:
	{
		next_pc += sizeof(struct cast_op);
		break;
	}

	}
end:
	*_next_pc = next_pc;
	return ret;
}

/*
 * Never called concurrently (hash seed is shared).
 */
int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
{
	struct cds_lfht *merge_points;
	char *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	struct vstack stack;

	vstack_init(&stack);

	if (!lttng_hash_seed_ready) {
		lttng_hash_seed = time(NULL);
		lttng_hash_seed_ready = 1;
	}
	/*
	 * Note: merge_points hash table used by single thread, and
	 * never concurrently resized. Therefore, we can use it without
	 * holding RCU read-side lock and free nodes without using
	 * call_rcu.
	 */
	merge_points = cds_lfht_new(DEFAULT_NR_MERGE_POINTS,
			MIN_NR_BUCKETS, MAX_NR_BUCKETS,
			0, NULL);
	if (!merge_points) {
		ERR("Error allocating hash table for bytecode validation\n");
		return -ENOMEM;
	}
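	/*
	 * Walk the bytecode linearly: next_pc is advanced by exec_insn
	 * according to the size of each decoded instruction.
	 */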
	start_pc = &bytecode->data[0];
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
			pc = next_pc) {
		ret = bytecode_validate_overflow(bytecode, start_pc, pc);
		if (ret != 0) {
			if (ret == -ERANGE)
				ERR("filter bytecode overflow\n");
			goto end;
		}
		dbg_printf("Validating op %s (%u)\n",
			print_op((unsigned int) *(filter_opcode_t *) pc),
			(unsigned int) *(filter_opcode_t *) pc);

		/*
		 * For each instruction, validate the current context
		 * (traversal of entire execution flow), and validate
		 * all merge points targeting this instruction.
		 */
		ret = validate_instruction_all_contexts(bytecode, merge_points,
					&stack, start_pc, pc);
		if (ret)
			goto end;
		ret = exec_insn(bytecode, merge_points, &stack, &next_pc, pc);
		if (ret <= 0)
			goto end;
	}
end:
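	/*
	 * On success, every merge point must have been reached and
	 * consumed by the validation pass; leftover entries indicate a
	 * jump target that was never validated.
	 */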
	if (delete_all_nodes(merge_points)) {
		if (!ret) {
			ERR("Unexpected merge points\n");
			ret = -EINVAL;
		}
	}
	if (cds_lfht_destroy(merge_points, NULL)) {
		ERR("Error destroying hash table\n");
	}
	return ret;
}