Filtering: add support for star-only globbing patterns
[lttng-ust.git] / liblttng-ust / lttng-filter-validator.c
1 /*
2 * lttng-filter-validator.c
3 *
4 * LTTng UST filter bytecode validator.
5 *
6 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 * SOFTWARE.
25 */
26
27 #define _LGPL_SOURCE
28 #include <urcu-bp.h>
29 #include <time.h>
30 #include "lttng-filter.h"
31
32 #include <urcu/rculfhash.h>
33 #include "lttng-hash-helper.h"
34 #include "string-utils.h"
35
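/*
 * Validation pass overview: the bytecode is walked linearly while the
 * validator simulates the effect of each instruction on a virtual stack
 * (struct vstack) that records only the type of each slot. The pass
 * checks instruction bounds, operand types and, at every merge point
 * created by logical operators, that all execution paths agree on the
 * stack layout. No event data is accessed during validation.
 */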
36 /*
37 * Number of merge points used to size the hash table. The hash table is
38 * initialized to that size and never resized, because resizing would
39 * trigger RCU worker thread execution: lookups fall back on linear
40 * traversal if the number of merge points exceeds this value.
41 */
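
/*
 * Merge points are created by the short-circuit jumps of FILTER_OP_AND
 * and FILTER_OP_OR: the stack state expected at the jump target is
 * recorded when the logical operator is encountered, and checked again
 * when the fall-through path reaches that target.
 */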
42 #define DEFAULT_NR_MERGE_POINTS 128
43 #define MIN_NR_BUCKETS 128
44 #define MAX_NR_BUCKETS 128
45
46 /* merge point table node */
47 struct lfht_mp_node {
48 struct cds_lfht_node node;
49
50 /* Context at merge point */
51 struct vstack stack;
52 unsigned long target_pc;
53 };
54
55 static unsigned long lttng_hash_seed;
56 static unsigned int lttng_hash_seed_ready;
57
58 static
59 int lttng_hash_match(struct cds_lfht_node *node, const void *key)
60 {
61 struct lfht_mp_node *mp_node =
62 caa_container_of(node, struct lfht_mp_node, node);
63 unsigned long key_pc = (unsigned long) key;
64
65 if (mp_node->target_pc == key_pc)
66 return 1;
67 else
68 return 0;
69 }
70
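/*
 * Compare two virtual stack snapshots. Return 0 if they are compatible
 * (same depth, and each slot either has the same type on both sides or
 * is REG_UNKNOWN on at least one side), nonzero otherwise.
 */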
71 static
72 int merge_points_compare(const struct vstack *stacka,
73 const struct vstack *stackb)
74 {
75 int i, len;
76
77 if (stacka->top != stackb->top)
78 return 1;
79 len = stacka->top + 1;
80 assert(len >= 0);
81 for (i = 0; i < len; i++) {
82 if (stacka->e[i].type != REG_UNKNOWN
83 && stackb->e[i].type != REG_UNKNOWN
84 && stacka->e[i].type != stackb->e[i].type)
85 return 1;
86 }
87 return 0;
88 }
89
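/*
 * Record the stack state expected at target_pc. If a merge point already
 * exists for that offset, verify that the previously recorded stack is
 * compatible with the new one instead of adding a duplicate node.
 */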
90 static
91 int merge_point_add_check(struct cds_lfht *ht, unsigned long target_pc,
92 const struct vstack *stack)
93 {
94 struct lfht_mp_node *node;
95 unsigned long hash = lttng_hash_mix((const char *) target_pc,
96 sizeof(target_pc),
97 lttng_hash_seed);
98 struct cds_lfht_node *ret;
99
100 dbg_printf("Filter: adding merge point at offset %lu, hash %lu\n",
101 target_pc, hash);
102 node = zmalloc(sizeof(struct lfht_mp_node));
103 if (!node)
104 return -ENOMEM;
105 node->target_pc = target_pc;
106 memcpy(&node->stack, stack, sizeof(node->stack));
107 ret = cds_lfht_add_unique(ht, hash, lttng_hash_match,
108 (const char *) target_pc, &node->node);
109 if (ret != &node->node) {
110 struct lfht_mp_node *ret_mp =
111 caa_container_of(ret, struct lfht_mp_node, node);
112
113 /* Key already present */
114 dbg_printf("Filter: compare merge points for offset %lu, hash %lu\n",
115 target_pc, hash);
116 free(node);
117 if (merge_points_compare(stack, &ret_mp->stack)) {
118 ERR("Merge points differ for offset %lu\n",
119 target_pc);
120 return -EINVAL;
121 }
122 }
123 return 0;
124 }
125
126 /*
127 * Binary comparators use top of stack and top of stack -1.
128 * Return 0 if the types are known to match, 1 if typing is dynamic
129 * (unknown), or a negative error value on error.
130 */
131 static
132 int bin_op_compare_check(struct vstack *stack, filter_opcode_t opcode,
133 const char *str)
134 {
135 if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
136 goto error_empty;
137
138 switch (vstack_ax(stack)->type) {
139 default:
140 goto error_type;
141
142 case REG_UNKNOWN:
143 goto unknown;
144 case REG_STRING:
145 switch (vstack_bx(stack)->type) {
146 default:
147 goto error_type;
148
149 case REG_UNKNOWN:
150 goto unknown;
151 case REG_STRING:
152 break;
153 case REG_STAR_GLOB_STRING:
154 if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
155 goto error_mismatch;
156 }
157 break;
158 case REG_S64:
159 case REG_DOUBLE:
160 goto error_mismatch;
161 }
162 break;
163 case REG_STAR_GLOB_STRING:
164 switch (vstack_bx(stack)->type) {
165 default:
166 goto error_type;
167
168 case REG_UNKNOWN:
169 goto unknown;
170 case REG_STRING:
171 if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
172 goto error_mismatch;
173 }
174 break;
175 case REG_STAR_GLOB_STRING:
176 case REG_S64:
177 case REG_DOUBLE:
178 goto error_mismatch;
179 }
180 break;
181 case REG_S64:
182 case REG_DOUBLE:
183 switch (vstack_bx(stack)->type) {
184 default:
185 goto error_type;
186
187 case REG_UNKNOWN:
188 goto unknown;
189 case REG_STRING:
190 case REG_STAR_GLOB_STRING:
191 goto error_mismatch;
192 case REG_S64:
193 case REG_DOUBLE:
194 break;
195 }
196 break;
197 }
198 return 0;
199
200 unknown:
201 return 1;
202
203 error_mismatch:
204 ERR("type mismatch for '%s' binary operator\n", str);
205 return -EINVAL;
206
207 error_empty:
208 ERR("empty stack for '%s' binary operator\n", str);
209 return -EINVAL;
210
211 error_type:
212 ERR("unknown type for '%s' binary operator\n", str);
213 return -EINVAL;
214 }
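
/*
 * Example (informal): for a filter expression such as 'field == "foo*"',
 * the bytecode compares a REG_STRING with a REG_STAR_GLOB_STRING
 * immediate. That pairing is only accepted for FILTER_OP_EQ and
 * FILTER_OP_NE above, so an ordering comparison like 'field > "foo*"'
 * is rejected with -EINVAL.
 */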
215
216 /*
217 * Validate that the instruction at 'pc' does not read past the end of the bytecode.
218 * Called for each instruction encountered.
219 */
220 static
221 int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
222 char *start_pc, char *pc)
223 {
224 int ret = 0;
225
226 switch (*(filter_opcode_t *) pc) {
227 case FILTER_OP_UNKNOWN:
228 default:
229 {
230 ERR("unknown bytecode op %u\n",
231 (unsigned int) *(filter_opcode_t *) pc);
232 ret = -EINVAL;
233 break;
234 }
235
236 case FILTER_OP_RETURN:
237 {
238 if (unlikely(pc + sizeof(struct return_op)
239 > start_pc + bytecode->len)) {
240 ret = -ERANGE;
241 }
242 break;
243 }
244
245 /* binary */
246 case FILTER_OP_MUL:
247 case FILTER_OP_DIV:
248 case FILTER_OP_MOD:
249 case FILTER_OP_PLUS:
250 case FILTER_OP_MINUS:
251 case FILTER_OP_RSHIFT:
252 case FILTER_OP_LSHIFT:
253 case FILTER_OP_BIN_AND:
254 case FILTER_OP_BIN_OR:
255 case FILTER_OP_BIN_XOR:
256 {
257 ERR("unsupported bytecode op %u\n",
258 (unsigned int) *(filter_opcode_t *) pc);
259 ret = -EINVAL;
260 break;
261 }
262
263 case FILTER_OP_EQ:
264 case FILTER_OP_NE:
265 case FILTER_OP_GT:
266 case FILTER_OP_LT:
267 case FILTER_OP_GE:
268 case FILTER_OP_LE:
269 case FILTER_OP_EQ_STRING:
270 case FILTER_OP_NE_STRING:
271 case FILTER_OP_GT_STRING:
272 case FILTER_OP_LT_STRING:
273 case FILTER_OP_GE_STRING:
274 case FILTER_OP_LE_STRING:
275 case FILTER_OP_EQ_STAR_GLOB_STRING:
276 case FILTER_OP_NE_STAR_GLOB_STRING:
277 case FILTER_OP_EQ_S64:
278 case FILTER_OP_NE_S64:
279 case FILTER_OP_GT_S64:
280 case FILTER_OP_LT_S64:
281 case FILTER_OP_GE_S64:
282 case FILTER_OP_LE_S64:
283 case FILTER_OP_EQ_DOUBLE:
284 case FILTER_OP_NE_DOUBLE:
285 case FILTER_OP_GT_DOUBLE:
286 case FILTER_OP_LT_DOUBLE:
287 case FILTER_OP_GE_DOUBLE:
288 case FILTER_OP_LE_DOUBLE:
289 case FILTER_OP_EQ_DOUBLE_S64:
290 case FILTER_OP_NE_DOUBLE_S64:
291 case FILTER_OP_GT_DOUBLE_S64:
292 case FILTER_OP_LT_DOUBLE_S64:
293 case FILTER_OP_GE_DOUBLE_S64:
294 case FILTER_OP_LE_DOUBLE_S64:
295 case FILTER_OP_EQ_S64_DOUBLE:
296 case FILTER_OP_NE_S64_DOUBLE:
297 case FILTER_OP_GT_S64_DOUBLE:
298 case FILTER_OP_LT_S64_DOUBLE:
299 case FILTER_OP_GE_S64_DOUBLE:
300 case FILTER_OP_LE_S64_DOUBLE:
301 {
302 if (unlikely(pc + sizeof(struct binary_op)
303 > start_pc + bytecode->len)) {
304 ret = -ERANGE;
305 }
306 break;
307 }
308
309 /* unary */
310 case FILTER_OP_UNARY_PLUS:
311 case FILTER_OP_UNARY_MINUS:
312 case FILTER_OP_UNARY_NOT:
313 case FILTER_OP_UNARY_PLUS_S64:
314 case FILTER_OP_UNARY_MINUS_S64:
315 case FILTER_OP_UNARY_NOT_S64:
316 case FILTER_OP_UNARY_PLUS_DOUBLE:
317 case FILTER_OP_UNARY_MINUS_DOUBLE:
318 case FILTER_OP_UNARY_NOT_DOUBLE:
319 {
320 if (unlikely(pc + sizeof(struct unary_op)
321 > start_pc + bytecode->len)) {
322 ret = -ERANGE;
323 }
324 break;
325 }
326
327 /* logical */
328 case FILTER_OP_AND:
329 case FILTER_OP_OR:
330 {
331 if (unlikely(pc + sizeof(struct logical_op)
332 > start_pc + bytecode->len)) {
333 ret = -ERANGE;
334 }
335 break;
336 }
337
338 /* load field ref */
339 case FILTER_OP_LOAD_FIELD_REF:
340 {
341 ERR("Unknown field ref type\n");
342 ret = -EINVAL;
343 break;
344 }
345 /* get context ref */
346 case FILTER_OP_GET_CONTEXT_REF:
347 case FILTER_OP_LOAD_FIELD_REF_STRING:
348 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
349 case FILTER_OP_LOAD_FIELD_REF_S64:
350 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
351 case FILTER_OP_GET_CONTEXT_REF_STRING:
352 case FILTER_OP_GET_CONTEXT_REF_S64:
353 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
354 {
355 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
356 > start_pc + bytecode->len)) {
357 ret = -ERANGE;
358 }
359 break;
360 }
361
362 /* load from immediate operand */
363 case FILTER_OP_LOAD_STRING:
364 case FILTER_OP_LOAD_STAR_GLOB_STRING:
365 {
366 struct load_op *insn = (struct load_op *) pc;
367 uint32_t str_len, maxlen;
368
369 if (unlikely(pc + sizeof(struct load_op)
370 > start_pc + bytecode->len)) {
371 ret = -ERANGE;
372 break;
373 }
374
375 maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
376 str_len = strnlen(insn->data, maxlen);
377 if (unlikely(str_len >= maxlen)) {
378 /* Final '\0' not found within range */
379 ret = -ERANGE;
380 }
381 break;
382 }
383
384 case FILTER_OP_LOAD_S64:
385 {
386 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
387 > start_pc + bytecode->len)) {
388 ret = -ERANGE;
389 }
390 break;
391 }
392
393 case FILTER_OP_LOAD_DOUBLE:
394 {
395 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_double)
396 > start_pc + bytecode->len)) {
397 ret = -ERANGE;
398 }
399 break;
400 }
401
402 case FILTER_OP_CAST_TO_S64:
403 case FILTER_OP_CAST_DOUBLE_TO_S64:
404 case FILTER_OP_CAST_NOP:
405 {
406 if (unlikely(pc + sizeof(struct cast_op)
407 > start_pc + bytecode->len)) {
408 ret = -ERANGE;
409 }
410 break;
411 }
412
413 }
414
415 return ret;
416 }
417
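/*
 * Empty the merge point hash table and free every node. The number of
 * removed nodes is returned so the caller can detect merge points that
 * were never reached (and thus never validated).
 */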
418 static
419 unsigned long delete_all_nodes(struct cds_lfht *ht)
420 {
421 struct cds_lfht_iter iter;
422 struct lfht_mp_node *node;
423 unsigned long nr_nodes = 0;
424
425 cds_lfht_for_each_entry(ht, &iter, node, node) {
426 int ret;
427
428 ret = cds_lfht_del(ht, cds_lfht_iter_get_node(&iter));
429 assert(!ret);
430 /* note: this hash table is never used concurrently */
431 free(node);
432 nr_nodes++;
433 }
434 return nr_nodes;
435 }
436
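/*
 * Check that the operand types expected by the instruction at 'pc' are
 * compatible with the current contents of the virtual stack.
 */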
437 /*
438 * Return value:
439 * >=0: success
440 * <0: error
441 */
442 static
443 int validate_instruction_context(struct bytecode_runtime *bytecode,
444 struct vstack *stack,
445 char *start_pc,
446 char *pc)
447 {
448 int ret = 0;
449 const filter_opcode_t opcode = *(filter_opcode_t *) pc;
450
451 switch (opcode) {
452 case FILTER_OP_UNKNOWN:
453 default:
454 {
455 ERR("unknown bytecode op %u\n",
456 (unsigned int) *(filter_opcode_t *) pc);
457 ret = -EINVAL;
458 goto end;
459 }
460
461 case FILTER_OP_RETURN:
462 {
463 goto end;
464 }
465
466 /* binary */
467 case FILTER_OP_MUL:
468 case FILTER_OP_DIV:
469 case FILTER_OP_MOD:
470 case FILTER_OP_PLUS:
471 case FILTER_OP_MINUS:
472 case FILTER_OP_RSHIFT:
473 case FILTER_OP_LSHIFT:
474 case FILTER_OP_BIN_AND:
475 case FILTER_OP_BIN_OR:
476 case FILTER_OP_BIN_XOR:
477 {
478 ERR("unsupported bytecode op %u\n",
479 (unsigned int) opcode);
480 ret = -EINVAL;
481 goto end;
482 }
483
484 case FILTER_OP_EQ:
485 {
486 ret = bin_op_compare_check(stack, opcode, "==");
487 if (ret < 0)
488 goto end;
489 break;
490 }
491 case FILTER_OP_NE:
492 {
493 ret = bin_op_compare_check(stack, opcode, "!=");
494 if (ret < 0)
495 goto end;
496 break;
497 }
498 case FILTER_OP_GT:
499 {
500 ret = bin_op_compare_check(stack, opcode, ">");
501 if (ret < 0)
502 goto end;
503 break;
504 }
505 case FILTER_OP_LT:
506 {
507 ret = bin_op_compare_check(stack, opcode, "<");
508 if (ret < 0)
509 goto end;
510 break;
511 }
512 case FILTER_OP_GE:
513 {
514 ret = bin_op_compare_check(stack, opcode, ">=");
515 if (ret < 0)
516 goto end;
517 break;
518 }
519 case FILTER_OP_LE:
520 {
521 ret = bin_op_compare_check(stack, opcode, "<=");
522 if (ret < 0)
523 goto end;
524 break;
525 }
526
527 case FILTER_OP_EQ_STRING:
528 case FILTER_OP_NE_STRING:
529 case FILTER_OP_GT_STRING:
530 case FILTER_OP_LT_STRING:
531 case FILTER_OP_GE_STRING:
532 case FILTER_OP_LE_STRING:
533 {
534 if (!vstack_ax(stack) || !vstack_bx(stack)) {
535 ERR("Empty stack\n");
536 ret = -EINVAL;
537 goto end;
538 }
539 if (vstack_ax(stack)->type != REG_STRING
540 || vstack_bx(stack)->type != REG_STRING) {
541 ERR("Unexpected register type for string comparator\n");
542 ret = -EINVAL;
543 goto end;
544 }
545 break;
546 }
547
548 case FILTER_OP_EQ_STAR_GLOB_STRING:
549 case FILTER_OP_NE_STAR_GLOB_STRING:
550 {
551 if (!vstack_ax(stack) || !vstack_bx(stack)) {
552 ERR("Empty stack\n");
553 ret = -EINVAL;
554 goto end;
555 }
556 if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
557 && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
558 ERR("Unexpected register type for globbing pattern comparator\n");
559 ret = -EINVAL;
560 goto end;
561 }
562 break;
563 }
564
565 case FILTER_OP_EQ_S64:
566 case FILTER_OP_NE_S64:
567 case FILTER_OP_GT_S64:
568 case FILTER_OP_LT_S64:
569 case FILTER_OP_GE_S64:
570 case FILTER_OP_LE_S64:
571 {
572 if (!vstack_ax(stack) || !vstack_bx(stack)) {
573 ERR("Empty stack\n");
574 ret = -EINVAL;
575 goto end;
576 }
577 if (vstack_ax(stack)->type != REG_S64
578 || vstack_bx(stack)->type != REG_S64) {
579 ERR("Unexpected register type for s64 comparator\n");
580 ret = -EINVAL;
581 goto end;
582 }
583 break;
584 }
585
586 case FILTER_OP_EQ_DOUBLE:
587 case FILTER_OP_NE_DOUBLE:
588 case FILTER_OP_GT_DOUBLE:
589 case FILTER_OP_LT_DOUBLE:
590 case FILTER_OP_GE_DOUBLE:
591 case FILTER_OP_LE_DOUBLE:
592 {
593 if (!vstack_ax(stack) || !vstack_bx(stack)) {
594 ERR("Empty stack\n");
595 ret = -EINVAL;
596 goto end;
597 }
598 if (vstack_ax(stack)->type != REG_DOUBLE && vstack_bx(stack)->type != REG_DOUBLE) {
599 ERR("Double operator should have two double registers\n");
600 ret = -EINVAL;
601 goto end;
602 }
603 break;
604 }
605
606 case FILTER_OP_EQ_DOUBLE_S64:
607 case FILTER_OP_NE_DOUBLE_S64:
608 case FILTER_OP_GT_DOUBLE_S64:
609 case FILTER_OP_LT_DOUBLE_S64:
610 case FILTER_OP_GE_DOUBLE_S64:
611 case FILTER_OP_LE_DOUBLE_S64:
612 {
613 if (!vstack_ax(stack) || !vstack_bx(stack)) {
614 ERR("Empty stack\n");
615 ret = -EINVAL;
616 goto end;
617 }
618 if (vstack_ax(stack)->type != REG_S64 && vstack_bx(stack)->type != REG_DOUBLE) {
619 ERR("Double-S64 operator has unexpected register types\n");
620 ret = -EINVAL;
621 goto end;
622 }
623 break;
624 }
625
626 case FILTER_OP_EQ_S64_DOUBLE:
627 case FILTER_OP_NE_S64_DOUBLE:
628 case FILTER_OP_GT_S64_DOUBLE:
629 case FILTER_OP_LT_S64_DOUBLE:
630 case FILTER_OP_GE_S64_DOUBLE:
631 case FILTER_OP_LE_S64_DOUBLE:
632 {
633 if (!vstack_ax(stack) || !vstack_bx(stack)) {
634 ERR("Empty stack\n");
635 ret = -EINVAL;
636 goto end;
637 }
638 if (vstack_ax(stack)->type != REG_DOUBLE && vstack_bx(stack)->type != REG_S64) {
639 ERR("S64-Double operator has unexpected register types\n");
640 ret = -EINVAL;
641 goto end;
642 }
643 break;
644 }
645
646 /* unary */
647 case FILTER_OP_UNARY_PLUS:
648 case FILTER_OP_UNARY_MINUS:
649 case FILTER_OP_UNARY_NOT:
650 {
651 if (!vstack_ax(stack)) {
652 ERR("Empty stack\n");
653 ret = -EINVAL;
654 goto end;
655 }
656 switch (vstack_ax(stack)->type) {
657 default:
658 ERR("unknown register type\n");
659 ret = -EINVAL;
660 goto end;
661
662 case REG_STRING:
663 case REG_STAR_GLOB_STRING:
664 ERR("Unary op can only be applied to numeric or floating point registers\n");
665 ret = -EINVAL;
666 goto end;
667 case REG_S64:
668 break;
669 case REG_DOUBLE:
670 break;
671 case REG_UNKNOWN:
672 break;
673 }
674 break;
675 }
676
677 case FILTER_OP_UNARY_PLUS_S64:
678 case FILTER_OP_UNARY_MINUS_S64:
679 case FILTER_OP_UNARY_NOT_S64:
680 {
681 if (!vstack_ax(stack)) {
682 ERR("Empty stack\n");
683 ret = -EINVAL;
684 goto end;
685 }
686 if (vstack_ax(stack)->type != REG_S64) {
687 ERR("Invalid register type\n");
688 ret = -EINVAL;
689 goto end;
690 }
691 break;
692 }
693
694 case FILTER_OP_UNARY_PLUS_DOUBLE:
695 case FILTER_OP_UNARY_MINUS_DOUBLE:
696 case FILTER_OP_UNARY_NOT_DOUBLE:
697 {
698 if (!vstack_ax(stack)) {
699 ERR("Empty stack\n");
700 ret = -EINVAL;
701 goto end;
702 }
703 if (vstack_ax(stack)->type != REG_DOUBLE) {
704 ERR("Invalid register type\n");
705 ret = -EINVAL;
706 goto end;
707 }
708 break;
709 }
710
711 /* logical */
712 case FILTER_OP_AND:
713 case FILTER_OP_OR:
714 {
715 struct logical_op *insn = (struct logical_op *) pc;
716
717 if (!vstack_ax(stack)) {
718 ERR("Empty stack\n");
719 ret = -EINVAL;
720 goto end;
721 }
722 if (vstack_ax(stack)->type != REG_S64
723 && vstack_ax(stack)->type != REG_UNKNOWN) {
724 ERR("Logical comparator expects S64 or dynamic register\n");
725 ret = -EINVAL;
726 goto end;
727 }
728
729 dbg_printf("Validate jumping to bytecode offset %u\n",
730 (unsigned int) insn->skip_offset);
731 if (unlikely(start_pc + insn->skip_offset <= pc)) {
732 ERR("Loops are not allowed in bytecode\n");
733 ret = -EINVAL;
734 goto end;
735 }
736 break;
737 }
738
739 /* load field ref */
740 case FILTER_OP_LOAD_FIELD_REF:
741 {
742 ERR("Unknown field ref type\n");
743 ret = -EINVAL;
744 goto end;
745 }
746 case FILTER_OP_LOAD_FIELD_REF_STRING:
747 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
748 {
749 struct load_op *insn = (struct load_op *) pc;
750 struct field_ref *ref = (struct field_ref *) insn->data;
751
752 dbg_printf("Validate load field ref offset %u type string\n",
753 ref->offset);
754 break;
755 }
756 case FILTER_OP_LOAD_FIELD_REF_S64:
757 {
758 struct load_op *insn = (struct load_op *) pc;
759 struct field_ref *ref = (struct field_ref *) insn->data;
760
761 dbg_printf("Validate load field ref offset %u type s64\n",
762 ref->offset);
763 break;
764 }
765 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
766 {
767 struct load_op *insn = (struct load_op *) pc;
768 struct field_ref *ref = (struct field_ref *) insn->data;
769
770 dbg_printf("Validate load field ref offset %u type double\n",
771 ref->offset);
772 break;
773 }
774
775 /* load from immediate operand */
776 case FILTER_OP_LOAD_STRING:
777 case FILTER_OP_LOAD_STAR_GLOB_STRING:
778 {
779 break;
780 }
781
782 case FILTER_OP_LOAD_S64:
783 {
784 break;
785 }
786
787 case FILTER_OP_LOAD_DOUBLE:
788 {
789 break;
790 }
791
792 case FILTER_OP_CAST_TO_S64:
793 case FILTER_OP_CAST_DOUBLE_TO_S64:
794 {
795 struct cast_op *insn = (struct cast_op *) pc;
796
797 if (!vstack_ax(stack)) {
798 ERR("Empty stack\n");
799 ret = -EINVAL;
800 goto end;
801 }
802 switch (vstack_ax(stack)->type) {
803 default:
804 ERR("unknown register type\n");
805 ret = -EINVAL;
806 goto end;
807
808 case REG_STRING:
809 case REG_STAR_GLOB_STRING:
810 ERR("Cast op can only be applied to numeric or floating point registers\n");
811 ret = -EINVAL;
812 goto end;
813 case REG_S64:
814 break;
815 case REG_DOUBLE:
816 break;
817 case REG_UNKNOWN:
818 break;
819 }
820 if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) {
821 if (vstack_ax(stack)->type != REG_DOUBLE) {
822 ERR("Cast expects double\n");
823 ret = -EINVAL;
824 goto end;
825 }
826 }
827 break;
828 }
829 case FILTER_OP_CAST_NOP:
830 {
831 break;
832 }
833
834 /* get context ref */
835 case FILTER_OP_GET_CONTEXT_REF:
836 {
837 struct load_op *insn = (struct load_op *) pc;
838 struct field_ref *ref = (struct field_ref *) insn->data;
839
840 dbg_printf("Validate get context ref offset %u type dynamic\n",
841 ref->offset);
842 break;
843 }
844 case FILTER_OP_GET_CONTEXT_REF_STRING:
845 {
846 struct load_op *insn = (struct load_op *) pc;
847 struct field_ref *ref = (struct field_ref *) insn->data;
848
849 dbg_printf("Validate get context ref offset %u type string\n",
850 ref->offset);
851 break;
852 }
853 case FILTER_OP_GET_CONTEXT_REF_S64:
854 {
855 struct load_op *insn = (struct load_op *) pc;
856 struct field_ref *ref = (struct field_ref *) insn->data;
857
858 dbg_printf("Validate get context ref offset %u type s64\n",
859 ref->offset);
860 break;
861 }
862 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
863 {
864 struct load_op *insn = (struct load_op *) pc;
865 struct field_ref *ref = (struct field_ref *) insn->data;
866
867 dbg_printf("Validate get context ref offset %u type double\n",
868 ref->offset);
869 break;
870 }
871
872 }
873 end:
874 return ret;
875 }
876
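/*
 * Validate the instruction at 'pc' against the current virtual stack,
 * then look up any merge point recorded for this offset, check that its
 * stored stack is compatible, and remove it once validated.
 */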
877 /*
878 * Return value:
879 * 0: success
880 * <0: error
881 */
882 static
883 int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
884 struct cds_lfht *merge_points,
885 struct vstack *stack,
886 char *start_pc,
887 char *pc)
888 {
889 int ret;
890 unsigned long target_pc = pc - start_pc;
891 struct cds_lfht_iter iter;
892 struct cds_lfht_node *node;
893 struct lfht_mp_node *mp_node;
894 unsigned long hash;
895
896 /* Validate the context resulting from the previous instruction */
897 ret = validate_instruction_context(bytecode, stack, start_pc, pc);
898 if (ret < 0)
899 return ret;
900
901 /* Validate merge points */
902 hash = lttng_hash_mix((const char *) target_pc, sizeof(target_pc),
903 lttng_hash_seed);
904 cds_lfht_lookup(merge_points, hash, lttng_hash_match,
905 (const char *) target_pc, &iter);
906 node = cds_lfht_iter_get_node(&iter);
907 if (node) {
908 mp_node = caa_container_of(node, struct lfht_mp_node, node);
909
910 dbg_printf("Filter: validate merge point at offset %lu\n",
911 target_pc);
912 if (merge_points_compare(stack, &mp_node->stack)) {
913 ERR("Merge points differ for offset %lu\n",
914 target_pc);
915 return -EINVAL;
916 }
917 /* Once validated, we can remove the merge point */
918 dbg_printf("Filter: remove merge point at offset %lu\n",
919 target_pc);
920 ret = cds_lfht_del(merge_points, node);
921 assert(!ret);
922 }
923 return 0;
924 }
925
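/*
 * Simulate the effect of one instruction on the virtual stack (pops,
 * pushes and result types only) and compute the address of the next
 * instruction. Logical operators also record a merge point at their
 * jump target.
 */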
926 /*
927 * Return value:
928 * >0: success, continue to the next instruction.
929 * 0: success, stop iteration.
930 * <0: error
931 */
932 static
933 int exec_insn(struct bytecode_runtime *bytecode,
934 struct cds_lfht *merge_points,
935 struct vstack *stack,
936 char **_next_pc,
937 char *pc)
938 {
939 int ret = 1;
940 char *next_pc = *_next_pc;
941
942 switch (*(filter_opcode_t *) pc) {
943 case FILTER_OP_UNKNOWN:
944 default:
945 {
946 ERR("unknown bytecode op %u\n",
947 (unsigned int) *(filter_opcode_t *) pc);
948 ret = -EINVAL;
949 goto end;
950 }
951
952 case FILTER_OP_RETURN:
953 {
954 if (!vstack_ax(stack)) {
955 ERR("Empty stack\n");
956 ret = -EINVAL;
957 goto end;
958 }
959 ret = 0;
960 goto end;
961 }
962
963 /* binary */
964 case FILTER_OP_MUL:
965 case FILTER_OP_DIV:
966 case FILTER_OP_MOD:
967 case FILTER_OP_PLUS:
968 case FILTER_OP_MINUS:
969 case FILTER_OP_RSHIFT:
970 case FILTER_OP_LSHIFT:
971 case FILTER_OP_BIN_AND:
972 case FILTER_OP_BIN_OR:
973 case FILTER_OP_BIN_XOR:
974 {
975 ERR("unsupported bytecode op %u\n",
976 (unsigned int) *(filter_opcode_t *) pc);
977 ret = -EINVAL;
978 goto end;
979 }
980
981 case FILTER_OP_EQ:
982 case FILTER_OP_NE:
983 case FILTER_OP_GT:
984 case FILTER_OP_LT:
985 case FILTER_OP_GE:
986 case FILTER_OP_LE:
987 case FILTER_OP_EQ_STRING:
988 case FILTER_OP_NE_STRING:
989 case FILTER_OP_GT_STRING:
990 case FILTER_OP_LT_STRING:
991 case FILTER_OP_GE_STRING:
992 case FILTER_OP_LE_STRING:
993 case FILTER_OP_EQ_STAR_GLOB_STRING:
994 case FILTER_OP_NE_STAR_GLOB_STRING:
995 case FILTER_OP_EQ_S64:
996 case FILTER_OP_NE_S64:
997 case FILTER_OP_GT_S64:
998 case FILTER_OP_LT_S64:
999 case FILTER_OP_GE_S64:
1000 case FILTER_OP_LE_S64:
1001 case FILTER_OP_EQ_DOUBLE:
1002 case FILTER_OP_NE_DOUBLE:
1003 case FILTER_OP_GT_DOUBLE:
1004 case FILTER_OP_LT_DOUBLE:
1005 case FILTER_OP_GE_DOUBLE:
1006 case FILTER_OP_LE_DOUBLE:
1007 case FILTER_OP_EQ_DOUBLE_S64:
1008 case FILTER_OP_NE_DOUBLE_S64:
1009 case FILTER_OP_GT_DOUBLE_S64:
1010 case FILTER_OP_LT_DOUBLE_S64:
1011 case FILTER_OP_GE_DOUBLE_S64:
1012 case FILTER_OP_LE_DOUBLE_S64:
1013 case FILTER_OP_EQ_S64_DOUBLE:
1014 case FILTER_OP_NE_S64_DOUBLE:
1015 case FILTER_OP_GT_S64_DOUBLE:
1016 case FILTER_OP_LT_S64_DOUBLE:
1017 case FILTER_OP_GE_S64_DOUBLE:
1018 case FILTER_OP_LE_S64_DOUBLE:
1019 {
1020 /* Pop 2, push 1 */
1021 if (vstack_pop(stack)) {
1022 ret = -EINVAL;
1023 goto end;
1024 }
1025 if (!vstack_ax(stack)) {
1026 ERR("Empty stack\n");
1027 ret = -EINVAL;
1028 goto end;
1029 }
1030 vstack_ax(stack)->type = REG_S64;
1031 next_pc += sizeof(struct binary_op);
1032 break;
1033 }
1034
1035 /* unary */
1036 case FILTER_OP_UNARY_PLUS:
1037 case FILTER_OP_UNARY_MINUS:
1038 {
1039 /* Pop 1, push 1 */
1040 if (!vstack_ax(stack)) {
1041 ERR("Empty stack\n");
1042 ret = -EINVAL;
1043 goto end;
1044 }
1045 vstack_ax(stack)->type = REG_UNKNOWN;
1046 next_pc += sizeof(struct unary_op);
1047 break;
1048 }
1049
1050 case FILTER_OP_UNARY_PLUS_S64:
1051 case FILTER_OP_UNARY_MINUS_S64:
1052 case FILTER_OP_UNARY_NOT:
1053 case FILTER_OP_UNARY_NOT_S64:
1054 case FILTER_OP_UNARY_NOT_DOUBLE:
1055 {
1056 /* Pop 1, push 1 */
1057 if (!vstack_ax(stack)) {
1058 ERR("Empty stack\n");
1059 ret = -EINVAL;
1060 goto end;
1061 }
1062 vstack_ax(stack)->type = REG_S64;
1063 next_pc += sizeof(struct unary_op);
1064 break;
1065 }
1066
1067 case FILTER_OP_UNARY_PLUS_DOUBLE:
1068 case FILTER_OP_UNARY_MINUS_DOUBLE:
1069 {
1070 /* Pop 1, push 1 */
1071 if (!vstack_ax(stack)) {
1072 ERR("Empty stack\n");
1073 ret = -EINVAL;
1074 goto end;
1075 }
1076 vstack_ax(stack)->type = REG_DOUBLE;
1077 next_pc += sizeof(struct unary_op);
1078 break;
1079 }
1080
1081 /* logical */
1082 case FILTER_OP_AND:
1083 case FILTER_OP_OR:
1084 {
1085 struct logical_op *insn = (struct logical_op *) pc;
1086 int merge_ret;
1087
1088 /* Add merge point to table */
1089 merge_ret = merge_point_add_check(merge_points,
1090 insn->skip_offset, stack);
1091 if (merge_ret) {
1092 ret = merge_ret;
1093 goto end;
1094 }
1095 /* Continue to next instruction */
1096 /* Pop 1 when jump not taken */
1097 if (vstack_pop(stack)) {
1098 ret = -EINVAL;
1099 goto end;
1100 }
1101 next_pc += sizeof(struct logical_op);
1102 break;
1103 }
1104
1105 /* load field ref */
1106 case FILTER_OP_LOAD_FIELD_REF:
1107 {
1108 ERR("Unknown field ref type\n");
1109 ret = -EINVAL;
1110 goto end;
1111 }
1112 /* get context ref */
1113 case FILTER_OP_GET_CONTEXT_REF:
1114 {
1115 if (vstack_push(stack)) {
1116 ret = -EINVAL;
1117 goto end;
1118 }
1119 vstack_ax(stack)->type = REG_UNKNOWN;
1120 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1121 break;
1122 }
1123 case FILTER_OP_LOAD_FIELD_REF_STRING:
1124 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
1125 case FILTER_OP_GET_CONTEXT_REF_STRING:
1126 {
1127 if (vstack_push(stack)) {
1128 ret = -EINVAL;
1129 goto end;
1130 }
1131 vstack_ax(stack)->type = REG_STRING;
1132 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1133 break;
1134 }
1135 case FILTER_OP_LOAD_FIELD_REF_S64:
1136 case FILTER_OP_GET_CONTEXT_REF_S64:
1137 {
1138 if (vstack_push(stack)) {
1139 ret = -EINVAL;
1140 goto end;
1141 }
1142 vstack_ax(stack)->type = REG_S64;
1143 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1144 break;
1145 }
1146 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
1147 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
1148 {
1149 if (vstack_push(stack)) {
1150 ret = -EINVAL;
1151 goto end;
1152 }
1153 vstack_ax(stack)->type = REG_DOUBLE;
1154 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1155 break;
1156 }
1157
1158 /* load from immediate operand */
1159 case FILTER_OP_LOAD_STRING:
1160 {
1161 struct load_op *insn = (struct load_op *) pc;
1162
1163 if (vstack_push(stack)) {
1164 ret = -EINVAL;
1165 goto end;
1166 }
1167 vstack_ax(stack)->type = REG_STRING;
1168 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1169 break;
1170 }
1171
1172 case FILTER_OP_LOAD_STAR_GLOB_STRING:
1173 {
1174 struct load_op *insn = (struct load_op *) pc;
1175
1176 if (vstack_push(stack)) {
1177 ret = -EINVAL;
1178 goto end;
1179 }
1180 vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
1181 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1182 break;
1183 }
1184
1185 case FILTER_OP_LOAD_S64:
1186 {
1187 if (vstack_push(stack)) {
1188 ret = -EINVAL;
1189 goto end;
1190 }
1191 vstack_ax(stack)->type = REG_S64;
1192 next_pc += sizeof(struct load_op)
1193 + sizeof(struct literal_numeric);
1194 break;
1195 }
1196
1197 case FILTER_OP_LOAD_DOUBLE:
1198 {
1199 if (vstack_push(stack)) {
1200 ret = -EINVAL;
1201 goto end;
1202 }
1203 vstack_ax(stack)->type = REG_DOUBLE;
1204 next_pc += sizeof(struct load_op)
1205 + sizeof(struct literal_double);
1206 break;
1207 }
1208
1209 case FILTER_OP_CAST_TO_S64:
1210 case FILTER_OP_CAST_DOUBLE_TO_S64:
1211 {
1212 /* Pop 1, push 1 */
1213 if (!vstack_ax(stack)) {
1214 ERR("Empty stack\n");
1215 ret = -EINVAL;
1216 goto end;
1217 }
1218 vstack_ax(stack)->type = REG_S64;
1219 next_pc += sizeof(struct cast_op);
1220 break;
1221 }
1222 case FILTER_OP_CAST_NOP:
1223 {
1224 next_pc += sizeof(struct cast_op);
1225 break;
1226 }
1227
1228 }
1229 end:
1230 *_next_pc = next_pc;
1231 return ret;
1232 }
1233
1234 /*
1235 * Never called concurrently (the hash seed is shared without locking).
1236 */
1237 int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
1238 {
1239 struct cds_lfht *merge_points;
1240 char *pc, *next_pc, *start_pc;
1241 int ret = -EINVAL;
1242 struct vstack stack;
1243
1244 vstack_init(&stack);
1245
1246 if (!lttng_hash_seed_ready) {
1247 lttng_hash_seed = time(NULL);
1248 lttng_hash_seed_ready = 1;
1249 }
1250 /*
1251 * Note: the merge_points hash table is used by a single thread and is
1252 * never concurrently resized. Therefore, we can use it without
1253 * holding the RCU read-side lock and can free nodes without using
1254 * call_rcu.
1255 */
1256 merge_points = cds_lfht_new(DEFAULT_NR_MERGE_POINTS,
1257 MIN_NR_BUCKETS, MAX_NR_BUCKETS,
1258 0, NULL);
1259 if (!merge_points) {
1260 ERR("Error allocating hash table for bytecode validation\n");
1261 return -ENOMEM;
1262 }
1263 start_pc = &bytecode->data[0];
1264 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
1265 pc = next_pc) {
1266 ret = bytecode_validate_overflow(bytecode, start_pc, pc);
1267 if (ret != 0) {
1268 if (ret == -ERANGE)
1269 ERR("filter bytecode overflow\n");
1270 goto end;
1271 }
1272 dbg_printf("Validating op %s (%u)\n",
1273 print_op((unsigned int) *(filter_opcode_t *) pc),
1274 (unsigned int) *(filter_opcode_t *) pc);
1275
1276 /*
1277 * For each instruction, validate the current context
1278 * (traversal of entire execution flow), and validate
1279 * all merge points targeting this instruction.
1280 */
1281 ret = validate_instruction_all_contexts(bytecode, merge_points,
1282 &stack, start_pc, pc);
1283 if (ret)
1284 goto end;
1285 ret = exec_insn(bytecode, merge_points, &stack, &next_pc, pc);
1286 if (ret <= 0)
1287 goto end;
1288 }
1289 end:
1290 if (delete_all_nodes(merge_points)) {
1291 if (!ret) {
1292 ERR("Unexpected merge points\n");
1293 ret = -EINVAL;
1294 }
1295 }
1296 if (cds_lfht_destroy(merge_points, NULL)) {
1297 ERR("Error destroying hash table\n");
1298 }
1299 return ret;
1300 }
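
/*
 * Minimal usage sketch (assumption: 'runtime' is a struct bytecode_runtime
 * already populated by the bytecode linking code, which lives outside this
 * file):
 *
 *	if (lttng_filter_validate_bytecode(runtime))
 *		goto reject;	/- bytecode is malformed or not type-safe -/
 *
 * A return value of 0 means every reachable instruction was bounds- and
 * type-checked and all merge points were consistent.
 */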