Filter: double comparator produces s64
[lttng-ust.git] / liblttng-ust / lttng-filter-validator.c
1 /*
2 * lttng-filter-validator.c
3 *
4 * LTTng UST filter bytecode validator.
5 *
6 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; only
11 * version 2.1 of the License.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21 */
22
23 #define _LGPL_SOURCE
24 #include <urcu-bp.h>
25 #include <time.h>
26 #include "lttng-filter.h"
27
28 #include <urcu/rculfhash.h>
29 #include "lttng-hash-helper.h"
30
/*
 * Merge point table node: records the register typing context (one
 * struct vreg per register) that must hold when execution reaches
 * bytecode offset target_pc via a logical and/or skip branch.
 */
struct lfht_mp_node {
	struct cds_lfht_node node;

	/* Context at merge point */
	struct vreg reg[NR_REG];
	unsigned long target_pc;	/* bytecode offset of the merge point */
};

/* Seed for merge point hashing; lazily initialized from time(NULL). */
static unsigned long lttng_hash_seed;
/* Non-zero once lttng_hash_seed has been initialized (single-threaded use). */
static unsigned int lttng_hash_seed_ready;
42
43 static
44 int lttng_hash_match(struct cds_lfht_node *node, const void *key)
45 {
46 struct lfht_mp_node *mp_node =
47 caa_container_of(node, struct lfht_mp_node, node);
48 unsigned long key_pc = (unsigned long) key;
49
50 if (mp_node->target_pc == key_pc)
51 return 1;
52 else
53 return 0;
54 }
55
56 static
57 int merge_point_add(struct cds_lfht *ht, unsigned long target_pc,
58 const struct vreg reg[NR_REG])
59 {
60 struct lfht_mp_node *node;
61 unsigned long hash = lttng_hash_mix((const void *) target_pc,
62 sizeof(target_pc),
63 lttng_hash_seed);
64
65 dbg_printf("Filter: adding merge point at offset %lu, hash %lu\n",
66 target_pc, hash);
67 node = zmalloc(sizeof(struct lfht_mp_node));
68 if (!node)
69 return -ENOMEM;
70 node->target_pc = target_pc;
71 memcpy(node->reg, reg, sizeof(node->reg));
72 cds_lfht_add(ht, hash, &node->node);
73 return 0;
74 }
75
76 /*
77 * Number of merge points for hash table size. Hash table initialized to
78 * that size, and we do not resize, because we do not want to trigger
79 * RCU worker thread execution: fall-back on linear traversal if number
80 * of merge points exceeds this value.
81 */
82 #define DEFAULT_NR_MERGE_POINTS 128
83 #define MIN_NR_BUCKETS 128
84 #define MAX_NR_BUCKETS 128
85
86 static
87 int bin_op_compare_check(const struct vreg reg[NR_REG], const char *str)
88 {
89 switch (reg[REG_R0].type) {
90 default:
91 goto error_unknown;
92
93 case REG_STRING:
94 switch (reg[REG_R1].type) {
95 default:
96 goto error_unknown;
97
98 case REG_STRING:
99 break;
100 case REG_S64:
101 case REG_DOUBLE:
102 goto error_mismatch;
103 }
104 break;
105 case REG_S64:
106 case REG_DOUBLE:
107 switch (reg[REG_R1].type) {
108 default:
109 goto error_unknown;
110
111 case REG_STRING:
112 goto error_mismatch;
113
114 case REG_S64:
115 case REG_DOUBLE:
116 break;
117 }
118 break;
119 }
120 return 0;
121
122 error_unknown:
123
124 return -EINVAL;
125 error_mismatch:
126 ERR("type mismatch for '%s' binary operator\n", str);
127 return -EINVAL;
128 }
129
/*
 * Validate bytecode range overflow within the validation pass.
 * Called for each instruction encountered: checks that the full
 * encoding of the instruction at pc (opcode plus operands, and for
 * string loads the NUL-terminated payload) fits within the bytecode
 * buffer [start_pc, start_pc + bytecode->len).
 * Returns 0 when the instruction fits, -EINVAL otherwise.
 */
static
int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
		void *start_pc, void *pc)
{
	int ret = 0;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		ERR("unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		break;
	}

	case FILTER_OP_RETURN:
	{
		if (unlikely(pc + sizeof(struct return_op)
				> start_pc + bytecode->len)) {
			ret = -EINVAL;
		}
		break;
	}

	/* binary: arithmetic/bitwise ops are not supported by this runtime */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	{
		ERR("unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		break;
	}

	/* comparators: generic and type-specialized forms share one size */
	case FILTER_OP_EQ:
	case FILTER_OP_NE:
	case FILTER_OP_GT:
	case FILTER_OP_LT:
	case FILTER_OP_GE:
	case FILTER_OP_LE:
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	{
		if (unlikely(pc + sizeof(struct binary_op)
				> start_pc + bytecode->len)) {
			ret = -EINVAL;
		}
		break;
	}

	/* unary */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	{
		if (unlikely(pc + sizeof(struct unary_op)
				> start_pc + bytecode->len)) {
			ret = -EINVAL;
		}
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		if (unlikely(pc + sizeof(struct logical_op)
				> start_pc + bytecode->len)) {
			ret = -EINVAL;
		}
		break;
	}

	/* load: a bare field ref without a type is always invalid */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		ERR("Unknown field ref type\n");
		ret = -EINVAL;
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	case FILTER_OP_LOAD_FIELD_REF_S64:
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
				> start_pc + bytecode->len)) {
			ret = -EINVAL;
		}
		break;
	}

	case FILTER_OP_LOAD_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;
		uint32_t str_len, maxlen;

		/* First make sure the fixed-size header itself fits. */
		if (unlikely(pc + sizeof(struct load_op)
				> start_pc + bytecode->len)) {
			ret = -EINVAL;
			break;
		}

		/* Then require a terminating NUL inside the remaining bytes. */
		maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
		str_len = strnlen(insn->data, maxlen);
		if (unlikely(str_len >= maxlen)) {
			/* Final '\0' not found within range */
			ret = -EINVAL;
		}
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
				> start_pc + bytecode->len)) {
			ret = -EINVAL;
		}
		break;
	}

	case FILTER_OP_LOAD_DOUBLE:
	{
		if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_double)
				> start_pc + bytecode->len)) {
			ret = -EINVAL;
		}
		break;
	}

	case FILTER_OP_CAST_TO_S64:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	case FILTER_OP_CAST_NOP:
	{
		if (unlikely(pc + sizeof(struct cast_op)
				> start_pc + bytecode->len)) {
			ret = -EINVAL;
		}
		break;
	}
	}

	return ret;
}
309
310 static
311 unsigned long delete_all_nodes(struct cds_lfht *ht)
312 {
313 struct cds_lfht_iter iter;
314 struct lfht_mp_node *node;
315 unsigned long nr_nodes = 0;
316
317 cds_lfht_for_each_entry(ht, &iter, node, node) {
318 int ret;
319
320 ret = cds_lfht_del(ht, cds_lfht_iter_get_node(&iter));
321 assert(!ret);
322 /* note: this hash table is never used concurrently */
323 free(node);
324 nr_nodes++;
325 }
326 return nr_nodes;
327 }
328
/*
 * Validate that the instruction at pc is consistent with the register
 * typing context produced by the preceding instruction (passed in reg).
 * Purely a static check: no registers are modified here.
 * Return value:
 * 0: success
 * <0: error
 */
static
int validate_instruction_context(struct bytecode_runtime *bytecode,
		const struct vreg reg[NR_REG],
		void *start_pc,
		void *pc)
{
	int ret = 0;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		ERR("unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_RETURN:
	{
		goto end;
	}

	/* binary: arithmetic/bitwise ops are rejected by this runtime */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	{
		ERR("unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	/* Generic comparators: operand types must be mutually compatible. */
	case FILTER_OP_EQ:
	{
		ret = bin_op_compare_check(reg, "==");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_NE:
	{
		ret = bin_op_compare_check(reg, "!=");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_GT:
	{
		ret = bin_op_compare_check(reg, ">");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_LT:
	{
		ret = bin_op_compare_check(reg, "<");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_GE:
	{
		ret = bin_op_compare_check(reg, ">=");
		if (ret)
			goto end;
		break;
	}
	case FILTER_OP_LE:
	{
		ret = bin_op_compare_check(reg, "<=");
		if (ret)
			goto end;
		break;
	}

	/* String-specialized comparators require string registers on both sides. */
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	{
		if (reg[REG_R0].type != REG_STRING
				|| reg[REG_R1].type != REG_STRING) {
			ERR("Unexpected register type for string comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* S64-specialized comparators require s64 registers on both sides. */
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	{
		if (reg[REG_R0].type != REG_S64
				|| reg[REG_R1].type != REG_S64) {
			ERR("Unexpected register type for s64 comparator\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/*
	 * Double-specialized comparators accept mixed s64/double operands,
	 * but at least one side must actually be a double (otherwise the
	 * s64-specialized comparator should have been used).
	 */
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	{
		if ((reg[REG_R0].type != REG_DOUBLE && reg[REG_R0].type != REG_S64)
				|| (reg[REG_R1].type != REG_DOUBLE && reg[REG_R1].type != REG_S64)) {
			ERR("Unexpected register type for double comparator\n");
			ret = -EINVAL;
			goto end;
		}
		if (reg[REG_R0].type != REG_DOUBLE && reg[REG_R1].type != REG_DOUBLE) {
			ERR("Double operator should have at least one double register\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* unary: generic form accepts any numeric register */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	{
		struct unary_op *insn = (struct unary_op *) pc;

		if (unlikely(insn->reg >= REG_ERROR)) {
			ERR("invalid register %u\n",
				(unsigned int) insn->reg);
			ret = -EINVAL;
			goto end;
		}
		switch (reg[insn->reg].type) {
		default:
			ERR("unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
			ERR("Unary op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		case REG_DOUBLE:
			break;
		}
		break;
	}

	/* s64-specialized unary ops require an s64 register */
	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		struct unary_op *insn = (struct unary_op *) pc;

		if (unlikely(insn->reg >= REG_ERROR)) {
			ERR("invalid register %u\n",
				(unsigned int) insn->reg);
			ret = -EINVAL;
			goto end;
		}
		if (reg[insn->reg].type != REG_S64) {
			ERR("Invalid register type\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* double-specialized unary ops require a double register */
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	{
		struct unary_op *insn = (struct unary_op *) pc;

		if (unlikely(insn->reg >= REG_ERROR)) {
			ERR("invalid register %u\n",
				(unsigned int) insn->reg);
			ret = -EINVAL;
			goto end;
		}
		if (reg[insn->reg].type != REG_DOUBLE) {
			ERR("Invalid register type\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* logical: the boolean operand lives in R0 and must be s64 */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		struct logical_op *insn = (struct logical_op *) pc;

		if (reg[REG_R0].type != REG_S64) {
			ERR("Logical comparator expects S64 register\n");
			ret = -EINVAL;
			goto end;
		}

		dbg_printf("Validate jumping to bytecode offset %u\n",
			(unsigned int) insn->skip_offset);
		/* Skip targets must be strictly forward: forbids loops. */
		if (unlikely(start_pc + insn->skip_offset <= pc)) {
			ERR("Loops are not allowed in bytecode\n");
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* load */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		ERR("Unknown field ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		if (unlikely(insn->reg >= REG_ERROR)) {
			ERR("invalid register %u\n",
				(unsigned int) insn->reg);
			ret = -EINVAL;
			goto end;
		}
		dbg_printf("Validate load field ref offset %u type string\n",
			ref->offset);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		if (unlikely(insn->reg >= REG_ERROR)) {
			ERR("invalid register %u\n",
				(unsigned int) insn->reg);
			ret = -EINVAL;
			goto end;
		}
		dbg_printf("Validate load field ref offset %u type s64\n",
			ref->offset);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	{
		struct load_op *insn = (struct load_op *) pc;
		struct field_ref *ref = (struct field_ref *) insn->data;

		if (unlikely(insn->reg >= REG_ERROR)) {
			ERR("invalid register %u\n",
				(unsigned int) insn->reg);
			ret = -EINVAL;
			goto end;
		}
		dbg_printf("Validate load field ref offset %u type double\n",
			ref->offset);
		break;
	}

	case FILTER_OP_LOAD_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;

		if (unlikely(insn->reg >= REG_ERROR)) {
			ERR("invalid register %u\n",
				(unsigned int) insn->reg);
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		struct load_op *insn = (struct load_op *) pc;

		if (unlikely(insn->reg >= REG_ERROR)) {
			ERR("invalid register %u\n",
				(unsigned int) insn->reg);
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	case FILTER_OP_LOAD_DOUBLE:
	{
		struct load_op *insn = (struct load_op *) pc;

		if (unlikely(insn->reg >= REG_ERROR)) {
			ERR("invalid register %u\n",
				(unsigned int) insn->reg);
			ret = -EINVAL;
			goto end;
		}
		break;
	}

	/* cast: source register must be numeric */
	case FILTER_OP_CAST_TO_S64:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	{
		struct cast_op *insn = (struct cast_op *) pc;

		if (unlikely(insn->reg >= REG_ERROR)) {
			ERR("invalid register %u\n",
				(unsigned int) insn->reg);
			ret = -EINVAL;
			goto end;
		}
		switch (reg[insn->reg].type) {
		default:
			ERR("unknown register type\n");
			ret = -EINVAL;
			goto end;

		case REG_STRING:
			ERR("Cast op can only be applied to numeric or floating point registers\n");
			ret = -EINVAL;
			goto end;
		case REG_S64:
			break;
		case REG_DOUBLE:
			break;
		}
		/* The double-specialized cast additionally requires a double source. */
		if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) {
			if (reg[insn->reg].type != REG_DOUBLE) {
				ERR("Cast expects double\n");
				ret = -EINVAL;
				goto end;
			}
		}
		break;
	}
	case FILTER_OP_CAST_NOP:
	{
		break;
	}

	}
end:
	return ret;
}
701
/*
 * Validate the instruction at pc against the current context AND
 * against every merge point context recorded for this offset (each
 * merge point corresponds to a logical and/or branch landing here).
 * Validated merge points are consumed (removed from the table).
 * Return value:
 * 0: success
 * <0: error
 */
static
int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
		struct cds_lfht *merge_points,
		const struct vreg reg[NR_REG],
		void *start_pc,
		void *pc)
{
	int ret;
	unsigned long target_pc = pc - start_pc;
	struct cds_lfht_iter iter;
	struct cds_lfht_node *node;
	unsigned long hash;

	/* Validate the context resulting from the previous instruction */
	ret = validate_instruction_context(bytecode, reg, start_pc, pc);
	if (ret)
		return ret;

	/* Validate merge points */
	hash = lttng_hash_mix((const void *) target_pc, sizeof(target_pc),
			lttng_hash_seed);
	cds_lfht_for_each_duplicate(merge_points, hash, lttng_hash_match,
			(const void *) target_pc, &iter, node) {
		struct lfht_mp_node *mp_node =
			caa_container_of(node, struct lfht_mp_node, node);

		dbg_printf("Filter: validate merge point at offset %lu\n",
				target_pc);
		/* Re-validate the instruction under the saved branch context. */
		ret = validate_instruction_context(bytecode, mp_node->reg,
				start_pc, pc);
		if (ret)
			return ret;
		/* Once validated, we can remove the merge point */
		dbg_printf("Filter: remove one merge point at offset %lu\n",
				target_pc);
		ret = cds_lfht_del(merge_points, node);
		assert(!ret);
	}
	return 0;
}
747
/*
 * Abstract-interpretation step: apply the register-type effect of the
 * instruction at pc to reg, record merge points for logical branches,
 * and advance *_next_pc past the instruction's encoding.
 * Return value:
 * >0: going to next insn.
 * 0: success, stop iteration.
 * <0: error
 */
static
int exec_insn(struct bytecode_runtime *bytecode,
		struct cds_lfht *merge_points,
		struct vreg reg[NR_REG],
		void **_next_pc,
		void *pc)
{
	int ret = 1;
	void *next_pc = *_next_pc;

	switch (*(filter_opcode_t *) pc) {
	case FILTER_OP_UNKNOWN:
	default:
	{
		ERR("unknown bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	case FILTER_OP_RETURN:
	{
		ret = 0;
		goto end;
	}

	/* binary: arithmetic/bitwise ops are rejected by this runtime */
	case FILTER_OP_MUL:
	case FILTER_OP_DIV:
	case FILTER_OP_MOD:
	case FILTER_OP_PLUS:
	case FILTER_OP_MINUS:
	case FILTER_OP_RSHIFT:
	case FILTER_OP_LSHIFT:
	case FILTER_OP_BIN_AND:
	case FILTER_OP_BIN_OR:
	case FILTER_OP_BIN_XOR:
	{
		ERR("unsupported bytecode op %u\n",
			(unsigned int) *(filter_opcode_t *) pc);
		ret = -EINVAL;
		goto end;
	}

	/* All comparators (including double ones) produce an s64 truth value in R0. */
	case FILTER_OP_EQ:
	case FILTER_OP_NE:
	case FILTER_OP_GT:
	case FILTER_OP_LT:
	case FILTER_OP_GE:
	case FILTER_OP_LE:
	case FILTER_OP_EQ_STRING:
	case FILTER_OP_NE_STRING:
	case FILTER_OP_GT_STRING:
	case FILTER_OP_LT_STRING:
	case FILTER_OP_GE_STRING:
	case FILTER_OP_LE_STRING:
	case FILTER_OP_EQ_S64:
	case FILTER_OP_NE_S64:
	case FILTER_OP_GT_S64:
	case FILTER_OP_LT_S64:
	case FILTER_OP_GE_S64:
	case FILTER_OP_LE_S64:
	case FILTER_OP_EQ_DOUBLE:
	case FILTER_OP_NE_DOUBLE:
	case FILTER_OP_GT_DOUBLE:
	case FILTER_OP_LT_DOUBLE:
	case FILTER_OP_GE_DOUBLE:
	case FILTER_OP_LE_DOUBLE:
	{
		reg[REG_R0].type = REG_S64;
		next_pc += sizeof(struct binary_op);
		break;
	}

	/* unary: s64 forms leave an s64 result in R0 */
	case FILTER_OP_UNARY_PLUS:
	case FILTER_OP_UNARY_MINUS:
	case FILTER_OP_UNARY_NOT:
	case FILTER_OP_UNARY_PLUS_S64:
	case FILTER_OP_UNARY_MINUS_S64:
	case FILTER_OP_UNARY_NOT_S64:
	{
		reg[REG_R0].type = REG_S64;
		next_pc += sizeof(struct unary_op);
		break;
	}

	/* double-specialized unary ops leave a double result in R0 */
	case FILTER_OP_UNARY_PLUS_DOUBLE:
	case FILTER_OP_UNARY_MINUS_DOUBLE:
	case FILTER_OP_UNARY_NOT_DOUBLE:
	{
		reg[REG_R0].type = REG_DOUBLE;
		next_pc += sizeof(struct unary_op);
		break;
	}

	/* logical */
	case FILTER_OP_AND:
	case FILTER_OP_OR:
	{
		struct logical_op *insn = (struct logical_op *) pc;
		int merge_ret;

		/* Add merge point to table */
		merge_ret = merge_point_add(merge_points, insn->skip_offset, reg);
		if (merge_ret) {
			ret = merge_ret;
			goto end;
		}
		/* Continue to next instruction */
		next_pc += sizeof(struct logical_op);
		break;
	}

	/* load */
	case FILTER_OP_LOAD_FIELD_REF:
	{
		ERR("Unknown field ref type\n");
		ret = -EINVAL;
		goto end;
	}
	case FILTER_OP_LOAD_FIELD_REF_STRING:
	case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
	{
		struct load_op *insn = (struct load_op *) pc;

		/* Field refs are runtime values, not literals. */
		reg[insn->reg].type = REG_STRING;
		reg[insn->reg].literal = 0;
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_S64:
	{
		struct load_op *insn = (struct load_op *) pc;

		reg[insn->reg].type = REG_S64;
		reg[insn->reg].literal = 0;
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		break;
	}
	case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
	{
		struct load_op *insn = (struct load_op *) pc;

		reg[insn->reg].type = REG_DOUBLE;
		reg[insn->reg].literal = 0;
		next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
		break;
	}

	case FILTER_OP_LOAD_STRING:
	{
		struct load_op *insn = (struct load_op *) pc;

		reg[insn->reg].type = REG_STRING;
		reg[insn->reg].literal = 1;
		/* Inline string payload: skip its bytes plus the NUL terminator. */
		next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
		break;
	}

	case FILTER_OP_LOAD_S64:
	{
		struct load_op *insn = (struct load_op *) pc;

		reg[insn->reg].type = REG_S64;
		reg[insn->reg].literal = 1;
		next_pc += sizeof(struct load_op)
				+ sizeof(struct literal_numeric);
		break;
	}

	case FILTER_OP_LOAD_DOUBLE:
	{
		struct load_op *insn = (struct load_op *) pc;

		reg[insn->reg].type = REG_DOUBLE;
		reg[insn->reg].literal = 1;
		next_pc += sizeof(struct load_op)
				+ sizeof(struct literal_double);
		break;
	}

	/* casts always yield an s64 in the target register */
	case FILTER_OP_CAST_TO_S64:
	case FILTER_OP_CAST_DOUBLE_TO_S64:
	{
		struct cast_op *insn = (struct cast_op *) pc;

		reg[insn->reg].type = REG_S64;
		next_pc += sizeof(struct cast_op);
		break;
	}
	case FILTER_OP_CAST_NOP:
	{
		next_pc += sizeof(struct cast_op);
		break;
	}

	}
end:
	*_next_pc = next_pc;
	return ret;
}
956
/*
 * Validate a filter bytecode program before it is armed: bounds-check
 * every instruction, type-check register usage along the linear
 * execution flow, and type-check every logical-branch merge point.
 * Returns 0 when the bytecode is valid, a negative errno otherwise.
 *
 * Never called concurrently (hash seed is shared).
 */
int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
{
	struct cds_lfht *merge_points;
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	struct vreg reg[NR_REG];
	int i;

	/* Start from an unknown typing context: nothing loaded yet. */
	for (i = 0; i < NR_REG; i++) {
		reg[i].type = REG_TYPE_UNKNOWN;
		reg[i].literal = 0;
	}

	/* Lazy seed init; safe because this function is never concurrent. */
	if (!lttng_hash_seed_ready) {
		lttng_hash_seed = time(NULL);
		lttng_hash_seed_ready = 1;
	}
	/*
	 * Note: merge_points hash table used by single thread, and
	 * never concurrently resized. Therefore, we can use it without
	 * holding RCU read-side lock and free nodes without using
	 * call_rcu.
	 */
	merge_points = cds_lfht_new(DEFAULT_NR_MERGE_POINTS,
			MIN_NR_BUCKETS, MAX_NR_BUCKETS,
			0, NULL);
	if (!merge_points) {
		ERR("Error allocating hash table for bytecode validation\n");
		return -ENOMEM;
	}
	start_pc = &bytecode->data[0];
	/* Walk instructions linearly; exec_insn computes each next_pc. */
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
			pc = next_pc) {
		if (bytecode_validate_overflow(bytecode, start_pc, pc) != 0) {
			ERR("filter bytecode overflow\n");
			ret = -EINVAL;
			goto end;
		}
		dbg_printf("Validating op %s (%u)\n",
			print_op((unsigned int) *(filter_opcode_t *) pc),
			(unsigned int) *(filter_opcode_t *) pc);

		/*
		 * For each instruction, validate the current context
		 * (traversal of entire execution flow), and validate
		 * all merge points targeting this instruction.
		 */
		ret = validate_instruction_all_contexts(bytecode, merge_points,
					reg, start_pc, pc);
		if (ret)
			goto end;
		ret = exec_insn(bytecode, merge_points, reg, &next_pc, pc);
		if (ret <= 0)
			goto end;
	}
end:
	/* Leftover merge points mean a branch target was never reached. */
	if (delete_all_nodes(merge_points)) {
		if (!ret) {
			ERR("Unexpected merge points\n");
			ret = -EINVAL;
		}
	}
	if (cds_lfht_destroy(merge_points, NULL)) {
		ERR("Error destroying hash table\n");
	}
	return ret;
}
This page took 0.049174 seconds and 5 git commands to generate.