Fix: trace events in C constructors/destructors
[lttng-ust.git] / liblttng-ust / lttng-filter-validator.c
1 /*
2 * lttng-filter-validator.c
3 *
4 * LTTng UST filter bytecode validator.
5 *
6 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 * SOFTWARE.
25 */
26
27 #define _LGPL_SOURCE
28 #include <urcu-bp.h>
29 #include <time.h>
30 #include "lttng-filter.h"
31
32 #include <urcu/rculfhash.h>
33 #include "lttng-hash-helper.h"
34 #include "string-utils.h"
35
36 /*
37 * Number of merge points for hash table size. Hash table initialized to
38 * that size, and we do not resize, because we do not want to trigger
39 * RCU worker thread execution: fall-back on linear traversal if number
40 * of merge points exceeds this value.
41 */
42 #define DEFAULT_NR_MERGE_POINTS 128
/* MIN == MAX pins the cds_lfht at a fixed size so it never resizes. */
43 #define MIN_NR_BUCKETS 128
44 #define MAX_NR_BUCKETS 128
45
46 /* merge point table node */
47 struct lfht_mp_node {
48 struct cds_lfht_node node;
49
50 /* Context at merge point */
51 struct vstack stack;
52 unsigned long target_pc;
53 };
54
55 static unsigned long lttng_hash_seed;
56 static unsigned int lttng_hash_seed_ready;
57
58 static
59 int lttng_hash_match(struct cds_lfht_node *node, const void *key)
60 {
61 struct lfht_mp_node *mp_node =
62 caa_container_of(node, struct lfht_mp_node, node);
63 unsigned long key_pc = (unsigned long) key;
64
65 if (mp_node->target_pc == key_pc)
66 return 1;
67 else
68 return 0;
69 }
70
71 static
72 int merge_points_compare(const struct vstack *stacka,
73 const struct vstack *stackb)
74 {
75 int i, len;
76
77 if (stacka->top != stackb->top)
78 return 1;
79 len = stacka->top + 1;
80 assert(len >= 0);
81 for (i = 0; i < len; i++) {
82 if (stacka->e[i].type != REG_UNKNOWN
83 && stackb->e[i].type != REG_UNKNOWN
84 && stacka->e[i].type != stackb->e[i].type)
85 return 1;
86 }
87 return 0;
88 }
89
90 static
91 int merge_point_add_check(struct cds_lfht *ht, unsigned long target_pc,
92 const struct vstack *stack)
93 {
94 struct lfht_mp_node *node;
95 unsigned long hash = lttng_hash_mix((const char *) target_pc,
96 sizeof(target_pc),
97 lttng_hash_seed);
98 struct cds_lfht_node *ret;
99
100 dbg_printf("Filter: adding merge point at offset %lu, hash %lu\n",
101 target_pc, hash);
102 node = zmalloc(sizeof(struct lfht_mp_node));
103 if (!node)
104 return -ENOMEM;
105 node->target_pc = target_pc;
106 memcpy(&node->stack, stack, sizeof(node->stack));
107 ret = cds_lfht_add_unique(ht, hash, lttng_hash_match,
108 (const char *) target_pc, &node->node);
109 if (ret != &node->node) {
110 struct lfht_mp_node *ret_mp =
111 caa_container_of(ret, struct lfht_mp_node, node);
112
113 /* Key already present */
114 dbg_printf("Filter: compare merge points for offset %lu, hash %lu\n",
115 target_pc, hash);
116 free(node);
117 if (merge_points_compare(stack, &ret_mp->stack)) {
118 ERR("Merge points differ for offset %lu\n",
119 target_pc);
120 return -EINVAL;
121 }
122 }
123 return 0;
124 }
125
126 /*
127 * Binary comparators use top of stack and top of stack -1.
128 * Return 0 if typing is known to match, 1 if typing is dynamic
129 * (unknown), negative error value on error.
130 */
131 static
132 int bin_op_compare_check(struct vstack *stack, filter_opcode_t opcode,
133 const char *str)
134 {
135 if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
136 goto error_empty;
137
138 switch (vstack_ax(stack)->type) {
139 default:
140 goto error_type;
141
142 case REG_UNKNOWN:
143 goto unknown;
144 case REG_STRING:
145 switch (vstack_bx(stack)->type) {
146 default:
147 goto error_type;
148
149 case REG_UNKNOWN:
150 goto unknown;
151 case REG_STRING:
152 break;
153 case REG_STAR_GLOB_STRING:
154 if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
155 goto error_mismatch;
156 }
157 break;
158 case REG_S64:
159 case REG_DOUBLE:
160 goto error_mismatch;
161 }
162 break;
163 case REG_STAR_GLOB_STRING:
164 switch (vstack_bx(stack)->type) {
165 default:
166 goto error_type;
167
168 case REG_UNKNOWN:
169 goto unknown;
170 case REG_STRING:
171 if (opcode != FILTER_OP_EQ && opcode != FILTER_OP_NE) {
172 goto error_mismatch;
173 }
174 break;
175 case REG_STAR_GLOB_STRING:
176 case REG_S64:
177 case REG_DOUBLE:
178 goto error_mismatch;
179 }
180 break;
181 case REG_S64:
182 case REG_DOUBLE:
183 switch (vstack_bx(stack)->type) {
184 default:
185 goto error_type;
186
187 case REG_UNKNOWN:
188 goto unknown;
189 case REG_STRING:
190 case REG_STAR_GLOB_STRING:
191 goto error_mismatch;
192 case REG_S64:
193 case REG_DOUBLE:
194 break;
195 }
196 break;
197 }
198 return 0;
199
200 unknown:
201 return 1;
202
203 error_mismatch:
204 ERR("type mismatch for '%s' binary operator\n", str);
205 return -EINVAL;
206
207 error_empty:
208 ERR("empty stack for '%s' binary operator\n", str);
209 return -EINVAL;
210
211 error_type:
212 ERR("unknown type for '%s' binary operator\n", str);
213 return -EINVAL;
214 }
215
216 /*
217 * Binary bitwise operators use top of stack and top of stack -1.
218 * Return 0 if typing is known to match, 1 if typing is dynamic
219 * (unknown), negative error value on error.
220 */
221 static
222 int bin_op_bitwise_check(struct vstack *stack, filter_opcode_t opcode,
223 const char *str)
224 {
225 if (unlikely(!vstack_ax(stack) || !vstack_bx(stack)))
226 goto error_empty;
227
228 switch (vstack_ax(stack)->type) {
229 default:
230 goto error_type;
231
232 case REG_UNKNOWN:
233 goto unknown;
234 case REG_S64:
235 switch (vstack_bx(stack)->type) {
236 default:
237 goto error_type;
238
239 case REG_UNKNOWN:
240 goto unknown;
241 case REG_S64:
242 break;
243 }
244 break;
245 }
246 return 0;
247
248 unknown:
249 return 1;
250
251 error_empty:
252 ERR("empty stack for '%s' binary operator\n", str);
253 return -EINVAL;
254
255 error_type:
256 ERR("unknown type for '%s' binary operator\n", str);
257 return -EINVAL;
258 }
259
260 static
261 int validate_get_symbol(struct bytecode_runtime *bytecode,
262 const struct get_symbol *sym)
263 {
264 const char *str, *str_limit;
265 size_t len_limit;
266
267 if (sym->offset >= bytecode->p.bc->bc.len - bytecode->p.bc->bc.reloc_offset)
268 return -EINVAL;
269
270 str = bytecode->p.bc->bc.data + bytecode->p.bc->bc.reloc_offset + sym->offset;
271 str_limit = bytecode->p.bc->bc.data + bytecode->p.bc->bc.len;
272 len_limit = str_limit - str;
273 if (strnlen(str, len_limit) == len_limit)
274 return -EINVAL;
275 return 0;
276 }
277
278 /*
279 * Validate bytecode range overflow within the validation pass.
280 * Called for each instruction encountered.
281 */
282 static
283 int bytecode_validate_overflow(struct bytecode_runtime *bytecode,
284 char *start_pc, char *pc)
285 {
286 int ret = 0;
287
288 switch (*(filter_opcode_t *) pc) {
289 case FILTER_OP_UNKNOWN:
290 default:
291 {
292 ERR("unknown bytecode op %u\n",
293 (unsigned int) *(filter_opcode_t *) pc);
294 ret = -EINVAL;
295 break;
296 }
297
298 case FILTER_OP_RETURN:
299 case FILTER_OP_RETURN_S64:
300 {
301 if (unlikely(pc + sizeof(struct return_op)
302 > start_pc + bytecode->len)) {
303 ret = -ERANGE;
304 }
305 break;
306 }
307
308 /* binary */
309 case FILTER_OP_MUL:
310 case FILTER_OP_DIV:
311 case FILTER_OP_MOD:
312 case FILTER_OP_PLUS:
313 case FILTER_OP_MINUS:
314 {
315 ERR("unsupported bytecode op %u\n",
316 (unsigned int) *(filter_opcode_t *) pc);
317 ret = -EINVAL;
318 break;
319 }
320
321 case FILTER_OP_EQ:
322 case FILTER_OP_NE:
323 case FILTER_OP_GT:
324 case FILTER_OP_LT:
325 case FILTER_OP_GE:
326 case FILTER_OP_LE:
327 case FILTER_OP_EQ_STRING:
328 case FILTER_OP_NE_STRING:
329 case FILTER_OP_GT_STRING:
330 case FILTER_OP_LT_STRING:
331 case FILTER_OP_GE_STRING:
332 case FILTER_OP_LE_STRING:
333 case FILTER_OP_EQ_STAR_GLOB_STRING:
334 case FILTER_OP_NE_STAR_GLOB_STRING:
335 case FILTER_OP_EQ_S64:
336 case FILTER_OP_NE_S64:
337 case FILTER_OP_GT_S64:
338 case FILTER_OP_LT_S64:
339 case FILTER_OP_GE_S64:
340 case FILTER_OP_LE_S64:
341 case FILTER_OP_EQ_DOUBLE:
342 case FILTER_OP_NE_DOUBLE:
343 case FILTER_OP_GT_DOUBLE:
344 case FILTER_OP_LT_DOUBLE:
345 case FILTER_OP_GE_DOUBLE:
346 case FILTER_OP_LE_DOUBLE:
347 case FILTER_OP_EQ_DOUBLE_S64:
348 case FILTER_OP_NE_DOUBLE_S64:
349 case FILTER_OP_GT_DOUBLE_S64:
350 case FILTER_OP_LT_DOUBLE_S64:
351 case FILTER_OP_GE_DOUBLE_S64:
352 case FILTER_OP_LE_DOUBLE_S64:
353 case FILTER_OP_EQ_S64_DOUBLE:
354 case FILTER_OP_NE_S64_DOUBLE:
355 case FILTER_OP_GT_S64_DOUBLE:
356 case FILTER_OP_LT_S64_DOUBLE:
357 case FILTER_OP_GE_S64_DOUBLE:
358 case FILTER_OP_LE_S64_DOUBLE:
359 case FILTER_OP_BIT_RSHIFT:
360 case FILTER_OP_BIT_LSHIFT:
361 case FILTER_OP_BIT_AND:
362 case FILTER_OP_BIT_OR:
363 case FILTER_OP_BIT_XOR:
364 {
365 if (unlikely(pc + sizeof(struct binary_op)
366 > start_pc + bytecode->len)) {
367 ret = -ERANGE;
368 }
369 break;
370 }
371
372 /* unary */
373 case FILTER_OP_UNARY_PLUS:
374 case FILTER_OP_UNARY_MINUS:
375 case FILTER_OP_UNARY_NOT:
376 case FILTER_OP_UNARY_PLUS_S64:
377 case FILTER_OP_UNARY_MINUS_S64:
378 case FILTER_OP_UNARY_NOT_S64:
379 case FILTER_OP_UNARY_PLUS_DOUBLE:
380 case FILTER_OP_UNARY_MINUS_DOUBLE:
381 case FILTER_OP_UNARY_NOT_DOUBLE:
382 case FILTER_OP_UNARY_BIT_NOT:
383 {
384 if (unlikely(pc + sizeof(struct unary_op)
385 > start_pc + bytecode->len)) {
386 ret = -ERANGE;
387 }
388 break;
389 }
390
391 /* logical */
392 case FILTER_OP_AND:
393 case FILTER_OP_OR:
394 {
395 if (unlikely(pc + sizeof(struct logical_op)
396 > start_pc + bytecode->len)) {
397 ret = -ERANGE;
398 }
399 break;
400 }
401
402 /* load field and get context ref */
403 case FILTER_OP_LOAD_FIELD_REF:
404 case FILTER_OP_GET_CONTEXT_REF:
405 case FILTER_OP_LOAD_FIELD_REF_STRING:
406 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
407 case FILTER_OP_LOAD_FIELD_REF_S64:
408 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
409 case FILTER_OP_GET_CONTEXT_REF_STRING:
410 case FILTER_OP_GET_CONTEXT_REF_S64:
411 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
412 {
413 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct field_ref)
414 > start_pc + bytecode->len)) {
415 ret = -ERANGE;
416 }
417 break;
418 }
419
420 /* load from immediate operand */
421 case FILTER_OP_LOAD_STRING:
422 case FILTER_OP_LOAD_STAR_GLOB_STRING:
423 {
424 struct load_op *insn = (struct load_op *) pc;
425 uint32_t str_len, maxlen;
426
427 if (unlikely(pc + sizeof(struct load_op)
428 > start_pc + bytecode->len)) {
429 ret = -ERANGE;
430 break;
431 }
432
433 maxlen = start_pc + bytecode->len - pc - sizeof(struct load_op);
434 str_len = strnlen(insn->data, maxlen);
435 if (unlikely(str_len >= maxlen)) {
436 /* Final '\0' not found within range */
437 ret = -ERANGE;
438 }
439 break;
440 }
441
442 case FILTER_OP_LOAD_S64:
443 {
444 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_numeric)
445 > start_pc + bytecode->len)) {
446 ret = -ERANGE;
447 }
448 break;
449 }
450
451 case FILTER_OP_LOAD_DOUBLE:
452 {
453 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct literal_double)
454 > start_pc + bytecode->len)) {
455 ret = -ERANGE;
456 }
457 break;
458 }
459
460 case FILTER_OP_CAST_TO_S64:
461 case FILTER_OP_CAST_DOUBLE_TO_S64:
462 case FILTER_OP_CAST_NOP:
463 {
464 if (unlikely(pc + sizeof(struct cast_op)
465 > start_pc + bytecode->len)) {
466 ret = -ERANGE;
467 }
468 break;
469 }
470
471 /*
472 * Instructions for recursive traversal through composed types.
473 */
474 case FILTER_OP_GET_CONTEXT_ROOT:
475 case FILTER_OP_GET_APP_CONTEXT_ROOT:
476 case FILTER_OP_GET_PAYLOAD_ROOT:
477 case FILTER_OP_LOAD_FIELD:
478 case FILTER_OP_LOAD_FIELD_S8:
479 case FILTER_OP_LOAD_FIELD_S16:
480 case FILTER_OP_LOAD_FIELD_S32:
481 case FILTER_OP_LOAD_FIELD_S64:
482 case FILTER_OP_LOAD_FIELD_U8:
483 case FILTER_OP_LOAD_FIELD_U16:
484 case FILTER_OP_LOAD_FIELD_U32:
485 case FILTER_OP_LOAD_FIELD_U64:
486 case FILTER_OP_LOAD_FIELD_STRING:
487 case FILTER_OP_LOAD_FIELD_SEQUENCE:
488 case FILTER_OP_LOAD_FIELD_DOUBLE:
489 if (unlikely(pc + sizeof(struct load_op)
490 > start_pc + bytecode->len)) {
491 ret = -ERANGE;
492 }
493 break;
494
495 case FILTER_OP_GET_SYMBOL:
496 {
497 struct load_op *insn = (struct load_op *) pc;
498 struct get_symbol *sym = (struct get_symbol *) insn->data;
499
500 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_symbol)
501 > start_pc + bytecode->len)) {
502 ret = -ERANGE;
503 break;
504 }
505 ret = validate_get_symbol(bytecode, sym);
506 break;
507 }
508
509 case FILTER_OP_GET_SYMBOL_FIELD:
510 ERR("Unexpected get symbol field");
511 ret = -EINVAL;
512 break;
513
514 case FILTER_OP_GET_INDEX_U16:
515 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u16)
516 > start_pc + bytecode->len)) {
517 ret = -ERANGE;
518 }
519 break;
520
521 case FILTER_OP_GET_INDEX_U64:
522 if (unlikely(pc + sizeof(struct load_op) + sizeof(struct get_index_u64)
523 > start_pc + bytecode->len)) {
524 ret = -ERANGE;
525 }
526 break;
527 }
528
529 return ret;
530 }
531
532 static
533 unsigned long delete_all_nodes(struct cds_lfht *ht)
534 {
535 struct cds_lfht_iter iter;
536 struct lfht_mp_node *node;
537 unsigned long nr_nodes = 0;
538
539 cds_lfht_for_each_entry(ht, &iter, node, node) {
540 int ret;
541
542 ret = cds_lfht_del(ht, cds_lfht_iter_get_node(&iter));
543 assert(!ret);
544 /* note: this hash table is never used concurrently */
545 free(node);
546 nr_nodes++;
547 }
548 return nr_nodes;
549 }
550
551 /*
552 * Return value:
553 * >=0: success
554 * <0: error
555 */
556 static
557 int validate_instruction_context(struct bytecode_runtime *bytecode,
558 struct vstack *stack,
559 char *start_pc,
560 char *pc)
561 {
562 int ret = 0;
563 const filter_opcode_t opcode = *(filter_opcode_t *) pc;
564
565 switch (opcode) {
566 case FILTER_OP_UNKNOWN:
567 default:
568 {
569 ERR("unknown bytecode op %u\n",
570 (unsigned int) *(filter_opcode_t *) pc);
571 ret = -EINVAL;
572 goto end;
573 }
574
575 case FILTER_OP_RETURN:
576 case FILTER_OP_RETURN_S64:
577 {
578 goto end;
579 }
580
581 /* binary */
582 case FILTER_OP_MUL:
583 case FILTER_OP_DIV:
584 case FILTER_OP_MOD:
585 case FILTER_OP_PLUS:
586 case FILTER_OP_MINUS:
587 {
588 ERR("unsupported bytecode op %u\n",
589 (unsigned int) opcode);
590 ret = -EINVAL;
591 goto end;
592 }
593
594 case FILTER_OP_EQ:
595 {
596 ret = bin_op_compare_check(stack, opcode, "==");
597 if (ret < 0)
598 goto end;
599 break;
600 }
601 case FILTER_OP_NE:
602 {
603 ret = bin_op_compare_check(stack, opcode, "!=");
604 if (ret < 0)
605 goto end;
606 break;
607 }
608 case FILTER_OP_GT:
609 {
610 ret = bin_op_compare_check(stack, opcode, ">");
611 if (ret < 0)
612 goto end;
613 break;
614 }
615 case FILTER_OP_LT:
616 {
617 ret = bin_op_compare_check(stack, opcode, "<");
618 if (ret < 0)
619 goto end;
620 break;
621 }
622 case FILTER_OP_GE:
623 {
624 ret = bin_op_compare_check(stack, opcode, ">=");
625 if (ret < 0)
626 goto end;
627 break;
628 }
629 case FILTER_OP_LE:
630 {
631 ret = bin_op_compare_check(stack, opcode, "<=");
632 if (ret < 0)
633 goto end;
634 break;
635 }
636
637 case FILTER_OP_EQ_STRING:
638 case FILTER_OP_NE_STRING:
639 case FILTER_OP_GT_STRING:
640 case FILTER_OP_LT_STRING:
641 case FILTER_OP_GE_STRING:
642 case FILTER_OP_LE_STRING:
643 {
644 if (!vstack_ax(stack) || !vstack_bx(stack)) {
645 ERR("Empty stack\n");
646 ret = -EINVAL;
647 goto end;
648 }
649 if (vstack_ax(stack)->type != REG_STRING
650 || vstack_bx(stack)->type != REG_STRING) {
651 ERR("Unexpected register type for string comparator\n");
652 ret = -EINVAL;
653 goto end;
654 }
655 break;
656 }
657
658 case FILTER_OP_EQ_STAR_GLOB_STRING:
659 case FILTER_OP_NE_STAR_GLOB_STRING:
660 {
661 if (!vstack_ax(stack) || !vstack_bx(stack)) {
662 ERR("Empty stack\n");
663 ret = -EINVAL;
664 goto end;
665 }
666 if (vstack_ax(stack)->type != REG_STAR_GLOB_STRING
667 && vstack_bx(stack)->type != REG_STAR_GLOB_STRING) {
668 ERR("Unexpected register type for globbing pattern comparator\n");
669 ret = -EINVAL;
670 goto end;
671 }
672 break;
673 }
674
675 case FILTER_OP_EQ_S64:
676 case FILTER_OP_NE_S64:
677 case FILTER_OP_GT_S64:
678 case FILTER_OP_LT_S64:
679 case FILTER_OP_GE_S64:
680 case FILTER_OP_LE_S64:
681 {
682 if (!vstack_ax(stack) || !vstack_bx(stack)) {
683 ERR("Empty stack\n");
684 ret = -EINVAL;
685 goto end;
686 }
687 if (vstack_ax(stack)->type != REG_S64
688 || vstack_bx(stack)->type != REG_S64) {
689 ERR("Unexpected register type for s64 comparator\n");
690 ret = -EINVAL;
691 goto end;
692 }
693 break;
694 }
695
696 case FILTER_OP_EQ_DOUBLE:
697 case FILTER_OP_NE_DOUBLE:
698 case FILTER_OP_GT_DOUBLE:
699 case FILTER_OP_LT_DOUBLE:
700 case FILTER_OP_GE_DOUBLE:
701 case FILTER_OP_LE_DOUBLE:
702 {
703 if (!vstack_ax(stack) || !vstack_bx(stack)) {
704 ERR("Empty stack\n");
705 ret = -EINVAL;
706 goto end;
707 }
708 if (vstack_ax(stack)->type != REG_DOUBLE && vstack_bx(stack)->type != REG_DOUBLE) {
709 ERR("Double operator should have two double registers\n");
710 ret = -EINVAL;
711 goto end;
712 }
713 break;
714 }
715
716 case FILTER_OP_EQ_DOUBLE_S64:
717 case FILTER_OP_NE_DOUBLE_S64:
718 case FILTER_OP_GT_DOUBLE_S64:
719 case FILTER_OP_LT_DOUBLE_S64:
720 case FILTER_OP_GE_DOUBLE_S64:
721 case FILTER_OP_LE_DOUBLE_S64:
722 {
723 if (!vstack_ax(stack) || !vstack_bx(stack)) {
724 ERR("Empty stack\n");
725 ret = -EINVAL;
726 goto end;
727 }
728 if (vstack_ax(stack)->type != REG_S64 && vstack_bx(stack)->type != REG_DOUBLE) {
729 ERR("Double-S64 operator has unexpected register types\n");
730 ret = -EINVAL;
731 goto end;
732 }
733 break;
734 }
735
736 case FILTER_OP_EQ_S64_DOUBLE:
737 case FILTER_OP_NE_S64_DOUBLE:
738 case FILTER_OP_GT_S64_DOUBLE:
739 case FILTER_OP_LT_S64_DOUBLE:
740 case FILTER_OP_GE_S64_DOUBLE:
741 case FILTER_OP_LE_S64_DOUBLE:
742 {
743 if (!vstack_ax(stack) || !vstack_bx(stack)) {
744 ERR("Empty stack\n");
745 ret = -EINVAL;
746 goto end;
747 }
748 if (vstack_ax(stack)->type != REG_DOUBLE && vstack_bx(stack)->type != REG_S64) {
749 ERR("S64-Double operator has unexpected register types\n");
750 ret = -EINVAL;
751 goto end;
752 }
753 break;
754 }
755
756 case FILTER_OP_BIT_RSHIFT:
757 ret = bin_op_bitwise_check(stack, opcode, ">>");
758 if (ret < 0)
759 goto end;
760 break;
761 case FILTER_OP_BIT_LSHIFT:
762 ret = bin_op_bitwise_check(stack, opcode, "<<");
763 if (ret < 0)
764 goto end;
765 break;
766 case FILTER_OP_BIT_AND:
767 ret = bin_op_bitwise_check(stack, opcode, "&");
768 if (ret < 0)
769 goto end;
770 break;
771 case FILTER_OP_BIT_OR:
772 ret = bin_op_bitwise_check(stack, opcode, "|");
773 if (ret < 0)
774 goto end;
775 break;
776 case FILTER_OP_BIT_XOR:
777 ret = bin_op_bitwise_check(stack, opcode, "^");
778 if (ret < 0)
779 goto end;
780 break;
781
782 /* unary */
783 case FILTER_OP_UNARY_PLUS:
784 case FILTER_OP_UNARY_MINUS:
785 case FILTER_OP_UNARY_NOT:
786 {
787 if (!vstack_ax(stack)) {
788 ERR("Empty stack\n");
789 ret = -EINVAL;
790 goto end;
791 }
792 switch (vstack_ax(stack)->type) {
793 default:
794 ERR("unknown register type\n");
795 ret = -EINVAL;
796 goto end;
797
798 case REG_STRING:
799 case REG_STAR_GLOB_STRING:
800 ERR("Unary op can only be applied to numeric or floating point registers\n");
801 ret = -EINVAL;
802 goto end;
803 case REG_S64:
804 break;
805 case REG_DOUBLE:
806 break;
807 case REG_UNKNOWN:
808 break;
809 }
810 break;
811 }
812 case FILTER_OP_UNARY_BIT_NOT:
813 {
814 if (!vstack_ax(stack)) {
815 ERR("Empty stack\n");
816 ret = -EINVAL;
817 goto end;
818 }
819 switch (vstack_ax(stack)->type) {
820 default:
821 ERR("unknown register type\n");
822 ret = -EINVAL;
823 goto end;
824
825 case REG_STRING:
826 case REG_STAR_GLOB_STRING:
827 case REG_DOUBLE:
828 ERR("Unary bitwise op can only be applied to numeric registers\n");
829 ret = -EINVAL;
830 goto end;
831 case REG_S64:
832 break;
833 case REG_UNKNOWN:
834 break;
835 }
836 break;
837 }
838
839 case FILTER_OP_UNARY_PLUS_S64:
840 case FILTER_OP_UNARY_MINUS_S64:
841 case FILTER_OP_UNARY_NOT_S64:
842 {
843 if (!vstack_ax(stack)) {
844 ERR("Empty stack\n");
845 ret = -EINVAL;
846 goto end;
847 }
848 if (vstack_ax(stack)->type != REG_S64) {
849 ERR("Invalid register type\n");
850 ret = -EINVAL;
851 goto end;
852 }
853 break;
854 }
855
856 case FILTER_OP_UNARY_PLUS_DOUBLE:
857 case FILTER_OP_UNARY_MINUS_DOUBLE:
858 case FILTER_OP_UNARY_NOT_DOUBLE:
859 {
860 if (!vstack_ax(stack)) {
861 ERR("Empty stack\n");
862 ret = -EINVAL;
863 goto end;
864 }
865 if (vstack_ax(stack)->type != REG_DOUBLE) {
866 ERR("Invalid register type\n");
867 ret = -EINVAL;
868 goto end;
869 }
870 break;
871 }
872
873 /* logical */
874 case FILTER_OP_AND:
875 case FILTER_OP_OR:
876 {
877 struct logical_op *insn = (struct logical_op *) pc;
878
879 if (!vstack_ax(stack)) {
880 ERR("Empty stack\n");
881 ret = -EINVAL;
882 goto end;
883 }
884 if (vstack_ax(stack)->type != REG_S64
885 && vstack_ax(stack)->type != REG_UNKNOWN) {
886 ERR("Logical comparator expects S64 or dynamic register\n");
887 ret = -EINVAL;
888 goto end;
889 }
890
891 dbg_printf("Validate jumping to bytecode offset %u\n",
892 (unsigned int) insn->skip_offset);
893 if (unlikely(start_pc + insn->skip_offset <= pc)) {
894 ERR("Loops are not allowed in bytecode\n");
895 ret = -EINVAL;
896 goto end;
897 }
898 break;
899 }
900
901 /* load field ref */
902 case FILTER_OP_LOAD_FIELD_REF:
903 {
904 ERR("Unknown field ref type\n");
905 ret = -EINVAL;
906 goto end;
907 }
908 case FILTER_OP_LOAD_FIELD_REF_STRING:
909 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
910 {
911 struct load_op *insn = (struct load_op *) pc;
912 struct field_ref *ref = (struct field_ref *) insn->data;
913
914 dbg_printf("Validate load field ref offset %u type string\n",
915 ref->offset);
916 break;
917 }
918 case FILTER_OP_LOAD_FIELD_REF_S64:
919 {
920 struct load_op *insn = (struct load_op *) pc;
921 struct field_ref *ref = (struct field_ref *) insn->data;
922
923 dbg_printf("Validate load field ref offset %u type s64\n",
924 ref->offset);
925 break;
926 }
927 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
928 {
929 struct load_op *insn = (struct load_op *) pc;
930 struct field_ref *ref = (struct field_ref *) insn->data;
931
932 dbg_printf("Validate load field ref offset %u type double\n",
933 ref->offset);
934 break;
935 }
936
937 /* load from immediate operand */
938 case FILTER_OP_LOAD_STRING:
939 case FILTER_OP_LOAD_STAR_GLOB_STRING:
940 {
941 break;
942 }
943
944 case FILTER_OP_LOAD_S64:
945 {
946 break;
947 }
948
949 case FILTER_OP_LOAD_DOUBLE:
950 {
951 break;
952 }
953
954 case FILTER_OP_CAST_TO_S64:
955 case FILTER_OP_CAST_DOUBLE_TO_S64:
956 {
957 struct cast_op *insn = (struct cast_op *) pc;
958
959 if (!vstack_ax(stack)) {
960 ERR("Empty stack\n");
961 ret = -EINVAL;
962 goto end;
963 }
964 switch (vstack_ax(stack)->type) {
965 default:
966 ERR("unknown register type\n");
967 ret = -EINVAL;
968 goto end;
969
970 case REG_STRING:
971 case REG_STAR_GLOB_STRING:
972 ERR("Cast op can only be applied to numeric or floating point registers\n");
973 ret = -EINVAL;
974 goto end;
975 case REG_S64:
976 break;
977 case REG_DOUBLE:
978 break;
979 case REG_UNKNOWN:
980 break;
981 }
982 if (insn->op == FILTER_OP_CAST_DOUBLE_TO_S64) {
983 if (vstack_ax(stack)->type != REG_DOUBLE) {
984 ERR("Cast expects double\n");
985 ret = -EINVAL;
986 goto end;
987 }
988 }
989 break;
990 }
991 case FILTER_OP_CAST_NOP:
992 {
993 break;
994 }
995
996 /* get context ref */
997 case FILTER_OP_GET_CONTEXT_REF:
998 {
999 struct load_op *insn = (struct load_op *) pc;
1000 struct field_ref *ref = (struct field_ref *) insn->data;
1001
1002 dbg_printf("Validate get context ref offset %u type dynamic\n",
1003 ref->offset);
1004 break;
1005 }
1006 case FILTER_OP_GET_CONTEXT_REF_STRING:
1007 {
1008 struct load_op *insn = (struct load_op *) pc;
1009 struct field_ref *ref = (struct field_ref *) insn->data;
1010
1011 dbg_printf("Validate get context ref offset %u type string\n",
1012 ref->offset);
1013 break;
1014 }
1015 case FILTER_OP_GET_CONTEXT_REF_S64:
1016 {
1017 struct load_op *insn = (struct load_op *) pc;
1018 struct field_ref *ref = (struct field_ref *) insn->data;
1019
1020 dbg_printf("Validate get context ref offset %u type s64\n",
1021 ref->offset);
1022 break;
1023 }
1024 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
1025 {
1026 struct load_op *insn = (struct load_op *) pc;
1027 struct field_ref *ref = (struct field_ref *) insn->data;
1028
1029 dbg_printf("Validate get context ref offset %u type double\n",
1030 ref->offset);
1031 break;
1032 }
1033
1034 /*
1035 * Instructions for recursive traversal through composed types.
1036 */
1037 case FILTER_OP_GET_CONTEXT_ROOT:
1038 {
1039 dbg_printf("Validate get context root\n");
1040 break;
1041 }
1042 case FILTER_OP_GET_APP_CONTEXT_ROOT:
1043 {
1044 dbg_printf("Validate get app context root\n");
1045 break;
1046 }
1047 case FILTER_OP_GET_PAYLOAD_ROOT:
1048 {
1049 dbg_printf("Validate get payload root\n");
1050 break;
1051 }
1052 case FILTER_OP_LOAD_FIELD:
1053 {
1054 /*
1055 * We tolerate that field type is unknown at validation,
1056 * because we are performing the load specialization in
1057 * a phase after validation.
1058 */
1059 dbg_printf("Validate load field\n");
1060 break;
1061 }
1062
1063 /*
1064 * Disallow already specialized bytecode op load field instructions to
1065 * ensure that the received bytecode does not read a memory area larger
1066 * than the memory targeted by the instrumentation.
1067 */
1068 case FILTER_OP_LOAD_FIELD_S8:
1069 case FILTER_OP_LOAD_FIELD_S16:
1070 case FILTER_OP_LOAD_FIELD_S32:
1071 case FILTER_OP_LOAD_FIELD_S64:
1072 case FILTER_OP_LOAD_FIELD_U8:
1073 case FILTER_OP_LOAD_FIELD_U16:
1074 case FILTER_OP_LOAD_FIELD_U32:
1075 case FILTER_OP_LOAD_FIELD_U64:
1076 case FILTER_OP_LOAD_FIELD_STRING:
1077 case FILTER_OP_LOAD_FIELD_SEQUENCE:
1078 case FILTER_OP_LOAD_FIELD_DOUBLE:
1079 {
1080 dbg_printf("Validate load field, reject specialized load instruction (%d)\n",
1081 (int) opcode);
1082 ret = -EINVAL;
1083 goto end;
1084 }
1085
1086 case FILTER_OP_GET_SYMBOL:
1087 {
1088 struct load_op *insn = (struct load_op *) pc;
1089 struct get_symbol *sym = (struct get_symbol *) insn->data;
1090
1091 dbg_printf("Validate get symbol offset %u\n", sym->offset);
1092 break;
1093 }
1094
1095 case FILTER_OP_GET_SYMBOL_FIELD:
1096 {
1097 struct load_op *insn = (struct load_op *) pc;
1098 struct get_symbol *sym = (struct get_symbol *) insn->data;
1099
1100 dbg_printf("Validate get symbol field offset %u\n", sym->offset);
1101 break;
1102 }
1103
1104 case FILTER_OP_GET_INDEX_U16:
1105 {
1106 struct load_op *insn = (struct load_op *) pc;
1107 struct get_index_u16 *get_index = (struct get_index_u16 *) insn->data;
1108
1109 dbg_printf("Validate get index u16 index %u\n", get_index->index);
1110 break;
1111 }
1112
1113 case FILTER_OP_GET_INDEX_U64:
1114 {
1115 struct load_op *insn = (struct load_op *) pc;
1116 struct get_index_u64 *get_index = (struct get_index_u64 *) insn->data;
1117
1118 dbg_printf("Validate get index u64 index %" PRIu64 "\n", get_index->index);
1119 break;
1120 }
1121 }
1122 end:
1123 return ret;
1124 }
1125
1126 /*
1127 * Return value:
1128 * 0: success
1129 * <0: error
1130 */
1131 static
1132 int validate_instruction_all_contexts(struct bytecode_runtime *bytecode,
1133 struct cds_lfht *merge_points,
1134 struct vstack *stack,
1135 char *start_pc,
1136 char *pc)
1137 {
1138 int ret;
1139 unsigned long target_pc = pc - start_pc;
1140 struct cds_lfht_iter iter;
1141 struct cds_lfht_node *node;
1142 struct lfht_mp_node *mp_node;
1143 unsigned long hash;
1144
1145 /* Validate the context resulting from the previous instruction */
1146 ret = validate_instruction_context(bytecode, stack, start_pc, pc);
1147 if (ret < 0)
1148 return ret;
1149
1150 /* Validate merge points */
1151 hash = lttng_hash_mix((const char *) target_pc, sizeof(target_pc),
1152 lttng_hash_seed);
1153 cds_lfht_lookup(merge_points, hash, lttng_hash_match,
1154 (const char *) target_pc, &iter);
1155 node = cds_lfht_iter_get_node(&iter);
1156 if (node) {
1157 mp_node = caa_container_of(node, struct lfht_mp_node, node);
1158
1159 dbg_printf("Filter: validate merge point at offset %lu\n",
1160 target_pc);
1161 if (merge_points_compare(stack, &mp_node->stack)) {
1162 ERR("Merge points differ for offset %lu\n",
1163 target_pc);
1164 return -EINVAL;
1165 }
1166 /* Once validated, we can remove the merge point */
1167 dbg_printf("Filter: remove merge point at offset %lu\n",
1168 target_pc);
1169 ret = cds_lfht_del(merge_points, node);
1170 assert(!ret);
1171 }
1172 return 0;
1173 }
1174
1175 /*
1176 * Validate load instructions: specialized instructions not accepted as input.
1177 *
1178 * Return value:
1179 * >0: going to next insn.
1180 * 0: success, stop iteration.
1181 * <0: error
1182 */
1183 static
1184 int validate_load(char **_next_pc,
1185 char *pc)
1186 {
1187 int ret = 0;
1188 char *next_pc = *_next_pc;
1189
1190 switch (*(filter_opcode_t *) pc) {
1191 case FILTER_OP_UNKNOWN:
1192 default:
1193 {
1194 ERR("Unknown bytecode op %u\n",
1195 (unsigned int) *(filter_opcode_t *) pc);
1196 ret = -EINVAL;
1197 goto end;
1198 }
1199
1200 case FILTER_OP_RETURN:
1201 {
1202 next_pc += sizeof(struct return_op);
1203 break;
1204 }
1205
1206 case FILTER_OP_RETURN_S64:
1207 {
1208 next_pc += sizeof(struct return_op);
1209 break;
1210 }
1211
1212 /* binary */
1213 case FILTER_OP_MUL:
1214 case FILTER_OP_DIV:
1215 case FILTER_OP_MOD:
1216 case FILTER_OP_PLUS:
1217 case FILTER_OP_MINUS:
1218 {
1219 ERR("Unsupported bytecode op %u\n",
1220 (unsigned int) *(filter_opcode_t *) pc);
1221 ret = -EINVAL;
1222 goto end;
1223 }
1224
1225 case FILTER_OP_EQ:
1226 case FILTER_OP_NE:
1227 case FILTER_OP_GT:
1228 case FILTER_OP_LT:
1229 case FILTER_OP_GE:
1230 case FILTER_OP_LE:
1231 case FILTER_OP_EQ_STRING:
1232 case FILTER_OP_NE_STRING:
1233 case FILTER_OP_GT_STRING:
1234 case FILTER_OP_LT_STRING:
1235 case FILTER_OP_GE_STRING:
1236 case FILTER_OP_LE_STRING:
1237 case FILTER_OP_EQ_STAR_GLOB_STRING:
1238 case FILTER_OP_NE_STAR_GLOB_STRING:
1239 case FILTER_OP_EQ_S64:
1240 case FILTER_OP_NE_S64:
1241 case FILTER_OP_GT_S64:
1242 case FILTER_OP_LT_S64:
1243 case FILTER_OP_GE_S64:
1244 case FILTER_OP_LE_S64:
1245 case FILTER_OP_EQ_DOUBLE:
1246 case FILTER_OP_NE_DOUBLE:
1247 case FILTER_OP_GT_DOUBLE:
1248 case FILTER_OP_LT_DOUBLE:
1249 case FILTER_OP_GE_DOUBLE:
1250 case FILTER_OP_LE_DOUBLE:
1251 case FILTER_OP_EQ_DOUBLE_S64:
1252 case FILTER_OP_NE_DOUBLE_S64:
1253 case FILTER_OP_GT_DOUBLE_S64:
1254 case FILTER_OP_LT_DOUBLE_S64:
1255 case FILTER_OP_GE_DOUBLE_S64:
1256 case FILTER_OP_LE_DOUBLE_S64:
1257 case FILTER_OP_EQ_S64_DOUBLE:
1258 case FILTER_OP_NE_S64_DOUBLE:
1259 case FILTER_OP_GT_S64_DOUBLE:
1260 case FILTER_OP_LT_S64_DOUBLE:
1261 case FILTER_OP_GE_S64_DOUBLE:
1262 case FILTER_OP_LE_S64_DOUBLE:
1263 case FILTER_OP_BIT_RSHIFT:
1264 case FILTER_OP_BIT_LSHIFT:
1265 case FILTER_OP_BIT_AND:
1266 case FILTER_OP_BIT_OR:
1267 case FILTER_OP_BIT_XOR:
1268 {
1269 next_pc += sizeof(struct binary_op);
1270 break;
1271 }
1272
1273 /* unary */
1274 case FILTER_OP_UNARY_PLUS:
1275 case FILTER_OP_UNARY_MINUS:
1276 case FILTER_OP_UNARY_PLUS_S64:
1277 case FILTER_OP_UNARY_MINUS_S64:
1278 case FILTER_OP_UNARY_NOT_S64:
1279 case FILTER_OP_UNARY_NOT:
1280 case FILTER_OP_UNARY_BIT_NOT:
1281 case FILTER_OP_UNARY_PLUS_DOUBLE:
1282 case FILTER_OP_UNARY_MINUS_DOUBLE:
1283 case FILTER_OP_UNARY_NOT_DOUBLE:
1284 {
1285 next_pc += sizeof(struct unary_op);
1286 break;
1287 }
1288
1289 /* logical */
1290 case FILTER_OP_AND:
1291 case FILTER_OP_OR:
1292 {
1293 next_pc += sizeof(struct logical_op);
1294 break;
1295 }
1296
1297 /* load field ref */
1298 case FILTER_OP_LOAD_FIELD_REF:
1299 /* get context ref */
1300 case FILTER_OP_GET_CONTEXT_REF:
1301 {
1302 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1303 break;
1304 }
1305 case FILTER_OP_LOAD_FIELD_REF_STRING:
1306 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
1307 case FILTER_OP_GET_CONTEXT_REF_STRING:
1308 case FILTER_OP_LOAD_FIELD_REF_S64:
1309 case FILTER_OP_GET_CONTEXT_REF_S64:
1310 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
1311 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
1312 {
1313 /*
1314 * Reject specialized load field ref instructions.
1315 */
1316 ret = -EINVAL;
1317 goto end;
1318 }
1319
1320 /* load from immediate operand */
1321 case FILTER_OP_LOAD_STRING:
1322 case FILTER_OP_LOAD_STAR_GLOB_STRING:
1323 {
1324 struct load_op *insn = (struct load_op *) pc;
1325
1326 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1327 break;
1328 }
1329
1330 case FILTER_OP_LOAD_S64:
1331 {
1332 next_pc += sizeof(struct load_op) + sizeof(struct literal_numeric);
1333 break;
1334 }
1335 case FILTER_OP_LOAD_DOUBLE:
1336 {
1337 next_pc += sizeof(struct load_op) + sizeof(struct literal_double);
1338 break;
1339 }
1340
1341 case FILTER_OP_CAST_DOUBLE_TO_S64:
1342 case FILTER_OP_CAST_TO_S64:
1343 case FILTER_OP_CAST_NOP:
1344 {
1345 next_pc += sizeof(struct cast_op);
1346 break;
1347 }
1348
1349 /*
1350 * Instructions for recursive traversal through composed types.
1351 */
1352 case FILTER_OP_GET_CONTEXT_ROOT:
1353 case FILTER_OP_GET_APP_CONTEXT_ROOT:
1354 case FILTER_OP_GET_PAYLOAD_ROOT:
1355 case FILTER_OP_LOAD_FIELD:
1356 {
1357 next_pc += sizeof(struct load_op);
1358 break;
1359 }
1360
1361 case FILTER_OP_LOAD_FIELD_S8:
1362 case FILTER_OP_LOAD_FIELD_S16:
1363 case FILTER_OP_LOAD_FIELD_S32:
1364 case FILTER_OP_LOAD_FIELD_S64:
1365 case FILTER_OP_LOAD_FIELD_U8:
1366 case FILTER_OP_LOAD_FIELD_U16:
1367 case FILTER_OP_LOAD_FIELD_U32:
1368 case FILTER_OP_LOAD_FIELD_U64:
1369 case FILTER_OP_LOAD_FIELD_STRING:
1370 case FILTER_OP_LOAD_FIELD_SEQUENCE:
1371 case FILTER_OP_LOAD_FIELD_DOUBLE:
1372 {
1373 /*
1374 * Reject specialized load field instructions.
1375 */
1376 ret = -EINVAL;
1377 goto end;
1378 }
1379
1380 case FILTER_OP_GET_SYMBOL:
1381 case FILTER_OP_GET_SYMBOL_FIELD:
1382 {
1383 next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
1384 break;
1385 }
1386
1387 case FILTER_OP_GET_INDEX_U16:
1388 {
1389 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
1390 break;
1391 }
1392
1393 case FILTER_OP_GET_INDEX_U64:
1394 {
1395 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
1396 break;
1397 }
1398
1399 }
1400 end:
1401 *_next_pc = next_pc;
1402 return ret;
1403 }
1404
1405 /*
1406 * Return value:
1407 * >0: going to next insn.
1408 * 0: success, stop iteration.
1409 * <0: error
1410 */
/*
 * exec_insn: symbolically execute a single filter bytecode instruction.
 *
 * Validates operand types and virtual-stack depth for the instruction at
 * "pc", updates the virtual type stack "stack" accordingly, registers
 * merge points (in "merge_points") for the skip targets of logical
 * and/or operators, and stores the address of the following instruction
 * into *_next_pc.
 *
 * Return value:
 *   >0: going to next insn.
 *    0: success, stop iteration (a return instruction was reached).
 *   <0: error (invalid opcode, type mismatch, or stack under/overflow).
 */
1411 static
1412 int exec_insn(struct bytecode_runtime *bytecode,
1413 struct cds_lfht *merge_points,
1414 struct vstack *stack,
1415 char **_next_pc,
1416 char *pc)
1417 {
1418 int ret = 1;
1419 char *next_pc = *_next_pc;
1420
1421 switch (*(filter_opcode_t *) pc) {
1422 case FILTER_OP_UNKNOWN:
1423 default:
1424 {
1425 ERR("unknown bytecode op %u\n",
1426 (unsigned int) *(filter_opcode_t *) pc);
1427 ret = -EINVAL;
1428 goto end;
1429 }
1430
/* Untyped return: accepts S64 or a not-yet-specialized (unknown) result. */
1431 case FILTER_OP_RETURN:
1432 {
1433 if (!vstack_ax(stack)) {
1434 ERR("Empty stack\n");
1435 ret = -EINVAL;
1436 goto end;
1437 }
1438 switch (vstack_ax(stack)->type) {
1439 case REG_S64:
1440 case REG_UNKNOWN:
1441 break;
1442 default:
1443 ERR("Unexpected register type %d at end of bytecode\n",
1444 (int) vstack_ax(stack)->type);
1445 ret = -EINVAL;
1446 goto end;
1447 }
1448
1449 ret = 0;
1450 goto end;
1451 }
/* Specialized return: requires a known S64 result on top of stack. */
1452 case FILTER_OP_RETURN_S64:
1453 {
1454 if (!vstack_ax(stack)) {
1455 ERR("Empty stack\n");
1456 ret = -EINVAL;
1457 goto end;
1458 }
1459 switch (vstack_ax(stack)->type) {
1460 case REG_S64:
1461 break;
1462 default:
1463 case REG_UNKNOWN:
1464 ERR("Unexpected register type %d at end of bytecode\n",
1465 (int) vstack_ax(stack)->type);
1466 ret = -EINVAL;
1467 goto end;
1468 }
1469
1470 ret = 0;
1471 goto end;
1472 }
1473
1474 /* binary */
1475 case FILTER_OP_MUL:
1476 case FILTER_OP_DIV:
1477 case FILTER_OP_MOD:
1478 case FILTER_OP_PLUS:
1479 case FILTER_OP_MINUS:
1480 {
1481 ERR("unsupported bytecode op %u\n",
1482 (unsigned int) *(filter_opcode_t *) pc);
1483 ret = -EINVAL;
1484 goto end;
1485 }
1486
/* Comparison and bitwise binary operators. */
1487 case FILTER_OP_EQ:
1488 case FILTER_OP_NE:
1489 case FILTER_OP_GT:
1490 case FILTER_OP_LT:
1491 case FILTER_OP_GE:
1492 case FILTER_OP_LE:
1493 case FILTER_OP_EQ_STRING:
1494 case FILTER_OP_NE_STRING:
1495 case FILTER_OP_GT_STRING:
1496 case FILTER_OP_LT_STRING:
1497 case FILTER_OP_GE_STRING:
1498 case FILTER_OP_LE_STRING:
1499 case FILTER_OP_EQ_STAR_GLOB_STRING:
1500 case FILTER_OP_NE_STAR_GLOB_STRING:
1501 case FILTER_OP_EQ_S64:
1502 case FILTER_OP_NE_S64:
1503 case FILTER_OP_GT_S64:
1504 case FILTER_OP_LT_S64:
1505 case FILTER_OP_GE_S64:
1506 case FILTER_OP_LE_S64:
1507 case FILTER_OP_EQ_DOUBLE:
1508 case FILTER_OP_NE_DOUBLE:
1509 case FILTER_OP_GT_DOUBLE:
1510 case FILTER_OP_LT_DOUBLE:
1511 case FILTER_OP_GE_DOUBLE:
1512 case FILTER_OP_LE_DOUBLE:
1513 case FILTER_OP_EQ_DOUBLE_S64:
1514 case FILTER_OP_NE_DOUBLE_S64:
1515 case FILTER_OP_GT_DOUBLE_S64:
1516 case FILTER_OP_LT_DOUBLE_S64:
1517 case FILTER_OP_GE_DOUBLE_S64:
1518 case FILTER_OP_LE_DOUBLE_S64:
1519 case FILTER_OP_EQ_S64_DOUBLE:
1520 case FILTER_OP_NE_S64_DOUBLE:
1521 case FILTER_OP_GT_S64_DOUBLE:
1522 case FILTER_OP_LT_S64_DOUBLE:
1523 case FILTER_OP_GE_S64_DOUBLE:
1524 case FILTER_OP_LE_S64_DOUBLE:
1525 case FILTER_OP_BIT_RSHIFT:
1526 case FILTER_OP_BIT_LSHIFT:
1527 case FILTER_OP_BIT_AND:
1528 case FILTER_OP_BIT_OR:
1529 case FILTER_OP_BIT_XOR:
1530 {
1531 /* Pop 2, push 1 */
1532 if (vstack_pop(stack)) {
1533 ret = -EINVAL;
1534 goto end;
1535 }
1536 if (!vstack_ax(stack)) {
1537 ERR("Empty stack\n");
1538 ret = -EINVAL;
1539 goto end;
1540 }
1541 switch (vstack_ax(stack)->type) {
1542 case REG_S64:
1543 case REG_DOUBLE:
1544 case REG_STRING:
1545 case REG_STAR_GLOB_STRING:
1546 case REG_UNKNOWN:
1547 break;
1548 default:
1549 ERR("Unexpected register type %d for operation\n",
1550 (int) vstack_ax(stack)->type);
1551 ret = -EINVAL;
1552 goto end;
1553 }
1554
/* All comparison/bitwise binary operators produce an S64 result. */
1555 vstack_ax(stack)->type = REG_S64;
1556 next_pc += sizeof(struct binary_op);
1557 break;
1558 }
1559
1560 /* unary */
1561 case FILTER_OP_UNARY_PLUS:
1562 case FILTER_OP_UNARY_MINUS:
1563 {
1564 /* Pop 1, push 1 */
1565 if (!vstack_ax(stack)) {
1566 ERR("Empty stack\n");
1567 ret = -EINVAL;
1568 goto end;
1569 }
1570 switch (vstack_ax(stack)->type) {
1571 case REG_UNKNOWN:
1572 case REG_DOUBLE:
1573 case REG_S64:
1574 break;
1575 default:
1576 ERR("Unexpected register type %d for operation\n",
1577 (int) vstack_ax(stack)->type);
1578 ret = -EINVAL;
1579 goto end;
1580 }
/* Result type not known until the operand type gets specialized. */
1581 vstack_ax(stack)->type = REG_UNKNOWN;
1582 next_pc += sizeof(struct unary_op);
1583 break;
1584 }
1585
1586 case FILTER_OP_UNARY_PLUS_S64:
1587 case FILTER_OP_UNARY_MINUS_S64:
1588 case FILTER_OP_UNARY_NOT_S64:
1589 {
1590 /* Pop 1, push 1 */
1591 if (!vstack_ax(stack)) {
1592 ERR("Empty stack\n");
1593 ret = -EINVAL;
1594 goto end;
1595 }
1596 switch (vstack_ax(stack)->type) {
1597 case REG_S64:
1598 break;
1599 default:
1600 ERR("Unexpected register type %d for operation\n",
1601 (int) vstack_ax(stack)->type);
1602 ret = -EINVAL;
1603 goto end;
1604 }
1605
1606 vstack_ax(stack)->type = REG_S64;
1607 next_pc += sizeof(struct unary_op);
1608 break;
1609 }
1610
1611 case FILTER_OP_UNARY_NOT:
1612 {
1613 /* Pop 1, push 1 */
1614 if (!vstack_ax(stack)) {
1615 ERR("Empty stack\n");
1616 ret = -EINVAL;
1617 goto end;
1618 }
1619 switch (vstack_ax(stack)->type) {
1620 case REG_UNKNOWN:
1621 case REG_DOUBLE:
1622 case REG_S64:
1623 break;
1624 default:
1625 ERR("Unexpected register type %d for operation\n",
1626 (int) vstack_ax(stack)->type);
1627 ret = -EINVAL;
1628 goto end;
1629 }
1630
1631 vstack_ax(stack)->type = REG_S64;
1632 next_pc += sizeof(struct unary_op);
1633 break;
1634 }
1635
1636 case FILTER_OP_UNARY_BIT_NOT:
1637 {
1638 /* Pop 1, push 1 */
1639 if (!vstack_ax(stack)) {
1640 ERR("Empty stack\n");
1641 ret = -EINVAL;
1642 goto end;
1643 }
1644 switch (vstack_ax(stack)->type) {
1645 case REG_UNKNOWN:
1646 case REG_S64:
1647 break;
/* Bitwise not is rejected on floating-point operands. */
1648 case REG_DOUBLE:
1649 default:
1650 ERR("Unexpected register type %d for operation\n",
1651 (int) vstack_ax(stack)->type);
1652 ret = -EINVAL;
1653 goto end;
1654 }
1655
1656 vstack_ax(stack)->type = REG_S64;
1657 next_pc += sizeof(struct unary_op);
1658 break;
1659 }
1660
1661 case FILTER_OP_UNARY_NOT_DOUBLE:
1662 {
1663 /* Pop 1, push 1 */
1664 if (!vstack_ax(stack)) {
1665 ERR("Empty stack\n");
1666 ret = -EINVAL;
1667 goto end;
1668 }
1669 switch (vstack_ax(stack)->type) {
1670 case REG_DOUBLE:
1671 break;
1672 default:
1673 ERR("Incorrect register type %d for operation\n",
1674 (int) vstack_ax(stack)->type);
1675 ret = -EINVAL;
1676 goto end;
1677 }
1678
/* Logical not of a double yields a boolean (S64) result. */
1679 vstack_ax(stack)->type = REG_S64;
1680 next_pc += sizeof(struct unary_op);
1681 break;
1682 }
1683
1684 case FILTER_OP_UNARY_PLUS_DOUBLE:
1685 case FILTER_OP_UNARY_MINUS_DOUBLE:
1686 {
1687 /* Pop 1, push 1 */
1688 if (!vstack_ax(stack)) {
1689 ERR("Empty stack\n");
1690 ret = -EINVAL;
1691 goto end;
1692 }
1693 switch (vstack_ax(stack)->type) {
1694 case REG_DOUBLE:
1695 break;
1696 default:
1697 ERR("Incorrect register type %d for operation\n",
1698 (int) vstack_ax(stack)->type);
1699 ret = -EINVAL;
1700 goto end;
1701 }
1702
1703 vstack_ax(stack)->type = REG_DOUBLE;
1704 next_pc += sizeof(struct unary_op);
1705 break;
1706 }
1707
1708 /* logical */
1709 case FILTER_OP_AND:
1710 case FILTER_OP_OR:
1711 {
1712 struct logical_op *insn = (struct logical_op *) pc;
1713 int merge_ret;
1714
/*
 * The short-circuit jump target must observe the same stack state
 * on every path reaching it: register it as a merge point.
 */
1715 /* Add merge point to table */
1716 merge_ret = merge_point_add_check(merge_points,
1717 insn->skip_offset, stack);
1718 if (merge_ret) {
1719 ret = merge_ret;
1720 goto end;
1721 }
1722
1723 if (!vstack_ax(stack)) {
1724 ERR("Empty stack\n");
1725 ret = -EINVAL;
1726 goto end;
1727 }
1728 /* There is always a cast-to-s64 operation before a or/and op. */
1729 switch (vstack_ax(stack)->type) {
1730 case REG_S64:
1731 break;
1732 default:
1733 ERR("Incorrect register type %d for operation\n",
1734 (int) vstack_ax(stack)->type);
1735 ret = -EINVAL;
1736 goto end;
1737 }
1738
1739 /* Continue to next instruction */
1740 /* Pop 1 when jump not taken */
1741 if (vstack_pop(stack)) {
1742 ret = -EINVAL;
1743 goto end;
1744 }
1745 next_pc += sizeof(struct logical_op);
1746 break;
1747 }
1748
1749 /* load field ref */
1750 case FILTER_OP_LOAD_FIELD_REF:
1751 {
1752 ERR("Unknown field ref type\n");
1753 ret = -EINVAL;
1754 goto end;
1755 }
1756 /* get context ref */
1757 case FILTER_OP_GET_CONTEXT_REF:
1758 {
1759 if (vstack_push(stack)) {
1760 ret = -EINVAL;
1761 goto end;
1762 }
1763 vstack_ax(stack)->type = REG_UNKNOWN;
1764 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1765 break;
1766 }
1767 case FILTER_OP_LOAD_FIELD_REF_STRING:
1768 case FILTER_OP_LOAD_FIELD_REF_SEQUENCE:
1769 case FILTER_OP_GET_CONTEXT_REF_STRING:
1770 {
1771 if (vstack_push(stack)) {
1772 ret = -EINVAL;
1773 goto end;
1774 }
1775 vstack_ax(stack)->type = REG_STRING;
1776 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1777 break;
1778 }
1779 case FILTER_OP_LOAD_FIELD_REF_S64:
1780 case FILTER_OP_GET_CONTEXT_REF_S64:
1781 {
1782 if (vstack_push(stack)) {
1783 ret = -EINVAL;
1784 goto end;
1785 }
1786 vstack_ax(stack)->type = REG_S64;
1787 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1788 break;
1789 }
1790 case FILTER_OP_LOAD_FIELD_REF_DOUBLE:
1791 case FILTER_OP_GET_CONTEXT_REF_DOUBLE:
1792 {
1793 if (vstack_push(stack)) {
1794 ret = -EINVAL;
1795 goto end;
1796 }
1797 vstack_ax(stack)->type = REG_DOUBLE;
1798 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1799 break;
1800 }
1801
1802 /* load from immediate operand */
1803 case FILTER_OP_LOAD_STRING:
1804 {
1805 struct load_op *insn = (struct load_op *) pc;
1806
1807 if (vstack_push(stack)) {
1808 ret = -EINVAL;
1809 goto end;
1810 }
1811 vstack_ax(stack)->type = REG_STRING;
/* Immediate string data follows the opcode inline, NUL-terminated. */
1812 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1813 break;
1814 }
1815
1816 case FILTER_OP_LOAD_STAR_GLOB_STRING:
1817 {
1818 struct load_op *insn = (struct load_op *) pc;
1819
1820 if (vstack_push(stack)) {
1821 ret = -EINVAL;
1822 goto end;
1823 }
1824 vstack_ax(stack)->type = REG_STAR_GLOB_STRING;
1825 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1826 break;
1827 }
1828
1829 case FILTER_OP_LOAD_S64:
1830 {
1831 if (vstack_push(stack)) {
1832 ret = -EINVAL;
1833 goto end;
1834 }
1835 vstack_ax(stack)->type = REG_S64;
1836 next_pc += sizeof(struct load_op)
1837 + sizeof(struct literal_numeric);
1838 break;
1839 }
1840
1841 case FILTER_OP_LOAD_DOUBLE:
1842 {
1843 if (vstack_push(stack)) {
1844 ret = -EINVAL;
1845 goto end;
1846 }
1847 vstack_ax(stack)->type = REG_DOUBLE;
1848 next_pc += sizeof(struct load_op)
1849 + sizeof(struct literal_double);
1850 break;
1851 }
1852
1853 case FILTER_OP_CAST_TO_S64:
1854 case FILTER_OP_CAST_DOUBLE_TO_S64:
1855 {
1856 /* Pop 1, push 1 */
1857 if (!vstack_ax(stack)) {
1858 ERR("Empty stack\n");
1859 ret = -EINVAL;
1860 goto end;
1861 }
1862 switch (vstack_ax(stack)->type) {
1863 case REG_S64:
1864 case REG_DOUBLE:
1865 case REG_UNKNOWN:
1866 break;
1867 default:
1868 ERR("Incorrect register type %d for cast\n",
1869 (int) vstack_ax(stack)->type);
1870 ret = -EINVAL;
1871 goto end;
1872 }
1873 vstack_ax(stack)->type = REG_S64;
1874 next_pc += sizeof(struct cast_op);
1875 break;
1876 }
1877 case FILTER_OP_CAST_NOP:
1878 {
1879 next_pc += sizeof(struct cast_op);
1880 break;
1881 }
1882
1883 /*
1884 * Instructions for recursive traversal through composed types.
1885 */
1886 case FILTER_OP_GET_CONTEXT_ROOT:
1887 case FILTER_OP_GET_APP_CONTEXT_ROOT:
1888 case FILTER_OP_GET_PAYLOAD_ROOT:
1889 {
1890 if (vstack_push(stack)) {
1891 ret = -EINVAL;
1892 goto end;
1893 }
/* Roots push a pointer consumed by get-symbol/get-index/load-field. */
1894 vstack_ax(stack)->type = REG_PTR;
1895 next_pc += sizeof(struct load_op);
1896 break;
1897 }
1898
1899 case FILTER_OP_LOAD_FIELD:
1900 {
1901 /* Pop 1, push 1 */
1902 if (!vstack_ax(stack)) {
1903 ERR("Empty stack\n");
1904 ret = -EINVAL;
1905 goto end;
1906 }
1907 if (vstack_ax(stack)->type != REG_PTR) {
1908 ERR("Expecting pointer on top of stack\n");
1909 ret = -EINVAL;
1910 goto end;
1911 }
1912 vstack_ax(stack)->type = REG_UNKNOWN;
1913 next_pc += sizeof(struct load_op);
1914 break;
1915 }
1916
1917 case FILTER_OP_LOAD_FIELD_S8:
1918 case FILTER_OP_LOAD_FIELD_S16:
1919 case FILTER_OP_LOAD_FIELD_S32:
1920 case FILTER_OP_LOAD_FIELD_S64:
1921 case FILTER_OP_LOAD_FIELD_U8:
1922 case FILTER_OP_LOAD_FIELD_U16:
1923 case FILTER_OP_LOAD_FIELD_U32:
1924 case FILTER_OP_LOAD_FIELD_U64:
1925 {
1926 /* Pop 1, push 1 */
1927 if (!vstack_ax(stack)) {
1928 ERR("Empty stack\n");
1929 ret = -EINVAL;
1930 goto end;
1931 }
1932 if (vstack_ax(stack)->type != REG_PTR) {
1933 ERR("Expecting pointer on top of stack\n");
1934 ret = -EINVAL;
1935 goto end;
1936 }
1937 vstack_ax(stack)->type = REG_S64;
1938 next_pc += sizeof(struct load_op);
1939 break;
1940 }
1941
1942 case FILTER_OP_LOAD_FIELD_STRING:
1943 case FILTER_OP_LOAD_FIELD_SEQUENCE:
1944 {
1945 /* Pop 1, push 1 */
1946 if (!vstack_ax(stack)) {
1947 ERR("Empty stack\n");
1948 ret = -EINVAL;
1949 goto end;
1950 }
1951 if (vstack_ax(stack)->type != REG_PTR) {
1952 ERR("Expecting pointer on top of stack\n");
1953 ret = -EINVAL;
1954 goto end;
1955 }
1956 vstack_ax(stack)->type = REG_STRING;
1957 next_pc += sizeof(struct load_op);
1958 break;
1959 }
1960
1961 case FILTER_OP_LOAD_FIELD_DOUBLE:
1962 {
1963 /* Pop 1, push 1 */
1964 if (!vstack_ax(stack)) {
1965 ERR("Empty stack\n");
1966 ret = -EINVAL;
1967 goto end;
1968 }
1969 if (vstack_ax(stack)->type != REG_PTR) {
1970 ERR("Expecting pointer on top of stack\n");
1971 ret = -EINVAL;
1972 goto end;
1973 }
1974 vstack_ax(stack)->type = REG_DOUBLE;
1975 next_pc += sizeof(struct load_op);
1976 break;
1977 }
1978
1979 case FILTER_OP_GET_SYMBOL:
1980 case FILTER_OP_GET_SYMBOL_FIELD:
1981 {
/* Keeps the pointer on the stack; only checks it is a pointer. */
1982 /* Pop 1, push 1 */
1983 if (!vstack_ax(stack)) {
1984 ERR("Empty stack\n");
1985 ret = -EINVAL;
1986 goto end;
1987 }
1988 if (vstack_ax(stack)->type != REG_PTR) {
1989 ERR("Expecting pointer on top of stack\n");
1990 ret = -EINVAL;
1991 goto end;
1992 }
1993 next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
1994 break;
1995 }
1996
1997 case FILTER_OP_GET_INDEX_U16:
1998 {
1999 /* Pop 1, push 1 */
2000 if (!vstack_ax(stack)) {
2001 ERR("Empty stack\n");
2002 ret = -EINVAL;
2003 goto end;
2004 }
2005 if (vstack_ax(stack)->type != REG_PTR) {
2006 ERR("Expecting pointer on top of stack\n");
2007 ret = -EINVAL;
2008 goto end;
2009 }
2010 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
2011 break;
2012 }
2013
2014 case FILTER_OP_GET_INDEX_U64:
2015 {
2016 /* Pop 1, push 1 */
2017 if (!vstack_ax(stack)) {
2018 ERR("Empty stack\n");
2019 ret = -EINVAL;
2020 goto end;
2021 }
2022 if (vstack_ax(stack)->type != REG_PTR) {
2023 ERR("Expecting pointer on top of stack\n");
2024 ret = -EINVAL;
2025 goto end;
2026 }
2027 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
2028 break;
2029 }
2030
2031 }
2032 end:
2033 *_next_pc = next_pc;
2034 return ret;
2035 }
2036
2037 int lttng_filter_validate_bytecode_load(struct bytecode_runtime *bytecode)
2038 {
2039 char *pc, *next_pc, *start_pc;
2040 int ret = -EINVAL;
2041
2042 start_pc = &bytecode->code[0];
2043 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
2044 pc = next_pc) {
2045 ret = bytecode_validate_overflow(bytecode, start_pc, pc);
2046 if (ret != 0) {
2047 if (ret == -ERANGE)
2048 ERR("filter bytecode overflow\n");
2049 goto end;
2050 }
2051 dbg_printf("Validating loads: op %s (%u)\n",
2052 print_op((unsigned int) *(filter_opcode_t *) pc),
2053 (unsigned int) *(filter_opcode_t *) pc);
2054
2055 ret = validate_load(&next_pc, pc);
2056 if (ret)
2057 goto end;
2058 }
2059 end:
2060 return ret;
2061 }
2062
2063 /*
2064 * Never called concurrently (hash seed is shared).
2065 */
/*
 * Fully validate a filter bytecode program before it is made callable:
 * linearly walk the instructions, symbolically executing each one
 * against a virtual type stack (exec_insn) and checking every recorded
 * merge point (short-circuit jump target) for consistent stack state
 * (validate_instruction_all_contexts).
 *
 * Returns 0 on success, a negative errno-style value on failure.
 * Must not be called concurrently (the global hash seed is shared).
 */
2066 int lttng_filter_validate_bytecode(struct bytecode_runtime *bytecode)
2067 {
2068 struct cds_lfht *merge_points;
2069 char *pc, *next_pc, *start_pc;
2070 int ret = -EINVAL;
2071 struct vstack stack;
2072
2073 vstack_init(&stack);
2074
/* Lazily seed the merge-point hash; safe only because of the
 * single-caller guarantee documented above. */
2075 if (!lttng_hash_seed_ready) {
2076 lttng_hash_seed = time(NULL);
2077 lttng_hash_seed_ready = 1;
2078 }
2079 /*
2080 * Note: merge_points hash table used by single thread, and
2081 * never concurrently resized. Therefore, we can use it without
2082 * holding RCU read-side lock and free nodes without using
2083 * call_rcu.
2084 */
2085 merge_points = cds_lfht_new(DEFAULT_NR_MERGE_POINTS,
2086 MIN_NR_BUCKETS, MAX_NR_BUCKETS,
2087 0, NULL);
2088 if (!merge_points) {
2089 ERR("Error allocating hash table for bytecode validation\n");
2090 return -ENOMEM;
2091 }
2092 start_pc = &bytecode->code[0];
2093 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;
2094 pc = next_pc) {
/* Ensure the instruction at pc fits within the bytecode buffer. */
2095 ret = bytecode_validate_overflow(bytecode, start_pc, pc);
2096 if (ret != 0) {
2097 if (ret == -ERANGE)
2098 ERR("filter bytecode overflow\n");
2099 goto end;
2100 }
2101 dbg_printf("Validating op %s (%u)\n",
2102 print_op((unsigned int) *(filter_opcode_t *) pc),
2103 (unsigned int) *(filter_opcode_t *) pc);
2104
2105 /*
2106 * For each instruction, validate the current context
2107 * (traversal of entire execution flow), and validate
2108 * all merge points targeting this instruction.
2109 */
2110 ret = validate_instruction_all_contexts(bytecode, merge_points,
2111 &stack, start_pc, pc);
2112 if (ret)
2113 goto end;
/* exec_insn: >0 continue, 0 terminating return reached, <0 error. */
2114 ret = exec_insn(bytecode, merge_points, &stack, &next_pc, pc);
2115 if (ret <= 0)
2116 goto end;
2117 }
2118 end:
/* Every recorded merge point must have been consumed by the pass;
 * leftovers indicate a jump target that was never validated. */
2119 if (delete_all_nodes(merge_points)) {
2120 if (!ret) {
2121 ERR("Unexpected merge points\n");
2122 ret = -EINVAL;
2123 }
2124 }
2125 if (cds_lfht_destroy(merge_points, NULL)) {
2126 ERR("Error destroying hash table\n");
2127 }
2128 return ret;
2129 }
This page took 0.073207 seconds and 4 git commands to generate.