/*
 * lttng-filter-interpreter.c
 *
 * LTTng modules filter interpreter.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/uaccess.h>
#include <wrapper/frame.h>
#include <wrapper/types.h>
#include <linux/swab.h>

#include <lttng-filter.h>
#include <lttng-string-utils.h>

LTTNG_STACK_FRAME_NON_STANDARD(lttng_filter_interpret_bytecode);

/*
 * get_char should be called with page fault handler disabled if it is expected
 * to handle user-space read.
 */
static
char get_char(struct estack_entry *reg, size_t offset)
{
	if (unlikely(offset >= reg->u.s.seq_len))
		return '\0';
	if (reg->u.s.user) {
		char c;

		/* Handle invalid access as end of string. */
		if (unlikely(!access_ok(VERIFY_READ,
				reg->u.s.user_str + offset,
				sizeof(c))))
			return '\0';
		/* Handle fault (nonzero return value) as end of string. */
		if (unlikely(__copy_from_user_inatomic(&c,
				reg->u.s.user_str + offset,
				sizeof(c))))
			return '\0';
		return c;
	} else {
		return reg->u.s.str[offset];
	}
}

/*
 * -1: wildcard found.
 * -2: unknown escape char.
 * 0: normal char.
 */
static
int parse_char(struct estack_entry *reg, char *c, size_t *offset)
{
	switch (*c) {
	case '\\':
		(*offset)++;
		*c = get_char(reg, *offset);
		switch (*c) {
		case '\\':
		case '*':
			return 0;
		default:
			return -2;
		}
	case '*':
		return -1;
	default:
		return 0;
	}
}

static
char get_char_at_cb(size_t at, void *data)
{
	return get_char(data, at);
}

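/*
 * Star-glob match between the two strings at the top of the estack: the
 * register holding the ESTACK_STRING_LITERAL_TYPE_STAR_GLOB literal is
 * the pattern, the other one is the candidate. Returns 0 when the
 * candidate matches the pattern, nonzero otherwise, so the specialized
 * "==" and "!=" opcodes compare the result against 0. User-space
 * strings are read with page faults disabled.
 */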
static
int stack_star_glob_match(struct estack *stack, int top, const char *cmp_type)
{
	bool has_user = false;
	mm_segment_t old_fs;
	int result;
	struct estack_entry *pattern_reg;
	struct estack_entry *candidate_reg;

	if (estack_bx(stack, top)->u.s.user
			|| estack_ax(stack, top)->u.s.user) {
		has_user = true;
		old_fs = get_fs();
		set_fs(KERNEL_DS);
		pagefault_disable();
	}

	/* Find out which side is the pattern vs. the candidate. */
	if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) {
		pattern_reg = estack_ax(stack, top);
		candidate_reg = estack_bx(stack, top);
	} else {
		pattern_reg = estack_bx(stack, top);
		candidate_reg = estack_ax(stack, top);
	}

	/* Perform the match operation. */
	result = !strutils_star_glob_match_char_cb(get_char_at_cb,
		pattern_reg, get_char_at_cb, candidate_reg);
	if (has_user) {
		pagefault_enable();
		set_fs(old_fs);
	}

	return result;
}

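/*
 * Compare the two strings referenced by the top two estack entries,
 * with strcmp-like semantics: returns a negative value, 0, or a
 * positive value. Plain string literals (ESTACK_STRING_LITERAL_TYPE_PLAIN)
 * may contain "\\" escape sequences and a "*" wildcard, handled through
 * parse_char(); a wildcard makes the strings compare equal from that
 * point on. User-space strings are read with the page fault handler
 * disabled, treating faults as end of string.
 */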
static
int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
{
	size_t offset_bx = 0, offset_ax = 0;
	int diff, has_user = 0;
	mm_segment_t old_fs;

	if (estack_bx(stack, top)->u.s.user
			|| estack_ax(stack, top)->u.s.user) {
		has_user = 1;
		old_fs = get_fs();
		set_fs(KERNEL_DS);
		pagefault_disable();
	}

	for (;;) {
		int ret;
		int escaped_r0 = 0;
		char char_bx, char_ax;

		char_bx = get_char(estack_bx(stack, top), offset_bx);
		char_ax = get_char(estack_ax(stack, top), offset_ax);

		if (unlikely(char_bx == '\0')) {
			if (char_ax == '\0') {
				diff = 0;
				break;
			} else {
				if (estack_ax(stack, top)->u.s.literal_type ==
						ESTACK_STRING_LITERAL_TYPE_PLAIN) {
					ret = parse_char(estack_ax(stack, top),
						&char_ax, &offset_ax);
					if (ret == -1) {
						diff = 0;
						break;
					}
				}
				diff = -1;
				break;
			}
		}
		if (unlikely(char_ax == '\0')) {
			if (estack_bx(stack, top)->u.s.literal_type ==
					ESTACK_STRING_LITERAL_TYPE_PLAIN) {
				ret = parse_char(estack_bx(stack, top),
					&char_bx, &offset_bx);
				if (ret == -1) {
					diff = 0;
					break;
				}
			}
			diff = 1;
			break;
		}
		if (estack_bx(stack, top)->u.s.literal_type ==
				ESTACK_STRING_LITERAL_TYPE_PLAIN) {
			ret = parse_char(estack_bx(stack, top),
				&char_bx, &offset_bx);
			if (ret == -1) {
				diff = 0;
				break;
			} else if (ret == -2) {
				escaped_r0 = 1;
			}
			/* else compare both char */
		}
		if (estack_ax(stack, top)->u.s.literal_type ==
				ESTACK_STRING_LITERAL_TYPE_PLAIN) {
			ret = parse_char(estack_ax(stack, top),
				&char_ax, &offset_ax);
			if (ret == -1) {
				diff = 0;
				break;
			} else if (ret == -2) {
				if (!escaped_r0) {
					diff = -1;
					break;
				}
			} else {
				if (escaped_r0) {
					diff = 1;
					break;
				}
			}
		} else {
			if (escaped_r0) {
				diff = 1;
				break;
			}
		}
		diff = char_bx - char_ax;
		if (diff != 0)
			break;
		offset_bx++;
		offset_ax++;
	}
	if (has_user) {
		pagefault_enable();
		set_fs(old_fs);
	}
	return diff;
}

uint64_t lttng_filter_false(void *filter_data,
		struct lttng_probe_ctx *lttng_probe_ctx,
		const char *filter_stack_data)
{
	return 0;
}

#ifdef INTERPRETER_USE_SWITCH

/*
 * Fallback for compilers that do not support taking address of labels.
 */

#define START_OP							\
	start_pc = &bytecode->data[0];					\
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;	\
			pc = next_pc) {					\
		dbg_printk("Executing op %s (%u)\n",			\
			lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
			(unsigned int) *(filter_opcode_t *) pc);	\
		switch (*(filter_opcode_t *) pc) {

#define OP(name)	case name

#define PO		break

#define END_OP		}						\
	}

#else

/*
 * Dispatch-table based interpreter.
 */

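/*
 * Each OP(name) expands to a LABEL_<name> computed-goto label, and PO
 * (end of instruction) fetches the next opcode and jumps straight to
 * its label through the dispatch[] table, avoiding a central switch.
 */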
#define START_OP						\
	start_pc = &bytecode->code[0];				\
	pc = next_pc = start_pc;				\
	if (unlikely(pc - start_pc >= bytecode->len))		\
		goto end;					\
	goto *dispatch[*(filter_opcode_t *) pc];

#define OP(name)						\
LABEL_##name

#define PO							\
		pc = next_pc;					\
		goto *dispatch[*(filter_opcode_t *) pc];

#define END_OP

#endif

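/*
 * Load the value of the static context field at index idx into the
 * load_ptr, mapping the field type to an OBJECT_TYPE_* object. Only
 * integer, enumeration and string-like context fields are supported.
 */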
static int context_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
		struct load_ptr *ptr,
		uint32_t idx)
{

	struct lttng_ctx_field *ctx_field;
	struct lttng_event_field *field;
	union lttng_ctx_value v;

	ctx_field = &lttng_static_ctx->fields[idx];
	field = &ctx_field->event_field;
	ptr->type = LOAD_OBJECT;
	/* field is only used for types nested within variants. */
	ptr->field = NULL;

	switch (field->type.atype) {
	case atype_integer:
		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		if (field->type.u.basic.integer.signedness) {
			ptr->object_type = OBJECT_TYPE_S64;
			ptr->u.s64 = v.s64;
			ptr->ptr = &ptr->u.s64;
		} else {
			ptr->object_type = OBJECT_TYPE_U64;
			ptr->u.u64 = v.s64;	/* Cast. */
			ptr->ptr = &ptr->u.u64;
		}
		break;
	case atype_enum:
	{
		const struct lttng_integer_type *itype =
			&field->type.u.basic.enumeration.container_type;

		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		if (itype->signedness) {
			ptr->object_type = OBJECT_TYPE_S64;
			ptr->u.s64 = v.s64;
			ptr->ptr = &ptr->u.s64;
		} else {
			ptr->object_type = OBJECT_TYPE_U64;
			ptr->u.u64 = v.s64;	/* Cast. */
			ptr->ptr = &ptr->u.u64;
		}
		break;
	}
	case atype_array:
		if (field->type.u.array.elem_type.atype != atype_integer) {
			printk(KERN_WARNING "Array nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
			printk(KERN_WARNING "Only string arrays are supported for contexts.\n");
			return -EINVAL;
		}
		ptr->object_type = OBJECT_TYPE_STRING;
		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		ptr->ptr = v.str;
		break;
	case atype_sequence:
		if (field->type.u.sequence.elem_type.atype != atype_integer) {
			printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
			return -EINVAL;
		}
		if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
			printk(KERN_WARNING "Only string sequences are supported for contexts.\n");
			return -EINVAL;
		}
		ptr->object_type = OBJECT_TYPE_STRING;
		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		ptr->ptr = v.str;
		break;
	case atype_array_bitfield:
		printk(KERN_WARNING "Bitfield array type is not supported.\n");
		return -EINVAL;
	case atype_sequence_bitfield:
		printk(KERN_WARNING "Bitfield sequence type is not supported.\n");
		return -EINVAL;
	case atype_string:
		ptr->object_type = OBJECT_TYPE_STRING;
		ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
		ptr->ptr = v.str;
		break;
	case atype_struct:
		printk(KERN_WARNING "Structure type cannot be loaded.\n");
		return -EINVAL;
	default:
		printk(KERN_WARNING "Unknown type: %d", (int) field->type.atype);
		return -EINVAL;
	}
	return 0;
}

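/*
 * Apply a GET_INDEX instruction to the object currently on top of the
 * stack: index into an array or sequence element, look up a context
 * field by index, or offset into the event payload, updating the
 * load_ptr accordingly.
 */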
static int dynamic_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
		struct bytecode_runtime *runtime,
		uint64_t index, struct estack_entry *stack_top)
{
	int ret;
	const struct filter_get_index_data *gid;

	/*
	 * Types nested within variants need to perform dynamic lookup
	 * based on the field descriptions. LTTng-UST does not implement
	 * variants for now.
	 */
	if (stack_top->u.ptr.field)
		return -EINVAL;
	gid = (const struct filter_get_index_data *) &runtime->data[index];
	switch (stack_top->u.ptr.type) {
	case LOAD_OBJECT:
		switch (stack_top->u.ptr.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const char *ptr;

			WARN_ON_ONCE(gid->offset >= gid->array_len);
			/* Skip count (unsigned long) */
			ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
			ptr = ptr + gid->offset;
			stack_top->u.ptr.ptr = ptr;
			stack_top->u.ptr.object_type = gid->elem.type;
			stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
			/* field is only used for types nested within variants. */
			stack_top->u.ptr.field = NULL;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const char *ptr;
			size_t ptr_seq_len;

			ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
			ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
			if (gid->offset >= gid->elem.len * ptr_seq_len) {
				ret = -EINVAL;
				goto end;
			}
			ptr = ptr + gid->offset;
			stack_top->u.ptr.ptr = ptr;
			stack_top->u.ptr.object_type = gid->elem.type;
			stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
			/* field is only used for types nested within variants. */
			stack_top->u.ptr.field = NULL;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			printk(KERN_WARNING "Nested structures are not supported yet.\n");
			ret = -EINVAL;
			goto end;
		case OBJECT_TYPE_VARIANT:
		default:
			printk(KERN_WARNING "Unexpected get index type %d",
				(int) stack_top->u.ptr.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:	/* Fall-through */
	{
		ret = context_get_index(lttng_probe_ctx,
				&stack_top->u.ptr,
				gid->ctx_index);
		if (ret) {
			goto end;
		}
		break;
	}
	case LOAD_ROOT_PAYLOAD:
		stack_top->u.ptr.ptr += gid->offset;
		if (gid->elem.type == OBJECT_TYPE_STRING)
			stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
		stack_top->u.ptr.object_type = gid->elem.type;
		stack_top->u.ptr.type = LOAD_OBJECT;
		/* field is only used for types nested within variants. */
		stack_top->u.ptr.field = NULL;
		break;
	}
	return 0;

end:
	return ret;
}

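/*
 * Dereference the object referenced by the top-of-stack load_ptr and
 * turn it into either an integer value (u.v) or a string/sequence
 * register (u.s), byte-swapping integers when rev_bo is set.
 */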
static int dynamic_load_field(struct estack_entry *stack_top)
{
	int ret;

	switch (stack_top->u.ptr.type) {
	case LOAD_OBJECT:
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:
	case LOAD_ROOT_PAYLOAD:
	default:
		dbg_printk("Filter warning: cannot load root, missing field name.\n");
		ret = -EINVAL;
		goto end;
	}
	switch (stack_top->u.ptr.object_type) {
	case OBJECT_TYPE_S8:
		dbg_printk("op load field s8\n");
		stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
		break;
	case OBJECT_TYPE_S16:
	{
		int16_t tmp;

		dbg_printk("op load field s16\n");
		tmp = *(int16_t *) stack_top->u.ptr.ptr;
		if (stack_top->u.ptr.rev_bo)
			__swab16s(&tmp);
		stack_top->u.v = tmp;
		break;
	}
	case OBJECT_TYPE_S32:
	{
		int32_t tmp;

		dbg_printk("op load field s32\n");
		tmp = *(int32_t *) stack_top->u.ptr.ptr;
		if (stack_top->u.ptr.rev_bo)
			__swab32s(&tmp);
		stack_top->u.v = tmp;
		break;
	}
	case OBJECT_TYPE_S64:
	{
		int64_t tmp;

		dbg_printk("op load field s64\n");
		tmp = *(int64_t *) stack_top->u.ptr.ptr;
		if (stack_top->u.ptr.rev_bo)
			__swab64s(&tmp);
		stack_top->u.v = tmp;
		break;
	}
	case OBJECT_TYPE_U8:
		dbg_printk("op load field u8\n");
		stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
		break;
	case OBJECT_TYPE_U16:
	{
		uint16_t tmp;

		dbg_printk("op load field u16\n");
		tmp = *(uint16_t *) stack_top->u.ptr.ptr;
		if (stack_top->u.ptr.rev_bo)
			__swab16s(&tmp);
		stack_top->u.v = tmp;
		break;
	}
	case OBJECT_TYPE_U32:
	{
		uint32_t tmp;

		dbg_printk("op load field u32\n");
		tmp = *(uint32_t *) stack_top->u.ptr.ptr;
		if (stack_top->u.ptr.rev_bo)
			__swab32s(&tmp);
		stack_top->u.v = tmp;
		break;
	}
	case OBJECT_TYPE_U64:
	{
		uint64_t tmp;

		dbg_printk("op load field u64\n");
		tmp = *(uint64_t *) stack_top->u.ptr.ptr;
		if (stack_top->u.ptr.rev_bo)
			__swab64s(&tmp);
		stack_top->u.v = tmp;
		break;
	}
	case OBJECT_TYPE_STRING:
	{
		const char *str;

		dbg_printk("op load field string\n");
		str = (const char *) stack_top->u.ptr.ptr;
		stack_top->u.s.str = str;
		if (unlikely(!stack_top->u.s.str)) {
			dbg_printk("Filter warning: loading a NULL string.\n");
			ret = -EINVAL;
			goto end;
		}
		stack_top->u.s.seq_len = SIZE_MAX;
		stack_top->u.s.literal_type =
			ESTACK_STRING_LITERAL_TYPE_NONE;
		break;
	}
	case OBJECT_TYPE_STRING_SEQUENCE:
	{
		const char *ptr;

		dbg_printk("op load field string sequence\n");
		ptr = stack_top->u.ptr.ptr;
		stack_top->u.s.seq_len = *(unsigned long *) ptr;
		stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
		if (unlikely(!stack_top->u.s.str)) {
			dbg_printk("Filter warning: loading a NULL sequence.\n");
			ret = -EINVAL;
			goto end;
		}
		stack_top->u.s.literal_type =
			ESTACK_STRING_LITERAL_TYPE_NONE;
		break;
	}
	case OBJECT_TYPE_DYNAMIC:
		/*
		 * Dynamic types in context are looked up
		 * by context get index.
		 */
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_DOUBLE:
		ret = -EINVAL;
		goto end;
	case OBJECT_TYPE_SEQUENCE:
	case OBJECT_TYPE_ARRAY:
	case OBJECT_TYPE_STRUCT:
	case OBJECT_TYPE_VARIANT:
		printk(KERN_WARNING "Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
		ret = -EINVAL;
		goto end;
	}
	return 0;

end:
	return ret;
}

/*
 * Return 0 (discard), or raise the 0x1 flag (log event).
 * Currently, other flags are kept for future extensions and have no
 * effect.
 */
uint64_t lttng_filter_interpret_bytecode(void *filter_data,
		struct lttng_probe_ctx *lttng_probe_ctx,
		const char *filter_stack_data)
{
	struct bytecode_runtime *bytecode = filter_data;
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	uint64_t retval = 0;
	struct estack _stack;
	struct estack *stack = &_stack;
	register int64_t ax = 0, bx = 0;
	register int top = FILTER_STACK_EMPTY;
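	/*
	 * ax and bx shadow the two top-of-stack integer values so the
	 * specialized s64 comparison opcodes can operate on registers;
	 * the estack_push()/estack_pop() helpers (see lttng-filter.h)
	 * keep them in sync with the estack entries.
	 */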
#ifndef INTERPRETER_USE_SWITCH
	static void *dispatch[NR_FILTER_OPS] = {
		[ FILTER_OP_UNKNOWN ] = &&LABEL_FILTER_OP_UNKNOWN,

		[ FILTER_OP_RETURN ] = &&LABEL_FILTER_OP_RETURN,

		/* binary */
		[ FILTER_OP_MUL ] = &&LABEL_FILTER_OP_MUL,
		[ FILTER_OP_DIV ] = &&LABEL_FILTER_OP_DIV,
		[ FILTER_OP_MOD ] = &&LABEL_FILTER_OP_MOD,
		[ FILTER_OP_PLUS ] = &&LABEL_FILTER_OP_PLUS,
		[ FILTER_OP_MINUS ] = &&LABEL_FILTER_OP_MINUS,
		[ FILTER_OP_RSHIFT ] = &&LABEL_FILTER_OP_RSHIFT,
		[ FILTER_OP_LSHIFT ] = &&LABEL_FILTER_OP_LSHIFT,
		[ FILTER_OP_BIT_AND ] = &&LABEL_FILTER_OP_BIT_AND,
		[ FILTER_OP_BIT_OR ] = &&LABEL_FILTER_OP_BIT_OR,
		[ FILTER_OP_BIT_XOR ] = &&LABEL_FILTER_OP_BIT_XOR,

		/* binary comparators */
		[ FILTER_OP_EQ ] = &&LABEL_FILTER_OP_EQ,
		[ FILTER_OP_NE ] = &&LABEL_FILTER_OP_NE,
		[ FILTER_OP_GT ] = &&LABEL_FILTER_OP_GT,
		[ FILTER_OP_LT ] = &&LABEL_FILTER_OP_LT,
		[ FILTER_OP_GE ] = &&LABEL_FILTER_OP_GE,
		[ FILTER_OP_LE ] = &&LABEL_FILTER_OP_LE,

		/* string binary comparator */
		[ FILTER_OP_EQ_STRING ] = &&LABEL_FILTER_OP_EQ_STRING,
		[ FILTER_OP_NE_STRING ] = &&LABEL_FILTER_OP_NE_STRING,
		[ FILTER_OP_GT_STRING ] = &&LABEL_FILTER_OP_GT_STRING,
		[ FILTER_OP_LT_STRING ] = &&LABEL_FILTER_OP_LT_STRING,
		[ FILTER_OP_GE_STRING ] = &&LABEL_FILTER_OP_GE_STRING,
		[ FILTER_OP_LE_STRING ] = &&LABEL_FILTER_OP_LE_STRING,

		/* globbing pattern binary comparator */
		[ FILTER_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING,
		[ FILTER_OP_NE_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING,

		/* s64 binary comparator */
		[ FILTER_OP_EQ_S64 ] = &&LABEL_FILTER_OP_EQ_S64,
		[ FILTER_OP_NE_S64 ] = &&LABEL_FILTER_OP_NE_S64,
		[ FILTER_OP_GT_S64 ] = &&LABEL_FILTER_OP_GT_S64,
		[ FILTER_OP_LT_S64 ] = &&LABEL_FILTER_OP_LT_S64,
		[ FILTER_OP_GE_S64 ] = &&LABEL_FILTER_OP_GE_S64,
		[ FILTER_OP_LE_S64 ] = &&LABEL_FILTER_OP_LE_S64,

		/* double binary comparator */
		[ FILTER_OP_EQ_DOUBLE ] = &&LABEL_FILTER_OP_EQ_DOUBLE,
		[ FILTER_OP_NE_DOUBLE ] = &&LABEL_FILTER_OP_NE_DOUBLE,
		[ FILTER_OP_GT_DOUBLE ] = &&LABEL_FILTER_OP_GT_DOUBLE,
		[ FILTER_OP_LT_DOUBLE ] = &&LABEL_FILTER_OP_LT_DOUBLE,
		[ FILTER_OP_GE_DOUBLE ] = &&LABEL_FILTER_OP_GE_DOUBLE,
		[ FILTER_OP_LE_DOUBLE ] = &&LABEL_FILTER_OP_LE_DOUBLE,

		/* Mixed S64-double binary comparators */
		[ FILTER_OP_EQ_DOUBLE_S64 ] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64,
		[ FILTER_OP_NE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_NE_DOUBLE_S64,
		[ FILTER_OP_GT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GT_DOUBLE_S64,
		[ FILTER_OP_LT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LT_DOUBLE_S64,
		[ FILTER_OP_GE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GE_DOUBLE_S64,
		[ FILTER_OP_LE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LE_DOUBLE_S64,

		[ FILTER_OP_EQ_S64_DOUBLE ] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE,
		[ FILTER_OP_NE_S64_DOUBLE ] = &&LABEL_FILTER_OP_NE_S64_DOUBLE,
		[ FILTER_OP_GT_S64_DOUBLE ] = &&LABEL_FILTER_OP_GT_S64_DOUBLE,
		[ FILTER_OP_LT_S64_DOUBLE ] = &&LABEL_FILTER_OP_LT_S64_DOUBLE,
		[ FILTER_OP_GE_S64_DOUBLE ] = &&LABEL_FILTER_OP_GE_S64_DOUBLE,
		[ FILTER_OP_LE_S64_DOUBLE ] = &&LABEL_FILTER_OP_LE_S64_DOUBLE,

		/* unary */
		[ FILTER_OP_UNARY_PLUS ] = &&LABEL_FILTER_OP_UNARY_PLUS,
		[ FILTER_OP_UNARY_MINUS ] = &&LABEL_FILTER_OP_UNARY_MINUS,
		[ FILTER_OP_UNARY_NOT ] = &&LABEL_FILTER_OP_UNARY_NOT,
		[ FILTER_OP_UNARY_PLUS_S64 ] = &&LABEL_FILTER_OP_UNARY_PLUS_S64,
		[ FILTER_OP_UNARY_MINUS_S64 ] = &&LABEL_FILTER_OP_UNARY_MINUS_S64,
		[ FILTER_OP_UNARY_NOT_S64 ] = &&LABEL_FILTER_OP_UNARY_NOT_S64,
		[ FILTER_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE,
		[ FILTER_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE,
		[ FILTER_OP_UNARY_NOT_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE,

		/* logical */
		[ FILTER_OP_AND ] = &&LABEL_FILTER_OP_AND,
		[ FILTER_OP_OR ] = &&LABEL_FILTER_OP_OR,

		/* load field ref */
		[ FILTER_OP_LOAD_FIELD_REF ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF,
		[ FILTER_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING,
		[ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE,
		[ FILTER_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64,
		[ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE,

		/* load from immediate operand */
		[ FILTER_OP_LOAD_STRING ] = &&LABEL_FILTER_OP_LOAD_STRING,
		[ FILTER_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING,
		[ FILTER_OP_LOAD_S64 ] = &&LABEL_FILTER_OP_LOAD_S64,
		[ FILTER_OP_LOAD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_DOUBLE,

		/* cast */
		[ FILTER_OP_CAST_TO_S64 ] = &&LABEL_FILTER_OP_CAST_TO_S64,
		[ FILTER_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64,
		[ FILTER_OP_CAST_NOP ] = &&LABEL_FILTER_OP_CAST_NOP,

		/* get context ref */
		[ FILTER_OP_GET_CONTEXT_REF ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF,
		[ FILTER_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING,
		[ FILTER_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64,
		[ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE,

		/* load userspace field ref */
		[ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING,
		[ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE,

		/* Instructions for recursive traversal through composed types. */
		[ FILTER_OP_GET_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT,
		[ FILTER_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT,
		[ FILTER_OP_GET_PAYLOAD_ROOT ] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT,

		[ FILTER_OP_GET_SYMBOL ] = &&LABEL_FILTER_OP_GET_SYMBOL,
		[ FILTER_OP_GET_SYMBOL_FIELD ] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD,
		[ FILTER_OP_GET_INDEX_U16 ] = &&LABEL_FILTER_OP_GET_INDEX_U16,
		[ FILTER_OP_GET_INDEX_U64 ] = &&LABEL_FILTER_OP_GET_INDEX_U64,

		[ FILTER_OP_LOAD_FIELD ] = &&LABEL_FILTER_OP_LOAD_FIELD,
		[ FILTER_OP_LOAD_FIELD_S8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S8,
		[ FILTER_OP_LOAD_FIELD_S16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S16,
		[ FILTER_OP_LOAD_FIELD_S32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S32,
		[ FILTER_OP_LOAD_FIELD_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S64,
		[ FILTER_OP_LOAD_FIELD_U8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U8,
		[ FILTER_OP_LOAD_FIELD_U16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U16,
		[ FILTER_OP_LOAD_FIELD_U32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U32,
		[ FILTER_OP_LOAD_FIELD_U64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U64,
		[ FILTER_OP_LOAD_FIELD_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING,
		[ FILTER_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE,
		[ FILTER_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE,
	};
#endif /* #ifndef INTERPRETER_USE_SWITCH */

	START_OP

		OP(FILTER_OP_UNKNOWN):
		OP(FILTER_OP_LOAD_FIELD_REF):
		OP(FILTER_OP_GET_CONTEXT_REF):
#ifdef INTERPRETER_USE_SWITCH
		default:
#endif /* INTERPRETER_USE_SWITCH */
			printk(KERN_WARNING "unknown bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		OP(FILTER_OP_RETURN):
			/* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
			retval = !!estack_ax_v;
			ret = 0;
			goto end;

		/* binary */
		OP(FILTER_OP_MUL):
		OP(FILTER_OP_DIV):
		OP(FILTER_OP_MOD):
		OP(FILTER_OP_PLUS):
		OP(FILTER_OP_MINUS):
		OP(FILTER_OP_RSHIFT):
		OP(FILTER_OP_LSHIFT):
			printk(KERN_WARNING "unsupported bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		OP(FILTER_OP_EQ):
		OP(FILTER_OP_NE):
		OP(FILTER_OP_GT):
		OP(FILTER_OP_LT):
		OP(FILTER_OP_GE):
		OP(FILTER_OP_LE):
			printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

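		/*
		 * The specialized comparison opcodes below consume the two
		 * top-of-stack operands (bx <op> ax), pop one entry and
		 * leave the boolean result in ax.
		 */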
		OP(FILTER_OP_EQ_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, "==") == 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_NE_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, "!=") != 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_GT_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, ">") > 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_LT_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, "<") < 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_GE_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, ">=") >= 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_LE_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, "<=") <= 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}

		OP(FILTER_OP_EQ_STAR_GLOB_STRING):
		{
			int res;

			res = (stack_star_glob_match(stack, top, "==") == 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_NE_STAR_GLOB_STRING):
		{
			int res;

			res = (stack_star_glob_match(stack, top, "!=") != 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}

		OP(FILTER_OP_EQ_S64):
		{
			int res;

			res = (estack_bx_v == estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_NE_S64):
		{
			int res;

			res = (estack_bx_v != estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_GT_S64):
		{
			int res;

			res = (estack_bx_v > estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_LT_S64):
		{
			int res;

			res = (estack_bx_v < estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_GE_S64):
		{
			int res;

			res = (estack_bx_v >= estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_LE_S64):
		{
			int res;

			res = (estack_bx_v <= estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}

		OP(FILTER_OP_EQ_DOUBLE):
		OP(FILTER_OP_NE_DOUBLE):
		OP(FILTER_OP_GT_DOUBLE):
		OP(FILTER_OP_LT_DOUBLE):
		OP(FILTER_OP_GE_DOUBLE):
		OP(FILTER_OP_LE_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* Mixed S64-double binary comparators */
		OP(FILTER_OP_EQ_DOUBLE_S64):
		OP(FILTER_OP_NE_DOUBLE_S64):
		OP(FILTER_OP_GT_DOUBLE_S64):
		OP(FILTER_OP_LT_DOUBLE_S64):
		OP(FILTER_OP_GE_DOUBLE_S64):
		OP(FILTER_OP_LE_DOUBLE_S64):
		OP(FILTER_OP_EQ_S64_DOUBLE):
		OP(FILTER_OP_NE_S64_DOUBLE):
		OP(FILTER_OP_GT_S64_DOUBLE):
		OP(FILTER_OP_LT_S64_DOUBLE):
		OP(FILTER_OP_GE_S64_DOUBLE):
		OP(FILTER_OP_LE_S64_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}
		OP(FILTER_OP_BIT_AND):
		{
			int64_t res;

			res = (estack_bx_v & estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_BIT_OR):
		{
			int64_t res;

			res = (estack_bx_v | estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_BIT_XOR):
		{
			int64_t res;

			res = (estack_bx_v ^ estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}

		/* unary */
		OP(FILTER_OP_UNARY_PLUS):
		OP(FILTER_OP_UNARY_MINUS):
		OP(FILTER_OP_UNARY_NOT):
			printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;


		OP(FILTER_OP_UNARY_PLUS_S64):
		{
			next_pc += sizeof(struct unary_op);
			PO;
		}
		OP(FILTER_OP_UNARY_MINUS_S64):
		{
			estack_ax_v = -estack_ax_v;
			next_pc += sizeof(struct unary_op);
			PO;
		}
		OP(FILTER_OP_UNARY_PLUS_DOUBLE):
		OP(FILTER_OP_UNARY_MINUS_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}
		OP(FILTER_OP_UNARY_NOT_S64):
		{
			estack_ax_v = !estack_ax_v;
			next_pc += sizeof(struct unary_op);
			PO;
		}
		OP(FILTER_OP_UNARY_NOT_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* logical */
		OP(FILTER_OP_AND):
		{
			struct logical_op *insn = (struct logical_op *) pc;

			/* If AX is 0, skip and evaluate to 0 */
			if (unlikely(estack_ax_v == 0)) {
				dbg_printk("Jumping to bytecode offset %u\n",
					(unsigned int) insn->skip_offset);
				next_pc = start_pc + insn->skip_offset;
			} else {
				/* Pop 1 when jump not taken */
				estack_pop(stack, top, ax, bx);
				next_pc += sizeof(struct logical_op);
			}
			PO;
		}
		OP(FILTER_OP_OR):
		{
			struct logical_op *insn = (struct logical_op *) pc;

			/* If AX is nonzero, skip and evaluate to 1 */

			if (unlikely(estack_ax_v != 0)) {
				estack_ax_v = 1;
				dbg_printk("Jumping to bytecode offset %u\n",
					(unsigned int) insn->skip_offset);
				next_pc = start_pc + insn->skip_offset;
			} else {
				/* Pop 1 when jump not taken */
				estack_pop(stack, top, ax, bx);
				next_pc += sizeof(struct logical_op);
			}
			PO;
		}


		/* load field ref */
		OP(FILTER_OP_LOAD_FIELD_REF_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type string\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.str =
				*(const char * const *) &filter_stack_data[ref->offset];
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL string.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
			estack_ax(stack, top)->u.s.literal_type =
				ESTACK_STRING_LITERAL_TYPE_NONE;
			estack_ax(stack, top)->u.s.user = 0;
			dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type sequence\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.seq_len =
				*(unsigned long *) &filter_stack_data[ref->offset];
			estack_ax(stack, top)->u.s.str =
				*(const char **) (&filter_stack_data[ref->offset
							+ sizeof(unsigned long)]);
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL sequence.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.literal_type =
				ESTACK_STRING_LITERAL_TYPE_NONE;
			estack_ax(stack, top)->u.s.user = 0;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_REF_S64):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type s64\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax_v =
				((struct literal_numeric *) &filter_stack_data[ref->offset])->v;
			dbg_printk("ref load s64 %lld\n",
				(long long) estack_ax_v);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* load from immediate operand */
		OP(FILTER_OP_LOAD_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printk("load string %s\n", insn->data);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.str = insn->data;
			estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
			estack_ax(stack, top)->u.s.literal_type =
				ESTACK_STRING_LITERAL_TYPE_PLAIN;
			estack_ax(stack, top)->u.s.user = 0;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			PO;
		}

		OP(FILTER_OP_LOAD_STAR_GLOB_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printk("load globbing pattern %s\n", insn->data);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.str = insn->data;
			estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
			estack_ax(stack, top)->u.s.literal_type =
				ESTACK_STRING_LITERAL_TYPE_STAR_GLOB;
			estack_ax(stack, top)->u.s.user = 0;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			PO;
		}

		OP(FILTER_OP_LOAD_S64):
		{
			struct load_op *insn = (struct load_op *) pc;

			estack_push(stack, top, ax, bx);
			estack_ax_v = ((struct literal_numeric *) insn->data)->v;
			dbg_printk("load s64 %lld\n",
				(long long) estack_ax_v);
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_numeric);
			PO;
		}

		OP(FILTER_OP_LOAD_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* cast */
		OP(FILTER_OP_CAST_TO_S64):
			printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		OP(FILTER_OP_CAST_DOUBLE_TO_S64):
		{
			BUG_ON(1);
			PO;
		}

		OP(FILTER_OP_CAST_NOP):
		{
			next_pc += sizeof(struct cast_op);
			PO;
		}

		/* get context ref */
		OP(FILTER_OP_GET_CONTEXT_REF_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;
			struct lttng_ctx_field *ctx_field;
			union lttng_ctx_value v;

			dbg_printk("get context ref offset %u type string\n",
				ref->offset);
			ctx_field = &lttng_static_ctx->fields[ref->offset];
			ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.str = v.str;
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL string.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
			estack_ax(stack, top)->u.s.literal_type =
				ESTACK_STRING_LITERAL_TYPE_NONE;
			estack_ax(stack, top)->u.s.user = 0;
			dbg_printk("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_GET_CONTEXT_REF_S64):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;
			struct lttng_ctx_field *ctx_field;
			union lttng_ctx_value v;

			dbg_printk("get context ref offset %u type s64\n",
				ref->offset);
			ctx_field = &lttng_static_ctx->fields[ref->offset];
			ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
			estack_push(stack, top, ax, bx);
			estack_ax_v = v.s64;
			dbg_printk("ref get context s64 %lld\n",
				(long long) estack_ax_v);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* load userspace field ref */
		OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type user string\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.user_str =
				*(const char * const *) &filter_stack_data[ref->offset];
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL string.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
			estack_ax(stack, top)->u.s.literal_type =
				ESTACK_STRING_LITERAL_TYPE_NONE;
			estack_ax(stack, top)->u.s.user = 1;
			dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type user sequence\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.seq_len =
				*(unsigned long *) &filter_stack_data[ref->offset];
			estack_ax(stack, top)->u.s.user_str =
				*(const char **) (&filter_stack_data[ref->offset
							+ sizeof(unsigned long)]);
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL sequence.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.literal_type =
				ESTACK_STRING_LITERAL_TYPE_NONE;
			estack_ax(stack, top)->u.s.user = 1;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

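		/*
		 * The object-traversal opcodes below are used in sequence:
		 * a *_ROOT op pushes a load_ptr for the context or payload,
		 * GET_INDEX_* ops walk down into the selected field, and
		 * LOAD_FIELD* ops finally dereference it into ax.
		 */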
		OP(FILTER_OP_GET_CONTEXT_ROOT):
		{
			dbg_printk("op get context root\n");
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
			/* "field" only needed for variants. */
			estack_ax(stack, top)->u.ptr.field = NULL;
			next_pc += sizeof(struct load_op);
			PO;
		}

		OP(FILTER_OP_GET_APP_CONTEXT_ROOT):
		{
			BUG_ON(1);
			PO;
		}

		OP(FILTER_OP_GET_PAYLOAD_ROOT):
		{
			dbg_printk("op get app payload root\n");
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
			estack_ax(stack, top)->u.ptr.ptr = filter_stack_data;
			/* "field" only needed for variants. */
			estack_ax(stack, top)->u.ptr.field = NULL;
			next_pc += sizeof(struct load_op);
			PO;
		}

		OP(FILTER_OP_GET_SYMBOL):
		{
			dbg_printk("op get symbol\n");
			switch (estack_ax(stack, top)->u.ptr.type) {
			case LOAD_OBJECT:
				printk(KERN_WARNING "Nested fields not implemented yet.\n");
				ret = -EINVAL;
				goto end;
			case LOAD_ROOT_CONTEXT:
			case LOAD_ROOT_APP_CONTEXT:
			case LOAD_ROOT_PAYLOAD:
				/*
				 * symbol lookup is performed by
				 * specialization.
				 */
				ret = -EINVAL;
				goto end;
			}
			next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
			PO;
		}

		OP(FILTER_OP_GET_SYMBOL_FIELD):
		{
			/*
			 * Used for first variant encountered in a
			 * traversal. Variants are not implemented yet.
			 */
			ret = -EINVAL;
			goto end;
		}

		OP(FILTER_OP_GET_INDEX_U16):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u16 *index = (struct get_index_u16 *) insn->data;

			dbg_printk("op get index u16\n");
			ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
			if (ret)
				goto end;
			estack_ax_v = estack_ax(stack, top)->u.v;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
			PO;
		}

		OP(FILTER_OP_GET_INDEX_U64):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct get_index_u64 *index = (struct get_index_u64 *) insn->data;

			dbg_printk("op get index u64\n");
			ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
			if (ret)
				goto end;
			estack_ax_v = estack_ax(stack, top)->u.v;
			next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD):
		{
			dbg_printk("op load field\n");
			ret = dynamic_load_field(estack_ax(stack, top));
			if (ret)
				goto end;
			estack_ax_v = estack_ax(stack, top)->u.v;
			next_pc += sizeof(struct load_op);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_S8):
		{
			dbg_printk("op load field s8\n");

			estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
			next_pc += sizeof(struct load_op);
			PO;
		}
		OP(FILTER_OP_LOAD_FIELD_S16):
		{
			dbg_printk("op load field s16\n");

			estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
			next_pc += sizeof(struct load_op);
			PO;
		}
		OP(FILTER_OP_LOAD_FIELD_S32):
		{
			dbg_printk("op load field s32\n");

			estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
			next_pc += sizeof(struct load_op);
			PO;
		}
		OP(FILTER_OP_LOAD_FIELD_S64):
		{
			dbg_printk("op load field s64\n");

			estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
			next_pc += sizeof(struct load_op);
			PO;
		}
		OP(FILTER_OP_LOAD_FIELD_U8):
		{
			dbg_printk("op load field u8\n");

			estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
			next_pc += sizeof(struct load_op);
			PO;
		}
		OP(FILTER_OP_LOAD_FIELD_U16):
		{
			dbg_printk("op load field u16\n");

			estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
			next_pc += sizeof(struct load_op);
			PO;
		}
		OP(FILTER_OP_LOAD_FIELD_U32):
		{
			dbg_printk("op load field u32\n");

			estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
			next_pc += sizeof(struct load_op);
			PO;
		}
		OP(FILTER_OP_LOAD_FIELD_U64):
		{
			dbg_printk("op load field u64\n");

			estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
			next_pc += sizeof(struct load_op);
			PO;
		}
		OP(FILTER_OP_LOAD_FIELD_DOUBLE):
		{
			ret = -EINVAL;
			goto end;
		}

		OP(FILTER_OP_LOAD_FIELD_STRING):
		{
			const char *str;

			dbg_printk("op load field string\n");
			str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
			estack_ax(stack, top)->u.s.str = str;
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL string.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.seq_len = SIZE_MAX;
			estack_ax(stack, top)->u.s.literal_type =
				ESTACK_STRING_LITERAL_TYPE_NONE;
			next_pc += sizeof(struct load_op);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_SEQUENCE):
		{
			const char *ptr;

			dbg_printk("op load field string sequence\n");
			ptr = estack_ax(stack, top)->u.ptr.ptr;
			estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
			estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL sequence.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.literal_type =
				ESTACK_STRING_LITERAL_TYPE_NONE;
			next_pc += sizeof(struct load_op);
			PO;
		}

	END_OP
end:
	/* return 0 (discard) on error */
	if (ret)
		return 0;
	return retval;
}

#undef START_OP
#undef OP
#undef PO
#undef END_OP