1 /* SPDX-License-Identifier: MIT
2 *
3 * lttng-filter-interpreter.c
4 *
5 * LTTng modules filter interpreter.
6 *
7 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 #include <linux/uaccess.h>
11 #include <wrapper/frame.h>
12 #include <wrapper/types.h>
13 #include <linux/swab.h>
14
15 #include <lttng-filter.h>
16 #include <lttng-string-utils.h>
17
18 LTTNG_STACK_FRAME_NON_STANDARD(lttng_filter_interpret_bytecode);
19
20 /*
21 * get_char should be called with the page fault handler disabled if it is
22 * expected to perform a user-space read.
23 */
24 static
25 char get_char(struct estack_entry *reg, size_t offset)
26 {
27 if (unlikely(offset >= reg->u.s.seq_len))
28 return '\0';
29 if (reg->u.s.user) {
30 char c;
31
32 /* Handle invalid access as end of string. */
33 if (unlikely(!access_ok(VERIFY_READ,
34 reg->u.s.user_str + offset,
35 sizeof(c))))
36 return '\0';
37 /* Handle fault (nonzero return value) as end of string. */
38 if (unlikely(__copy_from_user_inatomic(&c,
39 reg->u.s.user_str + offset,
40 sizeof(c))))
41 return '\0';
42 return c;
43 } else {
44 return reg->u.s.str[offset];
45 }
46 }
47
48 /*
49 * -1: wildcard found.
50 * -2: unknown escape char.
51 * 0: normal char.
52 */
53 static
54 int parse_char(struct estack_entry *reg, char *c, size_t *offset)
55 {
56 switch (*c) {
57 case '\\':
58 (*offset)++;
59 *c = get_char(reg, *offset);
60 switch (*c) {
61 case '\\':
62 case '*':
63 return 0;
64 default:
65 return -2;
66 }
67 case '*':
68 return -1;
69 default:
70 return 0;
71 }
72 }
73
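/*
 * Adapter callback so strutils_star_glob_match_char_cb() can fetch pattern
 * and candidate characters through get_char(), which also handles user-space
 * operands.
 */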
74 static
75 char get_char_at_cb(size_t at, void *data)
76 {
77 return get_char(data, at);
78 }
79
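/*
 * Compare the two strings on top of the stack, one of which is a star-glob
 * pattern literal. If either operand lives in user-space, page faults are
 * disabled for the duration of the match so a faulting access is seen as an
 * end of string by get_char() rather than sleeping in tracing context.
 * Returns 0 on match, nonzero otherwise.
 */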
80 static
81 int stack_star_glob_match(struct estack *stack, int top, const char *cmp_type)
82 {
83 bool has_user = false;
84 mm_segment_t old_fs;
85 int result;
86 struct estack_entry *pattern_reg;
87 struct estack_entry *candidate_reg;
88
89 if (estack_bx(stack, top)->u.s.user
90 || estack_ax(stack, top)->u.s.user) {
91 has_user = true;
92 old_fs = get_fs();
93 set_fs(KERNEL_DS);
94 pagefault_disable();
95 }
96
97 /* Find out which side is the pattern vs. the candidate. */
98 if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) {
99 pattern_reg = estack_ax(stack, top);
100 candidate_reg = estack_bx(stack, top);
101 } else {
102 pattern_reg = estack_bx(stack, top);
103 candidate_reg = estack_ax(stack, top);
104 }
105
106 /* Perform the match operation. */
107 result = !strutils_star_glob_match_char_cb(get_char_at_cb,
108 pattern_reg, get_char_at_cb, candidate_reg);
109 if (has_user) {
110 pagefault_enable();
111 set_fs(old_fs);
112 }
113
114 return result;
115 }
116
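/*
 * Lexicographical comparison of the two strings on top of the stack, with
 * strcmp()-like semantics: returns < 0, 0 or > 0. For plain string literals,
 * an unescaped '*' acts as a wildcard that terminates the comparison as a
 * match, and "\\" / "\*" escape sequences are compared literally.
 */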
117 static
118 int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
119 {
120 size_t offset_bx = 0, offset_ax = 0;
121 int diff, has_user = 0;
122 mm_segment_t old_fs;
123
124 if (estack_bx(stack, top)->u.s.user
125 || estack_ax(stack, top)->u.s.user) {
126 has_user = 1;
127 old_fs = get_fs();
128 set_fs(KERNEL_DS);
129 pagefault_disable();
130 }
131
132 for (;;) {
133 int ret;
134 int escaped_r0 = 0;
135 char char_bx, char_ax;
136
137 char_bx = get_char(estack_bx(stack, top), offset_bx);
138 char_ax = get_char(estack_ax(stack, top), offset_ax);
139
140 if (unlikely(char_bx == '\0')) {
141 if (char_ax == '\0') {
142 diff = 0;
143 break;
144 } else {
145 if (estack_ax(stack, top)->u.s.literal_type ==
146 ESTACK_STRING_LITERAL_TYPE_PLAIN) {
147 ret = parse_char(estack_ax(stack, top),
148 &char_ax, &offset_ax);
149 if (ret == -1) {
150 diff = 0;
151 break;
152 }
153 }
154 diff = -1;
155 break;
156 }
157 }
158 if (unlikely(char_ax == '\0')) {
159 if (estack_bx(stack, top)->u.s.literal_type ==
160 ESTACK_STRING_LITERAL_TYPE_PLAIN) {
161 ret = parse_char(estack_bx(stack, top),
162 &char_bx, &offset_bx);
163 if (ret == -1) {
164 diff = 0;
165 break;
166 }
167 }
168 diff = 1;
169 break;
170 }
171 if (estack_bx(stack, top)->u.s.literal_type ==
172 ESTACK_STRING_LITERAL_TYPE_PLAIN) {
173 ret = parse_char(estack_bx(stack, top),
174 &char_bx, &offset_bx);
175 if (ret == -1) {
176 diff = 0;
177 break;
178 } else if (ret == -2) {
179 escaped_r0 = 1;
180 }
181 /* else compare both char */
182 }
183 if (estack_ax(stack, top)->u.s.literal_type ==
184 ESTACK_STRING_LITERAL_TYPE_PLAIN) {
185 ret = parse_char(estack_ax(stack, top),
186 &char_ax, &offset_ax);
187 if (ret == -1) {
188 diff = 0;
189 break;
190 } else if (ret == -2) {
191 if (!escaped_r0) {
192 diff = -1;
193 break;
194 }
195 } else {
196 if (escaped_r0) {
197 diff = 1;
198 break;
199 }
200 }
201 } else {
202 if (escaped_r0) {
203 diff = 1;
204 break;
205 }
206 }
207 diff = char_bx - char_ax;
208 if (diff != 0)
209 break;
210 offset_bx++;
211 offset_ax++;
212 }
213 if (has_user) {
214 pagefault_enable();
215 set_fs(old_fs);
216 }
217 return diff;
218 }
219
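/*
 * Trivial filter implementation: unconditionally discard the event.
 */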
220 uint64_t lttng_filter_false(void *filter_data,
221 struct lttng_probe_ctx *lttng_probe_ctx,
222 const char *filter_stack_data)
223 {
224 return 0;
225 }
226
227 #ifdef INTERPRETER_USE_SWITCH
228
229 /*
230 * Fallback for compilers that do not support taking address of labels.
231 */
232
233 #define START_OP \
234 start_pc = &bytecode->code[0]; \
235 for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
236 pc = next_pc) { \
237 dbg_printk("Executing op %s (%u)\n", \
238 lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
239 (unsigned int) *(filter_opcode_t *) pc); \
240 switch (*(filter_opcode_t *) pc) {
241
242 #define OP(name) case name
243
244 #define PO break
245
246 #define END_OP } \
247 }
248
249 #else
250
251 /*
252 * Dispatch-table based interpreter.
253 */
254
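/*
 * Relies on gcc's "labels as values" extension: the dispatch[] table maps
 * each opcode to the address of its handler, and PO jumps directly to the
 * handler of the next opcode without going through a central switch.
 */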
255 #define START_OP \
256 start_pc = &bytecode->code[0]; \
257 pc = next_pc = start_pc; \
258 if (unlikely(pc - start_pc >= bytecode->len)) \
259 goto end; \
260 goto *dispatch[*(filter_opcode_t *) pc];
261
262 #define OP(name) \
263 LABEL_##name
264
265 #define PO \
266 pc = next_pc; \
267 goto *dispatch[*(filter_opcode_t *) pc];
268
269 #define END_OP
270
271 #endif
272
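/*
 * Load static context field @idx into @ptr as a LOAD_OBJECT: integer and
 * enum fields are widened to 64-bit, while string-encoded arrays/sequences
 * and strings become OBJECT_TYPE_STRING. Unsupported types are rejected
 * with -EINVAL.
 */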
273 static int context_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
274 struct load_ptr *ptr,
275 uint32_t idx)
276 {
277
278 struct lttng_ctx_field *ctx_field;
279 struct lttng_event_field *field;
280 union lttng_ctx_value v;
281
282 ctx_field = &lttng_static_ctx->fields[idx];
283 field = &ctx_field->event_field;
284 ptr->type = LOAD_OBJECT;
285 /* field is only used for types nested within variants. */
286 ptr->field = NULL;
287
288 switch (field->type.atype) {
289 case atype_integer:
290 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
291 if (field->type.u.basic.integer.signedness) {
292 ptr->object_type = OBJECT_TYPE_S64;
293 ptr->u.s64 = v.s64;
294 ptr->ptr = &ptr->u.s64;
295 } else {
296 ptr->object_type = OBJECT_TYPE_U64;
297 ptr->u.u64 = v.s64; /* Cast. */
298 ptr->ptr = &ptr->u.u64;
299 }
300 break;
301 case atype_enum:
302 {
303 const struct lttng_integer_type *itype =
304 &field->type.u.basic.enumeration.container_type;
305
306 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
307 if (itype->signedness) {
308 ptr->object_type = OBJECT_TYPE_S64;
309 ptr->u.s64 = v.s64;
310 ptr->ptr = &ptr->u.s64;
311 } else {
312 ptr->object_type = OBJECT_TYPE_U64;
313 ptr->u.u64 = v.s64; /* Cast. */
314 ptr->ptr = &ptr->u.u64;
315 }
316 break;
317 }
318 case atype_array:
319 if (field->type.u.array.elem_type.atype != atype_integer) {
320 printk(KERN_WARNING "Array nesting only supports integer types.\n");
321 return -EINVAL;
322 }
323 if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
324 printk(KERN_WARNING "Only string arrays are supported for contexts.\n");
325 return -EINVAL;
326 }
327 ptr->object_type = OBJECT_TYPE_STRING;
328 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
329 ptr->ptr = v.str;
330 break;
331 case atype_sequence:
332 if (field->type.u.sequence.elem_type.atype != atype_integer) {
333 printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
334 return -EINVAL;
335 }
336 if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
337 printk(KERN_WARNING "Only string sequences are supported for contexts.\n");
338 return -EINVAL;
339 }
340 ptr->object_type = OBJECT_TYPE_STRING;
341 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
342 ptr->ptr = v.str;
343 break;
344 case atype_array_bitfield:
345 printk(KERN_WARNING "Bitfield array type is not supported.\n");
346 return -EINVAL;
347 case atype_sequence_bitfield:
348 printk(KERN_WARNING "Bitfield sequence type is not supported.\n");
349 return -EINVAL;
350 case atype_string:
351 ptr->object_type = OBJECT_TYPE_STRING;
352 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
353 ptr->ptr = v.str;
354 break;
355 case atype_struct:
356 printk(KERN_WARNING "Structure type cannot be loaded.\n");
357 return -EINVAL;
358 default:
359 printk(KERN_WARNING "Unknown type: %d", (int) field->type.atype);
360 return -EINVAL;
361 }
362 return 0;
363 }
364
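/*
 * Apply a "get index" operation to the pointer object on top of the stack:
 * index into an array or sequence element, resolve a context field for the
 * (app) context roots, or offset into the event payload for the payload root.
 */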
365 static int dynamic_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
366 struct bytecode_runtime *runtime,
367 uint64_t index, struct estack_entry *stack_top)
368 {
369 int ret;
370 const struct filter_get_index_data *gid;
371
372 /*
373 * Types nested within variants need to perform dynamic lookup
374 * based on the field descriptions. LTTng-UST does not implement
375 * variants for now.
376 */
377 if (stack_top->u.ptr.field)
378 return -EINVAL;
379 gid = (const struct filter_get_index_data *) &runtime->data[index];
380 switch (stack_top->u.ptr.type) {
381 case LOAD_OBJECT:
382 switch (stack_top->u.ptr.object_type) {
383 case OBJECT_TYPE_ARRAY:
384 {
385 const char *ptr;
386
387 WARN_ON_ONCE(gid->offset >= gid->array_len);
388 /* Skip count (unsigned long) */
389 ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
390 ptr = ptr + gid->offset;
391 stack_top->u.ptr.ptr = ptr;
392 stack_top->u.ptr.object_type = gid->elem.type;
393 stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
394 /* field is only used for types nested within variants. */
395 stack_top->u.ptr.field = NULL;
396 break;
397 }
398 case OBJECT_TYPE_SEQUENCE:
399 {
400 const char *ptr;
401 size_t ptr_seq_len;
402
403 ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
404 ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
405 if (gid->offset >= gid->elem.len * ptr_seq_len) {
406 ret = -EINVAL;
407 goto end;
408 }
409 ptr = ptr + gid->offset;
410 stack_top->u.ptr.ptr = ptr;
411 stack_top->u.ptr.object_type = gid->elem.type;
412 stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
413 /* field is only used for types nested within variants. */
414 stack_top->u.ptr.field = NULL;
415 break;
416 }
417 case OBJECT_TYPE_STRUCT:
418 printk(KERN_WARNING "Nested structures are not supported yet.\n");
419 ret = -EINVAL;
420 goto end;
421 case OBJECT_TYPE_VARIANT:
422 default:
423 printk(KERN_WARNING "Unexpected get index type %d",
424 (int) stack_top->u.ptr.object_type);
425 ret = -EINVAL;
426 goto end;
427 }
428 break;
429 case LOAD_ROOT_CONTEXT:
430 case LOAD_ROOT_APP_CONTEXT: /* Fall-through */
431 {
432 ret = context_get_index(lttng_probe_ctx,
433 &stack_top->u.ptr,
434 gid->ctx_index);
435 if (ret) {
436 goto end;
437 }
438 break;
439 }
440 case LOAD_ROOT_PAYLOAD:
441 stack_top->u.ptr.ptr += gid->offset;
442 if (gid->elem.type == OBJECT_TYPE_STRING)
443 stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
444 stack_top->u.ptr.object_type = gid->elem.type;
445 stack_top->u.ptr.type = LOAD_OBJECT;
446 /* field is only used for types nested within variants. */
447 stack_top->u.ptr.field = NULL;
448 break;
449 }
450 return 0;
451
452 end:
453 return ret;
454 }
455
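/*
 * Dereference the object pointed to by the top of stack and replace it with
 * the loaded value, byte-swapping integers whose byte order differs from the
 * host (rev_bo). Strings and sequences are exposed through u.s for the string
 * comparison operators.
 */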
456 static int dynamic_load_field(struct estack_entry *stack_top)
457 {
458 int ret;
459
460 switch (stack_top->u.ptr.type) {
461 case LOAD_OBJECT:
462 break;
463 case LOAD_ROOT_CONTEXT:
464 case LOAD_ROOT_APP_CONTEXT:
465 case LOAD_ROOT_PAYLOAD:
466 default:
467 dbg_printk("Filter warning: cannot load root, missing field name.\n");
468 ret = -EINVAL;
469 goto end;
470 }
471 switch (stack_top->u.ptr.object_type) {
472 case OBJECT_TYPE_S8:
473 dbg_printk("op load field s8\n");
474 stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
475 break;
476 case OBJECT_TYPE_S16:
477 {
478 int16_t tmp;
479
480 dbg_printk("op load field s16\n");
481 tmp = *(int16_t *) stack_top->u.ptr.ptr;
482 if (stack_top->u.ptr.rev_bo)
483 __swab16s(&tmp);
484 stack_top->u.v = tmp;
485 break;
486 }
487 case OBJECT_TYPE_S32:
488 {
489 int32_t tmp;
490
491 dbg_printk("op load field s32\n");
492 tmp = *(int32_t *) stack_top->u.ptr.ptr;
493 if (stack_top->u.ptr.rev_bo)
494 __swab32s(&tmp);
495 stack_top->u.v = tmp;
496 break;
497 }
498 case OBJECT_TYPE_S64:
499 {
500 int64_t tmp;
501
502 dbg_printk("op load field s64\n");
503 tmp = *(int64_t *) stack_top->u.ptr.ptr;
504 if (stack_top->u.ptr.rev_bo)
505 __swab64s(&tmp);
506 stack_top->u.v = tmp;
507 break;
508 }
509 case OBJECT_TYPE_U8:
510 dbg_printk("op load field u8\n");
511 stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
512 break;
513 case OBJECT_TYPE_U16:
514 {
515 uint16_t tmp;
516
517 dbg_printk("op load field s16\n");
518 tmp = *(uint16_t *) stack_top->u.ptr.ptr;
519 if (stack_top->u.ptr.rev_bo)
520 __swab16s(&tmp);
521 stack_top->u.v = tmp;
522 break;
523 }
524 case OBJECT_TYPE_U32:
525 {
526 uint32_t tmp;
527
528 dbg_printk("op load field u32\n");
529 tmp = *(uint32_t *) stack_top->u.ptr.ptr;
530 if (stack_top->u.ptr.rev_bo)
531 __swab32s(&tmp);
532 stack_top->u.v = tmp;
533 break;
534 }
535 case OBJECT_TYPE_U64:
536 {
537 uint64_t tmp;
538
539 dbg_printk("op load field u64\n");
540 tmp = *(uint64_t *) stack_top->u.ptr.ptr;
541 if (stack_top->u.ptr.rev_bo)
542 __swab64s(&tmp);
543 stack_top->u.v = tmp;
544 break;
545 }
546 case OBJECT_TYPE_STRING:
547 {
548 const char *str;
549
550 dbg_printk("op load field string\n");
551 str = (const char *) stack_top->u.ptr.ptr;
552 stack_top->u.s.str = str;
553 if (unlikely(!stack_top->u.s.str)) {
554 dbg_printk("Filter warning: loading a NULL string.\n");
555 ret = -EINVAL;
556 goto end;
557 }
558 stack_top->u.s.seq_len = LTTNG_SIZE_MAX;
559 stack_top->u.s.literal_type =
560 ESTACK_STRING_LITERAL_TYPE_NONE;
561 break;
562 }
563 case OBJECT_TYPE_STRING_SEQUENCE:
564 {
565 const char *ptr;
566
567 dbg_printk("op load field string sequence\n");
568 ptr = stack_top->u.ptr.ptr;
569 stack_top->u.s.seq_len = *(unsigned long *) ptr;
570 stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
571 if (unlikely(!stack_top->u.s.str)) {
572 dbg_printk("Filter warning: loading a NULL sequence.\n");
573 ret = -EINVAL;
574 goto end;
575 }
576 stack_top->u.s.literal_type =
577 ESTACK_STRING_LITERAL_TYPE_NONE;
578 break;
579 }
580 case OBJECT_TYPE_DYNAMIC:
581 /*
582 * Dynamic types in context are looked up
583 * by context get index.
584 */
585 ret = -EINVAL;
586 goto end;
587 case OBJECT_TYPE_DOUBLE:
588 ret = -EINVAL;
589 goto end;
590 case OBJECT_TYPE_SEQUENCE:
591 case OBJECT_TYPE_ARRAY:
592 case OBJECT_TYPE_STRUCT:
593 case OBJECT_TYPE_VARIANT:
594 printk(KERN_WARNING "Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
595 ret = -EINVAL;
596 goto end;
597 }
598 return 0;
599
600 end:
601 return ret;
602 }
603
604 /*
605 * Return 0 (discard), or raise the 0x1 flag (log event).
606 * Other flags are currently reserved for future extensions and have no
607 * effect.
608 */
609 uint64_t lttng_filter_interpret_bytecode(void *filter_data,
610 struct lttng_probe_ctx *lttng_probe_ctx,
611 const char *filter_stack_data)
612 {
613 struct bytecode_runtime *bytecode = filter_data;
614 void *pc, *next_pc, *start_pc;
615 int ret = -EINVAL;
616 uint64_t retval = 0;
617 struct estack _stack;
618 struct estack *stack = &_stack;
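/*
 * ax and bx shadow the numeric value of the two top-of-stack entries; the
 * estack_push()/estack_pop() macros keep them in sync with the estack slots
 * so most numeric operations work on registers.
 */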
619 register int64_t ax = 0, bx = 0;
620 register int top = FILTER_STACK_EMPTY;
621 #ifndef INTERPRETER_USE_SWITCH
622 static void *dispatch[NR_FILTER_OPS] = {
623 [ FILTER_OP_UNKNOWN ] = &&LABEL_FILTER_OP_UNKNOWN,
624
625 [ FILTER_OP_RETURN ] = &&LABEL_FILTER_OP_RETURN,
626
627 /* binary */
628 [ FILTER_OP_MUL ] = &&LABEL_FILTER_OP_MUL,
629 [ FILTER_OP_DIV ] = &&LABEL_FILTER_OP_DIV,
630 [ FILTER_OP_MOD ] = &&LABEL_FILTER_OP_MOD,
631 [ FILTER_OP_PLUS ] = &&LABEL_FILTER_OP_PLUS,
632 [ FILTER_OP_MINUS ] = &&LABEL_FILTER_OP_MINUS,
633 [ FILTER_OP_BIT_RSHIFT ] = &&LABEL_FILTER_OP_BIT_RSHIFT,
634 [ FILTER_OP_BIT_LSHIFT ] = &&LABEL_FILTER_OP_BIT_LSHIFT,
635 [ FILTER_OP_BIT_AND ] = &&LABEL_FILTER_OP_BIT_AND,
636 [ FILTER_OP_BIT_OR ] = &&LABEL_FILTER_OP_BIT_OR,
637 [ FILTER_OP_BIT_XOR ] = &&LABEL_FILTER_OP_BIT_XOR,
638
639 /* binary comparators */
640 [ FILTER_OP_EQ ] = &&LABEL_FILTER_OP_EQ,
641 [ FILTER_OP_NE ] = &&LABEL_FILTER_OP_NE,
642 [ FILTER_OP_GT ] = &&LABEL_FILTER_OP_GT,
643 [ FILTER_OP_LT ] = &&LABEL_FILTER_OP_LT,
644 [ FILTER_OP_GE ] = &&LABEL_FILTER_OP_GE,
645 [ FILTER_OP_LE ] = &&LABEL_FILTER_OP_LE,
646
647 /* string binary comparator */
648 [ FILTER_OP_EQ_STRING ] = &&LABEL_FILTER_OP_EQ_STRING,
649 [ FILTER_OP_NE_STRING ] = &&LABEL_FILTER_OP_NE_STRING,
650 [ FILTER_OP_GT_STRING ] = &&LABEL_FILTER_OP_GT_STRING,
651 [ FILTER_OP_LT_STRING ] = &&LABEL_FILTER_OP_LT_STRING,
652 [ FILTER_OP_GE_STRING ] = &&LABEL_FILTER_OP_GE_STRING,
653 [ FILTER_OP_LE_STRING ] = &&LABEL_FILTER_OP_LE_STRING,
654
655 /* globbing pattern binary comparator */
656 [ FILTER_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING,
657 [ FILTER_OP_NE_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING,
658
659 /* s64 binary comparator */
660 [ FILTER_OP_EQ_S64 ] = &&LABEL_FILTER_OP_EQ_S64,
661 [ FILTER_OP_NE_S64 ] = &&LABEL_FILTER_OP_NE_S64,
662 [ FILTER_OP_GT_S64 ] = &&LABEL_FILTER_OP_GT_S64,
663 [ FILTER_OP_LT_S64 ] = &&LABEL_FILTER_OP_LT_S64,
664 [ FILTER_OP_GE_S64 ] = &&LABEL_FILTER_OP_GE_S64,
665 [ FILTER_OP_LE_S64 ] = &&LABEL_FILTER_OP_LE_S64,
666
667 /* double binary comparator */
668 [ FILTER_OP_EQ_DOUBLE ] = &&LABEL_FILTER_OP_EQ_DOUBLE,
669 [ FILTER_OP_NE_DOUBLE ] = &&LABEL_FILTER_OP_NE_DOUBLE,
670 [ FILTER_OP_GT_DOUBLE ] = &&LABEL_FILTER_OP_GT_DOUBLE,
671 [ FILTER_OP_LT_DOUBLE ] = &&LABEL_FILTER_OP_LT_DOUBLE,
672 [ FILTER_OP_GE_DOUBLE ] = &&LABEL_FILTER_OP_GE_DOUBLE,
673 [ FILTER_OP_LE_DOUBLE ] = &&LABEL_FILTER_OP_LE_DOUBLE,
674
675 /* Mixed S64-double binary comparators */
676 [ FILTER_OP_EQ_DOUBLE_S64 ] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64,
677 [ FILTER_OP_NE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_NE_DOUBLE_S64,
678 [ FILTER_OP_GT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GT_DOUBLE_S64,
679 [ FILTER_OP_LT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LT_DOUBLE_S64,
680 [ FILTER_OP_GE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GE_DOUBLE_S64,
681 [ FILTER_OP_LE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LE_DOUBLE_S64,
682
683 [ FILTER_OP_EQ_S64_DOUBLE ] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE,
684 [ FILTER_OP_NE_S64_DOUBLE ] = &&LABEL_FILTER_OP_NE_S64_DOUBLE,
685 [ FILTER_OP_GT_S64_DOUBLE ] = &&LABEL_FILTER_OP_GT_S64_DOUBLE,
686 [ FILTER_OP_LT_S64_DOUBLE ] = &&LABEL_FILTER_OP_LT_S64_DOUBLE,
687 [ FILTER_OP_GE_S64_DOUBLE ] = &&LABEL_FILTER_OP_GE_S64_DOUBLE,
688 [ FILTER_OP_LE_S64_DOUBLE ] = &&LABEL_FILTER_OP_LE_S64_DOUBLE,
689
690 /* unary */
691 [ FILTER_OP_UNARY_PLUS ] = &&LABEL_FILTER_OP_UNARY_PLUS,
692 [ FILTER_OP_UNARY_MINUS ] = &&LABEL_FILTER_OP_UNARY_MINUS,
693 [ FILTER_OP_UNARY_NOT ] = &&LABEL_FILTER_OP_UNARY_NOT,
694 [ FILTER_OP_UNARY_PLUS_S64 ] = &&LABEL_FILTER_OP_UNARY_PLUS_S64,
695 [ FILTER_OP_UNARY_MINUS_S64 ] = &&LABEL_FILTER_OP_UNARY_MINUS_S64,
696 [ FILTER_OP_UNARY_NOT_S64 ] = &&LABEL_FILTER_OP_UNARY_NOT_S64,
697 [ FILTER_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE,
698 [ FILTER_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE,
699 [ FILTER_OP_UNARY_NOT_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE,
700
701 /* logical */
702 [ FILTER_OP_AND ] = &&LABEL_FILTER_OP_AND,
703 [ FILTER_OP_OR ] = &&LABEL_FILTER_OP_OR,
704
705 /* load field ref */
706 [ FILTER_OP_LOAD_FIELD_REF ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF,
707 [ FILTER_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING,
708 [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE,
709 [ FILTER_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64,
710 [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE,
711
712 /* load from immediate operand */
713 [ FILTER_OP_LOAD_STRING ] = &&LABEL_FILTER_OP_LOAD_STRING,
714 [ FILTER_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING,
715 [ FILTER_OP_LOAD_S64 ] = &&LABEL_FILTER_OP_LOAD_S64,
716 [ FILTER_OP_LOAD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_DOUBLE,
717
718 /* cast */
719 [ FILTER_OP_CAST_TO_S64 ] = &&LABEL_FILTER_OP_CAST_TO_S64,
720 [ FILTER_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64,
721 [ FILTER_OP_CAST_NOP ] = &&LABEL_FILTER_OP_CAST_NOP,
722
723 /* get context ref */
724 [ FILTER_OP_GET_CONTEXT_REF ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF,
725 [ FILTER_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING,
726 [ FILTER_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64,
727 [ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE,
728
729 /* load userspace field ref */
730 [ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING,
731 [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE,
732
733 /* Instructions for recursive traversal through composed types. */
734 [ FILTER_OP_GET_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT,
735 [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT,
736 [ FILTER_OP_GET_PAYLOAD_ROOT ] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT,
737
738 [ FILTER_OP_GET_SYMBOL ] = &&LABEL_FILTER_OP_GET_SYMBOL,
739 [ FILTER_OP_GET_SYMBOL_FIELD ] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD,
740 [ FILTER_OP_GET_INDEX_U16 ] = &&LABEL_FILTER_OP_GET_INDEX_U16,
741 [ FILTER_OP_GET_INDEX_U64 ] = &&LABEL_FILTER_OP_GET_INDEX_U64,
742
743 [ FILTER_OP_LOAD_FIELD ] = &&LABEL_FILTER_OP_LOAD_FIELD,
744 [ FILTER_OP_LOAD_FIELD_S8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S8,
745 [ FILTER_OP_LOAD_FIELD_S16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S16,
746 [ FILTER_OP_LOAD_FIELD_S32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S32,
747 [ FILTER_OP_LOAD_FIELD_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S64,
748 [ FILTER_OP_LOAD_FIELD_U8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U8,
749 [ FILTER_OP_LOAD_FIELD_U16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U16,
750 [ FILTER_OP_LOAD_FIELD_U32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U32,
751 [ FILTER_OP_LOAD_FIELD_U64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U64,
752 [ FILTER_OP_LOAD_FIELD_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING,
753 [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE,
754 [ FILTER_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE,
755
756 [ FILTER_OP_UNARY_BIT_NOT ] = &&LABEL_FILTER_OP_UNARY_BIT_NOT,
757
758 [ FILTER_OP_RETURN_S64 ] = &&LABEL_FILTER_OP_RETURN_S64,
759 };
760 #endif /* #ifndef INTERPRETER_USE_SWITCH */
761
762 START_OP
763
764 OP(FILTER_OP_UNKNOWN):
765 OP(FILTER_OP_LOAD_FIELD_REF):
766 OP(FILTER_OP_GET_CONTEXT_REF):
767 #ifdef INTERPRETER_USE_SWITCH
768 default:
769 #endif /* INTERPRETER_USE_SWITCH */
770 printk(KERN_WARNING "unknown bytecode op %u\n",
771 (unsigned int) *(filter_opcode_t *) pc);
772 ret = -EINVAL;
773 goto end;
774
775 OP(FILTER_OP_RETURN):
776 OP(FILTER_OP_RETURN_S64):
777 /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
778 retval = !!estack_ax_v;
779 ret = 0;
780 goto end;
781
782 /* binary */
783 OP(FILTER_OP_MUL):
784 OP(FILTER_OP_DIV):
785 OP(FILTER_OP_MOD):
786 OP(FILTER_OP_PLUS):
787 OP(FILTER_OP_MINUS):
788 printk(KERN_WARNING "unsupported bytecode op %u\n",
789 (unsigned int) *(filter_opcode_t *) pc);
790 ret = -EINVAL;
791 goto end;
792
793 OP(FILTER_OP_EQ):
794 OP(FILTER_OP_NE):
795 OP(FILTER_OP_GT):
796 OP(FILTER_OP_LT):
797 OP(FILTER_OP_GE):
798 OP(FILTER_OP_LE):
799 printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
800 (unsigned int) *(filter_opcode_t *) pc);
801 ret = -EINVAL;
802 goto end;
803
804 OP(FILTER_OP_EQ_STRING):
805 {
806 int res;
807
808 res = (stack_strcmp(stack, top, "==") == 0);
809 estack_pop(stack, top, ax, bx);
810 estack_ax_v = res;
811 next_pc += sizeof(struct binary_op);
812 PO;
813 }
814 OP(FILTER_OP_NE_STRING):
815 {
816 int res;
817
818 res = (stack_strcmp(stack, top, "!=") != 0);
819 estack_pop(stack, top, ax, bx);
820 estack_ax_v = res;
821 next_pc += sizeof(struct binary_op);
822 PO;
823 }
824 OP(FILTER_OP_GT_STRING):
825 {
826 int res;
827
828 res = (stack_strcmp(stack, top, ">") > 0);
829 estack_pop(stack, top, ax, bx);
830 estack_ax_v = res;
831 next_pc += sizeof(struct binary_op);
832 PO;
833 }
834 OP(FILTER_OP_LT_STRING):
835 {
836 int res;
837
838 res = (stack_strcmp(stack, top, "<") < 0);
839 estack_pop(stack, top, ax, bx);
840 estack_ax_v = res;
841 next_pc += sizeof(struct binary_op);
842 PO;
843 }
844 OP(FILTER_OP_GE_STRING):
845 {
846 int res;
847
848 res = (stack_strcmp(stack, top, ">=") >= 0);
849 estack_pop(stack, top, ax, bx);
850 estack_ax_v = res;
851 next_pc += sizeof(struct binary_op);
852 PO;
853 }
854 OP(FILTER_OP_LE_STRING):
855 {
856 int res;
857
858 res = (stack_strcmp(stack, top, "<=") <= 0);
859 estack_pop(stack, top, ax, bx);
860 estack_ax_v = res;
861 next_pc += sizeof(struct binary_op);
862 PO;
863 }
864
865 OP(FILTER_OP_EQ_STAR_GLOB_STRING):
866 {
867 int res;
868
869 res = (stack_star_glob_match(stack, top, "==") == 0);
870 estack_pop(stack, top, ax, bx);
871 estack_ax_v = res;
872 next_pc += sizeof(struct binary_op);
873 PO;
874 }
875 OP(FILTER_OP_NE_STAR_GLOB_STRING):
876 {
877 int res;
878
879 res = (stack_star_glob_match(stack, top, "!=") != 0);
880 estack_pop(stack, top, ax, bx);
881 estack_ax_v = res;
882 next_pc += sizeof(struct binary_op);
883 PO;
884 }
885
886 OP(FILTER_OP_EQ_S64):
887 {
888 int res;
889
890 res = (estack_bx_v == estack_ax_v);
891 estack_pop(stack, top, ax, bx);
892 estack_ax_v = res;
893 next_pc += sizeof(struct binary_op);
894 PO;
895 }
896 OP(FILTER_OP_NE_S64):
897 {
898 int res;
899
900 res = (estack_bx_v != estack_ax_v);
901 estack_pop(stack, top, ax, bx);
902 estack_ax_v = res;
903 next_pc += sizeof(struct binary_op);
904 PO;
905 }
906 OP(FILTER_OP_GT_S64):
907 {
908 int res;
909
910 res = (estack_bx_v > estack_ax_v);
911 estack_pop(stack, top, ax, bx);
912 estack_ax_v = res;
913 next_pc += sizeof(struct binary_op);
914 PO;
915 }
916 OP(FILTER_OP_LT_S64):
917 {
918 int res;
919
920 res = (estack_bx_v < estack_ax_v);
921 estack_pop(stack, top, ax, bx);
922 estack_ax_v = res;
923 next_pc += sizeof(struct binary_op);
924 PO;
925 }
926 OP(FILTER_OP_GE_S64):
927 {
928 int res;
929
930 res = (estack_bx_v >= estack_ax_v);
931 estack_pop(stack, top, ax, bx);
932 estack_ax_v = res;
933 next_pc += sizeof(struct binary_op);
934 PO;
935 }
936 OP(FILTER_OP_LE_S64):
937 {
938 int res;
939
940 res = (estack_bx_v <= estack_ax_v);
941 estack_pop(stack, top, ax, bx);
942 estack_ax_v = res;
943 next_pc += sizeof(struct binary_op);
944 PO;
945 }
946
947 OP(FILTER_OP_EQ_DOUBLE):
948 OP(FILTER_OP_NE_DOUBLE):
949 OP(FILTER_OP_GT_DOUBLE):
950 OP(FILTER_OP_LT_DOUBLE):
951 OP(FILTER_OP_GE_DOUBLE):
952 OP(FILTER_OP_LE_DOUBLE):
953 {
954 BUG_ON(1);
955 PO;
956 }
957
958 /* Mixed S64-double binary comparators */
959 OP(FILTER_OP_EQ_DOUBLE_S64):
960 OP(FILTER_OP_NE_DOUBLE_S64):
961 OP(FILTER_OP_GT_DOUBLE_S64):
962 OP(FILTER_OP_LT_DOUBLE_S64):
963 OP(FILTER_OP_GE_DOUBLE_S64):
964 OP(FILTER_OP_LE_DOUBLE_S64):
965 OP(FILTER_OP_EQ_S64_DOUBLE):
966 OP(FILTER_OP_NE_S64_DOUBLE):
967 OP(FILTER_OP_GT_S64_DOUBLE):
968 OP(FILTER_OP_LT_S64_DOUBLE):
969 OP(FILTER_OP_GE_S64_DOUBLE):
970 OP(FILTER_OP_LE_S64_DOUBLE):
971 {
972 BUG_ON(1);
973 PO;
974 }
975 OP(FILTER_OP_BIT_RSHIFT):
976 {
977 int64_t res;
978
979 /* Catch undefined behavior. */
980 if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
981 ret = -EINVAL;
982 goto end;
983 }
984 res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v);
985 estack_pop(stack, top, ax, bx);
986 estack_ax_v = res;
987 next_pc += sizeof(struct binary_op);
988 PO;
989 }
990 OP(FILTER_OP_BIT_LSHIFT):
991 {
992 int64_t res;
993
994 /* Catch undefined behavior. */
995 if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
996 ret = -EINVAL;
997 goto end;
998 }
999 res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v);
1000 estack_pop(stack, top, ax, bx);
1001 estack_ax_v = res;
1002 next_pc += sizeof(struct binary_op);
1003 PO;
1004 }
1005 OP(FILTER_OP_BIT_AND):
1006 {
1007 int64_t res;
1008
1009 res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v);
1010 estack_pop(stack, top, ax, bx);
1011 estack_ax_v = res;
1012 next_pc += sizeof(struct binary_op);
1013 PO;
1014 }
1015 OP(FILTER_OP_BIT_OR):
1016 {
1017 int64_t res;
1018
1019 res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v);
1020 estack_pop(stack, top, ax, bx);
1021 estack_ax_v = res;
1022 next_pc += sizeof(struct binary_op);
1023 PO;
1024 }
1025 OP(FILTER_OP_BIT_XOR):
1026 {
1027 int64_t res;
1028
1029 res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v);
1030 estack_pop(stack, top, ax, bx);
1031 estack_ax_v = res;
1032 next_pc += sizeof(struct binary_op);
1033 PO;
1034 }
1035
1036 /* unary */
1037 OP(FILTER_OP_UNARY_PLUS):
1038 OP(FILTER_OP_UNARY_MINUS):
1039 OP(FILTER_OP_UNARY_NOT):
1040 printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
1041 (unsigned int) *(filter_opcode_t *) pc);
1042 ret = -EINVAL;
1043 goto end;
1044
1045
1046 OP(FILTER_OP_UNARY_BIT_NOT):
1047 {
1048 estack_ax_v = ~(uint64_t) estack_ax_v;
1049 next_pc += sizeof(struct unary_op);
1050 PO;
1051 }
1052
1053 OP(FILTER_OP_UNARY_PLUS_S64):
1054 {
1055 next_pc += sizeof(struct unary_op);
1056 PO;
1057 }
1058 OP(FILTER_OP_UNARY_MINUS_S64):
1059 {
1060 estack_ax_v = -estack_ax_v;
1061 next_pc += sizeof(struct unary_op);
1062 PO;
1063 }
1064 OP(FILTER_OP_UNARY_PLUS_DOUBLE):
1065 OP(FILTER_OP_UNARY_MINUS_DOUBLE):
1066 {
1067 BUG_ON(1);
1068 PO;
1069 }
1070 OP(FILTER_OP_UNARY_NOT_S64):
1071 {
1072 estack_ax_v = !estack_ax_v;
1073 next_pc += sizeof(struct unary_op);
1074 PO;
1075 }
1076 OP(FILTER_OP_UNARY_NOT_DOUBLE):
1077 {
1078 BUG_ON(1);
1079 PO;
1080 }
1081
1082 /* logical */
1083 OP(FILTER_OP_AND):
1084 {
1085 struct logical_op *insn = (struct logical_op *) pc;
1086
1087 /* If AX is 0, skip and evaluate to 0 */
1088 if (unlikely(estack_ax_v == 0)) {
1089 dbg_printk("Jumping to bytecode offset %u\n",
1090 (unsigned int) insn->skip_offset);
1091 next_pc = start_pc + insn->skip_offset;
1092 } else {
1093 /* Pop 1 when jump not taken */
1094 estack_pop(stack, top, ax, bx);
1095 next_pc += sizeof(struct logical_op);
1096 }
1097 PO;
1098 }
1099 OP(FILTER_OP_OR):
1100 {
1101 struct logical_op *insn = (struct logical_op *) pc;
1102
1103 /* If AX is nonzero, skip and evaluate to 1 */
1104
1105 if (unlikely(estack_ax_v != 0)) {
1106 estack_ax_v = 1;
1107 dbg_printk("Jumping to bytecode offset %u\n",
1108 (unsigned int) insn->skip_offset);
1109 next_pc = start_pc + insn->skip_offset;
1110 } else {
1111 /* Pop 1 when jump not taken */
1112 estack_pop(stack, top, ax, bx);
1113 next_pc += sizeof(struct logical_op);
1114 }
1115 PO;
1116 }
1117
1118
1119 /* load field ref */
1120 OP(FILTER_OP_LOAD_FIELD_REF_STRING):
1121 {
1122 struct load_op *insn = (struct load_op *) pc;
1123 struct field_ref *ref = (struct field_ref *) insn->data;
1124
1125 dbg_printk("load field ref offset %u type string\n",
1126 ref->offset);
1127 estack_push(stack, top, ax, bx);
1128 estack_ax(stack, top)->u.s.str =
1129 *(const char * const *) &filter_stack_data[ref->offset];
1130 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1131 dbg_printk("Filter warning: loading a NULL string.\n");
1132 ret = -EINVAL;
1133 goto end;
1134 }
1135 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1136 estack_ax(stack, top)->u.s.literal_type =
1137 ESTACK_STRING_LITERAL_TYPE_NONE;
1138 estack_ax(stack, top)->u.s.user = 0;
1139 dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
1140 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1141 PO;
1142 }
1143
1144 OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE):
1145 {
1146 struct load_op *insn = (struct load_op *) pc;
1147 struct field_ref *ref = (struct field_ref *) insn->data;
1148
1149 dbg_printk("load field ref offset %u type sequence\n",
1150 ref->offset);
1151 estack_push(stack, top, ax, bx);
1152 estack_ax(stack, top)->u.s.seq_len =
1153 *(unsigned long *) &filter_stack_data[ref->offset];
1154 estack_ax(stack, top)->u.s.str =
1155 *(const char **) (&filter_stack_data[ref->offset
1156 + sizeof(unsigned long)]);
1157 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1158 dbg_printk("Filter warning: loading a NULL sequence.\n");
1159 ret = -EINVAL;
1160 goto end;
1161 }
1162 estack_ax(stack, top)->u.s.literal_type =
1163 ESTACK_STRING_LITERAL_TYPE_NONE;
1164 estack_ax(stack, top)->u.s.user = 0;
1165 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1166 PO;
1167 }
1168
1169 OP(FILTER_OP_LOAD_FIELD_REF_S64):
1170 {
1171 struct load_op *insn = (struct load_op *) pc;
1172 struct field_ref *ref = (struct field_ref *) insn->data;
1173
1174 dbg_printk("load field ref offset %u type s64\n",
1175 ref->offset);
1176 estack_push(stack, top, ax, bx);
1177 estack_ax_v =
1178 ((struct literal_numeric *) &filter_stack_data[ref->offset])->v;
1179 dbg_printk("ref load s64 %lld\n",
1180 (long long) estack_ax_v);
1181 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1182 PO;
1183 }
1184
1185 OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE):
1186 {
1187 BUG_ON(1);
1188 PO;
1189 }
1190
1191 /* load from immediate operand */
1192 OP(FILTER_OP_LOAD_STRING):
1193 {
1194 struct load_op *insn = (struct load_op *) pc;
1195
1196 dbg_printk("load string %s\n", insn->data);
1197 estack_push(stack, top, ax, bx);
1198 estack_ax(stack, top)->u.s.str = insn->data;
1199 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1200 estack_ax(stack, top)->u.s.literal_type =
1201 ESTACK_STRING_LITERAL_TYPE_PLAIN;
1202 estack_ax(stack, top)->u.s.user = 0;
1203 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1204 PO;
1205 }
1206
1207 OP(FILTER_OP_LOAD_STAR_GLOB_STRING):
1208 {
1209 struct load_op *insn = (struct load_op *) pc;
1210
1211 dbg_printk("load globbing pattern %s\n", insn->data);
1212 estack_push(stack, top, ax, bx);
1213 estack_ax(stack, top)->u.s.str = insn->data;
1214 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1215 estack_ax(stack, top)->u.s.literal_type =
1216 ESTACK_STRING_LITERAL_TYPE_STAR_GLOB;
1217 estack_ax(stack, top)->u.s.user = 0;
1218 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1219 PO;
1220 }
1221
1222 OP(FILTER_OP_LOAD_S64):
1223 {
1224 struct load_op *insn = (struct load_op *) pc;
1225
1226 estack_push(stack, top, ax, bx);
1227 estack_ax_v = ((struct literal_numeric *) insn->data)->v;
1228 dbg_printk("load s64 %lld\n",
1229 (long long) estack_ax_v);
1230 next_pc += sizeof(struct load_op)
1231 + sizeof(struct literal_numeric);
1232 PO;
1233 }
1234
1235 OP(FILTER_OP_LOAD_DOUBLE):
1236 {
1237 BUG_ON(1);
1238 PO;
1239 }
1240
1241 /* cast */
1242 OP(FILTER_OP_CAST_TO_S64):
1243 printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
1244 (unsigned int) *(filter_opcode_t *) pc);
1245 ret = -EINVAL;
1246 goto end;
1247
1248 OP(FILTER_OP_CAST_DOUBLE_TO_S64):
1249 {
1250 BUG_ON(1);
1251 PO;
1252 }
1253
1254 OP(FILTER_OP_CAST_NOP):
1255 {
1256 next_pc += sizeof(struct cast_op);
1257 PO;
1258 }
1259
1260 /* get context ref */
1261 OP(FILTER_OP_GET_CONTEXT_REF_STRING):
1262 {
1263 struct load_op *insn = (struct load_op *) pc;
1264 struct field_ref *ref = (struct field_ref *) insn->data;
1265 struct lttng_ctx_field *ctx_field;
1266 union lttng_ctx_value v;
1267
1268 dbg_printk("get context ref offset %u type string\n",
1269 ref->offset);
1270 ctx_field = &lttng_static_ctx->fields[ref->offset];
1271 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
1272 estack_push(stack, top, ax, bx);
1273 estack_ax(stack, top)->u.s.str = v.str;
1274 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1275 dbg_printk("Filter warning: loading a NULL string.\n");
1276 ret = -EINVAL;
1277 goto end;
1278 }
1279 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1280 estack_ax(stack, top)->u.s.literal_type =
1281 ESTACK_STRING_LITERAL_TYPE_NONE;
1282 estack_ax(stack, top)->u.s.user = 0;
1283 dbg_printk("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
1284 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1285 PO;
1286 }
1287
1288 OP(FILTER_OP_GET_CONTEXT_REF_S64):
1289 {
1290 struct load_op *insn = (struct load_op *) pc;
1291 struct field_ref *ref = (struct field_ref *) insn->data;
1292 struct lttng_ctx_field *ctx_field;
1293 union lttng_ctx_value v;
1294
1295 dbg_printk("get context ref offset %u type s64\n",
1296 ref->offset);
1297 ctx_field = &lttng_static_ctx->fields[ref->offset];
1298 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
1299 estack_push(stack, top, ax, bx);
1300 estack_ax_v = v.s64;
1301 dbg_printk("ref get context s64 %lld\n",
1302 (long long) estack_ax_v);
1303 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1304 PO;
1305 }
1306
1307 OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE):
1308 {
1309 BUG_ON(1);
1310 PO;
1311 }
1312
1313 /* load userspace field ref */
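/*
 * These variants mark the operand with u.s.user = 1 so that subsequent string
 * comparisons fetch characters through the fault-safe user-space path in
 * get_char().
 */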
1314 OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING):
1315 {
1316 struct load_op *insn = (struct load_op *) pc;
1317 struct field_ref *ref = (struct field_ref *) insn->data;
1318
1319 dbg_printk("load field ref offset %u type user string\n",
1320 ref->offset);
1321 estack_push(stack, top, ax, bx);
1322 estack_ax(stack, top)->u.s.user_str =
1323 *(const char * const *) &filter_stack_data[ref->offset];
1324 if (unlikely(!estack_ax(stack, top)->u.s.user_str)) {
1325 dbg_printk("Filter warning: loading a NULL string.\n");
1326 ret = -EINVAL;
1327 goto end;
1328 }
1329 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1330 estack_ax(stack, top)->u.s.literal_type =
1331 ESTACK_STRING_LITERAL_TYPE_NONE;
1332 estack_ax(stack, top)->u.s.user = 1;
1333 dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
1334 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1335 PO;
1336 }
1337
1338 OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE):
1339 {
1340 struct load_op *insn = (struct load_op *) pc;
1341 struct field_ref *ref = (struct field_ref *) insn->data;
1342
1343 dbg_printk("load field ref offset %u type user sequence\n",
1344 ref->offset);
1345 estack_push(stack, top, ax, bx);
1346 estack_ax(stack, top)->u.s.seq_len =
1347 *(unsigned long *) &filter_stack_data[ref->offset];
1348 estack_ax(stack, top)->u.s.user_str =
1349 *(const char **) (&filter_stack_data[ref->offset
1350 + sizeof(unsigned long)]);
1351 if (unlikely(!estack_ax(stack, top)->u.s.user_str)) {
1352 dbg_printk("Filter warning: loading a NULL sequence.\n");
1353 ret = -EINVAL;
1354 goto end;
1355 }
1356 estack_ax(stack, top)->u.s.literal_type =
1357 ESTACK_STRING_LITERAL_TYPE_NONE;
1358 estack_ax(stack, top)->u.s.user = 1;
1359 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1360 PO;
1361 }
1362
1363 OP(FILTER_OP_GET_CONTEXT_ROOT):
1364 {
1365 dbg_printk("op get context root\n");
1366 estack_push(stack, top, ax, bx);
1367 estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
1368 /* "field" only needed for variants. */
1369 estack_ax(stack, top)->u.ptr.field = NULL;
1370 next_pc += sizeof(struct load_op);
1371 PO;
1372 }
1373
1374 OP(FILTER_OP_GET_APP_CONTEXT_ROOT):
1375 {
1376 BUG_ON(1);
1377 PO;
1378 }
1379
1380 OP(FILTER_OP_GET_PAYLOAD_ROOT):
1381 {
1382 dbg_printk("op get app payload root\n");
1383 estack_push(stack, top, ax, bx);
1384 estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
1385 estack_ax(stack, top)->u.ptr.ptr = filter_stack_data;
1386 /* "field" only needed for variants. */
1387 estack_ax(stack, top)->u.ptr.field = NULL;
1388 next_pc += sizeof(struct load_op);
1389 PO;
1390 }
1391
1392 OP(FILTER_OP_GET_SYMBOL):
1393 {
1394 dbg_printk("op get symbol\n");
1395 switch (estack_ax(stack, top)->u.ptr.type) {
1396 case LOAD_OBJECT:
1397 printk(KERN_WARNING "Nested fields not implemented yet.\n");
1398 ret = -EINVAL;
1399 goto end;
1400 case LOAD_ROOT_CONTEXT:
1401 case LOAD_ROOT_APP_CONTEXT:
1402 case LOAD_ROOT_PAYLOAD:
1403 /*
1404 * symbol lookup is performed by
1405 * specialization.
1406 */
1407 ret = -EINVAL;
1408 goto end;
1409 }
1410 next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
1411 PO;
1412 }
1413
1414 OP(FILTER_OP_GET_SYMBOL_FIELD):
1415 {
1416 /*
1417 * Used for first variant encountered in a
1418 * traversal. Variants are not implemented yet.
1419 */
1420 ret = -EINVAL;
1421 goto end;
1422 }
1423
1424 OP(FILTER_OP_GET_INDEX_U16):
1425 {
1426 struct load_op *insn = (struct load_op *) pc;
1427 struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
1428
1429 dbg_printk("op get index u16\n");
1430 ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
1431 if (ret)
1432 goto end;
1433 estack_ax_v = estack_ax(stack, top)->u.v;
1434 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
1435 PO;
1436 }
1437
1438 OP(FILTER_OP_GET_INDEX_U64):
1439 {
1440 struct load_op *insn = (struct load_op *) pc;
1441 struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
1442
1443 dbg_printk("op get index u64\n");
1444 ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
1445 if (ret)
1446 goto end;
1447 estack_ax_v = estack_ax(stack, top)->u.v;
1448 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
1449 PO;
1450 }
1451
1452 OP(FILTER_OP_LOAD_FIELD):
1453 {
1454 dbg_printk("op load field\n");
1455 ret = dynamic_load_field(estack_ax(stack, top));
1456 if (ret)
1457 goto end;
1458 estack_ax_v = estack_ax(stack, top)->u.v;
1459 next_pc += sizeof(struct load_op);
1460 PO;
1461 }
1462
1463 OP(FILTER_OP_LOAD_FIELD_S8):
1464 {
1465 dbg_printk("op load field s8\n");
1466
1467 estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
1468 next_pc += sizeof(struct load_op);
1469 PO;
1470 }
1471 OP(FILTER_OP_LOAD_FIELD_S16):
1472 {
1473 dbg_printk("op load field s16\n");
1474
1475 estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
1476 next_pc += sizeof(struct load_op);
1477 PO;
1478 }
1479 OP(FILTER_OP_LOAD_FIELD_S32):
1480 {
1481 dbg_printk("op load field s32\n");
1482
1483 estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
1484 next_pc += sizeof(struct load_op);
1485 PO;
1486 }
1487 OP(FILTER_OP_LOAD_FIELD_S64):
1488 {
1489 dbg_printk("op load field s64\n");
1490
1491 estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
1492 next_pc += sizeof(struct load_op);
1493 PO;
1494 }
1495 OP(FILTER_OP_LOAD_FIELD_U8):
1496 {
1497 dbg_printk("op load field u8\n");
1498
1499 estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
1500 next_pc += sizeof(struct load_op);
1501 PO;
1502 }
1503 OP(FILTER_OP_LOAD_FIELD_U16):
1504 {
1505 dbg_printk("op load field u16\n");
1506
1507 estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
1508 next_pc += sizeof(struct load_op);
1509 PO;
1510 }
1511 OP(FILTER_OP_LOAD_FIELD_U32):
1512 {
1513 dbg_printk("op load field u32\n");
1514
1515 estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
1516 next_pc += sizeof(struct load_op);
1517 PO;
1518 }
1519 OP(FILTER_OP_LOAD_FIELD_U64):
1520 {
1521 dbg_printk("op load field u64\n");
1522
1523 estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
1524 next_pc += sizeof(struct load_op);
1525 PO;
1526 }
1527 OP(FILTER_OP_LOAD_FIELD_DOUBLE):
1528 {
1529 ret = -EINVAL;
1530 goto end;
1531 }
1532
1533 OP(FILTER_OP_LOAD_FIELD_STRING):
1534 {
1535 const char *str;
1536
1537 dbg_printk("op load field string\n");
1538 str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
1539 estack_ax(stack, top)->u.s.str = str;
1540 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1541 dbg_printk("Filter warning: loading a NULL string.\n");
1542 ret = -EINVAL;
1543 goto end;
1544 }
1545 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1546 estack_ax(stack, top)->u.s.literal_type =
1547 ESTACK_STRING_LITERAL_TYPE_NONE;
1548 next_pc += sizeof(struct load_op);
1549 PO;
1550 }
1551
1552 OP(FILTER_OP_LOAD_FIELD_SEQUENCE):
1553 {
1554 const char *ptr;
1555
1556 dbg_printk("op load field string sequence\n");
1557 ptr = estack_ax(stack, top)->u.ptr.ptr;
1558 estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
1559 estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
1560 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1561 dbg_printk("Filter warning: loading a NULL sequence.\n");
1562 ret = -EINVAL;
1563 goto end;
1564 }
1565 estack_ax(stack, top)->u.s.literal_type =
1566 ESTACK_STRING_LITERAL_TYPE_NONE;
1567 next_pc += sizeof(struct load_op);
1568 PO;
1569 }
1570
1571 END_OP
1572 end:
1573 /* return 0 (discard) on error */
1574 if (ret)
1575 return 0;
1576 return retval;
1577 }
1578
1579 #undef START_OP
1580 #undef OP
1581 #undef PO
1582 #undef END_OP