fix: btrfs: make ordered extent tracepoint take btrfs_inode (v5.10)
[lttng-modules.git] / lttng-filter-interpreter.c
1 /* SPDX-License-Identifier: MIT
2 *
3 * lttng-filter-interpreter.c
4 *
5 * LTTng modules filter interpreter.
6 *
7 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
8 */
9
10 #include <wrapper/uaccess.h>
11 #include <wrapper/objtool.h>
12 #include <wrapper/types.h>
13 #include <linux/swab.h>
14
15 #include <lttng-filter.h>
16 #include <lttng-string-utils.h>
17
18 LTTNG_STACK_FRAME_NON_STANDARD(lttng_filter_interpret_bytecode);
19
20 /*
21 * get_char should be called with page fault handler disabled if it is expected
22 * to handle user-space read.
23 */
24 static
25 char get_char(struct estack_entry *reg, size_t offset)
26 {
27 if (unlikely(offset >= reg->u.s.seq_len))
28 return '\0';
29 if (reg->u.s.user) {
30 char c;
31
32 /* Handle invalid access as end of string. */
33 if (unlikely(!lttng_access_ok(VERIFY_READ,
34 reg->u.s.user_str + offset,
35 sizeof(c))))
36 return '\0';
37 /* Handle fault (nonzero return value) as end of string. */
38 if (unlikely(__copy_from_user_inatomic(&c,
39 reg->u.s.user_str + offset,
40 sizeof(c))))
41 return '\0';
42 return c;
43 } else {
44 return reg->u.s.str[offset];
45 }
46 }
47
48 /*
49 * -1: wildcard found.
50 * -2: unknown escape char.
51 * 0: normal char.
52 */
53 static
54 int parse_char(struct estack_entry *reg, char *c, size_t *offset)
55 {
56 switch (*c) {
57 case '\\':
58 (*offset)++;
59 *c = get_char(reg, *offset);
60 switch (*c) {
61 case '\\':
62 case '*':
63 return 0;
64 default:
65 return -2;
66 }
67 case '*':
68 return -1;
69 default:
70 return 0;
71 }
72 }
73
74 static
75 char get_char_at_cb(size_t at, void *data)
76 {
77 return get_char(data, at);
78 }
79
80 static
81 int stack_star_glob_match(struct estack *stack, int top, const char *cmp_type)
82 {
83 bool has_user = false;
84 int result;
85 struct estack_entry *pattern_reg;
86 struct estack_entry *candidate_reg;
87
88 /* Disable the page fault handler when reading from userspace. */
89 if (estack_bx(stack, top)->u.s.user
90 || estack_ax(stack, top)->u.s.user) {
91 has_user = true;
92 pagefault_disable();
93 }
94
95 /* Find out which side is the pattern vs. the candidate. */
96 if (estack_ax(stack, top)->u.s.literal_type == ESTACK_STRING_LITERAL_TYPE_STAR_GLOB) {
97 pattern_reg = estack_ax(stack, top);
98 candidate_reg = estack_bx(stack, top);
99 } else {
100 pattern_reg = estack_bx(stack, top);
101 candidate_reg = estack_ax(stack, top);
102 }
103
104 /* Perform the match operation. */
105 result = !strutils_star_glob_match_char_cb(get_char_at_cb,
106 pattern_reg, get_char_at_cb, candidate_reg);
107 if (has_user)
108 pagefault_enable();
109
110 return result;
111 }
112
/*
 * strcmp()-like comparison of the BX (left operand) and AX (right
 * operand) string registers at the top of the stack: returns a
 * negative value, 0, or a positive value. Registers holding PLAIN
 * string literals get extra semantics through parse_char(): an
 * unescaped '*' is a wildcard that makes the remainders compare
 * equal, and '\' introduces an escape sequence. cmp_type names the
 * operator being evaluated (unused here).
 */
static
int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
{
	size_t offset_bx = 0, offset_ax = 0;
	int diff, has_user = 0;

	/* Reading user-space strings requires page faults disabled. */
	if (estack_bx(stack, top)->u.s.user
			|| estack_ax(stack, top)->u.s.user) {
		has_user = 1;
		pagefault_disable();
	}

	for (;;) {
		int ret;
		int escaped_r0 = 0;	/* Set when BX char was an unknown escape. */
		char char_bx, char_ax;

		char_bx = get_char(estack_bx(stack, top), offset_bx);
		char_ax = get_char(estack_ax(stack, top), offset_ax);

		if (unlikely(char_bx == '\0')) {
			if (char_ax == '\0') {
				/* Both strings ended: equal. */
				diff = 0;
				break;
			} else {
				if (estack_ax(stack, top)->u.s.literal_type ==
						ESTACK_STRING_LITERAL_TYPE_PLAIN) {
					ret = parse_char(estack_ax(stack, top),
						&char_ax, &offset_ax);
					/* AX wildcard matches the empty BX suffix. */
					if (ret == -1) {
						diff = 0;
						break;
					}
				}
				/* BX is a strict prefix of AX: BX sorts first. */
				diff = -1;
				break;
			}
		}
		if (unlikely(char_ax == '\0')) {
			if (estack_bx(stack, top)->u.s.literal_type ==
					ESTACK_STRING_LITERAL_TYPE_PLAIN) {
				ret = parse_char(estack_bx(stack, top),
					&char_bx, &offset_bx);
				/* BX wildcard matches the empty AX suffix. */
				if (ret == -1) {
					diff = 0;
					break;
				}
			}
			/* AX is a strict prefix of BX: AX sorts first. */
			diff = 1;
			break;
		}
		if (estack_bx(stack, top)->u.s.literal_type ==
				ESTACK_STRING_LITERAL_TYPE_PLAIN) {
			ret = parse_char(estack_bx(stack, top),
				&char_bx, &offset_bx);
			if (ret == -1) {
				/* BX wildcard: remainders compare equal. */
				diff = 0;
				break;
			} else if (ret == -2) {
				escaped_r0 = 1;
			}
			/* else compare both char */
		}
		if (estack_ax(stack, top)->u.s.literal_type ==
				ESTACK_STRING_LITERAL_TYPE_PLAIN) {
			ret = parse_char(estack_ax(stack, top),
				&char_ax, &offset_ax);
			if (ret == -1) {
				/* AX wildcard: remainders compare equal. */
				diff = 0;
				break;
			} else if (ret == -2) {
				/*
				 * Unknown escape on AX side only: the
				 * escaped side sorts after the plain one.
				 */
				if (!escaped_r0) {
					diff = -1;
					break;
				}
			} else {
				/* Unknown escape on BX side only. */
				if (escaped_r0) {
					diff = 1;
					break;
				}
			}
		} else {
			if (escaped_r0) {
				diff = 1;
				break;
			}
		}
		/*
		 * Plain byte comparison (also reached when both sides
		 * carried unknown escapes); keep scanning while equal.
		 */
		diff = char_bx - char_ax;
		if (diff != 0)
			break;
		offset_bx++;
		offset_ax++;
	}
	if (has_user)
		pagefault_enable();

	return diff;
}
211
/*
 * Degenerate filter function: always evaluates to false (0), i.e. the
 * event is discarded regardless of its payload or context.
 */
uint64_t lttng_filter_false(void *filter_data,
		struct lttng_probe_ctx *lttng_probe_ctx,
		const char *filter_stack_data)
{
	return 0;
}
218
#ifdef INTERPRETER_USE_SWITCH

/*
 * Fallback for compilers that do not support taking address of labels.
 *
 * Fix: the instruction stream lives in bytecode->code[]; the ->data
 * area holds specialized per-instruction data (see dynamic_get_index).
 * This variant previously started from &bytecode->data[0], which is
 * inconsistent with the dispatch-table variant below and would iterate
 * the wrong buffer.
 */

#define START_OP \
		start_pc = &bytecode->code[0]; \
		for (pc = next_pc = start_pc; pc - start_pc < bytecode->len; \
				pc = next_pc) { \
			dbg_printk("Executing op %s (%u)\n", \
				lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc), \
				(unsigned int) *(filter_opcode_t *) pc); \
			switch (*(filter_opcode_t *) pc) {

/* Each opcode handler is a switch case. */
#define OP(name) case name

/* End an opcode handler: fall back to the dispatch loop. */
#define PO break

#define END_OP } \
	}

#else

/*
 * Dispatch-table based interpreter, using the GCC labels-as-values
 * (computed goto) extension: each handler jumps straight to the next
 * opcode's handler, with no central loop.
 */

#define START_OP \
		start_pc = &bytecode->code[0]; \
		pc = next_pc = start_pc; \
		if (unlikely(pc - start_pc >= bytecode->len)) \
			goto end; \
		goto *dispatch[*(filter_opcode_t *) pc];

#define OP(name) \
	LABEL_##name

/* End an opcode handler: jump to the handler for the next opcode. */
#define PO \
		pc = next_pc; \
		goto *dispatch[*(filter_opcode_t *) pc];

#define END_OP

#endif
264
265 static int context_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
266 struct load_ptr *ptr,
267 uint32_t idx)
268 {
269
270 struct lttng_ctx_field *ctx_field;
271 struct lttng_event_field *field;
272 union lttng_ctx_value v;
273
274 ctx_field = &lttng_static_ctx->fields[idx];
275 field = &ctx_field->event_field;
276 ptr->type = LOAD_OBJECT;
277 /* field is only used for types nested within variants. */
278 ptr->field = NULL;
279
280 switch (field->type.atype) {
281 case atype_integer:
282 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
283 if (field->type.u.basic.integer.signedness) {
284 ptr->object_type = OBJECT_TYPE_S64;
285 ptr->u.s64 = v.s64;
286 ptr->ptr = &ptr->u.s64;
287 } else {
288 ptr->object_type = OBJECT_TYPE_U64;
289 ptr->u.u64 = v.s64; /* Cast. */
290 ptr->ptr = &ptr->u.u64;
291 }
292 break;
293 case atype_enum:
294 {
295 const struct lttng_integer_type *itype =
296 &field->type.u.basic.enumeration.container_type;
297
298 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
299 if (itype->signedness) {
300 ptr->object_type = OBJECT_TYPE_S64;
301 ptr->u.s64 = v.s64;
302 ptr->ptr = &ptr->u.s64;
303 } else {
304 ptr->object_type = OBJECT_TYPE_U64;
305 ptr->u.u64 = v.s64; /* Cast. */
306 ptr->ptr = &ptr->u.u64;
307 }
308 break;
309 }
310 case atype_array:
311 if (field->type.u.array.elem_type.atype != atype_integer) {
312 printk(KERN_WARNING "Array nesting only supports integer types.\n");
313 return -EINVAL;
314 }
315 if (field->type.u.array.elem_type.u.basic.integer.encoding == lttng_encode_none) {
316 printk(KERN_WARNING "Only string arrays are supported for contexts.\n");
317 return -EINVAL;
318 }
319 ptr->object_type = OBJECT_TYPE_STRING;
320 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
321 ptr->ptr = v.str;
322 break;
323 case atype_sequence:
324 if (field->type.u.sequence.elem_type.atype != atype_integer) {
325 printk(KERN_WARNING "Sequence nesting only supports integer types.\n");
326 return -EINVAL;
327 }
328 if (field->type.u.sequence.elem_type.u.basic.integer.encoding == lttng_encode_none) {
329 printk(KERN_WARNING "Only string sequences are supported for contexts.\n");
330 return -EINVAL;
331 }
332 ptr->object_type = OBJECT_TYPE_STRING;
333 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
334 ptr->ptr = v.str;
335 break;
336 case atype_array_bitfield:
337 printk(KERN_WARNING "Bitfield array type is not supported.\n");
338 return -EINVAL;
339 case atype_sequence_bitfield:
340 printk(KERN_WARNING "Bitfield sequence type is not supported.\n");
341 return -EINVAL;
342 case atype_string:
343 ptr->object_type = OBJECT_TYPE_STRING;
344 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
345 ptr->ptr = v.str;
346 break;
347 case atype_struct:
348 printk(KERN_WARNING "Structure type cannot be loaded.\n");
349 return -EINVAL;
350 default:
351 printk(KERN_WARNING "Unknown type: %d", (int) field->type.atype);
352 return -EINVAL;
353 }
354 return 0;
355 }
356
/*
 * Apply a "get index" operation to the object referenced by stack_top,
 * updating the stack entry in place to reference the selected array or
 * sequence element, context field, or payload field. Per-instruction
 * data (gid) is fetched from the runtime data area at the given index.
 * Returns 0 on success, -EINVAL on unsupported types or out-of-bounds
 * sequence access.
 */
static int dynamic_get_index(struct lttng_probe_ctx *lttng_probe_ctx,
		struct bytecode_runtime *runtime,
		uint64_t index, struct estack_entry *stack_top)
{
	int ret;
	const struct filter_get_index_data *gid;

	/*
	 * Types nested within variants need to perform dynamic lookup
	 * based on the field descriptions. LTTng-UST does not implement
	 * variants for now.
	 */
	if (stack_top->u.ptr.field)
		return -EINVAL;
	gid = (const struct filter_get_index_data *) &runtime->data[index];
	switch (stack_top->u.ptr.type) {
	case LOAD_OBJECT:
		switch (stack_top->u.ptr.object_type) {
		case OBJECT_TYPE_ARRAY:
		{
			const char *ptr;

			/*
			 * Fixed-length array: the offset is presumably
			 * validated earlier (at specialization) — warn,
			 * do not fail, if it is out of bounds.
			 */
			WARN_ON_ONCE(gid->offset >= gid->array_len);
			/* Skip count (unsigned long) */
			ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
			ptr = ptr + gid->offset;
			stack_top->u.ptr.ptr = ptr;
			stack_top->u.ptr.object_type = gid->elem.type;
			stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
			/* field is only used for types nested within variants. */
			stack_top->u.ptr.field = NULL;
			break;
		}
		case OBJECT_TYPE_SEQUENCE:
		{
			const char *ptr;
			size_t ptr_seq_len;

			/* Layout: length (unsigned long) followed by data pointer. */
			ptr = *(const char **) (stack_top->u.ptr.ptr + sizeof(unsigned long));
			ptr_seq_len = *(unsigned long *) stack_top->u.ptr.ptr;
			/* Sequence length is only known at runtime: bound-check now. */
			if (gid->offset >= gid->elem.len * ptr_seq_len) {
				ret = -EINVAL;
				goto end;
			}
			ptr = ptr + gid->offset;
			stack_top->u.ptr.ptr = ptr;
			stack_top->u.ptr.object_type = gid->elem.type;
			stack_top->u.ptr.rev_bo = gid->elem.rev_bo;
			/* field is only used for types nested within variants. */
			stack_top->u.ptr.field = NULL;
			break;
		}
		case OBJECT_TYPE_STRUCT:
			printk(KERN_WARNING "Nested structures are not supported yet.\n");
			ret = -EINVAL;
			goto end;
		case OBJECT_TYPE_VARIANT:
		default:
			printk(KERN_WARNING "Unexpected get index type %d",
				(int) stack_top->u.ptr.object_type);
			ret = -EINVAL;
			goto end;
		}
		break;
	case LOAD_ROOT_CONTEXT:
	case LOAD_ROOT_APP_CONTEXT:	/* Fall-through */
	{
		/* Index into the static context field table. */
		ret = context_get_index(lttng_probe_ctx,
				&stack_top->u.ptr,
				gid->ctx_index);
		if (ret) {
			goto end;
		}
		break;
	}
	case LOAD_ROOT_PAYLOAD:
		stack_top->u.ptr.ptr += gid->offset;
		/* Payload strings are recorded as a pointer to the data. */
		if (gid->elem.type == OBJECT_TYPE_STRING)
			stack_top->u.ptr.ptr = *(const char * const *) stack_top->u.ptr.ptr;
		stack_top->u.ptr.object_type = gid->elem.type;
		stack_top->u.ptr.type = LOAD_OBJECT;
		/* field is only used for types nested within variants. */
		stack_top->u.ptr.field = NULL;
		break;
	}
	return 0;

end:
	return ret;
}
447
448 static int dynamic_load_field(struct estack_entry *stack_top)
449 {
450 int ret;
451
452 switch (stack_top->u.ptr.type) {
453 case LOAD_OBJECT:
454 break;
455 case LOAD_ROOT_CONTEXT:
456 case LOAD_ROOT_APP_CONTEXT:
457 case LOAD_ROOT_PAYLOAD:
458 default:
459 dbg_printk("Filter warning: cannot load root, missing field name.\n");
460 ret = -EINVAL;
461 goto end;
462 }
463 switch (stack_top->u.ptr.object_type) {
464 case OBJECT_TYPE_S8:
465 dbg_printk("op load field s8\n");
466 stack_top->u.v = *(int8_t *) stack_top->u.ptr.ptr;
467 break;
468 case OBJECT_TYPE_S16:
469 {
470 int16_t tmp;
471
472 dbg_printk("op load field s16\n");
473 tmp = *(int16_t *) stack_top->u.ptr.ptr;
474 if (stack_top->u.ptr.rev_bo)
475 __swab16s(&tmp);
476 stack_top->u.v = tmp;
477 break;
478 }
479 case OBJECT_TYPE_S32:
480 {
481 int32_t tmp;
482
483 dbg_printk("op load field s32\n");
484 tmp = *(int32_t *) stack_top->u.ptr.ptr;
485 if (stack_top->u.ptr.rev_bo)
486 __swab32s(&tmp);
487 stack_top->u.v = tmp;
488 break;
489 }
490 case OBJECT_TYPE_S64:
491 {
492 int64_t tmp;
493
494 dbg_printk("op load field s64\n");
495 tmp = *(int64_t *) stack_top->u.ptr.ptr;
496 if (stack_top->u.ptr.rev_bo)
497 __swab64s(&tmp);
498 stack_top->u.v = tmp;
499 break;
500 }
501 case OBJECT_TYPE_U8:
502 dbg_printk("op load field u8\n");
503 stack_top->u.v = *(uint8_t *) stack_top->u.ptr.ptr;
504 break;
505 case OBJECT_TYPE_U16:
506 {
507 uint16_t tmp;
508
509 dbg_printk("op load field s16\n");
510 tmp = *(uint16_t *) stack_top->u.ptr.ptr;
511 if (stack_top->u.ptr.rev_bo)
512 __swab16s(&tmp);
513 stack_top->u.v = tmp;
514 break;
515 }
516 case OBJECT_TYPE_U32:
517 {
518 uint32_t tmp;
519
520 dbg_printk("op load field u32\n");
521 tmp = *(uint32_t *) stack_top->u.ptr.ptr;
522 if (stack_top->u.ptr.rev_bo)
523 __swab32s(&tmp);
524 stack_top->u.v = tmp;
525 break;
526 }
527 case OBJECT_TYPE_U64:
528 {
529 uint64_t tmp;
530
531 dbg_printk("op load field u64\n");
532 tmp = *(uint64_t *) stack_top->u.ptr.ptr;
533 if (stack_top->u.ptr.rev_bo)
534 __swab64s(&tmp);
535 stack_top->u.v = tmp;
536 break;
537 }
538 case OBJECT_TYPE_STRING:
539 {
540 const char *str;
541
542 dbg_printk("op load field string\n");
543 str = (const char *) stack_top->u.ptr.ptr;
544 stack_top->u.s.str = str;
545 if (unlikely(!stack_top->u.s.str)) {
546 dbg_printk("Filter warning: loading a NULL string.\n");
547 ret = -EINVAL;
548 goto end;
549 }
550 stack_top->u.s.seq_len = LTTNG_SIZE_MAX;
551 stack_top->u.s.literal_type =
552 ESTACK_STRING_LITERAL_TYPE_NONE;
553 break;
554 }
555 case OBJECT_TYPE_STRING_SEQUENCE:
556 {
557 const char *ptr;
558
559 dbg_printk("op load field string sequence\n");
560 ptr = stack_top->u.ptr.ptr;
561 stack_top->u.s.seq_len = *(unsigned long *) ptr;
562 stack_top->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
563 if (unlikely(!stack_top->u.s.str)) {
564 dbg_printk("Filter warning: loading a NULL sequence.\n");
565 ret = -EINVAL;
566 goto end;
567 }
568 stack_top->u.s.literal_type =
569 ESTACK_STRING_LITERAL_TYPE_NONE;
570 break;
571 }
572 case OBJECT_TYPE_DYNAMIC:
573 /*
574 * Dynamic types in context are looked up
575 * by context get index.
576 */
577 ret = -EINVAL;
578 goto end;
579 case OBJECT_TYPE_DOUBLE:
580 ret = -EINVAL;
581 goto end;
582 case OBJECT_TYPE_SEQUENCE:
583 case OBJECT_TYPE_ARRAY:
584 case OBJECT_TYPE_STRUCT:
585 case OBJECT_TYPE_VARIANT:
586 printk(KERN_WARNING "Sequences, arrays, struct and variant cannot be loaded (nested types).\n");
587 ret = -EINVAL;
588 goto end;
589 }
590 return 0;
591
592 end:
593 return ret;
594 }
595
596 /*
597 * Return 0 (discard), or raise the 0x1 flag (log event).
598 * Currently, other flags are kept for future extensions and have no
599 * effect.
600 */
601 uint64_t lttng_filter_interpret_bytecode(void *filter_data,
602 struct lttng_probe_ctx *lttng_probe_ctx,
603 const char *filter_stack_data)
604 {
605 struct bytecode_runtime *bytecode = filter_data;
606 void *pc, *next_pc, *start_pc;
607 int ret = -EINVAL;
608 uint64_t retval = 0;
609 struct estack _stack;
610 struct estack *stack = &_stack;
611 register int64_t ax = 0, bx = 0;
612 register int top = FILTER_STACK_EMPTY;
613 #ifndef INTERPRETER_USE_SWITCH
614 static void *dispatch[NR_FILTER_OPS] = {
615 [ FILTER_OP_UNKNOWN ] = &&LABEL_FILTER_OP_UNKNOWN,
616
617 [ FILTER_OP_RETURN ] = &&LABEL_FILTER_OP_RETURN,
618
619 /* binary */
620 [ FILTER_OP_MUL ] = &&LABEL_FILTER_OP_MUL,
621 [ FILTER_OP_DIV ] = &&LABEL_FILTER_OP_DIV,
622 [ FILTER_OP_MOD ] = &&LABEL_FILTER_OP_MOD,
623 [ FILTER_OP_PLUS ] = &&LABEL_FILTER_OP_PLUS,
624 [ FILTER_OP_MINUS ] = &&LABEL_FILTER_OP_MINUS,
625 [ FILTER_OP_BIT_RSHIFT ] = &&LABEL_FILTER_OP_BIT_RSHIFT,
626 [ FILTER_OP_BIT_LSHIFT ] = &&LABEL_FILTER_OP_BIT_LSHIFT,
627 [ FILTER_OP_BIT_AND ] = &&LABEL_FILTER_OP_BIT_AND,
628 [ FILTER_OP_BIT_OR ] = &&LABEL_FILTER_OP_BIT_OR,
629 [ FILTER_OP_BIT_XOR ] = &&LABEL_FILTER_OP_BIT_XOR,
630
631 /* binary comparators */
632 [ FILTER_OP_EQ ] = &&LABEL_FILTER_OP_EQ,
633 [ FILTER_OP_NE ] = &&LABEL_FILTER_OP_NE,
634 [ FILTER_OP_GT ] = &&LABEL_FILTER_OP_GT,
635 [ FILTER_OP_LT ] = &&LABEL_FILTER_OP_LT,
636 [ FILTER_OP_GE ] = &&LABEL_FILTER_OP_GE,
637 [ FILTER_OP_LE ] = &&LABEL_FILTER_OP_LE,
638
639 /* string binary comparator */
640 [ FILTER_OP_EQ_STRING ] = &&LABEL_FILTER_OP_EQ_STRING,
641 [ FILTER_OP_NE_STRING ] = &&LABEL_FILTER_OP_NE_STRING,
642 [ FILTER_OP_GT_STRING ] = &&LABEL_FILTER_OP_GT_STRING,
643 [ FILTER_OP_LT_STRING ] = &&LABEL_FILTER_OP_LT_STRING,
644 [ FILTER_OP_GE_STRING ] = &&LABEL_FILTER_OP_GE_STRING,
645 [ FILTER_OP_LE_STRING ] = &&LABEL_FILTER_OP_LE_STRING,
646
647 /* globbing pattern binary comparator */
648 [ FILTER_OP_EQ_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_EQ_STAR_GLOB_STRING,
649 [ FILTER_OP_NE_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_NE_STAR_GLOB_STRING,
650
651 /* s64 binary comparator */
652 [ FILTER_OP_EQ_S64 ] = &&LABEL_FILTER_OP_EQ_S64,
653 [ FILTER_OP_NE_S64 ] = &&LABEL_FILTER_OP_NE_S64,
654 [ FILTER_OP_GT_S64 ] = &&LABEL_FILTER_OP_GT_S64,
655 [ FILTER_OP_LT_S64 ] = &&LABEL_FILTER_OP_LT_S64,
656 [ FILTER_OP_GE_S64 ] = &&LABEL_FILTER_OP_GE_S64,
657 [ FILTER_OP_LE_S64 ] = &&LABEL_FILTER_OP_LE_S64,
658
659 /* double binary comparator */
660 [ FILTER_OP_EQ_DOUBLE ] = &&LABEL_FILTER_OP_EQ_DOUBLE,
661 [ FILTER_OP_NE_DOUBLE ] = &&LABEL_FILTER_OP_NE_DOUBLE,
662 [ FILTER_OP_GT_DOUBLE ] = &&LABEL_FILTER_OP_GT_DOUBLE,
663 [ FILTER_OP_LT_DOUBLE ] = &&LABEL_FILTER_OP_LT_DOUBLE,
664 [ FILTER_OP_GE_DOUBLE ] = &&LABEL_FILTER_OP_GE_DOUBLE,
665 [ FILTER_OP_LE_DOUBLE ] = &&LABEL_FILTER_OP_LE_DOUBLE,
666
667 /* Mixed S64-double binary comparators */
668 [ FILTER_OP_EQ_DOUBLE_S64 ] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64,
669 [ FILTER_OP_NE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_NE_DOUBLE_S64,
670 [ FILTER_OP_GT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GT_DOUBLE_S64,
671 [ FILTER_OP_LT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LT_DOUBLE_S64,
672 [ FILTER_OP_GE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GE_DOUBLE_S64,
673 [ FILTER_OP_LE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LE_DOUBLE_S64,
674
675 [ FILTER_OP_EQ_S64_DOUBLE ] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE,
676 [ FILTER_OP_NE_S64_DOUBLE ] = &&LABEL_FILTER_OP_NE_S64_DOUBLE,
677 [ FILTER_OP_GT_S64_DOUBLE ] = &&LABEL_FILTER_OP_GT_S64_DOUBLE,
678 [ FILTER_OP_LT_S64_DOUBLE ] = &&LABEL_FILTER_OP_LT_S64_DOUBLE,
679 [ FILTER_OP_GE_S64_DOUBLE ] = &&LABEL_FILTER_OP_GE_S64_DOUBLE,
680 [ FILTER_OP_LE_S64_DOUBLE ] = &&LABEL_FILTER_OP_LE_S64_DOUBLE,
681
682 /* unary */
683 [ FILTER_OP_UNARY_PLUS ] = &&LABEL_FILTER_OP_UNARY_PLUS,
684 [ FILTER_OP_UNARY_MINUS ] = &&LABEL_FILTER_OP_UNARY_MINUS,
685 [ FILTER_OP_UNARY_NOT ] = &&LABEL_FILTER_OP_UNARY_NOT,
686 [ FILTER_OP_UNARY_PLUS_S64 ] = &&LABEL_FILTER_OP_UNARY_PLUS_S64,
687 [ FILTER_OP_UNARY_MINUS_S64 ] = &&LABEL_FILTER_OP_UNARY_MINUS_S64,
688 [ FILTER_OP_UNARY_NOT_S64 ] = &&LABEL_FILTER_OP_UNARY_NOT_S64,
689 [ FILTER_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE,
690 [ FILTER_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE,
691 [ FILTER_OP_UNARY_NOT_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE,
692
693 /* logical */
694 [ FILTER_OP_AND ] = &&LABEL_FILTER_OP_AND,
695 [ FILTER_OP_OR ] = &&LABEL_FILTER_OP_OR,
696
697 /* load field ref */
698 [ FILTER_OP_LOAD_FIELD_REF ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF,
699 [ FILTER_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING,
700 [ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE,
701 [ FILTER_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64,
702 [ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE,
703
704 /* load from immediate operand */
705 [ FILTER_OP_LOAD_STRING ] = &&LABEL_FILTER_OP_LOAD_STRING,
706 [ FILTER_OP_LOAD_STAR_GLOB_STRING ] = &&LABEL_FILTER_OP_LOAD_STAR_GLOB_STRING,
707 [ FILTER_OP_LOAD_S64 ] = &&LABEL_FILTER_OP_LOAD_S64,
708 [ FILTER_OP_LOAD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_DOUBLE,
709
710 /* cast */
711 [ FILTER_OP_CAST_TO_S64 ] = &&LABEL_FILTER_OP_CAST_TO_S64,
712 [ FILTER_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64,
713 [ FILTER_OP_CAST_NOP ] = &&LABEL_FILTER_OP_CAST_NOP,
714
715 /* get context ref */
716 [ FILTER_OP_GET_CONTEXT_REF ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF,
717 [ FILTER_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING,
718 [ FILTER_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64,
719 [ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE,
720
721 /* load userspace field ref */
722 [ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING,
723 [ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE,
724
725 /* Instructions for recursive traversal through composed types. */
726 [ FILTER_OP_GET_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_CONTEXT_ROOT,
727 [ FILTER_OP_GET_APP_CONTEXT_ROOT ] = &&LABEL_FILTER_OP_GET_APP_CONTEXT_ROOT,
728 [ FILTER_OP_GET_PAYLOAD_ROOT ] = &&LABEL_FILTER_OP_GET_PAYLOAD_ROOT,
729
730 [ FILTER_OP_GET_SYMBOL ] = &&LABEL_FILTER_OP_GET_SYMBOL,
731 [ FILTER_OP_GET_SYMBOL_FIELD ] = &&LABEL_FILTER_OP_GET_SYMBOL_FIELD,
732 [ FILTER_OP_GET_INDEX_U16 ] = &&LABEL_FILTER_OP_GET_INDEX_U16,
733 [ FILTER_OP_GET_INDEX_U64 ] = &&LABEL_FILTER_OP_GET_INDEX_U64,
734
735 [ FILTER_OP_LOAD_FIELD ] = &&LABEL_FILTER_OP_LOAD_FIELD,
736 [ FILTER_OP_LOAD_FIELD_S8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S8,
737 [ FILTER_OP_LOAD_FIELD_S16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S16,
738 [ FILTER_OP_LOAD_FIELD_S32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S32,
739 [ FILTER_OP_LOAD_FIELD_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_S64,
740 [ FILTER_OP_LOAD_FIELD_U8 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U8,
741 [ FILTER_OP_LOAD_FIELD_U16 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U16,
742 [ FILTER_OP_LOAD_FIELD_U32 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U32,
743 [ FILTER_OP_LOAD_FIELD_U64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_U64,
744 [ FILTER_OP_LOAD_FIELD_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_STRING,
745 [ FILTER_OP_LOAD_FIELD_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_SEQUENCE,
746 [ FILTER_OP_LOAD_FIELD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_DOUBLE,
747
748 [ FILTER_OP_UNARY_BIT_NOT ] = &&LABEL_FILTER_OP_UNARY_BIT_NOT,
749
750 [ FILTER_OP_RETURN_S64 ] = &&LABEL_FILTER_OP_RETURN_S64,
751 };
752 #endif /* #ifndef INTERPRETER_USE_SWITCH */
753
754 START_OP
755
756 OP(FILTER_OP_UNKNOWN):
757 OP(FILTER_OP_LOAD_FIELD_REF):
758 OP(FILTER_OP_GET_CONTEXT_REF):
759 #ifdef INTERPRETER_USE_SWITCH
760 default:
761 #endif /* INTERPRETER_USE_SWITCH */
762 printk(KERN_WARNING "unknown bytecode op %u\n",
763 (unsigned int) *(filter_opcode_t *) pc);
764 ret = -EINVAL;
765 goto end;
766
767 OP(FILTER_OP_RETURN):
768 OP(FILTER_OP_RETURN_S64):
769 /* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
770 retval = !!estack_ax_v;
771 ret = 0;
772 goto end;
773
774 /* binary */
775 OP(FILTER_OP_MUL):
776 OP(FILTER_OP_DIV):
777 OP(FILTER_OP_MOD):
778 OP(FILTER_OP_PLUS):
779 OP(FILTER_OP_MINUS):
780 printk(KERN_WARNING "unsupported bytecode op %u\n",
781 (unsigned int) *(filter_opcode_t *) pc);
782 ret = -EINVAL;
783 goto end;
784
785 OP(FILTER_OP_EQ):
786 OP(FILTER_OP_NE):
787 OP(FILTER_OP_GT):
788 OP(FILTER_OP_LT):
789 OP(FILTER_OP_GE):
790 OP(FILTER_OP_LE):
791 printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
792 (unsigned int) *(filter_opcode_t *) pc);
793 ret = -EINVAL;
794 goto end;
795
796 OP(FILTER_OP_EQ_STRING):
797 {
798 int res;
799
800 res = (stack_strcmp(stack, top, "==") == 0);
801 estack_pop(stack, top, ax, bx);
802 estack_ax_v = res;
803 next_pc += sizeof(struct binary_op);
804 PO;
805 }
806 OP(FILTER_OP_NE_STRING):
807 {
808 int res;
809
810 res = (stack_strcmp(stack, top, "!=") != 0);
811 estack_pop(stack, top, ax, bx);
812 estack_ax_v = res;
813 next_pc += sizeof(struct binary_op);
814 PO;
815 }
816 OP(FILTER_OP_GT_STRING):
817 {
818 int res;
819
820 res = (stack_strcmp(stack, top, ">") > 0);
821 estack_pop(stack, top, ax, bx);
822 estack_ax_v = res;
823 next_pc += sizeof(struct binary_op);
824 PO;
825 }
826 OP(FILTER_OP_LT_STRING):
827 {
828 int res;
829
830 res = (stack_strcmp(stack, top, "<") < 0);
831 estack_pop(stack, top, ax, bx);
832 estack_ax_v = res;
833 next_pc += sizeof(struct binary_op);
834 PO;
835 }
836 OP(FILTER_OP_GE_STRING):
837 {
838 int res;
839
840 res = (stack_strcmp(stack, top, ">=") >= 0);
841 estack_pop(stack, top, ax, bx);
842 estack_ax_v = res;
843 next_pc += sizeof(struct binary_op);
844 PO;
845 }
846 OP(FILTER_OP_LE_STRING):
847 {
848 int res;
849
850 res = (stack_strcmp(stack, top, "<=") <= 0);
851 estack_pop(stack, top, ax, bx);
852 estack_ax_v = res;
853 next_pc += sizeof(struct binary_op);
854 PO;
855 }
856
857 OP(FILTER_OP_EQ_STAR_GLOB_STRING):
858 {
859 int res;
860
861 res = (stack_star_glob_match(stack, top, "==") == 0);
862 estack_pop(stack, top, ax, bx);
863 estack_ax_v = res;
864 next_pc += sizeof(struct binary_op);
865 PO;
866 }
867 OP(FILTER_OP_NE_STAR_GLOB_STRING):
868 {
869 int res;
870
871 res = (stack_star_glob_match(stack, top, "!=") != 0);
872 estack_pop(stack, top, ax, bx);
873 estack_ax_v = res;
874 next_pc += sizeof(struct binary_op);
875 PO;
876 }
877
878 OP(FILTER_OP_EQ_S64):
879 {
880 int res;
881
882 res = (estack_bx_v == estack_ax_v);
883 estack_pop(stack, top, ax, bx);
884 estack_ax_v = res;
885 next_pc += sizeof(struct binary_op);
886 PO;
887 }
888 OP(FILTER_OP_NE_S64):
889 {
890 int res;
891
892 res = (estack_bx_v != estack_ax_v);
893 estack_pop(stack, top, ax, bx);
894 estack_ax_v = res;
895 next_pc += sizeof(struct binary_op);
896 PO;
897 }
898 OP(FILTER_OP_GT_S64):
899 {
900 int res;
901
902 res = (estack_bx_v > estack_ax_v);
903 estack_pop(stack, top, ax, bx);
904 estack_ax_v = res;
905 next_pc += sizeof(struct binary_op);
906 PO;
907 }
908 OP(FILTER_OP_LT_S64):
909 {
910 int res;
911
912 res = (estack_bx_v < estack_ax_v);
913 estack_pop(stack, top, ax, bx);
914 estack_ax_v = res;
915 next_pc += sizeof(struct binary_op);
916 PO;
917 }
918 OP(FILTER_OP_GE_S64):
919 {
920 int res;
921
922 res = (estack_bx_v >= estack_ax_v);
923 estack_pop(stack, top, ax, bx);
924 estack_ax_v = res;
925 next_pc += sizeof(struct binary_op);
926 PO;
927 }
928 OP(FILTER_OP_LE_S64):
929 {
930 int res;
931
932 res = (estack_bx_v <= estack_ax_v);
933 estack_pop(stack, top, ax, bx);
934 estack_ax_v = res;
935 next_pc += sizeof(struct binary_op);
936 PO;
937 }
938
939 OP(FILTER_OP_EQ_DOUBLE):
940 OP(FILTER_OP_NE_DOUBLE):
941 OP(FILTER_OP_GT_DOUBLE):
942 OP(FILTER_OP_LT_DOUBLE):
943 OP(FILTER_OP_GE_DOUBLE):
944 OP(FILTER_OP_LE_DOUBLE):
945 {
946 BUG_ON(1);
947 PO;
948 }
949
950 /* Mixed S64-double binary comparators */
951 OP(FILTER_OP_EQ_DOUBLE_S64):
952 OP(FILTER_OP_NE_DOUBLE_S64):
953 OP(FILTER_OP_GT_DOUBLE_S64):
954 OP(FILTER_OP_LT_DOUBLE_S64):
955 OP(FILTER_OP_GE_DOUBLE_S64):
956 OP(FILTER_OP_LE_DOUBLE_S64):
957 OP(FILTER_OP_EQ_S64_DOUBLE):
958 OP(FILTER_OP_NE_S64_DOUBLE):
959 OP(FILTER_OP_GT_S64_DOUBLE):
960 OP(FILTER_OP_LT_S64_DOUBLE):
961 OP(FILTER_OP_GE_S64_DOUBLE):
962 OP(FILTER_OP_LE_S64_DOUBLE):
963 {
964 BUG_ON(1);
965 PO;
966 }
967 OP(FILTER_OP_BIT_RSHIFT):
968 {
969 int64_t res;
970
971 /* Catch undefined behavior. */
972 if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
973 ret = -EINVAL;
974 goto end;
975 }
976 res = ((uint64_t) estack_bx_v >> (uint32_t) estack_ax_v);
977 estack_pop(stack, top, ax, bx);
978 estack_ax_v = res;
979 next_pc += sizeof(struct binary_op);
980 PO;
981 }
982 OP(FILTER_OP_BIT_LSHIFT):
983 {
984 int64_t res;
985
986 /* Catch undefined behavior. */
987 if (unlikely(estack_ax_v < 0 || estack_ax_v >= 64)) {
988 ret = -EINVAL;
989 goto end;
990 }
991 res = ((uint64_t) estack_bx_v << (uint32_t) estack_ax_v);
992 estack_pop(stack, top, ax, bx);
993 estack_ax_v = res;
994 next_pc += sizeof(struct binary_op);
995 PO;
996 }
997 OP(FILTER_OP_BIT_AND):
998 {
999 int64_t res;
1000
1001 res = ((uint64_t) estack_bx_v & (uint64_t) estack_ax_v);
1002 estack_pop(stack, top, ax, bx);
1003 estack_ax_v = res;
1004 next_pc += sizeof(struct binary_op);
1005 PO;
1006 }
1007 OP(FILTER_OP_BIT_OR):
1008 {
1009 int64_t res;
1010
1011 res = ((uint64_t) estack_bx_v | (uint64_t) estack_ax_v);
1012 estack_pop(stack, top, ax, bx);
1013 estack_ax_v = res;
1014 next_pc += sizeof(struct binary_op);
1015 PO;
1016 }
1017 OP(FILTER_OP_BIT_XOR):
1018 {
1019 int64_t res;
1020
1021 res = ((uint64_t) estack_bx_v ^ (uint64_t) estack_ax_v);
1022 estack_pop(stack, top, ax, bx);
1023 estack_ax_v = res;
1024 next_pc += sizeof(struct binary_op);
1025 PO;
1026 }
1027
1028 /* unary */
1029 OP(FILTER_OP_UNARY_PLUS):
1030 OP(FILTER_OP_UNARY_MINUS):
1031 OP(FILTER_OP_UNARY_NOT):
1032 printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
1033 (unsigned int) *(filter_opcode_t *) pc);
1034 ret = -EINVAL;
1035 goto end;
1036
1037
1038 OP(FILTER_OP_UNARY_BIT_NOT):
1039 {
1040 estack_ax_v = ~(uint64_t) estack_ax_v;
1041 next_pc += sizeof(struct unary_op);
1042 PO;
1043 }
1044
1045 OP(FILTER_OP_UNARY_PLUS_S64):
1046 {
1047 next_pc += sizeof(struct unary_op);
1048 PO;
1049 }
1050 OP(FILTER_OP_UNARY_MINUS_S64):
1051 {
1052 estack_ax_v = -estack_ax_v;
1053 next_pc += sizeof(struct unary_op);
1054 PO;
1055 }
1056 OP(FILTER_OP_UNARY_PLUS_DOUBLE):
1057 OP(FILTER_OP_UNARY_MINUS_DOUBLE):
1058 {
1059 BUG_ON(1);
1060 PO;
1061 }
1062 OP(FILTER_OP_UNARY_NOT_S64):
1063 {
1064 estack_ax_v = !estack_ax_v;
1065 next_pc += sizeof(struct unary_op);
1066 PO;
1067 }
1068 OP(FILTER_OP_UNARY_NOT_DOUBLE):
1069 {
1070 BUG_ON(1);
1071 PO;
1072 }
1073
1074 /* logical */
1075 OP(FILTER_OP_AND):
1076 {
1077 struct logical_op *insn = (struct logical_op *) pc;
1078
1079 /* If AX is 0, skip and evaluate to 0 */
1080 if (unlikely(estack_ax_v == 0)) {
1081 dbg_printk("Jumping to bytecode offset %u\n",
1082 (unsigned int) insn->skip_offset);
1083 next_pc = start_pc + insn->skip_offset;
1084 } else {
1085 /* Pop 1 when jump not taken */
1086 estack_pop(stack, top, ax, bx);
1087 next_pc += sizeof(struct logical_op);
1088 }
1089 PO;
1090 }
1091 OP(FILTER_OP_OR):
1092 {
1093 struct logical_op *insn = (struct logical_op *) pc;
1094
1095 /* If AX is nonzero, skip and evaluate to 1 */
1096
1097 if (unlikely(estack_ax_v != 0)) {
1098 estack_ax_v = 1;
1099 dbg_printk("Jumping to bytecode offset %u\n",
1100 (unsigned int) insn->skip_offset);
1101 next_pc = start_pc + insn->skip_offset;
1102 } else {
1103 /* Pop 1 when jump not taken */
1104 estack_pop(stack, top, ax, bx);
1105 next_pc += sizeof(struct logical_op);
1106 }
1107 PO;
1108 }
1109
1110
1111 /* load field ref */
/*
 * Push a kernel-space NUL-terminated string field (referenced by its
 * offset into filter_stack_data) onto the estack.  A NULL pointer is
 * treated as an error, discarding the event.
 */
1112 OP(FILTER_OP_LOAD_FIELD_REF_STRING):
1113 {
1114 struct load_op *insn = (struct load_op *) pc;
1115 struct field_ref *ref = (struct field_ref *) insn->data;
1116
1117 dbg_printk("load field ref offset %u type string\n",
1118 ref->offset);
1119 estack_push(stack, top, ax, bx);
1120 estack_ax(stack, top)->u.s.str =
1121 *(const char * const *) &filter_stack_data[ref->offset];
1122 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1123 dbg_printk("Filter warning: loading a NULL string.\n");
1124 ret = -EINVAL;
1125 goto end;
1126 }
/* LTTNG_SIZE_MAX: length unknown, rely on the NUL terminator. */
1127 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1128 estack_ax(stack, top)->u.s.literal_type =
1129 ESTACK_STRING_LITERAL_TYPE_NONE;
1130 estack_ax(stack, top)->u.s.user = 0;
1131 dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
1132 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1133 PO;
1134 }
1135
/*
 * Push a kernel-space sequence field onto the estack.  The stack data
 * layout is an unsigned long length followed by a pointer to the
 * sequence payload.
 */
1136 OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE):
1137 {
1138 struct load_op *insn = (struct load_op *) pc;
1139 struct field_ref *ref = (struct field_ref *) insn->data;
1140
1141 dbg_printk("load field ref offset %u type sequence\n",
1142 ref->offset);
1143 estack_push(stack, top, ax, bx);
1144 estack_ax(stack, top)->u.s.seq_len =
1145 *(unsigned long *) &filter_stack_data[ref->offset];
1146 estack_ax(stack, top)->u.s.str =
1147 *(const char **) (&filter_stack_data[ref->offset
1148 + sizeof(unsigned long)]);
1149 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1150 dbg_printk("Filter warning: loading a NULL sequence.\n");
1151 ret = -EINVAL;
1152 goto end;
1153 }
1154 estack_ax(stack, top)->u.s.literal_type =
1155 ESTACK_STRING_LITERAL_TYPE_NONE;
1156 estack_ax(stack, top)->u.s.user = 0;
1157 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1158 PO;
1159 }
1160
/* Push a 64-bit signed integer field onto the estack accumulator. */
1161 OP(FILTER_OP_LOAD_FIELD_REF_S64):
1162 {
1163 struct load_op *insn = (struct load_op *) pc;
1164 struct field_ref *ref = (struct field_ref *) insn->data;
1165
1166 dbg_printk("load field ref offset %u type s64\n",
1167 ref->offset);
1168 estack_push(stack, top, ax, bx);
1169 estack_ax_v =
1170 ((struct literal_numeric *) &filter_stack_data[ref->offset])->v;
1171 dbg_printk("ref load s64 %lld\n",
1172 (long long) estack_ax_v);
1173 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1174 PO;
1175 }
1176
/* Double fields are unsupported in the kernel interpreter. */
1177 OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE):
1178 {
1179 BUG_ON(1);
1180 PO;
1181 }
1182
1183 /* load from immediate operand */
/*
 * Push an immediate NUL-terminated string literal (stored inline in
 * the bytecode stream right after the opcode) onto the estack.
 */
1184 OP(FILTER_OP_LOAD_STRING):
1185 {
1186 struct load_op *insn = (struct load_op *) pc;
1187
1188 dbg_printk("load string %s\n", insn->data);
1189 estack_push(stack, top, ax, bx);
1190 estack_ax(stack, top)->u.s.str = insn->data;
1191 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1192 estack_ax(stack, top)->u.s.literal_type =
1193 ESTACK_STRING_LITERAL_TYPE_PLAIN;
1194 estack_ax(stack, top)->u.s.user = 0;
/* Advance past the inline string, including its NUL terminator. */
1195 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1196 PO;
1197 }
1198
/* Same as LOAD_STRING, but the literal is a star-glob pattern. */
1199 OP(FILTER_OP_LOAD_STAR_GLOB_STRING):
1200 {
1201 struct load_op *insn = (struct load_op *) pc;
1202
1203 dbg_printk("load globbing pattern %s\n", insn->data);
1204 estack_push(stack, top, ax, bx);
1205 estack_ax(stack, top)->u.s.str = insn->data;
1206 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1207 estack_ax(stack, top)->u.s.literal_type =
1208 ESTACK_STRING_LITERAL_TYPE_STAR_GLOB;
1209 estack_ax(stack, top)->u.s.user = 0;
1210 next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
1211 PO;
1212 }
1213
/* Push an immediate s64 numeric literal onto the estack. */
1214 OP(FILTER_OP_LOAD_S64):
1215 {
1216 struct load_op *insn = (struct load_op *) pc;
1217
1218 estack_push(stack, top, ax, bx);
1219 estack_ax_v = ((struct literal_numeric *) insn->data)->v;
1220 dbg_printk("load s64 %lld\n",
1221 (long long) estack_ax_v);
1222 next_pc += sizeof(struct load_op)
1223 + sizeof(struct literal_numeric);
1224 PO;
1225 }
1226
/* Double literals are unsupported in the kernel interpreter. */
1227 OP(FILTER_OP_LOAD_DOUBLE):
1228 {
1229 BUG_ON(1);
1230 PO;
1231 }
1232
1233 /* cast */
/*
 * Generic casts must have been rewritten into specialized opcodes by
 * the bytecode specializer; hitting one here is a bytecode error.
 */
1234 OP(FILTER_OP_CAST_TO_S64):
1235 printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
1236 (unsigned int) *(filter_opcode_t *) pc);
1237 ret = -EINVAL;
1238 goto end;
1239
1240 OP(FILTER_OP_CAST_DOUBLE_TO_S64):
1241 {
1242 BUG_ON(1);
1243 PO;
1244 }
1245
/* No-op cast: just advance past the instruction. */
1246 OP(FILTER_OP_CAST_NOP):
1247 {
1248 next_pc += sizeof(struct cast_op);
1249 PO;
1250 }
1251
1252 /* get context ref */
/*
 * Fetch a context field value through its get_value() callback and
 * push it onto the estack.  ref->offset indexes lttng_static_ctx.
 */
1253 OP(FILTER_OP_GET_CONTEXT_REF_STRING):
1254 {
1255 struct load_op *insn = (struct load_op *) pc;
1256 struct field_ref *ref = (struct field_ref *) insn->data;
1257 struct lttng_ctx_field *ctx_field;
1258 union lttng_ctx_value v;
1259
1260 dbg_printk("get context ref offset %u type string\n",
1261 ref->offset);
1262 ctx_field = &lttng_static_ctx->fields[ref->offset];
1263 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
1264 estack_push(stack, top, ax, bx);
1265 estack_ax(stack, top)->u.s.str = v.str;
1266 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1267 dbg_printk("Filter warning: loading a NULL string.\n");
1268 ret = -EINVAL;
1269 goto end;
1270 }
1271 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1272 estack_ax(stack, top)->u.s.literal_type =
1273 ESTACK_STRING_LITERAL_TYPE_NONE;
1274 estack_ax(stack, top)->u.s.user = 0;
1275 dbg_printk("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
1276 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1277 PO;
1278 }
1279
/* Same as above, for a 64-bit signed integer context field. */
1280 OP(FILTER_OP_GET_CONTEXT_REF_S64):
1281 {
1282 struct load_op *insn = (struct load_op *) pc;
1283 struct field_ref *ref = (struct field_ref *) insn->data;
1284 struct lttng_ctx_field *ctx_field;
1285 union lttng_ctx_value v;
1286
1287 dbg_printk("get context ref offset %u type s64\n",
1288 ref->offset);
1289 ctx_field = &lttng_static_ctx->fields[ref->offset];
1290 ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
1291 estack_push(stack, top, ax, bx);
1292 estack_ax_v = v.s64;
1293 dbg_printk("ref get context s64 %lld\n",
1294 (long long) estack_ax_v);
1295 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1296 PO;
1297 }
1298
/* Double context fields are unsupported in the kernel interpreter. */
1299 OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE):
1300 {
1301 BUG_ON(1);
1302 PO;
1303 }
1304
1305 /* load userspace field ref */
1306 OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING):
1307 {
1308 struct load_op *insn = (struct load_op *) pc;
1309 struct field_ref *ref = (struct field_ref *) insn->data;
1310
1311 dbg_printk("load field ref offset %u type user string\n",
1312 ref->offset);
1313 estack_push(stack, top, ax, bx);
1314 estack_ax(stack, top)->u.s.user_str =
1315 *(const char * const *) &filter_stack_data[ref->offset];
1316 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1317 dbg_printk("Filter warning: loading a NULL string.\n");
1318 ret = -EINVAL;
1319 goto end;
1320 }
1321 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1322 estack_ax(stack, top)->u.s.literal_type =
1323 ESTACK_STRING_LITERAL_TYPE_NONE;
1324 estack_ax(stack, top)->u.s.user = 1;
1325 dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
1326 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1327 PO;
1328 }
1329
1330 OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE):
1331 {
1332 struct load_op *insn = (struct load_op *) pc;
1333 struct field_ref *ref = (struct field_ref *) insn->data;
1334
1335 dbg_printk("load field ref offset %u type user sequence\n",
1336 ref->offset);
1337 estack_push(stack, top, ax, bx);
1338 estack_ax(stack, top)->u.s.seq_len =
1339 *(unsigned long *) &filter_stack_data[ref->offset];
1340 estack_ax(stack, top)->u.s.user_str =
1341 *(const char **) (&filter_stack_data[ref->offset
1342 + sizeof(unsigned long)]);
1343 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1344 dbg_printk("Filter warning: loading a NULL sequence.\n");
1345 ret = -EINVAL;
1346 goto end;
1347 }
1348 estack_ax(stack, top)->u.s.literal_type =
1349 ESTACK_STRING_LITERAL_TYPE_NONE;
1350 estack_ax(stack, top)->u.s.user = 1;
1351 next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
1352 PO;
1353 }
1354
/*
 * Push a "root" object pointer used as the starting point of a
 * field-traversal chain (GET_SYMBOL / GET_INDEX / LOAD_FIELD).
 */
1355 OP(FILTER_OP_GET_CONTEXT_ROOT):
1356 {
1357 dbg_printk("op get context root\n");
1358 estack_push(stack, top, ax, bx);
1359 estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_CONTEXT;
/*
 * NOTE(review): unlike GET_PAYLOAD_ROOT below, u.ptr.ptr is not set
 * here — presumably context lookups resolve through the static context
 * rather than this pointer; confirm in dynamic_get_index().
 */
1360 /* "field" only needed for variants. */
1361 estack_ax(stack, top)->u.ptr.field = NULL;
1362 next_pc += sizeof(struct load_op);
1363 PO;
1364 }
1365
/* App contexts do not exist in the kernel tracer. */
1366 OP(FILTER_OP_GET_APP_CONTEXT_ROOT):
1367 {
1368 BUG_ON(1);
1369 PO;
1370 }
1371
1372 OP(FILTER_OP_GET_PAYLOAD_ROOT):
1373 {
1374 dbg_printk("op get app payload root\n");
1375 estack_push(stack, top, ax, bx);
1376 estack_ax(stack, top)->u.ptr.type = LOAD_ROOT_PAYLOAD;
1377 estack_ax(stack, top)->u.ptr.ptr = filter_stack_data;
1378 /* "field" only needed for variants. */
1379 estack_ax(stack, top)->u.ptr.field = NULL;
1380 next_pc += sizeof(struct load_op);
1381 PO;
1382 }
1383
/*
 * Symbol (field-name) lookup is resolved at specialization time; the
 * interpreter only accepts this opcode as a no-op after specialization
 * has rewritten the lookup, and rejects unresolved root lookups.
 */
1384 OP(FILTER_OP_GET_SYMBOL):
1385 {
1386 dbg_printk("op get symbol\n");
1387 switch (estack_ax(stack, top)->u.ptr.type) {
1388 case LOAD_OBJECT:
1389 printk(KERN_WARNING "Nested fields not implemented yet.\n");
1390 ret = -EINVAL;
1391 goto end;
1392 case LOAD_ROOT_CONTEXT:
1393 case LOAD_ROOT_APP_CONTEXT:
1394 case LOAD_ROOT_PAYLOAD:
1395 /*
1396 * symbol lookup is performed by
1397 * specialization.
1398 */
1399 ret = -EINVAL;
1400 goto end;
1401 }
1402 next_pc += sizeof(struct load_op) + sizeof(struct get_symbol);
1403 PO;
1404 }
1405
1406 OP(FILTER_OP_GET_SYMBOL_FIELD):
1407 {
1408 /*
1409 * Used for first variant encountered in a
1410 * traversal. Variants are not implemented yet.
1411 */
1412 ret = -EINVAL;
1413 goto end;
1414 }
1415
/*
 * Array/field indexing: dynamic_get_index() replaces the top-of-stack
 * object with the indexed element; mirror its value into the cached
 * accumulator (estack_ax_v) afterwards.
 */
1416 OP(FILTER_OP_GET_INDEX_U16):
1417 {
1418 struct load_op *insn = (struct load_op *) pc;
1419 struct get_index_u16 *index = (struct get_index_u16 *) insn->data;
1420
1421 dbg_printk("op get index u16\n");
1422 ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
1423 if (ret)
1424 goto end;
1425 estack_ax_v = estack_ax(stack, top)->u.v;
1426 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u16);
1427 PO;
1428 }
1429
/* Same as GET_INDEX_U16 with a 64-bit index operand. */
1430 OP(FILTER_OP_GET_INDEX_U64):
1431 {
1432 struct load_op *insn = (struct load_op *) pc;
1433 struct get_index_u64 *index = (struct get_index_u64 *) insn->data;
1434
1435 dbg_printk("op get index u64\n");
1436 ret = dynamic_get_index(lttng_probe_ctx, bytecode, index->index, estack_ax(stack, top));
1437 if (ret)
1438 goto end;
1439 estack_ax_v = estack_ax(stack, top)->u.v;
1440 next_pc += sizeof(struct load_op) + sizeof(struct get_index_u64);
1441 PO;
1442 }
1443
/*
 * Generic field load: dispatch on the runtime object type via
 * dynamic_load_field(), then cache the loaded value in the accumulator.
 */
1444 OP(FILTER_OP_LOAD_FIELD):
1445 {
1446 dbg_printk("op load field\n");
1447 ret = dynamic_load_field(estack_ax(stack, top));
1448 if (ret)
1449 goto end;
1450 estack_ax_v = estack_ax(stack, top)->u.v;
1451 next_pc += sizeof(struct load_op);
1452 PO;
1453 }
1454
/*
 * Specialized fixed-width integer loads: dereference the object
 * pointer at the top of the estack with the statically-known type and
 * place the (sign- or zero-extended) value in the accumulator.
 */
1455 OP(FILTER_OP_LOAD_FIELD_S8):
1456 {
1457 dbg_printk("op load field s8\n");
1458
1459 estack_ax_v = *(int8_t *) estack_ax(stack, top)->u.ptr.ptr;
1460 next_pc += sizeof(struct load_op);
1461 PO;
1462 }
1463 OP(FILTER_OP_LOAD_FIELD_S16):
1464 {
1465 dbg_printk("op load field s16\n");
1466
1467 estack_ax_v = *(int16_t *) estack_ax(stack, top)->u.ptr.ptr;
1468 next_pc += sizeof(struct load_op);
1469 PO;
1470 }
1471 OP(FILTER_OP_LOAD_FIELD_S32):
1472 {
1473 dbg_printk("op load field s32\n");
1474
1475 estack_ax_v = *(int32_t *) estack_ax(stack, top)->u.ptr.ptr;
1476 next_pc += sizeof(struct load_op);
1477 PO;
1478 }
1479 OP(FILTER_OP_LOAD_FIELD_S64):
1480 {
1481 dbg_printk("op load field s64\n");
1482
1483 estack_ax_v = *(int64_t *) estack_ax(stack, top)->u.ptr.ptr;
1484 next_pc += sizeof(struct load_op);
1485 PO;
1486 }
1487 OP(FILTER_OP_LOAD_FIELD_U8):
1488 {
1489 dbg_printk("op load field u8\n");
1490
1491 estack_ax_v = *(uint8_t *) estack_ax(stack, top)->u.ptr.ptr;
1492 next_pc += sizeof(struct load_op);
1493 PO;
1494 }
1495 OP(FILTER_OP_LOAD_FIELD_U16):
1496 {
1497 dbg_printk("op load field u16\n");
1498
1499 estack_ax_v = *(uint16_t *) estack_ax(stack, top)->u.ptr.ptr;
1500 next_pc += sizeof(struct load_op);
1501 PO;
1502 }
1503 OP(FILTER_OP_LOAD_FIELD_U32):
1504 {
1505 dbg_printk("op load field u32\n");
1506
1507 estack_ax_v = *(uint32_t *) estack_ax(stack, top)->u.ptr.ptr;
1508 next_pc += sizeof(struct load_op);
1509 PO;
1510 }
1511 OP(FILTER_OP_LOAD_FIELD_U64):
1512 {
1513 dbg_printk("op load field u64\n");
1514
1515 estack_ax_v = *(uint64_t *) estack_ax(stack, top)->u.ptr.ptr;
1516 next_pc += sizeof(struct load_op);
1517 PO;
1518 }
/* Double field loads are unsupported: reject the bytecode. */
1519 OP(FILTER_OP_LOAD_FIELD_DOUBLE):
1520 {
1521 ret = -EINVAL;
1522 goto end;
1523 }
1524
/*
 * Convert the object pointer at the top of the estack into a string
 * value in place.
 * NOTE(review): u.s.user is not set by these two opcodes — presumably
 * specialization only emits them for kernel-space objects; confirm
 * against the specializer before relying on it.
 */
1525 OP(FILTER_OP_LOAD_FIELD_STRING):
1526 {
1527 const char *str;
1528
1529 dbg_printk("op load field string\n");
1530 str = (const char *) estack_ax(stack, top)->u.ptr.ptr;
1531 estack_ax(stack, top)->u.s.str = str;
1532 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1533 dbg_printk("Filter warning: loading a NULL string.\n");
1534 ret = -EINVAL;
1535 goto end;
1536 }
1537 estack_ax(stack, top)->u.s.seq_len = LTTNG_SIZE_MAX;
1538 estack_ax(stack, top)->u.s.literal_type =
1539 ESTACK_STRING_LITERAL_TYPE_NONE;
1540 next_pc += sizeof(struct load_op);
1541 PO;
1542 }
1543
/* Sequence layout: unsigned long length, then payload pointer. */
1544 OP(FILTER_OP_LOAD_FIELD_SEQUENCE):
1545 {
1546 const char *ptr;
1547
1548 dbg_printk("op load field string sequence\n");
1549 ptr = estack_ax(stack, top)->u.ptr.ptr;
1550 estack_ax(stack, top)->u.s.seq_len = *(unsigned long *) ptr;
1551 estack_ax(stack, top)->u.s.str = *(const char **) (ptr + sizeof(unsigned long));
1552 if (unlikely(!estack_ax(stack, top)->u.s.str)) {
1553 dbg_printk("Filter warning: loading a NULL sequence.\n");
1554 ret = -EINVAL;
1555 goto end;
1556 }
1557 estack_ax(stack, top)->u.s.literal_type =
1558 ESTACK_STRING_LITERAL_TYPE_NONE;
1559 next_pc += sizeof(struct load_op);
1560 PO;
1561 }
1562
1563 END_OP
1564 end:
1565 /* return 0 (discard) on error */
1566 if (ret)
1567 return 0;
1568 return retval;
1569 }
1570
1571 #undef START_OP
1572 #undef OP
1573 #undef PO
1574 #undef END_OP