Filter code relicensing to MIT license
[lttng-modules.git] / lttng-filter-interpreter.c
/*
 * lttng-filter-interpreter.c
 *
 * LTTng modules filter interpreter.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/uaccess.h>
#include <wrapper/frame.h>

#include <lttng-filter.h>

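/*
 * The interpreter below uses computed gotos (see the dispatch table),
 * which objtool may flag as a non-standard stack frame; this wrapper
 * annotation silences that warning on kernels that support it.
 */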
LTTNG_STACK_FRAME_NON_STANDARD(lttng_filter_interpret_bytecode);

/*
 * get_char should be called with page fault handler disabled if it is expected
 * to handle user-space read.
 */
static
char get_char(struct estack_entry *reg, size_t offset)
{
	if (unlikely(offset >= reg->u.s.seq_len))
		return '\0';
	if (reg->u.s.user) {
		char c;

		/* Handle invalid access as end of string. */
		if (unlikely(!access_ok(VERIFY_READ,
				reg->u.s.user_str + offset,
				sizeof(c))))
			return '\0';
		/* Handle fault (nonzero return value) as end of string. */
		if (unlikely(__copy_from_user_inatomic(&c,
				reg->u.s.user_str + offset,
				sizeof(c))))
			return '\0';
		return c;
	} else {
		return reg->u.s.str[offset];
	}
}

/*
 * -1: wildcard found.
 * -2: unknown escape char.
 * 0: normal char.
 */
static
int parse_char(struct estack_entry *reg, char *c, size_t *offset)
{
	switch (*c) {
	case '\\':
		(*offset)++;
		*c = get_char(reg, *offset);
		switch (*c) {
		case '\\':
		case '*':
			return 0;
		default:
			return -2;
		}
	case '*':
		return -1;
	default:
		return 0;
	}
}

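/*
 * Compare the string operands held in the two topmost stack entries (bx
 * vs. ax), strcmp-style: returns < 0, 0 or > 0.  Operands loaded from an
 * immediate ("literal") may contain '*' wildcards and '\' escapes, handled
 * by parse_char().  When either operand lives in user-space, characters
 * are fetched with page faults disabled.
 */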
static
int stack_strcmp(struct estack *stack, int top, const char *cmp_type)
{
	size_t offset_bx = 0, offset_ax = 0;
	int diff, has_user = 0;
	mm_segment_t old_fs;

	if (estack_bx(stack, top)->u.s.user
			|| estack_ax(stack, top)->u.s.user) {
		has_user = 1;
		old_fs = get_fs();
		set_fs(KERNEL_DS);
		pagefault_disable();
	}

	for (;;) {
		int ret;
		int escaped_r0 = 0;
		char char_bx, char_ax;

		char_bx = get_char(estack_bx(stack, top), offset_bx);
		char_ax = get_char(estack_ax(stack, top), offset_ax);

		if (unlikely(char_bx == '\0')) {
			if (char_ax == '\0') {
				diff = 0;
				break;
			} else {
				if (estack_ax(stack, top)->u.s.literal) {
					ret = parse_char(estack_ax(stack, top),
						&char_ax, &offset_ax);
					if (ret == -1) {
						diff = 0;
						break;
					}
				}
				diff = -1;
				break;
			}
		}
		if (unlikely(char_ax == '\0')) {
			if (estack_bx(stack, top)->u.s.literal) {
				ret = parse_char(estack_bx(stack, top),
					&char_bx, &offset_bx);
				if (ret == -1) {
					diff = 0;
					break;
				}
			}
			diff = 1;
			break;
		}
		if (estack_bx(stack, top)->u.s.literal) {
			ret = parse_char(estack_bx(stack, top),
				&char_bx, &offset_bx);
			if (ret == -1) {
				diff = 0;
				break;
			} else if (ret == -2) {
				escaped_r0 = 1;
			}
			/* else compare both char */
		}
		if (estack_ax(stack, top)->u.s.literal) {
			ret = parse_char(estack_ax(stack, top),
				&char_ax, &offset_ax);
			if (ret == -1) {
				diff = 0;
				break;
			} else if (ret == -2) {
				if (!escaped_r0) {
					diff = -1;
					break;
				}
			} else {
				if (escaped_r0) {
					diff = 1;
					break;
				}
			}
		} else {
			if (escaped_r0) {
				diff = 1;
				break;
			}
		}
		diff = char_bx - char_ax;
		if (diff != 0)
			break;
		offset_bx++;
		offset_ax++;
	}
	if (has_user) {
		pagefault_enable();
		set_fs(old_fs);
	}
	return diff;
}

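/*
 * Always-false filter: unconditionally discards the event.  Used as a
 * safe fallback filter callback (e.g. when a bytecode cannot be linked).
 */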
uint64_t lttng_filter_false(void *filter_data,
		struct lttng_probe_ctx *lttng_probe_ctx,
		const char *filter_stack_data)
{
	return 0;
}

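/*
 * The interpreter body below is written once in terms of the START_OP,
 * OP, PO and END_OP macros.  Depending on compiler support it is expanded
 * either into a plain switch statement (INTERPRETER_USE_SWITCH) or into a
 * computed-goto dispatch table, where PO jumps straight to the handler of
 * the next opcode.
 */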
#ifdef INTERPRETER_USE_SWITCH

/*
 * Fallback for compilers that do not support taking address of labels.
 */

#define START_OP						\
	start_pc = &bytecode->data[0];				\
	for (pc = next_pc = start_pc; pc - start_pc < bytecode->len;	\
			pc = next_pc) {				\
		dbg_printk("Executing op %s (%u)\n",		\
			lttng_filter_print_op((unsigned int) *(filter_opcode_t *) pc),	\
			(unsigned int) *(filter_opcode_t *) pc); 	\
		switch (*(filter_opcode_t *) pc) {

#define OP(name)	case name

#define PO		break

#define END_OP		}					\
	}

#else

/*
 * Dispatch-table based interpreter.
 */

#define START_OP						\
	start_pc = &bytecode->data[0];				\
	pc = next_pc = start_pc;				\
	if (unlikely(pc - start_pc >= bytecode->len))		\
		goto end;					\
	goto *dispatch[*(filter_opcode_t *) pc];

#define OP(name)						\
LABEL_##name

#define PO							\
		pc = next_pc;					\
		goto *dispatch[*(filter_opcode_t *) pc];

#define END_OP

#endif

/*
 * Return 0 (discard), or raise the 0x1 flag (log event).
 * Currently, other flags are kept for future extensions and have no
 * effect.
 */
uint64_t lttng_filter_interpret_bytecode(void *filter_data,
		struct lttng_probe_ctx *lttng_probe_ctx,
		const char *filter_stack_data)
{
	struct bytecode_runtime *bytecode = filter_data;
	void *pc, *next_pc, *start_pc;
	int ret = -EINVAL;
	uint64_t retval = 0;
	struct estack _stack;
	struct estack *stack = &_stack;
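	/*
	 * The estack_ax()/estack_bx() accessors address the two topmost
	 * stack entries; their integer values are mirrored in the ax and
	 * bx register variables below (via estack_ax_v/estack_bx_v), and
	 * estack_push()/estack_pop() keep registers and stack in sync.
	 */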
	register int64_t ax = 0, bx = 0;
	register int top = FILTER_STACK_EMPTY;
#ifndef INTERPRETER_USE_SWITCH
	static void *dispatch[NR_FILTER_OPS] = {
		[ FILTER_OP_UNKNOWN ] = &&LABEL_FILTER_OP_UNKNOWN,

		[ FILTER_OP_RETURN ] = &&LABEL_FILTER_OP_RETURN,

		/* binary */
		[ FILTER_OP_MUL ] = &&LABEL_FILTER_OP_MUL,
		[ FILTER_OP_DIV ] = &&LABEL_FILTER_OP_DIV,
		[ FILTER_OP_MOD ] = &&LABEL_FILTER_OP_MOD,
		[ FILTER_OP_PLUS ] = &&LABEL_FILTER_OP_PLUS,
		[ FILTER_OP_MINUS ] = &&LABEL_FILTER_OP_MINUS,
		[ FILTER_OP_RSHIFT ] = &&LABEL_FILTER_OP_RSHIFT,
		[ FILTER_OP_LSHIFT ] = &&LABEL_FILTER_OP_LSHIFT,
		[ FILTER_OP_BIN_AND ] = &&LABEL_FILTER_OP_BIN_AND,
		[ FILTER_OP_BIN_OR ] = &&LABEL_FILTER_OP_BIN_OR,
		[ FILTER_OP_BIN_XOR ] = &&LABEL_FILTER_OP_BIN_XOR,

		/* binary comparators */
		[ FILTER_OP_EQ ] = &&LABEL_FILTER_OP_EQ,
		[ FILTER_OP_NE ] = &&LABEL_FILTER_OP_NE,
		[ FILTER_OP_GT ] = &&LABEL_FILTER_OP_GT,
		[ FILTER_OP_LT ] = &&LABEL_FILTER_OP_LT,
		[ FILTER_OP_GE ] = &&LABEL_FILTER_OP_GE,
		[ FILTER_OP_LE ] = &&LABEL_FILTER_OP_LE,

		/* string binary comparator */
		[ FILTER_OP_EQ_STRING ] = &&LABEL_FILTER_OP_EQ_STRING,
		[ FILTER_OP_NE_STRING ] = &&LABEL_FILTER_OP_NE_STRING,
		[ FILTER_OP_GT_STRING ] = &&LABEL_FILTER_OP_GT_STRING,
		[ FILTER_OP_LT_STRING ] = &&LABEL_FILTER_OP_LT_STRING,
		[ FILTER_OP_GE_STRING ] = &&LABEL_FILTER_OP_GE_STRING,
		[ FILTER_OP_LE_STRING ] = &&LABEL_FILTER_OP_LE_STRING,

		/* s64 binary comparator */
		[ FILTER_OP_EQ_S64 ] = &&LABEL_FILTER_OP_EQ_S64,
		[ FILTER_OP_NE_S64 ] = &&LABEL_FILTER_OP_NE_S64,
		[ FILTER_OP_GT_S64 ] = &&LABEL_FILTER_OP_GT_S64,
		[ FILTER_OP_LT_S64 ] = &&LABEL_FILTER_OP_LT_S64,
		[ FILTER_OP_GE_S64 ] = &&LABEL_FILTER_OP_GE_S64,
		[ FILTER_OP_LE_S64 ] = &&LABEL_FILTER_OP_LE_S64,

		/* double binary comparator */
		[ FILTER_OP_EQ_DOUBLE ] = &&LABEL_FILTER_OP_EQ_DOUBLE,
		[ FILTER_OP_NE_DOUBLE ] = &&LABEL_FILTER_OP_NE_DOUBLE,
		[ FILTER_OP_GT_DOUBLE ] = &&LABEL_FILTER_OP_GT_DOUBLE,
		[ FILTER_OP_LT_DOUBLE ] = &&LABEL_FILTER_OP_LT_DOUBLE,
		[ FILTER_OP_GE_DOUBLE ] = &&LABEL_FILTER_OP_GE_DOUBLE,
		[ FILTER_OP_LE_DOUBLE ] = &&LABEL_FILTER_OP_LE_DOUBLE,

		/* Mixed S64-double binary comparators */
		[ FILTER_OP_EQ_DOUBLE_S64 ] = &&LABEL_FILTER_OP_EQ_DOUBLE_S64,
		[ FILTER_OP_NE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_NE_DOUBLE_S64,
		[ FILTER_OP_GT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GT_DOUBLE_S64,
		[ FILTER_OP_LT_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LT_DOUBLE_S64,
		[ FILTER_OP_GE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_GE_DOUBLE_S64,
		[ FILTER_OP_LE_DOUBLE_S64 ] = &&LABEL_FILTER_OP_LE_DOUBLE_S64,

		[ FILTER_OP_EQ_S64_DOUBLE ] = &&LABEL_FILTER_OP_EQ_S64_DOUBLE,
		[ FILTER_OP_NE_S64_DOUBLE ] = &&LABEL_FILTER_OP_NE_S64_DOUBLE,
		[ FILTER_OP_GT_S64_DOUBLE ] = &&LABEL_FILTER_OP_GT_S64_DOUBLE,
		[ FILTER_OP_LT_S64_DOUBLE ] = &&LABEL_FILTER_OP_LT_S64_DOUBLE,
		[ FILTER_OP_GE_S64_DOUBLE ] = &&LABEL_FILTER_OP_GE_S64_DOUBLE,
		[ FILTER_OP_LE_S64_DOUBLE ] = &&LABEL_FILTER_OP_LE_S64_DOUBLE,

		/* unary */
		[ FILTER_OP_UNARY_PLUS ] = &&LABEL_FILTER_OP_UNARY_PLUS,
		[ FILTER_OP_UNARY_MINUS ] = &&LABEL_FILTER_OP_UNARY_MINUS,
		[ FILTER_OP_UNARY_NOT ] = &&LABEL_FILTER_OP_UNARY_NOT,
		[ FILTER_OP_UNARY_PLUS_S64 ] = &&LABEL_FILTER_OP_UNARY_PLUS_S64,
		[ FILTER_OP_UNARY_MINUS_S64 ] = &&LABEL_FILTER_OP_UNARY_MINUS_S64,
		[ FILTER_OP_UNARY_NOT_S64 ] = &&LABEL_FILTER_OP_UNARY_NOT_S64,
		[ FILTER_OP_UNARY_PLUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_PLUS_DOUBLE,
		[ FILTER_OP_UNARY_MINUS_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_MINUS_DOUBLE,
		[ FILTER_OP_UNARY_NOT_DOUBLE ] = &&LABEL_FILTER_OP_UNARY_NOT_DOUBLE,

		/* logical */
		[ FILTER_OP_AND ] = &&LABEL_FILTER_OP_AND,
		[ FILTER_OP_OR ] = &&LABEL_FILTER_OP_OR,

		/* load field ref */
		[ FILTER_OP_LOAD_FIELD_REF ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF,
		[ FILTER_OP_LOAD_FIELD_REF_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_STRING,
		[ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_SEQUENCE,
		[ FILTER_OP_LOAD_FIELD_REF_S64 ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_S64,
		[ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_DOUBLE,

		/* load from immediate operand */
		[ FILTER_OP_LOAD_STRING ] = &&LABEL_FILTER_OP_LOAD_STRING,
		[ FILTER_OP_LOAD_S64 ] = &&LABEL_FILTER_OP_LOAD_S64,
		[ FILTER_OP_LOAD_DOUBLE ] = &&LABEL_FILTER_OP_LOAD_DOUBLE,

		/* cast */
		[ FILTER_OP_CAST_TO_S64 ] = &&LABEL_FILTER_OP_CAST_TO_S64,
		[ FILTER_OP_CAST_DOUBLE_TO_S64 ] = &&LABEL_FILTER_OP_CAST_DOUBLE_TO_S64,
		[ FILTER_OP_CAST_NOP ] = &&LABEL_FILTER_OP_CAST_NOP,

		/* get context ref */
		[ FILTER_OP_GET_CONTEXT_REF ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF,
		[ FILTER_OP_GET_CONTEXT_REF_STRING ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_STRING,
		[ FILTER_OP_GET_CONTEXT_REF_S64 ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_S64,
		[ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = &&LABEL_FILTER_OP_GET_CONTEXT_REF_DOUBLE,

		/* load userspace field ref */
		[ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_STRING,
		[ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = &&LABEL_FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE,
	};
#endif /* #ifndef INTERPRETER_USE_SWITCH */

	START_OP

		OP(FILTER_OP_UNKNOWN):
		OP(FILTER_OP_LOAD_FIELD_REF):
		OP(FILTER_OP_GET_CONTEXT_REF):
#ifdef INTERPRETER_USE_SWITCH
		default:
#endif /* INTERPRETER_USE_SWITCH */
			printk(KERN_WARNING "unknown bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		OP(FILTER_OP_RETURN):
			/* LTTNG_FILTER_DISCARD or LTTNG_FILTER_RECORD_FLAG */
			retval = !!estack_ax_v;
			ret = 0;
			goto end;

		/* binary */
		OP(FILTER_OP_MUL):
		OP(FILTER_OP_DIV):
		OP(FILTER_OP_MOD):
		OP(FILTER_OP_PLUS):
		OP(FILTER_OP_MINUS):
		OP(FILTER_OP_RSHIFT):
		OP(FILTER_OP_LSHIFT):
		OP(FILTER_OP_BIN_AND):
		OP(FILTER_OP_BIN_OR):
		OP(FILTER_OP_BIN_XOR):
			printk(KERN_WARNING "unsupported bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

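		/*
		 * Generic (non-specialized) comparators should have been
		 * replaced by the typed _STRING/_S64 variants below during
		 * bytecode specialization; executing one is an error.
		 */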
		OP(FILTER_OP_EQ):
		OP(FILTER_OP_NE):
		OP(FILTER_OP_GT):
		OP(FILTER_OP_LT):
		OP(FILTER_OP_GE):
		OP(FILTER_OP_LE):
			printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

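		/*
		 * String comparators: the two string operands are compared
		 * with stack_strcmp(), popped, and the 0/1 result is left in
		 * ax.
		 */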
		OP(FILTER_OP_EQ_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, "==") == 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_NE_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, "!=") != 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_GT_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, ">") > 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_LT_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, "<") < 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_GE_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, ">=") >= 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_LE_STRING):
		{
			int res;

			res = (stack_strcmp(stack, top, "<=") <= 0);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}

		OP(FILTER_OP_EQ_S64):
		{
			int res;

			res = (estack_bx_v == estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_NE_S64):
		{
			int res;

			res = (estack_bx_v != estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_GT_S64):
		{
			int res;

			res = (estack_bx_v > estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_LT_S64):
		{
			int res;

			res = (estack_bx_v < estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_GE_S64):
		{
			int res;

			res = (estack_bx_v >= estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}
		OP(FILTER_OP_LE_S64):
		{
			int res;

			res = (estack_bx_v <= estack_ax_v);
			estack_pop(stack, top, ax, bx);
			estack_ax_v = res;
			next_pc += sizeof(struct binary_op);
			PO;
		}

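		/*
		 * Floating-point (double) opcodes should never be generated
		 * for the kernel tracer, so reaching one of these handlers
		 * is a bug.
		 */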
		OP(FILTER_OP_EQ_DOUBLE):
		OP(FILTER_OP_NE_DOUBLE):
		OP(FILTER_OP_GT_DOUBLE):
		OP(FILTER_OP_LT_DOUBLE):
		OP(FILTER_OP_GE_DOUBLE):
		OP(FILTER_OP_LE_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* Mixed S64-double binary comparators */
		OP(FILTER_OP_EQ_DOUBLE_S64):
		OP(FILTER_OP_NE_DOUBLE_S64):
		OP(FILTER_OP_GT_DOUBLE_S64):
		OP(FILTER_OP_LT_DOUBLE_S64):
		OP(FILTER_OP_GE_DOUBLE_S64):
		OP(FILTER_OP_LE_DOUBLE_S64):
		OP(FILTER_OP_EQ_S64_DOUBLE):
		OP(FILTER_OP_NE_S64_DOUBLE):
		OP(FILTER_OP_GT_S64_DOUBLE):
		OP(FILTER_OP_LT_S64_DOUBLE):
		OP(FILTER_OP_GE_S64_DOUBLE):
		OP(FILTER_OP_LE_S64_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* unary */
		OP(FILTER_OP_UNARY_PLUS):
		OP(FILTER_OP_UNARY_MINUS):
		OP(FILTER_OP_UNARY_NOT):
			printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;


		OP(FILTER_OP_UNARY_PLUS_S64):
		{
			next_pc += sizeof(struct unary_op);
			PO;
		}
		OP(FILTER_OP_UNARY_MINUS_S64):
		{
			estack_ax_v = -estack_ax_v;
			next_pc += sizeof(struct unary_op);
			PO;
		}
		OP(FILTER_OP_UNARY_PLUS_DOUBLE):
		OP(FILTER_OP_UNARY_MINUS_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}
		OP(FILTER_OP_UNARY_NOT_S64):
		{
			estack_ax_v = !estack_ax_v;
			next_pc += sizeof(struct unary_op);
			PO;
		}
		OP(FILTER_OP_UNARY_NOT_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* logical */
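		/*
		 * Logical AND/OR implement short-circuit evaluation: when the
		 * result is already decided by the left operand (in ax), the
		 * instruction's skip_offset jumps over the bytecode of the
		 * right operand; otherwise the left operand is popped and the
		 * right operand is evaluated next.
		 */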
		OP(FILTER_OP_AND):
		{
			struct logical_op *insn = (struct logical_op *) pc;

			/* If AX is 0, skip and evaluate to 0 */
			if (unlikely(estack_ax_v == 0)) {
				dbg_printk("Jumping to bytecode offset %u\n",
					(unsigned int) insn->skip_offset);
				next_pc = start_pc + insn->skip_offset;
			} else {
				/* Pop 1 when jump not taken */
				estack_pop(stack, top, ax, bx);
				next_pc += sizeof(struct logical_op);
			}
			PO;
		}
		OP(FILTER_OP_OR):
		{
			struct logical_op *insn = (struct logical_op *) pc;

			/* If AX is nonzero, skip and evaluate to 1 */

			if (unlikely(estack_ax_v != 0)) {
				estack_ax_v = 1;
				dbg_printk("Jumping to bytecode offset %u\n",
					(unsigned int) insn->skip_offset);
				next_pc = start_pc + insn->skip_offset;
			} else {
				/* Pop 1 when jump not taken */
				estack_pop(stack, top, ax, bx);
				next_pc += sizeof(struct logical_op);
			}
			PO;
		}


		/* load field ref */
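		/*
		 * Field references index into filter_stack_data, the buffer
		 * of event field values the probe prepares for filtering;
		 * ref->offset is the field's offset within that buffer.
		 */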
		OP(FILTER_OP_LOAD_FIELD_REF_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type string\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.str =
				*(const char * const *) &filter_stack_data[ref->offset];
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL string.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.seq_len = UINT_MAX;
			estack_ax(stack, top)->u.s.literal = 0;
			estack_ax(stack, top)->u.s.user = 0;
			dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_REF_SEQUENCE):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type sequence\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.seq_len =
				*(unsigned long *) &filter_stack_data[ref->offset];
			estack_ax(stack, top)->u.s.str =
				*(const char **) (&filter_stack_data[ref->offset
								+ sizeof(unsigned long)]);
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL sequence.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.literal = 0;
			estack_ax(stack, top)->u.s.user = 0;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_REF_S64):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type s64\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax_v =
				((struct literal_numeric *) &filter_stack_data[ref->offset])->v;
			dbg_printk("ref load s64 %lld\n",
				(long long) estack_ax_v);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_REF_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* load from immediate operand */
		OP(FILTER_OP_LOAD_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;

			dbg_printk("load string %s\n", insn->data);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.str = insn->data;
			estack_ax(stack, top)->u.s.seq_len = UINT_MAX;
			estack_ax(stack, top)->u.s.literal = 1;
			estack_ax(stack, top)->u.s.user = 0;
			next_pc += sizeof(struct load_op) + strlen(insn->data) + 1;
			PO;
		}

		OP(FILTER_OP_LOAD_S64):
		{
			struct load_op *insn = (struct load_op *) pc;

			estack_push(stack, top, ax, bx);
			estack_ax_v = ((struct literal_numeric *) insn->data)->v;
			dbg_printk("load s64 %lld\n",
				(long long) estack_ax_v);
			next_pc += sizeof(struct load_op)
					+ sizeof(struct literal_numeric);
			PO;
		}

		OP(FILTER_OP_LOAD_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* cast */
		OP(FILTER_OP_CAST_TO_S64):
			printk(KERN_WARNING "unsupported non-specialized bytecode op %u\n",
				(unsigned int) *(filter_opcode_t *) pc);
			ret = -EINVAL;
			goto end;

		OP(FILTER_OP_CAST_DOUBLE_TO_S64):
		{
			BUG_ON(1);
			PO;
		}

		OP(FILTER_OP_CAST_NOP):
		{
			next_pc += sizeof(struct cast_op);
			PO;
		}

		/* get context ref */
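		/*
		 * Context references fetch their value at interpretation time
		 * from the global lttng_static_ctx: ref->offset indexes the
		 * context field array, and the field's get_value() callback
		 * fills in the value for the current probe context.
		 */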
		OP(FILTER_OP_GET_CONTEXT_REF_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;
			struct lttng_ctx_field *ctx_field;
			union lttng_ctx_value v;

			dbg_printk("get context ref offset %u type string\n",
				ref->offset);
			ctx_field = &lttng_static_ctx->fields[ref->offset];
			ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.str = v.str;
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL string.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.seq_len = UINT_MAX;
			estack_ax(stack, top)->u.s.literal = 0;
			estack_ax(stack, top)->u.s.user = 0;
			dbg_printk("ref get context string %s\n", estack_ax(stack, top)->u.s.str);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_GET_CONTEXT_REF_S64):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;
			struct lttng_ctx_field *ctx_field;
			union lttng_ctx_value v;

			dbg_printk("get context ref offset %u type s64\n",
				ref->offset);
			ctx_field = &lttng_static_ctx->fields[ref->offset];
			ctx_field->get_value(ctx_field, lttng_probe_ctx, &v);
			estack_push(stack, top, ax, bx);
			estack_ax_v = v.s64;
			dbg_printk("ref get context s64 %lld\n",
				(long long) estack_ax_v);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_GET_CONTEXT_REF_DOUBLE):
		{
			BUG_ON(1);
			PO;
		}

		/* load userspace field ref */
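		/*
		 * User-space field references behave like the kernel-space
		 * ones above, but the payload holds a __user pointer: setting
		 * u.s.user to 1 makes get_char()/stack_strcmp() read the
		 * characters with page faults disabled.
		 */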
		OP(FILTER_OP_LOAD_FIELD_REF_USER_STRING):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type user string\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.user_str =
				*(const char * const *) &filter_stack_data[ref->offset];
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL string.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.seq_len = UINT_MAX;
			estack_ax(stack, top)->u.s.literal = 0;
			estack_ax(stack, top)->u.s.user = 1;
			dbg_printk("ref load string %s\n", estack_ax(stack, top)->u.s.str);
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

		OP(FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE):
		{
			struct load_op *insn = (struct load_op *) pc;
			struct field_ref *ref = (struct field_ref *) insn->data;

			dbg_printk("load field ref offset %u type user sequence\n",
				ref->offset);
			estack_push(stack, top, ax, bx);
			estack_ax(stack, top)->u.s.seq_len =
				*(unsigned long *) &filter_stack_data[ref->offset];
			estack_ax(stack, top)->u.s.user_str =
				*(const char **) (&filter_stack_data[ref->offset
								+ sizeof(unsigned long)]);
			if (unlikely(!estack_ax(stack, top)->u.s.str)) {
				dbg_printk("Filter warning: loading a NULL sequence.\n");
				ret = -EINVAL;
				goto end;
			}
			estack_ax(stack, top)->u.s.literal = 0;
			estack_ax(stack, top)->u.s.user = 1;
			next_pc += sizeof(struct load_op) + sizeof(struct field_ref);
			PO;
		}

	END_OP
end:
	/* return 0 (discard) on error */
	if (ret)
		return 0;
	return retval;
}

#undef START_OP
#undef OP
#undef PO
#undef END_OP