Refactoring: type description structures
src/lttng-bytecode.c (lttng-modules.git)
/* SPDX-License-Identifier: MIT
 *
 * lttng-bytecode.c
 *
 * LTTng modules bytecode code.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#include <linux/list.h>
#include <linux/slab.h>

#include <lttng/lttng-bytecode.h>
#include <lttng/events-internal.h>

static const char *opnames[] = {
	[ BYTECODE_OP_UNKNOWN ] = "UNKNOWN",

	[ BYTECODE_OP_RETURN ] = "RETURN",

	/* binary */
	[ BYTECODE_OP_MUL ] = "MUL",
	[ BYTECODE_OP_DIV ] = "DIV",
	[ BYTECODE_OP_MOD ] = "MOD",
	[ BYTECODE_OP_PLUS ] = "PLUS",
	[ BYTECODE_OP_MINUS ] = "MINUS",
	[ BYTECODE_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
	[ BYTECODE_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
	[ BYTECODE_OP_BIT_AND ] = "BIT_AND",
	[ BYTECODE_OP_BIT_OR ] = "BIT_OR",
	[ BYTECODE_OP_BIT_XOR ] = "BIT_XOR",

	/* binary comparators */
	[ BYTECODE_OP_EQ ] = "EQ",
	[ BYTECODE_OP_NE ] = "NE",
	[ BYTECODE_OP_GT ] = "GT",
	[ BYTECODE_OP_LT ] = "LT",
	[ BYTECODE_OP_GE ] = "GE",
	[ BYTECODE_OP_LE ] = "LE",

	/* string binary comparators */
	[ BYTECODE_OP_EQ_STRING ] = "EQ_STRING",
	[ BYTECODE_OP_NE_STRING ] = "NE_STRING",
	[ BYTECODE_OP_GT_STRING ] = "GT_STRING",
	[ BYTECODE_OP_LT_STRING ] = "LT_STRING",
	[ BYTECODE_OP_GE_STRING ] = "GE_STRING",
	[ BYTECODE_OP_LE_STRING ] = "LE_STRING",

	/* s64 binary comparators */
	[ BYTECODE_OP_EQ_S64 ] = "EQ_S64",
	[ BYTECODE_OP_NE_S64 ] = "NE_S64",
	[ BYTECODE_OP_GT_S64 ] = "GT_S64",
	[ BYTECODE_OP_LT_S64 ] = "LT_S64",
	[ BYTECODE_OP_GE_S64 ] = "GE_S64",
	[ BYTECODE_OP_LE_S64 ] = "LE_S64",

	/* double binary comparators */
	[ BYTECODE_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
	[ BYTECODE_OP_NE_DOUBLE ] = "NE_DOUBLE",
	[ BYTECODE_OP_GT_DOUBLE ] = "GT_DOUBLE",
	[ BYTECODE_OP_LT_DOUBLE ] = "LT_DOUBLE",
	[ BYTECODE_OP_GE_DOUBLE ] = "GE_DOUBLE",
	[ BYTECODE_OP_LE_DOUBLE ] = "LE_DOUBLE",

	/* Mixed S64-double binary comparators */
	[ BYTECODE_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
	[ BYTECODE_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
	[ BYTECODE_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
	[ BYTECODE_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
	[ BYTECODE_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
	[ BYTECODE_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",

	[ BYTECODE_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
	[ BYTECODE_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
	[ BYTECODE_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
	[ BYTECODE_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
	[ BYTECODE_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
	[ BYTECODE_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",

	/* unary */
	[ BYTECODE_OP_UNARY_PLUS ] = "UNARY_PLUS",
	[ BYTECODE_OP_UNARY_MINUS ] = "UNARY_MINUS",
	[ BYTECODE_OP_UNARY_NOT ] = "UNARY_NOT",
	[ BYTECODE_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
	[ BYTECODE_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
	[ BYTECODE_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
	[ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
	[ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
	[ BYTECODE_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",

	/* logical */
	[ BYTECODE_OP_AND ] = "AND",
	[ BYTECODE_OP_OR ] = "OR",

	/* load field ref */
	[ BYTECODE_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
	[ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
	[ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
	[ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
	[ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",

	/* load from immediate operand */
	[ BYTECODE_OP_LOAD_STRING ] = "LOAD_STRING",
	[ BYTECODE_OP_LOAD_S64 ] = "LOAD_S64",
	[ BYTECODE_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",

	/* cast */
	[ BYTECODE_OP_CAST_TO_S64 ] = "CAST_TO_S64",
	[ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
	[ BYTECODE_OP_CAST_NOP ] = "CAST_NOP",

	/* get context ref */
	[ BYTECODE_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
	[ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
	[ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
	[ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",

	/* load userspace field ref */
	[ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
	[ BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",

	/*
	 * load immediate star globbing pattern (literal string)
	 * from immediate.
	 */
	[ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",

	/* globbing pattern binary operator: apply to */
	[ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
	[ BYTECODE_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",

	/*
	 * Instructions for recursive traversal through composed types.
	 */
	[ BYTECODE_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
	[ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
	[ BYTECODE_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",

	[ BYTECODE_OP_GET_SYMBOL ] = "GET_SYMBOL",
	[ BYTECODE_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
	[ BYTECODE_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
	[ BYTECODE_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",

	[ BYTECODE_OP_LOAD_FIELD ] = "LOAD_FIELD",
	[ BYTECODE_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
	[ BYTECODE_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
	[ BYTECODE_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
	[ BYTECODE_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
	[ BYTECODE_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
	[ BYTECODE_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
	[ BYTECODE_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
	[ BYTECODE_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
	[ BYTECODE_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
	[ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
	[ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",

	[ BYTECODE_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",

	[ BYTECODE_OP_RETURN_S64 ] = "RETURN_S64",
};

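/*
 * Return a human-readable name for a bytecode opcode, for debug output.
 * Out-of-range opcodes map to "UNKNOWN".
 */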
const char *lttng_bytecode_print_op(enum bytecode_op op)
{
	if (op >= NR_BYTECODE_OPS)
		return "UNKNOWN";
	else
		return opnames[op];
}

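/*
 * Relocate a field reference: look up the named payload field in the event
 * descriptor, compute the field's byte offset within the data layout seen
 * by the interpreter, and rewrite the generic LOAD_FIELD_REF opcode into
 * the typed (and user/kernel) variant matching the field's type.
 */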
static
int apply_field_reloc(const struct lttng_kernel_event_desc *event_desc,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *field_name,
		enum bytecode_op bytecode_op)
{
	const struct lttng_kernel_event_field **fields, *field = NULL;
	unsigned int nr_fields, i;
	struct load_op *op;
	uint32_t field_offset = 0;

	dbg_printk("Apply field reloc: %u %s\n", reloc_offset, field_name);

	/* Lookup event by name */
	if (!event_desc)
		return -EINVAL;
	fields = event_desc->fields;
	if (!fields)
		return -EINVAL;
	nr_fields = event_desc->nr_fields;
	for (i = 0; i < nr_fields; i++) {
		if (fields[i]->nofilter)
			continue;
		if (!strcmp(fields[i]->name, field_name)) {
			field = fields[i];
			break;
		}
		/* compute field offset */
		switch (fields[i]->type->type) {
		case lttng_kernel_type_integer:
		case lttng_kernel_type_enum:
			field_offset += sizeof(int64_t);
			break;
		case lttng_kernel_type_array:
			if (!lttng_kernel_type_is_bytewise_integer(lttng_kernel_get_type_array(fields[i]->type)->elem_type))
				return -EINVAL;
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case lttng_kernel_type_sequence:
			if (!lttng_kernel_type_is_bytewise_integer(lttng_kernel_get_type_sequence(fields[i]->type)->elem_type))
				return -EINVAL;
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case lttng_kernel_type_string:
			field_offset += sizeof(void *);
			break;
		case lttng_kernel_type_struct:	/* Unsupported. */
		case lttng_kernel_type_variant:	/* Unsupported. */
		default:
			return -EINVAL;
		}
	}
	if (!field)
		return -EINVAL;

	/* Check if field offset is too large for 16-bit offset */
	if (field_offset > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* set type */
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (bytecode_op) {
	case BYTECODE_OP_LOAD_FIELD_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (field->type->type) {
		case lttng_kernel_type_integer:
		case lttng_kernel_type_enum:
			op->op = BYTECODE_OP_LOAD_FIELD_REF_S64;
			break;
		case lttng_kernel_type_array:
		{
			const struct lttng_kernel_type_array *array_type = lttng_kernel_get_type_array(field->type);
			const struct lttng_kernel_type_common *elem_type = array_type->elem_type;

			if (!lttng_kernel_type_is_bytewise_integer(elem_type) || array_type->encoding == lttng_kernel_string_encoding_none)
				return -EINVAL;
			if (field->user)
				op->op = BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE;
			else
				op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
			break;
		}
		case lttng_kernel_type_sequence:
		{
			const struct lttng_kernel_type_sequence *sequence_type = lttng_kernel_get_type_sequence(field->type);
			const struct lttng_kernel_type_common *elem_type = sequence_type->elem_type;

			if (!lttng_kernel_type_is_bytewise_integer(elem_type) || sequence_type->encoding == lttng_kernel_string_encoding_none)
				return -EINVAL;
			if (field->user)
				op->op = BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE;
			else
				op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
			break;
		}
		case lttng_kernel_type_string:
			if (field->user)
				op->op = BYTECODE_OP_LOAD_FIELD_REF_USER_STRING;
			else
				op->op = BYTECODE_OP_LOAD_FIELD_REF_STRING;
			break;
		case lttng_kernel_type_struct:	/* Unsupported. */
		case lttng_kernel_type_variant:	/* Unsupported. */
		default:
			return -EINVAL;
		}
		/* set offset */
		field_ref->offset = (uint16_t) field_offset;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

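/*
 * Relocate a context reference: resolve the context field by name in the
 * static context table and rewrite the generic GET_CONTEXT_REF opcode into
 * the variant matching the context field's type. The recorded offset is the
 * context field index.
 */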
static
int apply_context_reloc(struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *context_name,
		enum bytecode_op bytecode_op)
{
	struct load_op *op;
	struct lttng_kernel_ctx_field *ctx_field;
	int idx;

	dbg_printk("Apply context reloc: %u %s\n", reloc_offset, context_name);

	/* Get context index */
	idx = lttng_kernel_get_context_index(lttng_static_ctx, context_name);
	if (idx < 0)
		return -ENOENT;

	/* Check if idx is too large for 16-bit offset */
	if (idx > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* Get context return type */
	ctx_field = &lttng_static_ctx->fields[idx];
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (bytecode_op) {
	case BYTECODE_OP_GET_CONTEXT_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (ctx_field->event_field->type->type) {
		case lttng_kernel_type_integer:
		case lttng_kernel_type_enum:
			op->op = BYTECODE_OP_GET_CONTEXT_REF_S64;
			break;
		/* Sequence and array supported as string */
		case lttng_kernel_type_string:
			BUG_ON(ctx_field->event_field->user);
			op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
			break;
		case lttng_kernel_type_array:
		{
			const struct lttng_kernel_type_array *array_type = lttng_kernel_get_type_array(ctx_field->event_field->type);
			const struct lttng_kernel_type_common *elem_type = array_type->elem_type;

			if (!lttng_kernel_type_is_bytewise_integer(elem_type) || array_type->encoding == lttng_kernel_string_encoding_none)
				return -EINVAL;
			BUG_ON(ctx_field->event_field->user);
			op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
			break;
		}
		case lttng_kernel_type_sequence:
		{
			const struct lttng_kernel_type_sequence *sequence_type = lttng_kernel_get_type_sequence(ctx_field->event_field->type);
			const struct lttng_kernel_type_common *elem_type = sequence_type->elem_type;

			if (!lttng_kernel_type_is_bytewise_integer(elem_type) || sequence_type->encoding == lttng_kernel_string_encoding_none)
				return -EINVAL;
			BUG_ON(ctx_field->event_field->user);
			op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
			break;
		}
		case lttng_kernel_type_struct:	/* Unsupported. */
		case lttng_kernel_type_variant:	/* Unsupported. */
		default:
			return -EINVAL;
		}
		/* set offset to context index within channel contexts */
		field_ref->offset = (uint16_t) idx;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

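/*
 * Apply one relocation entry: dispatch on the opcode found at the
 * relocation offset within the copied bytecode. GET_SYMBOL references are
 * left untouched here; they are resolved at specialization time or
 * dynamically by the interpreter.
 */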
static
int apply_reloc(const struct lttng_kernel_event_desc *event_desc,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *name)
{
	struct load_op *op;

	dbg_printk("Apply reloc: %u %s\n", reloc_offset, name);

	/* Ensure that the reloc is within the code */
	if (runtime_len - reloc_offset < sizeof(uint16_t))
		return -EINVAL;

	op = (struct load_op *) &runtime->code[reloc_offset];
	switch (op->op) {
	case BYTECODE_OP_LOAD_FIELD_REF:
		return apply_field_reloc(event_desc, runtime, runtime_len,
			reloc_offset, name, op->op);
	case BYTECODE_OP_GET_CONTEXT_REF:
		return apply_context_reloc(runtime, runtime_len,
			reloc_offset, name, op->op);
	case BYTECODE_OP_GET_SYMBOL:
	case BYTECODE_OP_GET_SYMBOL_FIELD:
		/*
		 * Will be handled by load specialize phase or
		 * dynamically by interpreter.
		 */
		return 0;
	default:
		printk(KERN_WARNING "LTTng: filter: Unknown reloc op type %u\n", op->op);
		return -EINVAL;
	}
	return 0;
}

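/*
 * Return 1 if the bytecode node already has a runtime linked in the given
 * runtime list, 0 otherwise.
 */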
static
int bytecode_is_linked(struct lttng_bytecode_node *bytecode,
		struct list_head *bytecode_runtime_head)
{
	struct lttng_bytecode_runtime *bc_runtime;

	list_for_each_entry(bc_runtime, bytecode_runtime_head, node) {
		if (bc_runtime->bc == bytecode)
			return 1;
	}
	return 0;
}

/*
 * Take a bytecode with reloc table and link it to an event to create a
 * bytecode runtime.
 */
static
int link_bytecode(const struct lttng_kernel_event_desc *event_desc,
		struct lttng_kernel_ctx *ctx,
		struct lttng_bytecode_node *bytecode,
		struct list_head *bytecode_runtime_head,
		struct list_head *insert_loc)
{
	int ret, offset, next_offset;
	struct bytecode_runtime *runtime = NULL;
	size_t runtime_alloc_len;

	if (!bytecode)
		return 0;
	/* Bytecode already linked */
	if (bytecode_is_linked(bytecode, bytecode_runtime_head))
		return 0;

	dbg_printk("Linking...\n");

	/* We don't need the reloc table in the runtime */
	runtime_alloc_len = sizeof(*runtime) + bytecode->bc.reloc_offset;
	runtime = kzalloc(runtime_alloc_len, GFP_KERNEL);
	if (!runtime) {
		ret = -ENOMEM;
		goto alloc_error;
	}
	runtime->p.bc = bytecode;
	runtime->p.ctx = ctx;
	runtime->len = bytecode->bc.reloc_offset;
	/* copy original bytecode */
	memcpy(runtime->code, bytecode->bc.data, runtime->len);
	/*
	 * apply relocs. Those are a uint16_t (offset in bytecode)
	 * followed by a string (field name).
	 */
	for (offset = bytecode->bc.reloc_offset;
			offset < bytecode->bc.len;
			offset = next_offset) {
		uint16_t reloc_offset =
			*(uint16_t *) &bytecode->bc.data[offset];
		const char *name =
			(const char *) &bytecode->bc.data[offset + sizeof(uint16_t)];

		ret = apply_reloc(event_desc, runtime, runtime->len, reloc_offset, name);
		if (ret) {
			goto link_error;
		}
		next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
	}
	/* Validate bytecode */
	ret = lttng_bytecode_validate(runtime);
	if (ret) {
		goto link_error;
	}
	/* Specialize bytecode */
	ret = lttng_bytecode_specialize(event_desc, runtime);
	if (ret) {
		goto link_error;
	}

	switch (bytecode->type) {
	case LTTNG_BYTECODE_NODE_TYPE_FILTER:
		runtime->p.interpreter_funcs.filter = lttng_bytecode_filter_interpret;
		break;
	case LTTNG_BYTECODE_NODE_TYPE_CAPTURE:
		runtime->p.interpreter_funcs.capture = lttng_bytecode_capture_interpret_false;
		break;
	default:
		WARN_ON(1);
	}

	runtime->p.link_failed = 0;
	list_add_rcu(&runtime->p.node, insert_loc);
	dbg_printk("Linking successful.\n");
	return 0;

link_error:

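	/*
	 * On link error, still insert the runtime in the list, but wire in
	 * the always-false interpreter stub and flag the failure so this
	 * bytecode never matches.
	 */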
	switch (bytecode->type) {
	case LTTNG_BYTECODE_NODE_TYPE_FILTER:
		runtime->p.interpreter_funcs.filter = lttng_bytecode_filter_interpret_false;
		break;
	case LTTNG_BYTECODE_NODE_TYPE_CAPTURE:
		runtime->p.interpreter_funcs.capture = lttng_bytecode_capture_interpret_false;
		break;
	default:
		WARN_ON(1);
	}
	runtime->p.link_failed = 1;
	list_add_rcu(&runtime->p.node, insert_loc);
alloc_error:
	dbg_printk("Linking failed.\n");
	return ret;
}

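/*
 * Synchronize the interpreter function with the enabler state: use the real
 * filter interpreter only when the enabler is enabled and linking succeeded,
 * otherwise fall back to the always-false stub. The capture variant below
 * does the same for the capture interpreter.
 */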
void lttng_bytecode_filter_sync_state(struct lttng_bytecode_runtime *runtime)
{
	struct lttng_bytecode_node *bc = runtime->bc;

	if (!bc->enabler->enabled || runtime->link_failed)
		runtime->interpreter_funcs.filter = lttng_bytecode_filter_interpret_false;
	else
		runtime->interpreter_funcs.filter = lttng_bytecode_filter_interpret;
}

void lttng_bytecode_capture_sync_state(struct lttng_bytecode_runtime *runtime)
{
	struct lttng_bytecode_node *bc = runtime->bc;

	if (!bc->enabler->enabled || runtime->link_failed)
		runtime->interpreter_funcs.capture = lttng_bytecode_capture_interpret_false;
	else
		runtime->interpreter_funcs.capture = lttng_bytecode_capture_interpret;
}

/*
 * Given the lists of bytecode programs of an instance (event or event
 * notifier) and of a matching enabler, try to link all the enabler's bytecode
 * programs with the instance.
 *
 * This function is called after we have confirmed that the enabler and the
 * instance names match (either exactly or by glob pattern).
 */
void lttng_enabler_link_bytecode(const struct lttng_kernel_event_desc *event_desc,
		struct lttng_kernel_ctx *ctx,
		struct list_head *instance_bytecode_head,
		struct list_head *enabler_bytecode_head)
{
	struct lttng_bytecode_node *enabler_bc;
	struct lttng_bytecode_runtime *runtime;

	WARN_ON_ONCE(!event_desc);

	/* Go over all the bytecode programs of the enabler. */
	list_for_each_entry(enabler_bc, enabler_bytecode_head, node) {
		int found = 0, ret;
		struct list_head *insert_loc;

		/*
		 * Check if the current enabler bytecode program is already
		 * linked with the instance.
		 */
		list_for_each_entry(runtime, instance_bytecode_head, node) {
			if (runtime->bc == enabler_bc) {
				found = 1;
				break;
			}
		}

		/*
		 * Skip bytecode already linked, go to the next enabler
		 * bytecode program.
		 */
		if (found)
			continue;

		/*
		 * Insert at specified priority (seqnum) in increasing
		 * order. If there already is a bytecode of the same priority,
		 * insert the new bytecode right after it.
		 */
		list_for_each_entry_reverse(runtime,
				instance_bytecode_head, node) {
			if (runtime->bc->bc.seqnum <= enabler_bc->bc.seqnum) {
				/* insert here */
				insert_loc = &runtime->node;
				goto add_within;
			}
		}
		/* Add to head of list */
		insert_loc = instance_bytecode_head;
	add_within:
		dbg_printk("linking bytecode\n");
		ret = link_bytecode(event_desc, ctx, enabler_bc, instance_bytecode_head, insert_loc);
		if (ret) {
			dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
		}
	}
}

/*
 * We own the filter_bytecode if we return success.
 */
int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
		struct lttng_bytecode_node *filter_bytecode)
{
	list_add(&filter_bytecode->node, &enabler->filter_bytecode_head);
	return 0;
}

void lttng_free_enabler_filter_bytecode(struct lttng_enabler *enabler)
{
	struct lttng_bytecode_node *filter_bytecode, *tmp;

	list_for_each_entry_safe(filter_bytecode, tmp,
			&enabler->filter_bytecode_head, node) {
		kfree(filter_bytecode);
	}
}

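/*
 * The two helpers below free the filter bytecode runtimes attached to an
 * event or event notifier. Only the runtime and its data array are freed
 * here: the backing bytecode nodes are owned by the enabler and released
 * by lttng_free_enabler_filter_bytecode().
 */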
void lttng_free_event_filter_runtime(struct lttng_event *event)
{
	struct bytecode_runtime *runtime, *tmp;

	list_for_each_entry_safe(runtime, tmp,
			&event->filter_bytecode_runtime_head, p.node) {
		kfree(runtime->data);
		kfree(runtime);
	}
}

void lttng_free_event_notifier_filter_runtime(struct lttng_event_notifier *event_notifier)
{
	struct bytecode_runtime *runtime, *tmp;

	list_for_each_entry_safe(runtime, tmp,
			&event_notifier->filter_bytecode_runtime_head, p.node) {
		kfree(runtime->data);
		kfree(runtime);
	}
}