ca8914ede7251291e5d9343ccb0a5514bb8b812c
[lttng-ust.git] / liblttng-ust / lttng-bytecode.c
1 /*
2 * lttng-bytecode.c
3 *
4 * LTTng UST bytecode code.
5 *
6 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
7 *
8 * Permission is hereby granted, free of charge, to any person obtaining a copy
9 * of this software and associated documentation files (the "Software"), to deal
10 * in the Software without restriction, including without limitation the rights
11 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
12 * copies of the Software, and to permit persons to whom the Software is
13 * furnished to do so, subject to the following conditions:
14 *
15 * The above copyright notice and this permission notice shall be included in
16 * all copies or substantial portions of the Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
21 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
23 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 * SOFTWARE.
25 */
26
27 #define _LGPL_SOURCE
28 #include <stddef.h>
29 #include <stdint.h>
30
31 #include <urcu/rculist.h>
32
33 #include "lttng-bytecode.h"
34 #include "ust-events-internal.h"
35
/*
 * Table mapping bytecode_op enum values to printable opcode names,
 * indexed by opcode via designated initializers. Used only for
 * debugging output (see print_op()). Opcodes without an explicit
 * entry are implicitly NULL.
 */
static const char *opnames[] = {
	[ BYTECODE_OP_UNKNOWN ] = "UNKNOWN",

	[ BYTECODE_OP_RETURN ] = "RETURN",

	/* binary */
	[ BYTECODE_OP_MUL ] = "MUL",
	[ BYTECODE_OP_DIV ] = "DIV",
	[ BYTECODE_OP_MOD ] = "MOD",
	[ BYTECODE_OP_PLUS ] = "PLUS",
	[ BYTECODE_OP_MINUS ] = "MINUS",
	[ BYTECODE_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
	[ BYTECODE_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
	[ BYTECODE_OP_BIT_AND ] = "BIT_AND",
	[ BYTECODE_OP_BIT_OR ] = "BIT_OR",
	[ BYTECODE_OP_BIT_XOR ] = "BIT_XOR",

	/* binary comparators */
	[ BYTECODE_OP_EQ ] = "EQ",
	[ BYTECODE_OP_NE ] = "NE",
	[ BYTECODE_OP_GT ] = "GT",
	[ BYTECODE_OP_LT ] = "LT",
	[ BYTECODE_OP_GE ] = "GE",
	[ BYTECODE_OP_LE ] = "LE",

	/* string binary comparators */
	[ BYTECODE_OP_EQ_STRING ] = "EQ_STRING",
	[ BYTECODE_OP_NE_STRING ] = "NE_STRING",
	[ BYTECODE_OP_GT_STRING ] = "GT_STRING",
	[ BYTECODE_OP_LT_STRING ] = "LT_STRING",
	[ BYTECODE_OP_GE_STRING ] = "GE_STRING",
	[ BYTECODE_OP_LE_STRING ] = "LE_STRING",

	/* s64 binary comparators */
	[ BYTECODE_OP_EQ_S64 ] = "EQ_S64",
	[ BYTECODE_OP_NE_S64 ] = "NE_S64",
	[ BYTECODE_OP_GT_S64 ] = "GT_S64",
	[ BYTECODE_OP_LT_S64 ] = "LT_S64",
	[ BYTECODE_OP_GE_S64 ] = "GE_S64",
	[ BYTECODE_OP_LE_S64 ] = "LE_S64",

	/* double binary comparators */
	[ BYTECODE_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
	[ BYTECODE_OP_NE_DOUBLE ] = "NE_DOUBLE",
	[ BYTECODE_OP_GT_DOUBLE ] = "GT_DOUBLE",
	[ BYTECODE_OP_LT_DOUBLE ] = "LT_DOUBLE",
	[ BYTECODE_OP_GE_DOUBLE ] = "GE_DOUBLE",
	[ BYTECODE_OP_LE_DOUBLE ] = "LE_DOUBLE",

	/* Mixed S64-double binary comparators */
	[ BYTECODE_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
	[ BYTECODE_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
	[ BYTECODE_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
	[ BYTECODE_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
	[ BYTECODE_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
	[ BYTECODE_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",

	[ BYTECODE_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
	[ BYTECODE_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
	[ BYTECODE_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
	[ BYTECODE_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
	[ BYTECODE_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
	[ BYTECODE_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",

	/* unary */
	[ BYTECODE_OP_UNARY_PLUS ] = "UNARY_PLUS",
	[ BYTECODE_OP_UNARY_MINUS ] = "UNARY_MINUS",
	[ BYTECODE_OP_UNARY_NOT ] = "UNARY_NOT",
	[ BYTECODE_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
	[ BYTECODE_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
	[ BYTECODE_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
	[ BYTECODE_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
	[ BYTECODE_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
	[ BYTECODE_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",

	/* logical */
	[ BYTECODE_OP_AND ] = "AND",
	[ BYTECODE_OP_OR ] = "OR",

	/* load field ref */
	[ BYTECODE_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
	[ BYTECODE_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
	[ BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
	[ BYTECODE_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
	[ BYTECODE_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",

	/* load from immediate operand */
	[ BYTECODE_OP_LOAD_STRING ] = "LOAD_STRING",
	[ BYTECODE_OP_LOAD_S64 ] = "LOAD_S64",
	[ BYTECODE_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",

	/* cast */
	[ BYTECODE_OP_CAST_TO_S64 ] = "CAST_TO_S64",
	[ BYTECODE_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
	[ BYTECODE_OP_CAST_NOP ] = "CAST_NOP",

	/* get context ref */
	[ BYTECODE_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
	[ BYTECODE_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
	[ BYTECODE_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
	[ BYTECODE_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",

	/* load userspace field ref */
	[ BYTECODE_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
	[ BYTECODE_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",

	/*
	 * load immediate star globbing pattern (literal string)
	 * from immediate.
	 */
	[ BYTECODE_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",

	/* globbing pattern binary operator: apply to */
	[ BYTECODE_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
	[ BYTECODE_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",

	/*
	 * Instructions for recursive traversal through composed types.
	 */
	[ BYTECODE_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
	[ BYTECODE_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
	[ BYTECODE_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",

	[ BYTECODE_OP_GET_SYMBOL ] = "GET_SYMBOL",
	[ BYTECODE_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
	[ BYTECODE_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
	[ BYTECODE_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",

	[ BYTECODE_OP_LOAD_FIELD ] = "LOAD_FIELD",
	[ BYTECODE_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
	[ BYTECODE_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
	[ BYTECODE_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
	[ BYTECODE_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
	[ BYTECODE_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
	[ BYTECODE_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
	[ BYTECODE_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
	[ BYTECODE_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
	[ BYTECODE_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
	[ BYTECODE_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
	[ BYTECODE_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",

	[ BYTECODE_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",

	[ BYTECODE_OP_RETURN_S64 ] = "RETURN_S64",
};
181
182 const char *print_op(enum bytecode_op op)
183 {
184 if (op >= NR_BYTECODE_OPS)
185 return "UNKNOWN";
186 else
187 return opnames[op];
188 }
189
/*
 * Resolve a LOAD_FIELD_REF relocation against an event payload layout:
 * look up @field_name in @event_desc, accumulate the byte offset of the
 * field within the interpreter's field storage, and patch the load
 * instruction at @reloc_offset in the runtime code with the
 * type-specialized opcode and the computed 16-bit offset.
 *
 * Returns 0 on success, -EINVAL if the field is not found, has an
 * unsupported type, or the offset does not fit the encoding.
 */
static
int apply_field_reloc(const struct lttng_event_desc *event_desc,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *field_name,
		enum bytecode_op bytecode_op)
{
	const struct lttng_event_field *fields, *field = NULL;
	unsigned int nr_fields, i;
	struct load_op *op;
	uint32_t field_offset = 0;

	dbg_printf("Apply field reloc: %u %s\n", reloc_offset, field_name);

	/* Lookup event by name */
	if (!event_desc)
		return -EINVAL;
	fields = event_desc->fields;
	if (!fields)
		return -EINVAL;
	nr_fields = event_desc->nr_fields;
	for (i = 0; i < nr_fields; i++) {
		/* Fields excluded from filtering consume no storage. */
		if (fields[i].u.ext.nofilter) {
			continue;
		}
		if (!strcmp(fields[i].name, field_name)) {
			field = &fields[i];
			break;
		}
		/*
		 * compute field offset: each preceding field advances the
		 * offset by the size of its interpreter representation.
		 */
		switch (fields[i].type.atype) {
		case atype_integer:
		case atype_enum:
		case atype_enum_nestable:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_array_nestable:
		case atype_sequence:
		case atype_sequence_nestable:
			/* Sequences/arrays store a length then a data pointer. */
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		case atype_float:
			field_offset += sizeof(double);
			break;
		default:
			return -EINVAL;
		}
	}
	if (!field)
		return -EINVAL;

	/* Check if field offset is too large for 16-bit offset */
	if (field_offset > FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* set type */
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (bytecode_op) {
	case BYTECODE_OP_LOAD_FIELD_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		/* Specialize the generic load opcode on the field's type. */
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum:
		case atype_enum_nestable:
			op->op = BYTECODE_OP_LOAD_FIELD_REF_S64;
			break;
		case atype_array:
		case atype_array_nestable:
		case atype_sequence:
		case atype_sequence_nestable:
			op->op = BYTECODE_OP_LOAD_FIELD_REF_SEQUENCE;
			break;
		case atype_string:
			op->op = BYTECODE_OP_LOAD_FIELD_REF_STRING;
			break;
		case atype_float:
			op->op = BYTECODE_OP_LOAD_FIELD_REF_DOUBLE;
			break;
		default:
			return -EINVAL;
		}
		/* set offset */
		field_ref->offset = (uint16_t) field_offset;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
290
/*
 * Resolve a GET_CONTEXT_REF relocation: look up @context_name in the
 * runtime's context table (registering it on the fly when it is an
 * application-provided context), and patch the load instruction at
 * @reloc_offset with the type-specialized opcode and the context index.
 *
 * Returns 0 on success, -ENOENT when the context cannot be found or
 * registered, -EINVAL on unsupported type or out-of-range index.
 */
static
int apply_context_reloc(struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *context_name,
		enum bytecode_op bytecode_op)
{
	struct load_op *op;
	struct lttng_ctx_field *ctx_field;
	int idx;
	struct lttng_ctx **pctx = runtime->p.pctx;

	dbg_printf("Apply context reloc: %u %s\n", reloc_offset, context_name);

	/* Get context index */
	idx = lttng_get_context_index(*pctx, context_name);
	if (idx < 0) {
		/*
		 * Unknown context: application contexts can be added to
		 * the context table lazily, then looked up again.
		 */
		if (lttng_context_is_app(context_name)) {
			int ret;

			ret = lttng_ust_add_app_context_to_ctx_rcu(context_name,
					pctx);
			if (ret)
				return ret;
			idx = lttng_get_context_index(*pctx, context_name);
			if (idx < 0)
				return -ENOENT;
		} else {
			return -ENOENT;
		}
	}
	/* Check if idx is too large for 16-bit offset */
	if (idx > FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* Get context return type */
	ctx_field = &(*pctx)->fields[idx];
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (bytecode_op) {
	case BYTECODE_OP_GET_CONTEXT_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		/* Specialize the generic load opcode on the context type. */
		switch (ctx_field->event_field.type.atype) {
		case atype_integer:
		case atype_enum:
		case atype_enum_nestable:
			op->op = BYTECODE_OP_GET_CONTEXT_REF_S64;
			break;
			/* Sequence and array supported as string */
		case atype_string:
		case atype_array:
		case atype_array_nestable:
		case atype_sequence:
		case atype_sequence_nestable:
			op->op = BYTECODE_OP_GET_CONTEXT_REF_STRING;
			break;
		case atype_float:
			op->op = BYTECODE_OP_GET_CONTEXT_REF_DOUBLE;
			break;
		case atype_dynamic:
			/* Type only known at runtime: keep generic opcode. */
			op->op = BYTECODE_OP_GET_CONTEXT_REF;
			break;
		default:
			return -EINVAL;
		}
		/* set offset to context index within channel contexts */
		field_ref->offset = (uint16_t) idx;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
368
369 static
370 int apply_reloc(const struct lttng_event_desc *event_desc,
371 struct bytecode_runtime *runtime,
372 uint32_t runtime_len,
373 uint32_t reloc_offset,
374 const char *name)
375 {
376 struct load_op *op;
377
378 dbg_printf("Apply reloc: %u %s\n", reloc_offset, name);
379
380 /* Ensure that the reloc is within the code */
381 if (runtime_len - reloc_offset < sizeof(uint16_t))
382 return -EINVAL;
383
384 op = (struct load_op *) &runtime->code[reloc_offset];
385 switch (op->op) {
386 case BYTECODE_OP_LOAD_FIELD_REF:
387 return apply_field_reloc(event_desc, runtime, runtime_len,
388 reloc_offset, name, op->op);
389 case BYTECODE_OP_GET_CONTEXT_REF:
390 return apply_context_reloc(runtime, runtime_len,
391 reloc_offset, name, op->op);
392 case BYTECODE_OP_GET_SYMBOL:
393 case BYTECODE_OP_GET_SYMBOL_FIELD:
394 /*
395 * Will be handled by load specialize phase or
396 * dynamically by interpreter.
397 */
398 return 0;
399 default:
400 ERR("Unknown reloc op type %u\n", op->op);
401 return -EINVAL;
402 }
403 return 0;
404 }
405
406 static
407 int bytecode_is_linked(struct lttng_ust_bytecode_node *bytecode,
408 struct cds_list_head *bytecode_runtime_head)
409 {
410 struct lttng_bytecode_runtime *bc_runtime;
411
412 cds_list_for_each_entry(bc_runtime, bytecode_runtime_head, node) {
413 if (bc_runtime->bc == bytecode)
414 return 1;
415 }
416 return 0;
417 }
418
/*
 * Take a bytecode with reloc table and link it to an event to create a
 * bytecode runtime.
 *
 * The runtime is allocated, the bytecode (minus its reloc table) is
 * copied into it, relocations are applied, then the result is validated
 * and specialized. On success the runtime is inserted at @insert_loc
 * with the real interpreter function; on link failure a runtime is
 * still inserted, but wired to the "interpret false" stub so the
 * instance keeps working with the filter/capture disabled.
 *
 * Returns 0 on success (or when there is nothing to link), negative
 * errno value on error.
 */
static
int link_bytecode(const struct lttng_event_desc *event_desc,
		struct lttng_ctx **ctx,
		struct lttng_ust_bytecode_node *bytecode,
		struct cds_list_head *insert_loc)
{
	int ret, offset, next_offset;
	struct bytecode_runtime *runtime = NULL;
	size_t runtime_alloc_len;

	if (!bytecode)
		return 0;
	/* Bytecode already linked */
	if (bytecode_is_linked(bytecode, insert_loc))
		return 0;

	dbg_printf("Linking...\n");

	/* We don't need the reloc table in the runtime */
	runtime_alloc_len = sizeof(*runtime) + bytecode->bc.reloc_offset;
	runtime = zmalloc(runtime_alloc_len);
	if (!runtime) {
		ret = -ENOMEM;
		goto alloc_error;
	}
	runtime->p.bc = bytecode;
	runtime->p.pctx = ctx;
	runtime->len = bytecode->bc.reloc_offset;
	/* copy original bytecode */
	memcpy(runtime->code, bytecode->bc.data, runtime->len);
	/*
	 * apply relocs. Those are a uint16_t (offset in bytecode)
	 * followed by a string (field name). The reloc table starts at
	 * bc.reloc_offset and runs to the end of the bytecode buffer.
	 */
	for (offset = bytecode->bc.reloc_offset;
			offset < bytecode->bc.len;
			offset = next_offset) {
		uint16_t reloc_offset =
			*(uint16_t *) &bytecode->bc.data[offset];
		const char *name =
			(const char *) &bytecode->bc.data[offset + sizeof(uint16_t)];

		ret = apply_reloc(event_desc, runtime, runtime->len, reloc_offset, name);
		if (ret) {
			goto link_error;
		}
		/* Skip the offset field and the NUL-terminated name. */
		next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
	}
	/* Validate bytecode */
	ret = lttng_bytecode_validate(runtime);
	if (ret) {
		goto link_error;
	}
	/* Specialize bytecode */
	ret = lttng_bytecode_specialize(event_desc, runtime);
	if (ret) {
		goto link_error;
	}

	/* Wire the interpreter entry point matching the bytecode type. */
	switch (bytecode->type) {
	case LTTNG_UST_BYTECODE_NODE_TYPE_FILTER:
		runtime->p.interpreter_funcs.filter = lttng_bytecode_filter_interpret;
		break;
	case LTTNG_UST_BYTECODE_NODE_TYPE_CAPTURE:
		runtime->p.interpreter_funcs.capture = lttng_bytecode_capture_interpret;
		break;
	default:
		abort();
	}

	runtime->p.link_failed = 0;
	cds_list_add_rcu(&runtime->p.node, insert_loc);
	dbg_printf("Linking successful.\n");
	return 0;

link_error:
	/*
	 * Keep the failed runtime on the list, but make its interpreter
	 * always evaluate to false so it is harmless at trace time.
	 */
	switch (bytecode->type) {
	case LTTNG_UST_BYTECODE_NODE_TYPE_FILTER:
		runtime->p.interpreter_funcs.filter = lttng_bytecode_filter_interpret_false;
		break;
	case LTTNG_UST_BYTECODE_NODE_TYPE_CAPTURE:
		runtime->p.interpreter_funcs.capture = lttng_bytecode_capture_interpret_false;
		break;
	default:
		abort();
	}

	runtime->p.link_failed = 1;
	cds_list_add_rcu(&runtime->p.node, insert_loc);
alloc_error:
	dbg_printf("Linking failed.\n");
	return ret;
}
516
517 void lttng_bytecode_filter_sync_state(struct lttng_bytecode_runtime *runtime)
518 {
519 struct lttng_ust_bytecode_node *bc = runtime->bc;
520
521 if (!bc->enabler->enabled || runtime->link_failed)
522 runtime->interpreter_funcs.filter = lttng_bytecode_filter_interpret_false;
523 else
524 runtime->interpreter_funcs.filter = lttng_bytecode_filter_interpret;
525 }
526
527 void lttng_bytecode_capture_sync_state(struct lttng_bytecode_runtime *runtime)
528 {
529 struct lttng_ust_bytecode_node *bc = runtime->bc;
530
531 if (!bc->enabler->enabled || runtime->link_failed)
532 runtime->interpreter_funcs.capture = lttng_bytecode_capture_interpret_false;
533 else
534 runtime->interpreter_funcs.capture = lttng_bytecode_capture_interpret;
535 }
536
/*
 * Given the lists of bytecode programs of an instance (trigger or event) and
 * of a matching enabler, try to link all the enabler's bytecode programs with
 * the instance.
 *
 * This function is called after we confirmed that name enabler and the
 * instance are name matching (or glob pattern matching).
 */
void lttng_enabler_link_bytecode(const struct lttng_event_desc *event_desc,
		struct lttng_ctx **ctx,
		struct cds_list_head *instance_bytecode_head,
		struct cds_list_head *enabler_bytecode_head)
{
	struct lttng_ust_bytecode_node *enabler_bc;
	struct lttng_bytecode_runtime *runtime;

	assert(event_desc);

	/* Go over all the bytecode programs of the enabler. */
	cds_list_for_each_entry(enabler_bc, enabler_bytecode_head, node) {
		int found = 0, ret;
		struct cds_list_head *insert_loc;

		/*
		 * Check if the current enabler bytecode program is already
		 * linked with the instance.
		 */
		cds_list_for_each_entry(runtime, instance_bytecode_head, node) {
			if (runtime->bc == enabler_bc) {
				found = 1;
				break;
			}
		}

		/*
		 * Skip bytecode already linked, go to the next enabler
		 * bytecode program.
		 */
		if (found)
			continue;

		/*
		 * Insert at specified priority (seqnum) in increasing
		 * order. If there already is a bytecode of the same priority,
		 * insert the new bytecode right after it. Walking in reverse
		 * finds the last runtime with seqnum <= the new one.
		 */
		cds_list_for_each_entry_reverse(runtime,
				instance_bytecode_head, node) {
			if (runtime->bc->bc.seqnum <= enabler_bc->bc.seqnum) {
				/* insert here */
				insert_loc = &runtime->node;
				goto add_within;
			}
		}

		/* Add to head to list */
		insert_loc = instance_bytecode_head;
	add_within:
		dbg_printf("linking bytecode\n");
		ret = link_bytecode(event_desc, ctx, enabler_bc, insert_loc);
		if (ret) {
			dbg_printf("[lttng filter] warning: cannot link event bytecode\n");
		}
	}
}
602
/*
 * Attach a filter bytecode program to an enabler.
 *
 * We own the bytecode if we return success: ownership of @bytecode is
 * transferred to the enabler's filter bytecode list.
 */
int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
		struct lttng_ust_bytecode_node *bytecode)
{
	cds_list_add(&bytecode->node, &enabler->filter_bytecode_head);
	return 0;
}
612
613 static
614 void free_filter_runtime(struct cds_list_head *bytecode_runtime_head)
615 {
616 struct bytecode_runtime *runtime, *tmp;
617
618 cds_list_for_each_entry_safe(runtime, tmp, bytecode_runtime_head,
619 p.node) {
620 free(runtime->data);
621 free(runtime);
622 }
623 }
624
/* Free all filter bytecode runtimes attached to @event. */
void lttng_free_event_filter_runtime(struct lttng_event *event)
{
	free_filter_runtime(&event->filter_bytecode_runtime_head);
}
629
/* Free all filter bytecode runtimes attached to @event_notifier. */
void lttng_free_event_notifier_filter_runtime(
		struct lttng_event_notifier *event_notifier)
{
	free_filter_runtime(&event_notifier->filter_bytecode_runtime_head);
}
This page took 0.040235 seconds and 3 git commands to generate.