/*
 * lttng-filter.c
 *
 * LTTng modules filter code.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/slab.h>

#include <lttng-filter.h>

static const char *opnames[] = {
	[ FILTER_OP_UNKNOWN ] = "UNKNOWN",

	[ FILTER_OP_RETURN ] = "RETURN",

	/* binary */
	[ FILTER_OP_MUL ] = "MUL",
	[ FILTER_OP_DIV ] = "DIV",
	[ FILTER_OP_MOD ] = "MOD",
	[ FILTER_OP_PLUS ] = "PLUS",
	[ FILTER_OP_MINUS ] = "MINUS",
	[ FILTER_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
	[ FILTER_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
	[ FILTER_OP_BIT_AND ] = "BIT_AND",
	[ FILTER_OP_BIT_OR ] = "BIT_OR",
	[ FILTER_OP_BIT_XOR ] = "BIT_XOR",

	/* binary comparators */
	[ FILTER_OP_EQ ] = "EQ",
	[ FILTER_OP_NE ] = "NE",
	[ FILTER_OP_GT ] = "GT",
	[ FILTER_OP_LT ] = "LT",
	[ FILTER_OP_GE ] = "GE",
	[ FILTER_OP_LE ] = "LE",

	/* string binary comparators */
	[ FILTER_OP_EQ_STRING ] = "EQ_STRING",
	[ FILTER_OP_NE_STRING ] = "NE_STRING",
	[ FILTER_OP_GT_STRING ] = "GT_STRING",
	[ FILTER_OP_LT_STRING ] = "LT_STRING",
	[ FILTER_OP_GE_STRING ] = "GE_STRING",
	[ FILTER_OP_LE_STRING ] = "LE_STRING",

	/* s64 binary comparators */
	[ FILTER_OP_EQ_S64 ] = "EQ_S64",
	[ FILTER_OP_NE_S64 ] = "NE_S64",
	[ FILTER_OP_GT_S64 ] = "GT_S64",
	[ FILTER_OP_LT_S64 ] = "LT_S64",
	[ FILTER_OP_GE_S64 ] = "GE_S64",
	[ FILTER_OP_LE_S64 ] = "LE_S64",

	/* double binary comparators */
	[ FILTER_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
	[ FILTER_OP_NE_DOUBLE ] = "NE_DOUBLE",
	[ FILTER_OP_GT_DOUBLE ] = "GT_DOUBLE",
	[ FILTER_OP_LT_DOUBLE ] = "LT_DOUBLE",
	[ FILTER_OP_GE_DOUBLE ] = "GE_DOUBLE",
	[ FILTER_OP_LE_DOUBLE ] = "LE_DOUBLE",

	/* Mixed S64-double binary comparators */
	[ FILTER_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
	[ FILTER_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
	[ FILTER_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
	[ FILTER_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
	[ FILTER_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
	[ FILTER_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",

	[ FILTER_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
	[ FILTER_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
	[ FILTER_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
	[ FILTER_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
	[ FILTER_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
	[ FILTER_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",

	/* unary */
	[ FILTER_OP_UNARY_PLUS ] = "UNARY_PLUS",
	[ FILTER_OP_UNARY_MINUS ] = "UNARY_MINUS",
	[ FILTER_OP_UNARY_NOT ] = "UNARY_NOT",
	[ FILTER_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
	[ FILTER_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
	[ FILTER_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
	[ FILTER_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
	[ FILTER_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
	[ FILTER_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",

	/* logical */
	[ FILTER_OP_AND ] = "AND",
	[ FILTER_OP_OR ] = "OR",

	/* load field ref */
	[ FILTER_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
	[ FILTER_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
	[ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
	[ FILTER_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
	[ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",

	/* load from immediate operand */
	[ FILTER_OP_LOAD_STRING ] = "LOAD_STRING",
	[ FILTER_OP_LOAD_S64 ] = "LOAD_S64",
	[ FILTER_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",

	/* cast */
	[ FILTER_OP_CAST_TO_S64 ] = "CAST_TO_S64",
	[ FILTER_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
	[ FILTER_OP_CAST_NOP ] = "CAST_NOP",

	/* get context ref */
	[ FILTER_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
	[ FILTER_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
	[ FILTER_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
	[ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",

	/* load userspace field ref */
	[ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
	[ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",
	/*
	 * Load a star globbing pattern (literal string) from an
	 * immediate operand.
	 */
	[ FILTER_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",

	/*
	 * Globbing pattern binary operators: compare a string against
	 * a star-glob pattern.
	 */
	[ FILTER_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
	[ FILTER_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",

	/*
	 * Instructions for recursive traversal through composed types.
	 */
	[ FILTER_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
	[ FILTER_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
	[ FILTER_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",

	[ FILTER_OP_GET_SYMBOL ] = "GET_SYMBOL",
	[ FILTER_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
	[ FILTER_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
	[ FILTER_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",

	[ FILTER_OP_LOAD_FIELD ] = "LOAD_FIELD",
	[ FILTER_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
	[ FILTER_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
	[ FILTER_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
	[ FILTER_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
	[ FILTER_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
	[ FILTER_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
	[ FILTER_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
	[ FILTER_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
	[ FILTER_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
	[ FILTER_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
	[ FILTER_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",

	[ FILTER_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",

	[ FILTER_OP_RETURN_S64 ] = "RETURN_S64",
};

const char *lttng_filter_print_op(enum filter_op op)
{
	if (op >= NR_FILTER_OPS)
		return "UNKNOWN";
	else
		return opnames[op];
}
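
/*
 * Illustrative use (hypothetical, not part of this file): dump the
 * opcode names of a bytecode stream while debugging, e.g.
 *
 *	dbg_printk("opcode: %s\n",
 *		lttng_filter_print_op((enum filter_op) op->op));
 */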

static
int apply_field_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *field_name,
		enum filter_op filter_op)
{
	const struct lttng_event_desc *desc;
	const struct lttng_event_field *fields, *field = NULL;
	unsigned int nr_fields, i;
	struct load_op *op;
	uint32_t field_offset = 0;

	dbg_printk("Apply field reloc: %u %s\n", reloc_offset, field_name);

	/* Lookup event payload field by name */
	desc = event->desc;
	if (!desc)
		return -EINVAL;
	fields = desc->fields;
	if (!fields)
		return -EINVAL;
	nr_fields = desc->nr_fields;
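	/*
	 * Walk the fields in declaration order, accumulating each
	 * field's size in the filter's serialized layout: integers and
	 * enums take sizeof(int64_t); arrays and sequences take an
	 * unsigned long (length) plus a pointer; strings take a
	 * pointer. Compound types are not supported.
	 */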
	for (i = 0; i < nr_fields; i++) {
		if (!strcmp(fields[i].name, field_name)) {
			field = &fields[i];
			break;
		}
		/* compute field offset */
		switch (fields[i].type.atype) {
		case atype_integer:
		case atype_enum:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_sequence:
		case atype_array_bitfield:
		case atype_sequence_bitfield:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		case atype_struct:	/* Unsupported. */
		case atype_array_compound:	/* Unsupported. */
		case atype_sequence_compound:	/* Unsupported. */
		case atype_variant:	/* Unsupported. */
		default:
			return -EINVAL;
		}
	}
	if (!field)
		return -EINVAL;

	/* Check if field offset is too large for 16-bit offset */
	if (field_offset > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* set type */
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (filter_op) {
	case FILTER_OP_LOAD_FIELD_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum:
			op->op = FILTER_OP_LOAD_FIELD_REF_S64;
			break;
		case atype_array:
		case atype_sequence:
			if (field->user)
				op->op = FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE;
			else
				op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
			break;
		case atype_string:
			if (field->user)
				op->op = FILTER_OP_LOAD_FIELD_REF_USER_STRING;
			else
				op->op = FILTER_OP_LOAD_FIELD_REF_STRING;
			break;
		case atype_struct:	/* Unsupported. */
		case atype_array_compound:	/* Unsupported. */
		case atype_sequence_compound:	/* Unsupported. */
		case atype_variant:	/* Unsupported. */
		case atype_array_bitfield:	/* Unsupported. */
		case atype_sequence_bitfield:	/* Unsupported. */
		default:
			return -EINVAL;
		}
		/* set offset */
		field_ref->offset = (uint16_t) field_offset;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
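
/*
 * Illustrative example (hypothetical event): for an event with an
 * integer field "pid" followed by a kernel-space string field "comm",
 * a relocation naming "comm" rewrites the generic
 * FILTER_OP_LOAD_FIELD_REF at reloc_offset into
 * FILTER_OP_LOAD_FIELD_REF_STRING, with field_ref->offset set to
 * sizeof(int64_t) == 8, per the accounting above.
 */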

static
int apply_context_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *context_name,
		enum filter_op filter_op)
{
	struct load_op *op;
	struct lttng_ctx_field *ctx_field;
	int idx;

	dbg_printk("Apply context reloc: %u %s\n", reloc_offset, context_name);

	/* Get context index */
	idx = lttng_get_context_index(lttng_static_ctx, context_name);
	if (idx < 0)
		return -ENOENT;

	/* Check if idx is too large for 16-bit offset */
	if (idx > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* Get context return type */
	ctx_field = &lttng_static_ctx->fields[idx];
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (filter_op) {
	case FILTER_OP_GET_CONTEXT_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (ctx_field->event_field.type.atype) {
		case atype_integer:
		case atype_enum:
			op->op = FILTER_OP_GET_CONTEXT_REF_S64;
			break;
		/* Sequence and array supported as string */
		case atype_string:
		case atype_array:
		case atype_sequence:
			BUG_ON(ctx_field->event_field.user);
			op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
			break;
		case atype_struct:	/* Unsupported. */
		case atype_array_compound:	/* Unsupported. */
		case atype_sequence_compound:	/* Unsupported. */
		case atype_variant:	/* Unsupported. */
		case atype_array_bitfield:	/* Unsupported. */
		case atype_sequence_bitfield:	/* Unsupported. */
		default:
			return -EINVAL;
		}
		/* set offset to context index within channel contexts */
		field_ref->offset = (uint16_t) idx;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

static
int apply_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *name)
{
	struct load_op *op;

	dbg_printk("Apply reloc: %u %s\n", reloc_offset, name);

	/* Ensure that the reloc is within the code */
	if (runtime_len - reloc_offset < sizeof(uint16_t))
		return -EINVAL;

	op = (struct load_op *) &runtime->code[reloc_offset];
	switch (op->op) {
	case FILTER_OP_LOAD_FIELD_REF:
		return apply_field_reloc(event, runtime, runtime_len,
			reloc_offset, name, op->op);
	case FILTER_OP_GET_CONTEXT_REF:
		return apply_context_reloc(event, runtime, runtime_len,
			reloc_offset, name, op->op);
	case FILTER_OP_GET_SYMBOL:
	case FILTER_OP_GET_SYMBOL_FIELD:
		/*
		 * Will be handled by load specialize phase or
		 * dynamically by interpreter.
		 */
		return 0;
	default:
		printk(KERN_WARNING "Unknown reloc op type %u\n", op->op);
		return -EINVAL;
	}
	return 0;
}

static
int bytecode_is_linked(struct lttng_filter_bytecode_node *filter_bytecode,
		struct lttng_event *event)
{
	struct lttng_bytecode_runtime *bc_runtime;

	list_for_each_entry(bc_runtime,
			&event->bytecode_runtime_head, node) {
		if (bc_runtime->bc == filter_bytecode)
			return 1;
	}
	return 0;
}

/*
 * Take a bytecode with reloc table and link it to an event to create a
 * bytecode runtime.
 */
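/*
 * Linking proceeds in four steps: copy the instruction stream (the
 * reloc table is not needed at runtime), apply each relocation,
 * validate the resulting bytecode, then specialize generic
 * instructions into typed ones.
 */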
static
int _lttng_filter_event_link_bytecode(struct lttng_event *event,
		struct lttng_filter_bytecode_node *filter_bytecode,
		struct list_head *insert_loc)
{
	int ret, offset, next_offset;
	struct bytecode_runtime *runtime = NULL;
	size_t runtime_alloc_len;

	if (!filter_bytecode)
		return 0;
	/* Bytecode already linked */
	if (bytecode_is_linked(filter_bytecode, event))
		return 0;

	dbg_printk("Linking...\n");

	/* We don't need the reloc table in the runtime */
	runtime_alloc_len = sizeof(*runtime) + filter_bytecode->bc.reloc_offset;
	runtime = kzalloc(runtime_alloc_len, GFP_KERNEL);
	if (!runtime) {
		ret = -ENOMEM;
		goto alloc_error;
	}
	runtime->p.bc = filter_bytecode;
	runtime->p.event = event;
	runtime->len = filter_bytecode->bc.reloc_offset;
	/* copy original bytecode */
	memcpy(runtime->code, filter_bytecode->bc.data, runtime->len);
	/*
	 * apply relocs. Those are a uint16_t (offset in bytecode)
	 * followed by a string (field name).
	 */
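	/*
	 * Reloc table layout (entries packed back to back from
	 * bc.reloc_offset up to bc.len):
	 *
	 *	[ uint16_t reloc_offset ][ NUL-terminated field name ]
	 *
	 * next_offset below skips sizeof(uint16_t) plus the string and
	 * its terminating NUL.
	 */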
	for (offset = filter_bytecode->bc.reloc_offset;
			offset < filter_bytecode->bc.len;
			offset = next_offset) {
		uint16_t reloc_offset =
			*(uint16_t *) &filter_bytecode->bc.data[offset];
		const char *name =
			(const char *) &filter_bytecode->bc.data[offset + sizeof(uint16_t)];

		ret = apply_reloc(event, runtime, runtime->len, reloc_offset, name);
		if (ret) {
			goto link_error;
		}
		next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
	}
	/* Validate bytecode */
	ret = lttng_filter_validate_bytecode(runtime);
	if (ret) {
		goto link_error;
	}
	/* Specialize bytecode */
	ret = lttng_filter_specialize_bytecode(event, runtime);
	if (ret) {
		goto link_error;
	}
	runtime->p.filter = lttng_filter_interpret_bytecode;
	runtime->p.link_failed = 0;
	list_add_rcu(&runtime->p.node, insert_loc);
	dbg_printk("Linking successful.\n");
	return 0;

link_error:
	runtime->p.filter = lttng_filter_false;
	runtime->p.link_failed = 1;
	list_add_rcu(&runtime->p.node, insert_loc);
alloc_error:
	dbg_printk("Linking failed.\n");
	return ret;
}

void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime)
{
	struct lttng_filter_bytecode_node *bc = runtime->bc;

	if (!bc->enabler->enabled || runtime->link_failed)
		runtime->filter = lttng_filter_false;
	else
		runtime->filter = lttng_filter_interpret_bytecode;
}

/*
 * Link bytecode for all enablers referenced by an event.
 */
void lttng_enabler_event_link_bytecode(struct lttng_event *event,
		struct lttng_enabler *enabler)
{
	struct lttng_filter_bytecode_node *bc;
	struct lttng_bytecode_runtime *runtime;

	/* Can only be called for events with desc attached */
	WARN_ON_ONCE(!event->desc);

	/* Link each bytecode. */
	list_for_each_entry(bc, &enabler->filter_bytecode_head, node) {
		int found = 0, ret;
		struct list_head *insert_loc;

		list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node) {
			if (runtime->bc == bc) {
				found = 1;
				break;
			}
		}
		/* Skip bytecode already linked */
		if (found)
			continue;

		/*
		 * Insert at specified priority (seqnum) in increasing
		 * order.
		 */
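		/*
		 * Example (illustrative): with linked seqnums {1, 3},
		 * a new bytecode with seqnum 2 is inserted after the
		 * seqnum-1 runtime, keeping the list sorted.
		 */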
		list_for_each_entry_reverse(runtime,
				&event->bytecode_runtime_head, node) {
			if (runtime->bc->bc.seqnum < bc->bc.seqnum) {
				/* insert here */
				insert_loc = &runtime->node;
				goto add_within;
			}
		}
		/* Add to head of list */
		insert_loc = &event->bytecode_runtime_head;
	add_within:
		dbg_printk("linking bytecode\n");
		ret = _lttng_filter_event_link_bytecode(event, bc,
				insert_loc);
		if (ret) {
			dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
		}
	}
}

/*
 * We own the filter_bytecode if we return success.
 */
int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
		struct lttng_filter_bytecode_node *filter_bytecode)
{
	list_add(&filter_bytecode->node, &enabler->filter_bytecode_head);
	return 0;
}

void lttng_free_enabler_filter_bytecode(struct lttng_enabler *enabler)
{
	struct lttng_filter_bytecode_node *filter_bytecode, *tmp;

	list_for_each_entry_safe(filter_bytecode, tmp,
			&enabler->filter_bytecode_head, node) {
		kfree(filter_bytecode);
	}
}

void lttng_free_event_filter_runtime(struct lttng_event *event)
{
	struct bytecode_runtime *runtime, *tmp;

	list_for_each_entry_safe(runtime, tmp,
			&event->bytecode_runtime_head, p.node) {
		kfree(runtime->data);
		kfree(runtime);
	}
}