Filter: add lshift, rshift, bit not ops
[lttng-modules.git] / lttng-filter.c
/*
 * lttng-filter.c
 *
 * LTTng modules filter code.
 *
 * Copyright (C) 2010-2016 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/slab.h>

#include <lttng-filter.h>

static const char *opnames[] = {
	[ FILTER_OP_UNKNOWN ] = "UNKNOWN",

	[ FILTER_OP_RETURN ] = "RETURN",

	/* binary */
	[ FILTER_OP_MUL ] = "MUL",
	[ FILTER_OP_DIV ] = "DIV",
	[ FILTER_OP_MOD ] = "MOD",
	[ FILTER_OP_PLUS ] = "PLUS",
	[ FILTER_OP_MINUS ] = "MINUS",
	[ FILTER_OP_BIT_RSHIFT ] = "BIT_RSHIFT",
	[ FILTER_OP_BIT_LSHIFT ] = "BIT_LSHIFT",
	[ FILTER_OP_BIT_AND ] = "BIT_AND",
	[ FILTER_OP_BIT_OR ] = "BIT_OR",
	[ FILTER_OP_BIT_XOR ] = "BIT_XOR",

	/* binary comparators */
	[ FILTER_OP_EQ ] = "EQ",
	[ FILTER_OP_NE ] = "NE",
	[ FILTER_OP_GT ] = "GT",
	[ FILTER_OP_LT ] = "LT",
	[ FILTER_OP_GE ] = "GE",
	[ FILTER_OP_LE ] = "LE",

	/* string binary comparators */
	[ FILTER_OP_EQ_STRING ] = "EQ_STRING",
	[ FILTER_OP_NE_STRING ] = "NE_STRING",
	[ FILTER_OP_GT_STRING ] = "GT_STRING",
	[ FILTER_OP_LT_STRING ] = "LT_STRING",
	[ FILTER_OP_GE_STRING ] = "GE_STRING",
	[ FILTER_OP_LE_STRING ] = "LE_STRING",

	/* s64 binary comparators */
	[ FILTER_OP_EQ_S64 ] = "EQ_S64",
	[ FILTER_OP_NE_S64 ] = "NE_S64",
	[ FILTER_OP_GT_S64 ] = "GT_S64",
	[ FILTER_OP_LT_S64 ] = "LT_S64",
	[ FILTER_OP_GE_S64 ] = "GE_S64",
	[ FILTER_OP_LE_S64 ] = "LE_S64",

	/* double binary comparators */
	[ FILTER_OP_EQ_DOUBLE ] = "EQ_DOUBLE",
	[ FILTER_OP_NE_DOUBLE ] = "NE_DOUBLE",
	[ FILTER_OP_GT_DOUBLE ] = "GT_DOUBLE",
	[ FILTER_OP_LT_DOUBLE ] = "LT_DOUBLE",
	[ FILTER_OP_GE_DOUBLE ] = "GE_DOUBLE",
	[ FILTER_OP_LE_DOUBLE ] = "LE_DOUBLE",

	/* Mixed S64-double binary comparators */
	[ FILTER_OP_EQ_DOUBLE_S64 ] = "EQ_DOUBLE_S64",
	[ FILTER_OP_NE_DOUBLE_S64 ] = "NE_DOUBLE_S64",
	[ FILTER_OP_GT_DOUBLE_S64 ] = "GT_DOUBLE_S64",
	[ FILTER_OP_LT_DOUBLE_S64 ] = "LT_DOUBLE_S64",
	[ FILTER_OP_GE_DOUBLE_S64 ] = "GE_DOUBLE_S64",
	[ FILTER_OP_LE_DOUBLE_S64 ] = "LE_DOUBLE_S64",

	[ FILTER_OP_EQ_S64_DOUBLE ] = "EQ_S64_DOUBLE",
	[ FILTER_OP_NE_S64_DOUBLE ] = "NE_S64_DOUBLE",
	[ FILTER_OP_GT_S64_DOUBLE ] = "GT_S64_DOUBLE",
	[ FILTER_OP_LT_S64_DOUBLE ] = "LT_S64_DOUBLE",
	[ FILTER_OP_GE_S64_DOUBLE ] = "GE_S64_DOUBLE",
	[ FILTER_OP_LE_S64_DOUBLE ] = "LE_S64_DOUBLE",

	/* unary */
	[ FILTER_OP_UNARY_PLUS ] = "UNARY_PLUS",
	[ FILTER_OP_UNARY_MINUS ] = "UNARY_MINUS",
	[ FILTER_OP_UNARY_NOT ] = "UNARY_NOT",
	[ FILTER_OP_UNARY_PLUS_S64 ] = "UNARY_PLUS_S64",
	[ FILTER_OP_UNARY_MINUS_S64 ] = "UNARY_MINUS_S64",
	[ FILTER_OP_UNARY_NOT_S64 ] = "UNARY_NOT_S64",
	[ FILTER_OP_UNARY_PLUS_DOUBLE ] = "UNARY_PLUS_DOUBLE",
	[ FILTER_OP_UNARY_MINUS_DOUBLE ] = "UNARY_MINUS_DOUBLE",
	[ FILTER_OP_UNARY_NOT_DOUBLE ] = "UNARY_NOT_DOUBLE",

	/* logical */
	[ FILTER_OP_AND ] = "AND",
	[ FILTER_OP_OR ] = "OR",

	/* load field ref */
	[ FILTER_OP_LOAD_FIELD_REF ] = "LOAD_FIELD_REF",
	[ FILTER_OP_LOAD_FIELD_REF_STRING ] = "LOAD_FIELD_REF_STRING",
	[ FILTER_OP_LOAD_FIELD_REF_SEQUENCE ] = "LOAD_FIELD_REF_SEQUENCE",
	[ FILTER_OP_LOAD_FIELD_REF_S64 ] = "LOAD_FIELD_REF_S64",
	[ FILTER_OP_LOAD_FIELD_REF_DOUBLE ] = "LOAD_FIELD_REF_DOUBLE",

	/* load from immediate operand */
	[ FILTER_OP_LOAD_STRING ] = "LOAD_STRING",
	[ FILTER_OP_LOAD_S64 ] = "LOAD_S64",
	[ FILTER_OP_LOAD_DOUBLE ] = "LOAD_DOUBLE",

	/* cast */
	[ FILTER_OP_CAST_TO_S64 ] = "CAST_TO_S64",
	[ FILTER_OP_CAST_DOUBLE_TO_S64 ] = "CAST_DOUBLE_TO_S64",
	[ FILTER_OP_CAST_NOP ] = "CAST_NOP",

	/* get context ref */
	[ FILTER_OP_GET_CONTEXT_REF ] = "GET_CONTEXT_REF",
	[ FILTER_OP_GET_CONTEXT_REF_STRING ] = "GET_CONTEXT_REF_STRING",
	[ FILTER_OP_GET_CONTEXT_REF_S64 ] = "GET_CONTEXT_REF_S64",
	[ FILTER_OP_GET_CONTEXT_REF_DOUBLE ] = "GET_CONTEXT_REF_DOUBLE",

	/* load userspace field ref */
	[ FILTER_OP_LOAD_FIELD_REF_USER_STRING ] = "LOAD_FIELD_REF_USER_STRING",
	[ FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE ] = "LOAD_FIELD_REF_USER_SEQUENCE",

	/*
	 * Load a star globbing pattern (literal string) from an
	 * immediate operand.
	 */
	[ FILTER_OP_LOAD_STAR_GLOB_STRING ] = "LOAD_STAR_GLOB_STRING",

	/* star globbing pattern binary comparators */
	[ FILTER_OP_EQ_STAR_GLOB_STRING ] = "EQ_STAR_GLOB_STRING",
	[ FILTER_OP_NE_STAR_GLOB_STRING ] = "NE_STAR_GLOB_STRING",

	/*
	 * Instructions for recursive traversal through composed types.
	 */
	[ FILTER_OP_GET_CONTEXT_ROOT ] = "GET_CONTEXT_ROOT",
	[ FILTER_OP_GET_APP_CONTEXT_ROOT ] = "GET_APP_CONTEXT_ROOT",
	[ FILTER_OP_GET_PAYLOAD_ROOT ] = "GET_PAYLOAD_ROOT",

	[ FILTER_OP_GET_SYMBOL ] = "GET_SYMBOL",
	[ FILTER_OP_GET_SYMBOL_FIELD ] = "GET_SYMBOL_FIELD",
	[ FILTER_OP_GET_INDEX_U16 ] = "GET_INDEX_U16",
	[ FILTER_OP_GET_INDEX_U64 ] = "GET_INDEX_U64",

	[ FILTER_OP_LOAD_FIELD ] = "LOAD_FIELD",
	[ FILTER_OP_LOAD_FIELD_S8 ] = "LOAD_FIELD_S8",
	[ FILTER_OP_LOAD_FIELD_S16 ] = "LOAD_FIELD_S16",
	[ FILTER_OP_LOAD_FIELD_S32 ] = "LOAD_FIELD_S32",
	[ FILTER_OP_LOAD_FIELD_S64 ] = "LOAD_FIELD_S64",
	[ FILTER_OP_LOAD_FIELD_U8 ] = "LOAD_FIELD_U8",
	[ FILTER_OP_LOAD_FIELD_U16 ] = "LOAD_FIELD_U16",
	[ FILTER_OP_LOAD_FIELD_U32 ] = "LOAD_FIELD_U32",
	[ FILTER_OP_LOAD_FIELD_U64 ] = "LOAD_FIELD_U64",
	[ FILTER_OP_LOAD_FIELD_STRING ] = "LOAD_FIELD_STRING",
	[ FILTER_OP_LOAD_FIELD_SEQUENCE ] = "LOAD_FIELD_SEQUENCE",
	[ FILTER_OP_LOAD_FIELD_DOUBLE ] = "LOAD_FIELD_DOUBLE",

	[ FILTER_OP_UNARY_BIT_NOT ] = "UNARY_BIT_NOT",
};

const char *lttng_filter_print_op(enum filter_op op)
{
	if (op >= NR_FILTER_OPS)
		return "UNKNOWN";
	else
		return opnames[op];
}
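
/*
 * Example (hypothetical call site, not part of this file): an opcode can be
 * pretty-printed for debugging as
 *	dbg_printk("op %s\n", lttng_filter_print_op((enum filter_op) op->op));
 * Out-of-range values fall back to "UNKNOWN" rather than indexing past the
 * end of the opnames table.
 */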

static
int apply_field_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *field_name,
		enum filter_op filter_op)
{
	const struct lttng_event_desc *desc;
	const struct lttng_event_field *fields, *field = NULL;
	unsigned int nr_fields, i;
	struct load_op *op;
	uint32_t field_offset = 0;

	dbg_printk("Apply field reloc: %u %s\n", reloc_offset, field_name);

	/* Lookup event by name */
	desc = event->desc;
	if (!desc)
		return -EINVAL;
	fields = desc->fields;
	if (!fields)
		return -EINVAL;
	nr_fields = desc->nr_fields;
	for (i = 0; i < nr_fields; i++) {
		if (!strcmp(fields[i].name, field_name)) {
			field = &fields[i];
			break;
		}
		/* compute field offset */
		switch (fields[i].type.atype) {
		case atype_integer:
		case atype_enum:
			field_offset += sizeof(int64_t);
			break;
		case atype_array:
		case atype_sequence:
		case atype_array_bitfield:
		case atype_sequence_bitfield:
			field_offset += sizeof(unsigned long);
			field_offset += sizeof(void *);
			break;
		case atype_string:
			field_offset += sizeof(void *);
			break;
		case atype_struct: /* Unsupported. */
		case atype_array_compound: /* Unsupported. */
		case atype_sequence_compound: /* Unsupported. */
		case atype_variant: /* Unsupported. */
		default:
			return -EINVAL;
		}
	}
	if (!field)
		return -EINVAL;

	/* Check if field offset is too large for 16-bit offset */
	if (field_offset > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* set type */
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (filter_op) {
	case FILTER_OP_LOAD_FIELD_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (field->type.atype) {
		case atype_integer:
		case atype_enum:
			op->op = FILTER_OP_LOAD_FIELD_REF_S64;
			break;
		case atype_array:
		case atype_sequence:
			if (field->user)
				op->op = FILTER_OP_LOAD_FIELD_REF_USER_SEQUENCE;
			else
				op->op = FILTER_OP_LOAD_FIELD_REF_SEQUENCE;
			break;
		case atype_string:
			if (field->user)
				op->op = FILTER_OP_LOAD_FIELD_REF_USER_STRING;
			else
				op->op = FILTER_OP_LOAD_FIELD_REF_STRING;
			break;
		case atype_struct: /* Unsupported. */
		case atype_array_compound: /* Unsupported. */
		case atype_sequence_compound: /* Unsupported. */
		case atype_variant: /* Unsupported. */
		case atype_array_bitfield: /* Unsupported. */
		case atype_sequence_bitfield: /* Unsupported. */
		default:
			return -EINVAL;
		}
		/* set offset */
		field_ref->offset = (uint16_t) field_offset;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
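
/*
 * Worked example (hypothetical event layout, for illustration only): for an
 * event whose interpreter-visible fields are { integer "pid", string "comm",
 * integer "fd" }, looking up "fd" walks past "pid" (+sizeof(int64_t)) and
 * "comm" (+sizeof(void *)), so on a 64-bit kernel the relocated
 * field_ref->offset would be 16.
 */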

static
int apply_context_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *context_name,
		enum filter_op filter_op)
{
	struct load_op *op;
	struct lttng_ctx_field *ctx_field;
	int idx;

	dbg_printk("Apply context reloc: %u %s\n", reloc_offset, context_name);

	/* Get context index */
	idx = lttng_get_context_index(lttng_static_ctx, context_name);
	if (idx < 0)
		return -ENOENT;

	/* Check if idx is too large for 16-bit offset */
	if (idx > LTTNG_KERNEL_FILTER_BYTECODE_MAX_LEN - 1)
		return -EINVAL;

	/* Get context return type */
	ctx_field = &lttng_static_ctx->fields[idx];
	op = (struct load_op *) &runtime->code[reloc_offset];

	switch (filter_op) {
	case FILTER_OP_GET_CONTEXT_REF:
	{
		struct field_ref *field_ref;

		field_ref = (struct field_ref *) op->data;
		switch (ctx_field->event_field.type.atype) {
		case atype_integer:
		case atype_enum:
			op->op = FILTER_OP_GET_CONTEXT_REF_S64;
			break;
		/* Sequence and array supported as string */
		case atype_string:
		case atype_array:
		case atype_sequence:
			BUG_ON(ctx_field->event_field.user);
			op->op = FILTER_OP_GET_CONTEXT_REF_STRING;
			break;
		case atype_struct: /* Unsupported. */
		case atype_array_compound: /* Unsupported. */
		case atype_sequence_compound: /* Unsupported. */
		case atype_variant: /* Unsupported. */
		case atype_array_bitfield: /* Unsupported. */
		case atype_sequence_bitfield: /* Unsupported. */
		default:
			return -EINVAL;
		}
		/* set offset to context index within channel contexts */
		field_ref->offset = (uint16_t) idx;
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}

static
int apply_reloc(struct lttng_event *event,
		struct bytecode_runtime *runtime,
		uint32_t runtime_len,
		uint32_t reloc_offset,
		const char *name)
{
	struct load_op *op;

	dbg_printk("Apply reloc: %u %s\n", reloc_offset, name);

	/* Ensure that the reloc is within the code */
	if (runtime_len - reloc_offset < sizeof(uint16_t))
		return -EINVAL;

	op = (struct load_op *) &runtime->code[reloc_offset];
	switch (op->op) {
	case FILTER_OP_LOAD_FIELD_REF:
		return apply_field_reloc(event, runtime, runtime_len,
				reloc_offset, name, op->op);
	case FILTER_OP_GET_CONTEXT_REF:
		return apply_context_reloc(event, runtime, runtime_len,
				reloc_offset, name, op->op);
	case FILTER_OP_GET_SYMBOL:
	case FILTER_OP_GET_SYMBOL_FIELD:
		/*
		 * Will be handled by load specialize phase or
		 * dynamically by interpreter.
		 */
		return 0;
	default:
		printk(KERN_WARNING "Unknown reloc op type %u\n", op->op);
		return -EINVAL;
	}
	return 0;
}

static
int bytecode_is_linked(struct lttng_filter_bytecode_node *filter_bytecode,
		struct lttng_event *event)
{
	struct lttng_bytecode_runtime *bc_runtime;

	list_for_each_entry(bc_runtime,
			&event->bytecode_runtime_head, node) {
		if (bc_runtime->bc == filter_bytecode)
			return 1;
	}
	return 0;
}

/*
 * Take a bytecode with reloc table and link it to an event to create a
 * bytecode runtime.
 */
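/*
 * Bytecode image layout, as consumed by the linking loop below:
 *
 *	[ instructions: bc.data[0 .. bc.reloc_offset)  ]
 *	[ reloc table:  bc.data[bc.reloc_offset .. bc.len) ]
 *
 * Each reloc table entry is a uint16_t offset into the instruction stream
 * followed by a NUL-terminated field or context name. Only the instruction
 * part is copied into the runtime; the reloc table is consumed here and then
 * dropped.
 */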
static
int _lttng_filter_event_link_bytecode(struct lttng_event *event,
		struct lttng_filter_bytecode_node *filter_bytecode,
		struct list_head *insert_loc)
{
	int ret, offset, next_offset;
	struct bytecode_runtime *runtime = NULL;
	size_t runtime_alloc_len;

	if (!filter_bytecode)
		return 0;
	/* Bytecode already linked */
	if (bytecode_is_linked(filter_bytecode, event))
		return 0;

	dbg_printk("Linking...\n");

	/* We don't need the reloc table in the runtime */
	runtime_alloc_len = sizeof(*runtime) + filter_bytecode->bc.reloc_offset;
	runtime = kzalloc(runtime_alloc_len, GFP_KERNEL);
	if (!runtime) {
		ret = -ENOMEM;
		goto alloc_error;
	}
	runtime->p.bc = filter_bytecode;
	runtime->p.event = event;
	runtime->len = filter_bytecode->bc.reloc_offset;
	/* copy original bytecode */
	memcpy(runtime->code, filter_bytecode->bc.data, runtime->len);
	/*
	 * apply relocs. Those are a uint16_t (offset in bytecode)
	 * followed by a string (field name).
	 */
	for (offset = filter_bytecode->bc.reloc_offset;
			offset < filter_bytecode->bc.len;
			offset = next_offset) {
		uint16_t reloc_offset =
			*(uint16_t *) &filter_bytecode->bc.data[offset];
		const char *name =
			(const char *) &filter_bytecode->bc.data[offset + sizeof(uint16_t)];

		ret = apply_reloc(event, runtime, runtime->len, reloc_offset, name);
		if (ret) {
			goto link_error;
		}
		next_offset = offset + sizeof(uint16_t) + strlen(name) + 1;
	}
	/* Validate bytecode */
	ret = lttng_filter_validate_bytecode(runtime);
	if (ret) {
		goto link_error;
	}
	/* Specialize bytecode */
	ret = lttng_filter_specialize_bytecode(event, runtime);
	if (ret) {
		goto link_error;
	}
	runtime->p.filter = lttng_filter_interpret_bytecode;
	runtime->p.link_failed = 0;
	list_add_rcu(&runtime->p.node, insert_loc);
	dbg_printk("Linking successful.\n");
	return 0;

link_error:
	runtime->p.filter = lttng_filter_false;
	runtime->p.link_failed = 1;
	list_add_rcu(&runtime->p.node, insert_loc);
alloc_error:
	dbg_printk("Linking failed.\n");
	return ret;
}

void lttng_filter_sync_state(struct lttng_bytecode_runtime *runtime)
{
	struct lttng_filter_bytecode_node *bc = runtime->bc;

	if (!bc->enabler->enabled || runtime->link_failed)
		runtime->filter = lttng_filter_false;
	else
		runtime->filter = lttng_filter_interpret_bytecode;
}

/*
 * Link bytecode for all enablers referenced by an event.
 */
void lttng_enabler_event_link_bytecode(struct lttng_event *event,
		struct lttng_enabler *enabler)
{
	struct lttng_filter_bytecode_node *bc;
	struct lttng_bytecode_runtime *runtime;

	/* Can only be called for events with desc attached */
	WARN_ON_ONCE(!event->desc);

	/* Link each bytecode. */
	list_for_each_entry(bc, &enabler->filter_bytecode_head, node) {
		int found = 0, ret;
		struct list_head *insert_loc;

		list_for_each_entry(runtime,
				&event->bytecode_runtime_head, node) {
			if (runtime->bc == bc) {
				found = 1;
				break;
			}
		}
		/* Skip bytecode already linked */
		if (found)
			continue;

		/*
		 * Insert at specified priority (seqnum) in increasing
		 * order.
		 */
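		/*
		 * For instance (illustrative seqnum values only): with
		 * existing runtimes at seqnums 1 and 4, a bytecode with
		 * seqnum 3 is inserted between them, while one with seqnum 0
		 * falls through to the list head below.
		 */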
		list_for_each_entry_reverse(runtime,
				&event->bytecode_runtime_head, node) {
			if (runtime->bc->bc.seqnum < bc->bc.seqnum) {
				/* insert here */
				insert_loc = &runtime->node;
				goto add_within;
			}
		}
		/* Add to head of list */
		insert_loc = &event->bytecode_runtime_head;
add_within:
		dbg_printk("linking bytecode\n");
		ret = _lttng_filter_event_link_bytecode(event, bc,
				insert_loc);
		if (ret) {
			dbg_printk("[lttng filter] warning: cannot link event bytecode\n");
		}
	}
}

/*
 * We own the filter_bytecode if we return success.
 */
int lttng_filter_enabler_attach_bytecode(struct lttng_enabler *enabler,
		struct lttng_filter_bytecode_node *filter_bytecode)
{
	list_add(&filter_bytecode->node, &enabler->filter_bytecode_head);
	return 0;
}

void lttng_free_enabler_filter_bytecode(struct lttng_enabler *enabler)
{
	struct lttng_filter_bytecode_node *filter_bytecode, *tmp;

	list_for_each_entry_safe(filter_bytecode, tmp,
			&enabler->filter_bytecode_head, node) {
		kfree(filter_bytecode);
	}
}

void lttng_free_event_filter_runtime(struct lttng_event *event)
{
	struct bytecode_runtime *runtime, *tmp;

	list_for_each_entry_safe(runtime, tmp,
			&event->bytecode_runtime_head, p.node) {
		kfree(runtime->data);
		kfree(runtime);
	}
}