X-Git-Url: http://git.lttng.org/?p=lttng-modules.git;a=blobdiff_plain;f=lttng-tp-mempool.c;h=9db6046b7c7813b97cd3a4f854696fa690ec0f69;hp=7e1b51d3cf7ff2ac9f5b0e996444e09ab2cb9c2b;hb=HEAD;hpb=b7cdc18250880cc44edeef4a4b42c8ac7a135a6d

diff --git a/lttng-tp-mempool.c b/lttng-tp-mempool.c
deleted file mode 100644
index 7e1b51d3..00000000
--- a/lttng-tp-mempool.c
+++ /dev/null
@@ -1,152 +0,0 @@
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * lttng-tp-mempool.c
- *
- * Copyright (C) 2018 Julien Desfossez
- */
-
-#include <linux/slab.h>
-#include <linux/percpu.h>
-
-#include <lttng-tp-mempool.h>
-
-struct lttng_tp_buf_entry {
-	int cpu; /* To make sure we return the entry to the right pool. */
-	char buf[LTTNG_TP_MEMPOOL_BUF_SIZE];
-	struct list_head list;
-};
-
-/*
- * No exclusive access strategy for now, this memory pool is currently only
- * used from a non-preemptible context, and the interrupt tracepoint probes do
- * not use this facility.
- */
-struct per_cpu_buf {
-	struct list_head free_list; /* Free struct lttng_tp_buf_entry. */
-};
-
-static struct per_cpu_buf __percpu *pool; /* Per-cpu buffer. */
-
-int lttng_tp_mempool_init(void)
-{
-	int ret, cpu;
-
-	/* The pool is only supposed to be allocated once. */
-	if (pool) {
-		WARN_ON_ONCE(1);
-		ret = -1;
-		goto end;
-	}
-
-	pool = alloc_percpu(struct per_cpu_buf);
-	if (!pool) {
-		ret = -ENOMEM;
-		goto end;
-	}
-
-	for_each_possible_cpu(cpu) {
-		struct per_cpu_buf *cpu_buf = per_cpu_ptr(pool, cpu);
-
-		INIT_LIST_HEAD(&cpu_buf->free_list);
-	}
-
-	for_each_possible_cpu(cpu) {
-		int i;
-		struct per_cpu_buf *cpu_buf = per_cpu_ptr(pool, cpu);
-
-		for (i = 0; i < LTTNG_TP_MEMPOOL_NR_BUF_PER_CPU; i++) {
-			struct lttng_tp_buf_entry *entry;
-
-			entry = kzalloc_node(sizeof(struct lttng_tp_buf_entry),
-					GFP_KERNEL, cpu_to_node(cpu));
-			if (!entry) {
-				ret = -ENOMEM;
-				goto error_free_pool;
-			}
-			entry->cpu = cpu;
-			list_add_tail(&entry->list, &cpu_buf->free_list);
-		}
-	}
-
-	ret = 0;
-	goto end;
-
-error_free_pool:
-	lttng_tp_mempool_destroy();
-end:
-	return ret;
-}
-
-void lttng_tp_mempool_destroy(void)
-{
-	int cpu;
-
-	if (!pool) {
-		return;
-	}
-
-	for_each_possible_cpu(cpu) {
-		struct per_cpu_buf *cpu_buf = per_cpu_ptr(pool, cpu);
-		struct lttng_tp_buf_entry *entry, *tmp;
-		int i = 0;
-
-		list_for_each_entry_safe(entry, tmp, &cpu_buf->free_list, list) {
-			list_del(&entry->list);
-			kfree(entry);
-			i++;
-		}
-		if (i < LTTNG_TP_MEMPOOL_NR_BUF_PER_CPU) {
-			printk(KERN_WARNING "Leak detected in tp-mempool\n");
-		}
-	}
-	free_percpu(pool);
-	pool = NULL;
-}
-
-void *lttng_tp_mempool_alloc(size_t size)
-{
-	void *ret;
-	struct lttng_tp_buf_entry *entry;
-	struct per_cpu_buf *cpu_buf;
-	int cpu = smp_processor_id();
-
-	if (size > LTTNG_TP_MEMPOOL_BUF_SIZE) {
-		ret = NULL;
-		goto end;
-	}
-
-	cpu_buf = per_cpu_ptr(pool, cpu);
-	if (list_empty(&cpu_buf->free_list)) {
-		ret = NULL;
-		goto end;
-	}
-
-	entry = list_first_entry(&cpu_buf->free_list, struct lttng_tp_buf_entry, list);
-	/* Remove the entry from the free list. */
-	list_del(&entry->list);
-
-	memset(entry->buf, 0, LTTNG_TP_MEMPOOL_BUF_SIZE);
-
-	ret = (void *) entry->buf;
-
-end:
-	return ret;
-}
-
-void lttng_tp_mempool_free(void *ptr)
-{
-	struct lttng_tp_buf_entry *entry;
-	struct per_cpu_buf *cpu_buf;
-
-	if (!ptr)
-		goto end;
-	entry = container_of(ptr, struct lttng_tp_buf_entry, buf);
-	cpu_buf = per_cpu_ptr(pool, entry->cpu);
-	if (!cpu_buf)
-		goto end;
-	/* Add it to the free list. */
-	list_add_tail(&entry->list, &cpu_buf->free_list);
-
-end:
-	return;
-}
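
Usage sketch (not part of the diff above): the deleted file exposes four entry points, and the snippet below shows how a caller could be expected to wire them together under the constraints stated in the file's comments (allocation only from a non-preemptible context, one pool allocated once for the lifetime of the module). The example_init()/example_probe()/example_exit() functions and the module boilerplate are hypothetical illustrations; only lttng_tp_mempool_init(), lttng_tp_mempool_alloc(), lttng_tp_mempool_free(), lttng_tp_mempool_destroy() and LTTNG_TP_MEMPOOL_BUF_SIZE come from the code shown above.

	#include <linux/module.h>
	#include <lttng-tp-mempool.h>

	static int __init example_init(void)
	{
		/* Pre-allocate the per-CPU free lists once, at module load. */
		return lttng_tp_mempool_init();
	}

	/* Hypothetical probe body, called with preemption disabled. */
	static void example_probe(void)
	{
		void *buf;

		/* Take a zeroed, fixed-size buffer from the current CPU's free list. */
		buf = lttng_tp_mempool_alloc(LTTNG_TP_MEMPOOL_BUF_SIZE);
		if (!buf)
			return;	/* Pool exhausted or request larger than a pool buffer. */

		/* ... serialize the event payload into buf ... */

		/* Return the entry to the free list of the CPU that owns it. */
		lttng_tp_mempool_free(buf);
	}

	static void __exit example_exit(void)
	{
		/* Free every entry and the per-CPU pool itself, at module unload. */
		lttng_tp_mempool_destroy();
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");

Note that lttng_tp_mempool_alloc() hands out the entry's buf member while lttng_tp_mempool_free() recovers the enclosing entry with container_of() and its cpu field, which is why a buffer may safely be freed from a different CPU than the one it was allocated on.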