/*
 * lttng-tp-mempool.c
 *
 * Copyright (C) 2018 Julien Desfossez <jdesfossez@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/slab.h>
#include <linux/percpu.h>

#include <lttng-tp-mempool.h>

struct lttng_tp_buf_entry {
	int cpu; /* To make sure we return the entry to the right pool. */
	char buf[LTTNG_TP_MEMPOOL_BUF_SIZE];
	struct list_head list;
};

/*
 * No exclusive access strategy for now: this memory pool is currently only
 * used from a non-preemptible context, and the interrupt tracepoint probes
 * do not use this facility.
 */
struct per_cpu_buf {
	struct list_head free_list; /* Free struct lttng_tp_buf_entry. */
};

static struct per_cpu_buf __percpu *pool; /* Per-cpu buffer. */

int lttng_tp_mempool_init(void)
{
	int ret, cpu;

	/* The pool is only supposed to be allocated once. */
	if (pool) {
		WARN_ON_ONCE(1);
		ret = -1;
		goto end;
	}

	pool = alloc_percpu(struct per_cpu_buf);
	if (!pool) {
		ret = -ENOMEM;
		goto end;
	}

	for_each_possible_cpu(cpu) {
		struct per_cpu_buf *cpu_buf = per_cpu_ptr(pool, cpu);

		INIT_LIST_HEAD(&cpu_buf->free_list);
	}

	for_each_possible_cpu(cpu) {
		int i;
		struct per_cpu_buf *cpu_buf = per_cpu_ptr(pool, cpu);

		for (i = 0; i < LTTNG_TP_MEMPOOL_NR_BUF_PER_CPU; i++) {
			struct lttng_tp_buf_entry *entry;

			/* Allocate each entry on the NUMA node of its CPU. */
			entry = kzalloc_node(sizeof(struct lttng_tp_buf_entry),
					GFP_KERNEL, cpu_to_node(cpu));
			if (!entry) {
				ret = -ENOMEM;
				goto error_free_pool;
			}
			entry->cpu = cpu;
			list_add_tail(&entry->list, &cpu_buf->free_list);
		}
	}

	ret = 0;
	goto end;

error_free_pool:
	lttng_tp_mempool_destroy();
end:
	return ret;
}

void lttng_tp_mempool_destroy(void)
{
	int cpu;

	if (!pool) {
		return;
	}

	for_each_possible_cpu(cpu) {
		struct per_cpu_buf *cpu_buf = per_cpu_ptr(pool, cpu);
		struct lttng_tp_buf_entry *entry, *tmp;
		int i = 0;

		list_for_each_entry_safe(entry, tmp, &cpu_buf->free_list, list) {
			list_del(&entry->list);
			kfree(entry);
			i++;
		}
		/* Entries not returned to the free list by now are leaked. */
		if (i < LTTNG_TP_MEMPOOL_NR_BUF_PER_CPU) {
			printk(KERN_WARNING "Leak detected in tp-mempool\n");
		}
	}
	free_percpu(pool);
	pool = NULL;
}

void *lttng_tp_mempool_alloc(size_t size)
{
	void *ret;
	struct lttng_tp_buf_entry *entry;
	struct per_cpu_buf *cpu_buf;
	/* Callers run in non-preemptible context (see comment above). */
	int cpu = smp_processor_id();

	if (size > LTTNG_TP_MEMPOOL_BUF_SIZE) {
		ret = NULL;
		goto end;
	}

	cpu_buf = per_cpu_ptr(pool, cpu);
	if (list_empty(&cpu_buf->free_list)) {
		ret = NULL;
		goto end;
	}

	entry = list_first_entry(&cpu_buf->free_list, struct lttng_tp_buf_entry, list);
	/* Remove the entry from the free list. */
	list_del(&entry->list);

	memset(entry->buf, 0, LTTNG_TP_MEMPOOL_BUF_SIZE);

	ret = (void *) entry->buf;

end:
	return ret;
}

void lttng_tp_mempool_free(void *ptr)
{
	struct lttng_tp_buf_entry *entry;
	struct per_cpu_buf *cpu_buf;

	if (!ptr) {
		goto end;
	}

	/* Recover the enclosing entry from the embedded buf pointer. */
	entry = container_of(ptr, struct lttng_tp_buf_entry, buf);
	if (!entry) {
		goto end;
	}

	cpu_buf = per_cpu_ptr(pool, entry->cpu);
	if (!cpu_buf) {
		goto end;
	}
	/* Add it back to the free list of its owning CPU. */
	list_add_tail(&entry->list, &cpu_buf->free_list);

end:
	return;
}
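
/*
 * Usage sketch (illustrative, not part of the original file): a caller
 * running in non-preemptible context, such as a tracepoint probe, borrows
 * a buffer from the local CPU's free list, fills it, and hands it back.
 * The function name and payload below are hypothetical; they only
 * demonstrate the alloc/free contract under the assumptions documented
 * in the comments above.
 */
#if 0
static void example_record_payload(const void *payload, size_t len)
{
	char *buf;

	/*
	 * Returns NULL if len exceeds LTTNG_TP_MEMPOOL_BUF_SIZE or the
	 * local CPU's free list is empty.
	 */
	buf = lttng_tp_mempool_alloc(len);
	if (!buf)
		return;

	memcpy(buf, payload, len);
	/* ... record buf into the trace here ... */

	/* Return the entry to the free list of the CPU that allocated it. */
	lttng_tp_mempool_free(buf);
}
#endif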