fix: vmalloc_sync_mappings was backported to v5.5.12
include/wrapper/vmalloc.h
/* SPDX-License-Identifier: (GPL-2.0-only OR LGPL-2.1-only)
 *
 * wrapper/vmalloc.h
 *
 * wrapper around vmalloc_sync_mappings/vmalloc_sync_all. Using KALLSYMS to
 * get the symbol's address when available, else we need to have a kernel
 * that exports this function to GPL modules.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_WRAPPER_VMALLOC_H
#define _LTTNG_WRAPPER_VMALLOC_H

#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

#ifdef CONFIG_KALLSYMS

#include <linux/kallsyms.h>
#include <wrapper/kallsyms.h>
#include <lttng/kernel-version.h>

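/*
 * vmalloc_sync_mappings() was introduced in kernel 5.6 and backported to the
 * stable branches listed below (including v5.5.12) as well as to the listed
 * Ubuntu kernels.
 */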
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) \
        || LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
        || LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
        || LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
        || LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
        || LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
        || LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
        || LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
        || LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
        || LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
        || LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
        void (*vmalloc_sync_mappings_sym)(void);

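        /*
         * vmalloc_sync_mappings() is not exported to modules, so resolve it
         * at runtime through kallsyms.
         */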
        vmalloc_sync_mappings_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_mappings");
        if (vmalloc_sync_mappings_sym) {
                vmalloc_sync_mappings_sym();
        } else {
#ifdef CONFIG_X86
                /*
                 * Only x86 needs vmalloc_sync_mappings to make sure LTTng does not
                 * trigger recursive page faults.
                 */
                printk_once(KERN_WARNING "LTTng: vmalloc_sync_mappings symbol lookup failed.\n");
                printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
        }
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

/*
 * Map vmalloc_sync_mappings to vmalloc_sync_all() on kernels before 5.6.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{
        void (*vmalloc_sync_all_sym)(void);

        vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
        if (vmalloc_sync_all_sym) {
                vmalloc_sync_all_sym();
        } else {
#ifdef CONFIG_X86
                /*
                 * Only x86 needs vmalloc_sync_all to make sure LTTng does not
                 * trigger recursive page faults.
                 */
                printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
                printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
        }
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#else

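/*
 * Without CONFIG_KALLSYMS, we need a kernel that exports these functions to
 * GPL modules, and we call them directly.
 */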
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) \
        || LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
        || LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
        || LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
        || LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
        || LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
        || LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
        || LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
        || LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
        || LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
        || LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
        vmalloc_sync_mappings();
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

static inline
void wrapper_vmalloc_sync_mappings(void)
{
        vmalloc_sync_all();
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#endif

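/*
 * kvmalloc_node() was introduced in kernel 4.12; on older kernels the
 * equivalent behaviour is open-coded further below.
 */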
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
        void *ret;

        ret = kvmalloc_node(size, flags, node);
        if (is_vmalloc_addr(ret)) {
                /*
                 * Make sure we don't trigger recursive page faults in the
                 * tracing fast path.
                 */
                wrapper_vmalloc_sync_mappings();
        }
        return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
        return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
        return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
        return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
        kvfree(addr);
}

#else

#include <linux/slab.h>

static inline
void print_vmalloc_node_range_warning(void)
{
        printk_once(KERN_WARNING "LTTng: __vmalloc_node_range symbol lookup failed.\n");
        printk_once(KERN_WARNING "Tracer performance will be degraded on NUMA systems.\n");
        printk_once(KERN_WARNING "Please rebuild your kernel with CONFIG_KALLSYMS enabled.\n");
}

/*
 * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
                unsigned long start, unsigned long end, gfp_t gfp_mask,
                pgprot_t prot, unsigned long vm_flags, int node,
                const void *caller)
{
#ifdef CONFIG_KALLSYMS
        /*
         * If we have KALLSYMS, get __vmalloc_node_range, which is not exported.
         */
        void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
                        unsigned long start, unsigned long end, gfp_t gfp_mask,
                        pgprot_t prot, unsigned long vm_flags, int node,
                        const void *caller);

        lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
        if (lttng__vmalloc_node_range)
                return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
                                vm_flags, node, caller);
#endif
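        /*
         * Fall back to __vmalloc(), which cannot honour the requested node,
         * alignment or address range; warn when a specific NUMA node was
         * requested.
         */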
        if (node != NUMA_NO_NODE)
                print_vmalloc_node_range_warning();
        return __vmalloc(size, gfp_mask, prot);
}

/**
 * lttng_kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible with GFP_KERNEL.
 * @node: numa node to allocate from.
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use lttng_kvfree to free the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported.
 */
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
        void *ret;

        /*
         * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page tables)
         * so the given set of flags has to be compatible.
         */
        WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

        /*
         * If the allocation fits in a single page, do not fall back.
         */
        if (size <= PAGE_SIZE) {
                return kmalloc_node(size, flags, node);
        }

        /*
         * Make sure that larger requests are not too disruptive - no OOM
         * killer and no allocation failure warnings as we have a fallback.
         */
        ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
        if (!ret) {
                ret = __lttng_vmalloc_node_range(size, 1,
                                VMALLOC_START, VMALLOC_END,
                                flags | __GFP_HIGHMEM, PAGE_KERNEL, 0,
                                node, __builtin_return_address(0));
                /*
                 * Make sure we don't trigger recursive page faults in the
                 * tracing fast path.
                 */
                wrapper_vmalloc_sync_mappings();
        }
        return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
        return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
        return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
        return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
        if (is_vmalloc_addr(addr)) {
                vfree(addr);
        } else {
                kfree(addr);
        }
}
#endif
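
/*
 * Example usage (hypothetical caller, for illustration only):
 *
 *	struct lttng_foo *foo;
 *
 *	foo = lttng_kvzalloc(nr_entries * sizeof(*foo), GFP_KERNEL);
 *	if (!foo)
 *		return -ENOMEM;
 *	...
 *	lttng_kvfree(foo);
 */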

#endif /* _LTTNG_WRAPPER_VMALLOC_H */