fix: adjust ranges for RHEL 8.2 and 8.3
[lttng-modules.git] include/wrapper/vmalloc.h

/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * wrapper/vmalloc.h
 *
 * Wrapper around vmalloc_sync_all(). Uses KALLSYMS to look up its address
 * when available; otherwise, the kernel must export this function to GPL
 * modules.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_WRAPPER_VMALLOC_H
#define _LTTNG_WRAPPER_VMALLOC_H

#include <lttng/kernel-version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

#ifdef CONFIG_KALLSYMS

#include <linux/kallsyms.h>
#include <wrapper/kallsyms.h>
#include <lttng/kernel-version.h>

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,8,0))

/*
 * vmalloc_sync_mappings() was removed in v5.8; the vmalloc mappings
 * are now synchronized when they are created or torn down.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{}

#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0) \
	|| LTTNG_RHEL_KERNEL_RANGE(4,18,0,240,0,0, 4,19,0,0,0,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_mappings_sym)(void);

	vmalloc_sync_mappings_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_mappings");
	if (vmalloc_sync_mappings_sym) {
		vmalloc_sync_mappings_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_mappings() to make sure LTTng
		 * does not trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_mappings symbol lookup failed.\n");
		printk_once(KERN_WARNING "LTTng: Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

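/*
 * Illustrative usage (hypothetical caller; 'buf' and 'sz' are placeholders,
 * not part of this header): code handing vmalloc'd memory to the tracing
 * fast path is expected to synchronize the mappings first, e.g.:
 *
 *	buf = vmalloc(sz);
 *	if (buf)
 *		wrapper_vmalloc_sync_mappings();
 *
 * On x86 kernels before v5.8, this pre-faults the kernel page tables so
 * that later accesses from the page fault handler or NMI context do not
 * themselves fault recursively.
 */
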
/*
 * Canary function to check for 'vmalloc_sync_mappings()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   void vmalloc_sync_mappings(void);
 */
static inline
void __canary__vmalloc_sync_mappings(void)
{
	vmalloc_sync_mappings();
}

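/*
 * Why the canary works (illustrative): the call above forces the compiler
 * to check the declared prototype. If a future kernel changed it, say to a
 * hypothetical:
 *
 *	int vmalloc_sync_mappings(unsigned long start);
 *
 * the call would no longer compile, signalling that the (void (*)(void))
 * cast used with kallsyms_lookup_funcptr() above has gone stale.
 */
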
#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0)) */

/*
 * Map wrapper_vmalloc_sync_mappings() to vmalloc_sync_all() on kernels
 * before 5.6.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_all_sym)(void);

	vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
	if (vmalloc_sync_all_sym) {
		vmalloc_sync_all_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_all() to make sure LTTng does
		 * not trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
		printk_once(KERN_WARNING "LTTng: Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

/*
 * Canary function to check for 'vmalloc_sync_all()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   void vmalloc_sync_all(void);
 */
static inline
void __canary__vmalloc_sync_all(void)
{
	vmalloc_sync_all();
}

#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0)) */

#else /* CONFIG_KALLSYMS */

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,8,0))

/*
 * vmalloc_sync_mappings() was removed in v5.8; the vmalloc mappings
 * are now synchronized when they are created or torn down.
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{}

#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	vmalloc_sync_mappings();
}

#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0)) */

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	vmalloc_sync_all();
}

#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0)) */

#endif /* CONFIG_KALLSYMS */

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,12,0))

static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	ret = kvmalloc_node(size, flags, node);
	if (is_vmalloc_addr(ret)) {
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	kvfree(addr);
}

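/*
 * Illustrative usage (hypothetical caller; 'struct foo', 'state' and
 * 'nmemb' are placeholders, not part of this header): allocations that may
 * exceed a page transparently fall back to vmalloc, and both cases are
 * freed with lttng_kvfree():
 *
 *	struct foo *state;
 *
 *	state = lttng_kvzalloc(nmemb * sizeof(*state), GFP_KERNEL);
 *	if (!state)
 *		return -ENOMEM;
 *	...
 *	lttng_kvfree(state);
 */
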
#else

#include <linux/slab.h>

static inline
void print_vmalloc_node_range_warning(void)
{
	printk_once(KERN_WARNING "LTTng: __vmalloc_node_range symbol lookup failed.\n");
	printk_once(KERN_WARNING "LTTng: Tracer performance will be degraded on NUMA systems.\n");
	printk_once(KERN_WARNING "LTTng: Please rebuild your kernel with CONFIG_KALLSYMS enabled.\n");
}

#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,0,0))

/*
 * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, look up __vmalloc_node_range, which is not
	 * exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				vm_flags, node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}

/*
 * Canary function to check for '__vmalloc_node_range()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 *   		unsigned long start, unsigned long end, gfp_t gfp_mask,
 *   		pgprot_t prot, unsigned long vm_flags, int node,
 *   		const void *caller);
 */
static inline
void *__canary____lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
	return __vmalloc_node_range(size, align, start, end, gfp_mask, prot,
			vm_flags, node, caller);
}

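/*
 * Illustrative call (mirrors the use in lttng_kvmalloc_node() below; 'p',
 * 'sz' and 'node' are placeholders): a node-local, vmalloc-style
 * allocation through this wrapper would look like:
 *
 *	p = __lttng_vmalloc_node_range(sz, 1, VMALLOC_START, VMALLOC_END,
 *			GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, 0,
 *			node, __builtin_return_address(0));
 */
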
#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,6,0))

/*
 * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, look up __vmalloc_node_range, which is not
	 * exported. On these kernels its prototype has no 'vm_flags'
	 * parameter.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, const void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}

/*
 * Canary function to check for '__vmalloc_node_range()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 *   		unsigned long start, unsigned long end, gfp_t gfp_mask,
 *   		pgprot_t prot, int node, const void *caller);
 */
static inline
void *__canary____lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, int node, const void *caller)
{
	return __vmalloc_node_range(size, align, start, end, gfp_mask, prot,
			node, caller);
}

#else /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,6,0)) */

/*
 * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, look up __vmalloc_node_range, which is not
	 * exported. On these kernels, 'caller' is not const-qualified.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, int node, void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}

/*
 * Canary function to check for '__vmalloc_node_range()' at compile time.
 *
 * From 'include/linux/vmalloc.h':
 *
 *   extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
 *   		unsigned long start, unsigned long end, gfp_t gfp_mask,
 *   		pgprot_t prot, int node, void *caller);
 */
static inline
void *__canary____lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, int node, void *caller)
{
	return __vmalloc_node_range(size, align, start, end, gfp_mask, prot,
			node, caller);
}

#endif

/**
 * lttng_kvmalloc_node - attempt to allocate physically contiguous memory,
 * but upon failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible with GFP_KERNEL.
 * @node: NUMA node to allocate from (NUMA_NO_NODE for no preference).
 *
 * Uses kmalloc to get the memory, but if the allocation fails, falls back
 * to the vmalloc allocator. Use lttng_kvfree to free the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not
 * supported.
 */
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page
	 * tables), so the given set of flags has to be compatible.
	 */
	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

	/*
	 * If the allocation fits in a single page, do not fall back.
	 */
	if (size <= PAGE_SIZE) {
		return kmalloc_node(size, flags, node);
	}

	/*
	 * Make sure that larger requests are not too disruptive - no OOM
	 * killer and no allocation failure warnings, as we have a fallback.
	 */
	ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
	if (!ret) {
		ret = __lttng_vmalloc_node_range(size, 1,
				VMALLOC_START, VMALLOC_END,
				flags | __GFP_HIGHMEM, PAGE_KERNEL, 0,
				node, __builtin_return_address(0));
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

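/*
 * Note (illustrative; 'p' and 'sz' are placeholders): since vmalloc needs
 * GFP_KERNEL-compatible flags internally, an atomic mask would trip the
 * WARN_ON_ONCE above, e.g.:
 *
 *	p = lttng_kvmalloc(sz, GFP_NOWAIT);	// warns: not GFP_KERNEL-compatible
 */
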
static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		kfree(addr);
	}
}
#endif

#endif /* _LTTNG_WRAPPER_VMALLOC_H */