fix: vmalloc_sync_mappings was backported to v5.5.12
[lttng-modules.git] / include/wrapper/vmalloc.h
/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
 *
 * wrapper/vmalloc.h
 *
 * Wrapper around vmalloc_sync_mappings() and vmalloc_sync_all(). Uses
 * KALLSYMS to get their addresses when available; otherwise the kernel must
 * export these functions to GPL modules.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */
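
/*
 * This header provides two families of helpers:
 *
 *  - wrapper_vmalloc_sync_mappings(): calls vmalloc_sync_mappings() (or
 *    vmalloc_sync_all() on kernels that only provide that symbol), resolving
 *    the function through kallsyms when the kernel does not export it.
 *
 *  - lttng_kvmalloc()/lttng_kvzalloc()/lttng_kvmalloc_node()/lttng_kvfree():
 *    kvmalloc-style allocators that synchronize the vmalloc mappings
 *    whenever the memory is served by vmalloc, so the page fault handler and
 *    NMI tracing fast paths do not take recursive faults on x86.
 */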

#ifndef _LTTNG_WRAPPER_VMALLOC_H
#define _LTTNG_WRAPPER_VMALLOC_H

#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

#ifdef CONFIG_KALLSYMS

#include <linux/kallsyms.h>
#include <wrapper/kallsyms.h>
#include <lttng/kernel-version.h>

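/*
 * vmalloc_sync_mappings() replaced vmalloc_sync_all() in mainline 5.6 and was
 * backported to the stable and Ubuntu kernels listed below (including the
 * 5.5.12 backport mentioned in the commit subject); older kernels fall back
 * to looking up vmalloc_sync_all() instead.
 */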
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))

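/*
 * vmalloc_sync_mappings() is typically not exported to GPL modules, so it is
 * looked up at run time through kallsyms. If the lookup fails, warn only on
 * x86, the only architecture where the missing sync can lead to recursive
 * page faults in the tracer.
 */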
static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_mappings_sym)(void);

	vmalloc_sync_mappings_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_mappings");
	if (vmalloc_sync_mappings_sym) {
		vmalloc_sync_mappings_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_mappings to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_mappings symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

/*
 * Map wrapper_vmalloc_sync_mappings() to vmalloc_sync_all() on kernels that
 * do not provide vmalloc_sync_mappings() (mainline kernels before 5.6
 * without the stable backports listed above).
 */
static inline
void wrapper_vmalloc_sync_mappings(void)
{
	void (*vmalloc_sync_all_sym)(void);

	vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
	if (vmalloc_sync_all_sym) {
		vmalloc_sync_all_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_all to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#else /* CONFIG_KALLSYMS */

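/*
 * Without CONFIG_KALLSYMS, call the function directly. As noted in the
 * header comment, this requires a kernel that exports the function to GPL
 * modules.
 */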
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	return vmalloc_sync_mappings();
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

static inline
void wrapper_vmalloc_sync_mappings(void)
{
	return vmalloc_sync_all();
}

#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,6,0)) */

#endif /* CONFIG_KALLSYMS */

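/*
 * On kernels >= 4.12, kvmalloc_node() already implements the "try kmalloc,
 * then fall back to vmalloc" policy, so the wrappers below only add the
 * mapping synchronization. On older kernels, the #else branch re-implements
 * the fallback on top of kmalloc_node() and __vmalloc_node_range().
 */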
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	ret = kvmalloc_node(size, flags, node);
	if (is_vmalloc_addr(ret)) {
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	kvfree(addr);
}

#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)) */

#include <linux/slab.h>

static inline
void print_vmalloc_node_range_warning(void)
{
	printk_once(KERN_WARNING "LTTng: __vmalloc_node_range symbol lookup failed.\n");
	printk_once(KERN_WARNING "Tracer performance will be degraded on NUMA systems.\n");
	printk_once(KERN_WARNING "Please rebuild your kernel with CONFIG_KALLSYMS enabled.\n");
}

/*
 * kallsyms wrapper of __vmalloc_node_range, with a fallback to __vmalloc()
 * when the symbol cannot be looked up.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, get __vmalloc_node_range which is not exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				vm_flags, node, caller);
#endif
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}

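/*
 * Note: when the kallsyms lookup fails, the __vmalloc() fallback above cannot
 * honour the requested NUMA node, which is why requesting a node other than
 * NUMA_NO_NODE triggers the performance warning.
 */
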
/**
 * lttng_kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible with GFP_KERNEL.
 * @node: NUMA node to allocate from, or NUMA_NO_NODE.
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use lttng_kvfree to free the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported.
 */
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
	 * so the given set of flags has to be compatible.
	 */
	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

	/*
	 * If the allocation fits in a single page, do not fall back.
	 */
	if (size <= PAGE_SIZE) {
		return kmalloc_node(size, flags, node);
	}

	/*
	 * Make sure that larger requests are not too disruptive - no OOM
	 * killer and no allocation failure warnings as we have a fallback.
	 */
	ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
	if (!ret) {
		ret = __lttng_vmalloc_node_range(size, 1,
				VMALLOC_START, VMALLOC_END,
				flags | __GFP_HIGHMEM, PAGE_KERNEL, 0,
				node, __builtin_return_address(0));
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_mappings();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void lttng_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		kfree(addr);
	}
}
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0)) */

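/*
 * Example use of the allocation helpers (illustrative sketch; "chan_buf" and
 * "cpu" are hypothetical caller-side names, not part of this header):
 *
 *	chan_buf = lttng_kvzalloc_node(sizeof(*chan_buf), GFP_KERNEL,
 *			cpu_to_node(cpu));
 *	if (!chan_buf)
 *		return -ENOMEM;
 *	...
 *	lttng_kvfree(chan_buf);
 *
 * lttng_kvzalloc_node() takes care of calling wrapper_vmalloc_sync_mappings()
 * when the allocation is served by vmalloc.
 */
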
#endif /* _LTTNG_WRAPPER_VMALLOC_H */