/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
 *
 * wrapper/vmalloc.h
 *
 * Wrapper around vmalloc_sync_all. Uses KALLSYMS to get its address when
 * available; otherwise we need a kernel that exports this function to GPL
 * modules.
 *
 * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 */

#ifndef _LTTNG_WRAPPER_VMALLOC_H
#define _LTTNG_WRAPPER_VMALLOC_H

#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

#ifdef CONFIG_KALLSYMS

#include <linux/kallsyms.h>
#include <wrapper/kallsyms.h>

static inline
void wrapper_vmalloc_sync_all(void)
{
	void (*vmalloc_sync_all_sym)(void);

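	/*
	 * vmalloc_sync_all() is not exported to modules; look up its
	 * address at run time through kallsyms.
	 */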
	vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
	if (vmalloc_sync_all_sym) {
		vmalloc_sync_all_sym();
	} else {
#ifdef CONFIG_X86
		/*
		 * Only x86 needs vmalloc_sync_all to make sure LTTng does not
		 * trigger recursive page faults.
		 */
		printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
#endif
	}
}
#else

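/*
 * Without KALLSYMS, rely on the kernel exporting vmalloc_sync_all()
 * to GPL modules.
 */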
static inline
void wrapper_vmalloc_sync_all(void)
{
	return vmalloc_sync_all();
}
#endif

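/*
 * Kernels >= 4.12 provide kvmalloc_node()/kvfree(); wrap them directly.
 * Older kernels get an equivalent fallback implemented below.
 */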
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

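	/*
	 * kvmalloc_node() tries a kmalloc first and falls back to vmalloc
	 * for larger requests, so the returned memory may be vmalloc'd.
	 */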
	ret = kvmalloc_node(size, flags, node);
	if (is_vmalloc_addr(ret)) {
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_all();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

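/*
 * kvfree() dispatches to vfree() or kfree() depending on how the memory
 * was allocated, matching kvmalloc_node() above.
 */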
static inline
void lttng_kvfree(const void *addr)
{
	kvfree(addr);
}

#else

#include <linux/slab.h>

static inline
void print_vmalloc_node_range_warning(void)
{
	printk_once(KERN_WARNING "LTTng: __vmalloc_node_range symbol lookup failed.\n");
	printk_once(KERN_WARNING "Tracer performance will be degraded on NUMA systems.\n");
	printk_once(KERN_WARNING "Please rebuild your kernel with CONFIG_KALLSYMS enabled.\n");
}

/*
 * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
 */
static inline
void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
#ifdef CONFIG_KALLSYMS
	/*
	 * If we have KALLSYMS, get __vmalloc_node_range, which is not exported.
	 */
	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);

	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
	if (lttng__vmalloc_node_range)
		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
				vm_flags, node, caller);
#endif
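	/*
	 * Fallback: plain __vmalloc() cannot honor the requested node,
	 * hence the warning when a specific node was asked for.
	 */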
	if (node != NUMA_NO_NODE)
		print_vmalloc_node_range_warning();
	return __vmalloc(size, gfp_mask, prot);
}

/**
 * lttng_kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible with GFP_KERNEL.
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use lttng_kvfree to free the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported.
 */
static inline
void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
{
	void *ret;

	/*
	 * vmalloc uses GFP_KERNEL for some internal allocations (e.g. page tables)
	 * so the given set of flags has to be compatible.
	 */
	WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);

	/*
	 * If the allocation fits in a single page, do not fall back.
	 */
	if (size <= PAGE_SIZE) {
		return kmalloc_node(size, flags, node);
	}

	/*
	 * Make sure that larger requests are not too disruptive - no OOM
	 * killer and no allocation failure warnings, as we have a fallback.
	 */
	ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
	if (!ret) {
		ret = __lttng_vmalloc_node_range(size, 1,
				VMALLOC_START, VMALLOC_END,
				flags | __GFP_HIGHMEM, PAGE_KERNEL, 0,
				node, __builtin_return_address(0));
		/*
		 * Make sure we don't trigger recursive page faults in the
		 * tracing fast path.
		 */
		wrapper_vmalloc_sync_all();
	}
	return ret;
}

static inline
void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
{
	return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
}

static inline
void *lttng_kvmalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
}

static inline
void *lttng_kvzalloc(unsigned long size, gfp_t flags)
{
	return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
}

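/*
 * Free with whichever allocator actually provided the memory, mirroring
 * what kvfree() does on newer kernels.
 */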
static inline
void lttng_kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr)) {
		vfree(addr);
	} else {
		kfree(addr);
	}
}
#endif

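/*
 * Example usage (illustrative sketch only, not part of the upstream header):
 * allocate a possibly large, zeroed buffer that transparently falls back to
 * vmalloc, and release it with the matching helper. The structure and
 * function names below are hypothetical.
 *
 *	struct lttng_example_buf {
 *		size_t len;
 *		char data[];
 *	};
 *
 *	static struct lttng_example_buf *lttng_example_buf_create(size_t len)
 *	{
 *		struct lttng_example_buf *buf;
 *
 *		buf = lttng_kvzalloc(sizeof(*buf) + len, GFP_KERNEL);
 *		if (!buf)
 *			return NULL;
 *		buf->len = len;
 *		return buf;
 *	}
 *
 *	static void lttng_example_buf_destroy(struct lttng_example_buf *buf)
 *	{
 *		lttng_kvfree(buf);
 *	}
 */
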
#endif /* _LTTNG_WRAPPER_VMALLOC_H */