X-Git-Url: https://git.lttng.org/?a=blobdiff_plain;f=wrapper%2Fvmalloc.h;h=d65a8e9c37fe85bdfcaee069f6640ee167108f0e;hb=2d0428212cbfa3f8428feee160b095f48c2ac974;hp=2dd06cbf53e0fe565538b99fd633be504f7b7f9f;hpb=48f5e0b5bbe9c45935b483cda894b1d742e53b61;p=lttng-modules.git

diff --git a/wrapper/vmalloc.h b/wrapper/vmalloc.h
index 2dd06cbf..d65a8e9c 100644
--- a/wrapper/vmalloc.h
+++ b/wrapper/vmalloc.h
@@ -1,7 +1,5 @@
-#ifndef _LTTNG_WRAPPER_VMALLOC_H
-#define _LTTNG_WRAPPER_VMALLOC_H
-
-/*
+/* SPDX-License-Identifier: (GPL-2.0 or LGPL-2.1)
+ *
  * wrapper/vmalloc.h
  *
  * wrapper around vmalloc_sync_all. Using KALLSYMS to get its address when
@@ -9,32 +7,83 @@
  * modules.
  *
  * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; only
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-#include <linux/version.h>
+#ifndef _LTTNG_WRAPPER_VMALLOC_H
+#define _LTTNG_WRAPPER_VMALLOC_H
+
+#include <wrapper/kernel-version.h>
 #include <linux/vmalloc.h>
+#include <linux/mm.h>
 
 #ifdef CONFIG_KALLSYMS
 #include <linux/kallsyms.h>
 #include <wrapper/kallsyms.h>
+#include <wrapper/kernel-version.h>
+
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,8,0))
+
+/*
+ * vmalloc_sync_mappings() was removed in v5.8; the vmalloc mappings
+ * are now synchronized when they are created or torn down.
+ */
+static inline
+void wrapper_vmalloc_sync_mappings(void)
+{}
+
+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0) \
+	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
+	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
+	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
+	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
+	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
+	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
+	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
+	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
+	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
+	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))
+
+static inline
+void wrapper_vmalloc_sync_mappings(void)
+{
+	void (*vmalloc_sync_mappings_sym)(void);
+
+	vmalloc_sync_mappings_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_mappings");
+	if (vmalloc_sync_mappings_sym) {
+		vmalloc_sync_mappings_sym();
+	} else {
+#ifdef CONFIG_X86
+		/*
+		 * Only x86 needs vmalloc_sync_mappings to make sure LTTng does not
+		 * trigger recursive page faults.
+		 */
+		printk_once(KERN_WARNING "LTTng: vmalloc_sync_mappings symbol lookup failed.\n");
+		printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
+#endif
+	}
+}
+/*
+ * Canary function to check for 'vmalloc_sync_mappings()' at compile time.
+ *
+ * From 'include/linux/vmalloc.h':
+ *
+ * void vmalloc_sync_mappings(void);
+ */
+static inline
+void __canary__vmalloc_sync_mappings(void)
+{
+	vmalloc_sync_mappings();
+}
+
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0)) */
+
+/*
+ * Map wrapper_vmalloc_sync_mappings() to vmalloc_sync_all() on kernels before 5.6.
+ */
 static inline
-void wrapper_vmalloc_sync_all(void)
+void wrapper_vmalloc_sync_mappings(void)
 {
 	void (*vmalloc_sync_all_sym)(void);
@@ -52,16 +101,65 @@ void wrapper_vmalloc_sync_all(void)
 #endif
 	}
 }
-#else
+
+/*
+ * Canary function to check for 'vmalloc_sync_all()' at compile time.
+ *
+ * From 'include/linux/vmalloc.h':
+ *
+ * void vmalloc_sync_all(void);
+ */
+static inline
+void __canary__vmalloc_sync_all(void)
+{
+	vmalloc_sync_all();
+}
+
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0)) */
+
+#else /* CONFIG_KALLSYMS */
+
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,8,0))
+
+/*
+ * vmalloc_sync_mappings() was removed in v5.8; the vmalloc mappings
+ * are now synchronized when they are created or torn down.
+ */
+static inline
+void wrapper_vmalloc_sync_mappings(void)
+{}
+
+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0) \
+	|| LTTNG_KERNEL_RANGE(5,5,12, 5,6,0) \
+	|| LTTNG_KERNEL_RANGE(5,4,28, 5,5,0) \
+	|| LTTNG_KERNEL_RANGE(5,2,37, 5,3,0) \
+	|| LTTNG_KERNEL_RANGE(4,19,113, 4,20,0) \
+	|| LTTNG_KERNEL_RANGE(4,14,175, 4,15,0) \
+	|| LTTNG_KERNEL_RANGE(4,9,218, 4,10,0) \
+	|| LTTNG_KERNEL_RANGE(4,4,218, 4,5,0) \
+	|| LTTNG_UBUNTU_KERNEL_RANGE(4,15,18,97, 4,16,0,0) \
+	|| LTTNG_UBUNTU_KERNEL_RANGE(5,0,21,48, 5,1,0,0) \
+	|| LTTNG_UBUNTU_KERNEL_RANGE(5,3,18,52, 5,4,0,0))
+
+static inline
+void wrapper_vmalloc_sync_mappings(void)
+{
+	return vmalloc_sync_mappings();
+}
+
+#else /* #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0)) */
 
 static inline
-void wrapper_vmalloc_sync_all(void)
+void wrapper_vmalloc_sync_mappings(void)
 {
 	return vmalloc_sync_all();
 }
+
+#endif /* #else #if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(5,6,0)) */
+
 #endif
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,12,0))
 static inline
 void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
 {
@@ -73,7 +171,7 @@ void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
 		 * Make sure we don't trigger recursive page faults in the
 		 * tracing fast path.
 		 */
-		wrapper_vmalloc_sync_all();
+		wrapper_vmalloc_sync_mappings();
 	}
 	return ret;
 }
@@ -105,36 +203,163 @@ void lttng_kvfree(const void *addr)
 #else
 
 #include <linux/slab.h>
-#include <linux/mm.h>
+
+static inline
+void print_vmalloc_node_range_warning(void)
+{
+	printk_once(KERN_WARNING "LTTng: __vmalloc_node_range symbol lookup failed.\n");
+	printk_once(KERN_WARNING "Tracer performance will be degraded on NUMA systems.\n");
+	printk_once(KERN_WARNING "Please rebuild your kernel with CONFIG_KALLSYMS enabled.\n");
+}
+
+#if (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(4,0,0))
 
 /*
  * kallsyms wrapper of __vmalloc_node with a fallback to kmalloc_node.
  */
 static inline
-void *__lttng_vmalloc_node_fallback(unsigned long size, unsigned long align,
-	gfp_t gfp_mask, pgprot_t prot, int node, void *caller)
+void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
+	unsigned long start, unsigned long end, gfp_t gfp_mask,
+	pgprot_t prot, unsigned long vm_flags, int node,
+	const void *caller)
 {
-	void *ret;
+#ifdef CONFIG_KALLSYMS
+	/*
+	 * If we have KALLSYMS, get __vmalloc_node_range, which is not exported.
+	 */
+	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
+		unsigned long start, unsigned long end, gfp_t gfp_mask,
+		pgprot_t prot, unsigned long vm_flags, int node,
+		const void *caller);
+
+	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
+	if (lttng__vmalloc_node_range)
+		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
+				vm_flags, node, caller);
+#endif
+	if (node != NUMA_NO_NODE)
+		print_vmalloc_node_range_warning();
+	return __vmalloc(size, gfp_mask, prot);
+}
+
+/*
+ * Canary function to check for '__vmalloc_node_range()' at compile time.
+ *
+ * From 'include/linux/vmalloc.h':
+ *
+ * extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
+ *        unsigned long start, unsigned long end, gfp_t gfp_mask,
+ *        pgprot_t prot, unsigned long vm_flags, int node,
+ *        const void *caller);
+ */
+static inline
+void *__canary____lttng_vmalloc_node_range(unsigned long size, unsigned long align,
+	unsigned long start, unsigned long end, gfp_t gfp_mask,
+	pgprot_t prot, unsigned long vm_flags, int node,
+	const void *caller)
+{
+	return __vmalloc_node_range(size, align, start, end, gfp_mask, prot,
+			vm_flags, node, caller);
+}
+
+#elif (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,6,0))
+
+/*
+ * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
+ */
+static inline
+void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
+	unsigned long start, unsigned long end, gfp_t gfp_mask,
+	pgprot_t prot, unsigned long vm_flags, int node,
+	const void *caller)
+{
+#ifdef CONFIG_KALLSYMS
 	/*
-	 * If we have KALLSYMS, get * __vmalloc_node which is not exported.
+	 * If we have KALLSYMS, get __vmalloc_node_range, which is not exported.
 	 */
-	void *(*lttng__vmalloc_node)(unsigned long size, unsigned long align,
-		gfp_t gfp_mask, pgprot_t prot, int node, void *caller);
+	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
+		unsigned long start, unsigned long end, gfp_t gfp_mask,
+		pgprot_t prot, int node, const void *caller);
 
-	lttng__vmalloc_node = (void *) kallsyms_lookup_funcptr("__vmalloc_node");
-	ret = lttng__vmalloc_node(size, align, gfp_mask, prot, node, caller);
-#else
+	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
+	if (lttng__vmalloc_node_range)
+		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
+				node, caller);
+#endif
+	if (node != NUMA_NO_NODE)
+		print_vmalloc_node_range_warning();
+	return __vmalloc(size, gfp_mask, prot);
+}
+
+/*
+ * Canary function to check for '__vmalloc_node_range()' at compile time.
+ *
+ * From 'include/linux/vmalloc.h':
+ *
+ * extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
+ *        unsigned long start, unsigned long end, gfp_t gfp_mask,
+ *        pgprot_t prot, int node,
+ *        const void *caller);
+ */
+static inline
+void *__canary____lttng_vmalloc_node_range(unsigned long size, unsigned long align,
+	unsigned long start, unsigned long end, gfp_t gfp_mask,
+	pgprot_t prot, int node, const void *caller)
+{
+	return __vmalloc_node_range(size, align, start, end, gfp_mask, prot,
+			node, caller);
+}
+
+#else /* (LTTNG_LINUX_VERSION_CODE >= LTTNG_KERNEL_VERSION(3,6,0)) */
+
+/*
+ * kallsyms wrapper of __vmalloc_node_range with a fallback to __vmalloc.
+ */
+static inline
+void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
+	unsigned long start, unsigned long end, gfp_t gfp_mask,
+	pgprot_t prot, unsigned long vm_flags, int node,
+	void *caller)
+{
+#ifdef CONFIG_KALLSYMS
 	/*
-	 * If we don't have KALLSYMS, fallback to kmalloc_node.
+	 * If we have KALLSYMS, get __vmalloc_node_range, which is not exported.
 	 */
-	ret = kmalloc_node(size, flags, node);
+	void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
+		unsigned long start, unsigned long end, gfp_t gfp_mask,
+		pgprot_t prot, int node, void *caller);
+
+	lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
+	if (lttng__vmalloc_node_range)
+		return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
+				node, caller);
 #endif
+	if (node != NUMA_NO_NODE)
+		print_vmalloc_node_range_warning();
+	return __vmalloc(size, gfp_mask, prot);
+}
 
-	return ret;
+/*
+ * Canary function to check for '__vmalloc_node_range()' at compile time.
+ *
+ * From 'include/linux/vmalloc.h':
+ *
+ * extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
+ *        unsigned long start, unsigned long end, gfp_t gfp_mask,
+ *        pgprot_t prot, int node,
+ *        void *caller);
+ */
+static inline
+void *__canary____lttng_vmalloc_node_range(unsigned long size, unsigned long align,
+	unsigned long start, unsigned long end, gfp_t gfp_mask,
+	pgprot_t prot, int node, void *caller)
+{
+	return __vmalloc_node_range(size, align, start, end, gfp_mask, prot,
+			node, caller);
 }
 
+#endif
+
 /**
  * lttng_kvmalloc_node - attempt to allocate physically contiguous memory, but upon
  * failure, fall back to non-contiguous (vmalloc) allocation.
@@ -170,28 +395,15 @@ void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
 	 */
 	ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
 	if (!ret) {
-		if (node == NUMA_NO_NODE) {
-			/*
-			 * If no node was specified, use __vmalloc which is
-			 * always exported.
-			 */
-			ret = __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);
-		} else {
-			/*
-			 * Otherwise, we need to select a node but __vmalloc_node
-			 * is not exported, use this fallback wrapper which uses
-			 * kallsyms if available or falls back to kmalloc_node.
-			 */
-			ret = __lttng_vmalloc_node_fallback(size, 1,
-				flags | __GFP_HIGHMEM, PAGE_KERNEL, node,
-				__builtin_return_address(0));
-		}
-
+		ret = __lttng_vmalloc_node_range(size, 1,
+				VMALLOC_START, VMALLOC_END,
+				flags | __GFP_HIGHMEM, PAGE_KERNEL, 0,
+				node, __builtin_return_address(0));
 		/*
 		 * Make sure we don't trigger recursive page faults in the
 		 * tracing fast path.
 		 */
-		wrapper_vmalloc_sync_all();
+		wrapper_vmalloc_sync_mappings();
 	}
 	return ret;
 }
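---

Note on usage (not part of the patch): the allocator entry points keep their names, so the only change visible to callers is the rename of wrapper_vmalloc_sync_all() to wrapper_vmalloc_sync_mappings(). Below is a minimal sketch of a caller; the module name and the example_buf buffer are hypothetical, while lttng_kvmalloc_node(), lttng_kvfree(), GFP_KERNEL and NUMA_NO_NODE are the real interfaces this header exposes.

/*
 * Hypothetical caller sketch: allocate a large tracing buffer, letting
 * lttng_kvmalloc_node() fall back to vmalloc memory when physically
 * contiguous pages are unavailable. The wrapper already calls
 * wrapper_vmalloc_sync_mappings() whenever the allocation came from
 * vmalloc, so the tracing fast path cannot take a recursive page fault
 * on the freshly created mapping.
 */
#include <linux/module.h>
#include <wrapper/vmalloc.h>

static void *example_buf;	/* hypothetical buffer */

static int __init vmalloc_wrapper_example_init(void)
{
	/* 4 MiB, sleeping allocation, no NUMA node preference. */
	example_buf = lttng_kvmalloc_node(4UL << 20, GFP_KERNEL, NUMA_NO_NODE);
	if (!example_buf)
		return -ENOMEM;
	return 0;
}

static void __exit vmalloc_wrapper_example_exit(void)
{
	/* lttng_kvfree() picks vfree() or kfree() based on the address. */
	lttng_kvfree(example_buf);
}

module_init(vmalloc_wrapper_example_init);
module_exit(vmalloc_wrapper_example_exit);
MODULE_LICENSE("GPL");

The VMALLOC_START/VMALLOC_END range that the patched lttng_kvmalloc_node() passes to __lttng_vmalloc_node_range() is the same range vmalloc() itself allocates from, so on pre-4.12 kernels the fallback now behaves like a NUMA-aware vmalloc instead of the old kmalloc_node fallback.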