#include <linux/slab.h>
#include <lib/prio_heap/lttng_prio_heap.h>
-#include <wrapper/vmalloc.h>
+#include <linux/mm.h>
#ifdef DEBUG_HEAP
void lttng_check_heap(const struct lttng_ptr_heap *heap)
return 0;
heap->alloc_len = max_t(size_t, new_len, heap->alloc_len << 1);
-	new_ptrs = lttng_kvmalloc(heap->alloc_len * sizeof(void *), heap->gfpmask);
+	new_ptrs = kvmalloc(heap->alloc_len * sizeof(void *), heap->gfpmask);
if (!new_ptrs)
return -ENOMEM;
if (heap->ptrs)
memcpy(new_ptrs, heap->ptrs, heap->len * sizeof(void *));
- lttng_kvfree(heap->ptrs);
+ kvfree(heap->ptrs);
heap->ptrs = new_ptrs;
return 0;
}
void lttng_heap_free(struct lttng_ptr_heap *heap)
{
- lttng_kvfree(heap->ptrs);
+ kvfree(heap->ptrs);
}
static void heapify(struct lttng_ptr_heap *heap, size_t i)
#include <linux/vmalloc.h>
#include <wrapper/mm.h>
-#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
#include <wrapper/ringbuffer/config.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>
if (unlikely(!pages))
goto pages_error;
- bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
- * num_subbuf_alloc,
- 1 << INTERNODE_CACHE_SHIFT),
+ bufb->array = kvmalloc_node(ALIGN(sizeof(*bufb->array)
+ * num_subbuf_alloc,
+ 1 << INTERNODE_CACHE_SHIFT),
GFP_KERNEL | __GFP_NOWARN,
cpu_to_node(max(bufb->cpu, 0)));
if (unlikely(!bufb->array))
goto array_error;
-
for (i = 0; i < num_pages; i++) {
pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
/* Allocate backend pages array elements */
for (i = 0; i < num_subbuf_alloc; i++) {
bufb->array[i] =
- lttng_kvzalloc_node(ALIGN(
+ kvzalloc_node(ALIGN(
sizeof(struct lib_ring_buffer_backend_pages) +
sizeof(struct lib_ring_buffer_backend_page)
* num_pages_per_subbuf,
}
/* Allocate write-side subbuffer table */
- bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
+ bufb->buf_wsb = kvzalloc_node(ALIGN(
sizeof(struct lib_ring_buffer_backend_subbuffer)
* num_subbuf,
1 << INTERNODE_CACHE_SHIFT),
bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
/* Allocate subbuffer packet counter table */
- bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
+ bufb->buf_cnt = kvzalloc_node(ALIGN(
sizeof(struct lib_ring_buffer_backend_counts)
* num_subbuf,
1 << INTERNODE_CACHE_SHIFT),
}
}
- /*
- * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
- * will not fault.
- */
- wrapper_vmalloc_sync_all();
wrapper_clear_current_oom_origin();
vfree(pages);
return 0;
free_wsb:
- lttng_kvfree(bufb->buf_wsb);
+ kvfree(bufb->buf_wsb);
free_array:
for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
- lttng_kvfree(bufb->array[i]);
+ kvfree(bufb->array[i]);
depopulate:
/* Free all allocated pages */
for (i = 0; (i < num_pages && pages[i]); i++)
__free_page(pages[i]);
- lttng_kvfree(bufb->array);
+ kvfree(bufb->array);
array_error:
vfree(pages);
pages_error:
if (chanb->extra_reader_sb)
num_subbuf_alloc++;
- lttng_kvfree(bufb->buf_wsb);
- lttng_kvfree(bufb->buf_cnt);
+ kvfree(bufb->buf_wsb);
+ kvfree(bufb->buf_cnt);
for (i = 0; i < num_subbuf_alloc; i++) {
for (j = 0; j < bufb->num_pages_per_subbuf; j++)
__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
- lttng_kvfree(bufb->array[i]);
+ kvfree(bufb->array[i]);
}
- lttng_kvfree(bufb->array);
+ kvfree(bufb->array);
bufb->allocated = 0;
}
#include <wrapper/kref.h>
#include <wrapper/percpu-defs.h>
#include <wrapper/timer.h>
-#include <wrapper/vmalloc.h>
/*
* Internal structure representing offsets to use at a sub-buffer switch.
struct channel *chan = buf->backend.chan;
lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu);
- lttng_kvfree(buf->commit_hot);
- lttng_kvfree(buf->commit_cold);
- lttng_kvfree(buf->ts_end);
+ kvfree(buf->commit_hot);
+ kvfree(buf->commit_cold);
+ kvfree(buf->ts_end);
lib_ring_buffer_backend_free(&buf->backend);
}
return ret;
buf->commit_hot =
- lttng_kvzalloc_node(ALIGN(sizeof(*buf->commit_hot)
+ kvzalloc_node(ALIGN(sizeof(*buf->commit_hot)
* chan->backend.num_subbuf,
1 << INTERNODE_CACHE_SHIFT),
GFP_KERNEL | __GFP_NOWARN,
}
buf->commit_cold =
- lttng_kvzalloc_node(ALIGN(sizeof(*buf->commit_cold)
+ kvzalloc_node(ALIGN(sizeof(*buf->commit_cold)
* chan->backend.num_subbuf,
1 << INTERNODE_CACHE_SHIFT),
GFP_KERNEL | __GFP_NOWARN,
}
buf->ts_end =
- lttng_kvzalloc_node(ALIGN(sizeof(*buf->ts_end)
+ kvzalloc_node(ALIGN(sizeof(*buf->ts_end)
* chan->backend.num_subbuf,
1 << INTERNODE_CACHE_SHIFT),
GFP_KERNEL | __GFP_NOWARN,
chan->backend.cpumask));
cpumask_set_cpu(cpu, chan->backend.cpumask);
}
-
return 0;
/* Error handling */
free_init:
- lttng_kvfree(buf->ts_end);
+ kvfree(buf->ts_end);
free_commit_cold:
- lttng_kvfree(buf->commit_cold);
+ kvfree(buf->commit_cold);
free_commit:
- lttng_kvfree(buf->commit_hot);
+ kvfree(buf->commit_hot);
free_chanbuf:
lib_ring_buffer_backend_free(&buf->backend);
return ret;
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/err.h>
-#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
#include <wrapper/ringbuffer/vfs.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/ringbuffer/frontend.h>
{
int ret = 0;
- wrapper_vmalloc_sync_all();
lttng_clock_ref();
ret = lttng_tp_mempool_init();
#include "lttng-events.h"
#include "wrapper/ringbuffer/backend.h"
#include "wrapper/ringbuffer/frontend.h"
-#include "wrapper/vmalloc.h"
#include "lttng-tracer.h"
#include "lttng-endian.h"
sequence_field->priv = fdata;
sequence_field->destroy = lttng_callstack_sequence_destroy;
- wrapper_vmalloc_sync_all();
return 0;
error_create:
#include <linux/cgroup.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/namespace.h>
#include <lttng-tracer.h>
field->record = cgroup_ns_record;
field->get_value = cgroup_ns_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_cgroup_ns_to_ctx);
#include <linux/sched.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <lttng-tracer.h>
static
field->record = cpu_id_record;
field->get_value = cpu_id_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_cpu_id_to_ctx);
#include <lttng-events.h>
#include <lttng-tracer.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/user_namespace.h>
static
field->record = egid_record;
field->get_value = egid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_egid_to_ctx);
#include <lttng-events.h>
#include <lttng-tracer.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/user_namespace.h>
static
field->record = euid_record;
field->get_value = euid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_euid_to_ctx);
#include <lttng-events.h>
#include <lttng-tracer.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/user_namespace.h>
static
field->record = gid_record;
field->get_value = gid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_gid_to_ctx);
#include <linux/utsname.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <lttng-tracer.h>
#define LTTNG_HOSTNAME_CTX_LEN (__NEW_UTS_LEN + 1)
field->record = hostname_record;
field->get_value = hostname_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_hostname_to_ctx);
#include <linux/irqflags.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <lttng-tracer.h>
/*
field->record = interruptible_record;
field->get_value = interruptible_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_interruptible_to_ctx);
#include <linux/ipc_namespace.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/namespace.h>
#include <lttng-tracer.h>
field->record = ipc_ns_record;
field->get_value = ipc_ns_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_ipc_ns_to_ctx);
#include <linux/irqflags.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <lttng-tracer.h>
static
field->record = migratable_record;
field->get_value = migratable_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_migratable_to_ctx);
#include <lttng-events.h>
#include <linux/nsproxy.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/namespace.h>
#include <lttng-tracer.h>
field->record = mnt_ns_record;
field->get_value = mnt_ns_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_mnt_ns_to_ctx);
#include <linux/irqflags.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <lttng-tracer.h>
static
field->record = need_reschedule_record;
field->get_value = need_reschedule_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_need_reschedule_to_ctx);
#include <net/net_namespace.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/namespace.h>
#include <lttng-tracer.h>
field->record = net_ns_record;
field->get_value = net_ns_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_net_ns_to_ctx);
#include <linux/sched.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <lttng-tracer.h>
static
field->record = nice_record;
field->get_value = nice_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_nice_to_ctx);
#include <linux/list.h>
#include <linux/string.h>
#include <linux/cpu.h>
+#include <linux/mm.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/perf.h>
#include <lttng-tracer.h>
#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
kfree(field->event_field.name);
kfree(field->u.perf_counter->attr);
- lttng_kvfree(events);
+ kvfree(events);
kfree(field->u.perf_counter);
}
int ret;
char *name_alloc;
- events = lttng_kvzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
+ events = kvzalloc(num_possible_cpus() * sizeof(*events), GFP_KERNEL);
if (!events)
return -ENOMEM;
field->u.perf_counter = perf_field;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
error_alloc_perf_field:
kfree(attr);
error_attr:
- lttng_kvfree(events);
+ kvfree(events);
return ret;
}
#include <linux/pid_namespace.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/namespace.h>
#include <lttng-tracer.h>
field->record = pid_ns_record;
field->get_value = pid_ns_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_pid_ns_to_ctx);
#include <linux/sched.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <lttng-tracer.h>
static
field->record = pid_record;
field->get_value = pid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_pid_to_ctx);
#include <linux/syscalls.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <lttng-tracer.h>
static
field->record = ppid_record;
field->get_value = ppid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_ppid_to_ctx);
#include <linux/irqflags.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <lttng-tracer.h>
/*
field->record = preemptible_record;
field->get_value = preemptible_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_preemptible_to_ctx);
#include <linux/sched.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/kallsyms.h>
#include <lttng-tracer.h>
field->record = prio_record;
field->get_value = prio_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_prio_to_ctx);
#include <linux/sched.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <lttng-tracer.h>
#include <lttng-endian.h>
field->record = procname_record;
field->get_value = procname_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_procname_to_ctx);
#include <lttng-events.h>
#include <lttng-tracer.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/user_namespace.h>
static
field->record = sgid_record;
field->get_value = sgid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_sgid_to_ctx);
#include <lttng-events.h>
#include <lttng-tracer.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/user_namespace.h>
static
field->record = suid_record;
field->get_value = suid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_suid_to_ctx);
#include <linux/sched.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <lttng-tracer.h>
static
field->record = tid_record;
field->get_value = tid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_tid_to_ctx);
#include <lttng-events.h>
#include <lttng-tracer.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/user_namespace.h>
static
field->record = uid_record;
field->get_value = uid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_uid_to_ctx);
#include <linux/user_namespace.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/namespace.h>
#include <lttng-tracer.h>
field->record = user_ns_record;
field->get_value = user_ns_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_user_ns_to_ctx);
#include <linux/utsname.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/namespace.h>
#include <lttng-tracer.h>
field->record = uts_ns_record;
field->get_value = uts_ns_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_uts_ns_to_ctx);
#include <lttng-events.h>
#include <lttng-tracer.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/user_namespace.h>
static
field->record = vegid_record;
field->get_value = vegid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_vegid_to_ctx);
#include <lttng-events.h>
#include <lttng-tracer.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/user_namespace.h>
static
field->record = veuid_record;
field->get_value = veuid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_veuid_to_ctx);
#include <lttng-events.h>
#include <lttng-tracer.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/user_namespace.h>
static
field->record = vgid_record;
field->get_value = vgid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_vgid_to_ctx);
#include <linux/sched.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <lttng-tracer.h>
static
field->record = vpid_record;
field->get_value = vpid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_vpid_to_ctx);
#include <linux/syscalls.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <lttng-tracer.h>
static
field->record = vppid_record;
field->get_value = vppid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_vppid_to_ctx);
#include <lttng-events.h>
#include <lttng-tracer.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/user_namespace.h>
static
field->record = vsgid_record;
field->get_value = vsgid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_vsgid_to_ctx);
#include <lttng-events.h>
#include <lttng-tracer.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/user_namespace.h>
static
field->record = vsuid_record;
field->get_value = vsuid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_vsuid_to_ctx);
#include <linux/sched.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <lttng-tracer.h>
static
field->record = vtid_record;
field->get_value = vtid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_vtid_to_ctx);
#include <lttng-events.h>
#include <lttng-tracer.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/user_namespace.h>
static
field->record = vuid_record;
field->get_value = vuid_get_value;
lttng_context_update(*ctx);
- wrapper_vmalloc_sync_all();
return 0;
}
EXPORT_SYMBOL_GPL(lttng_add_vuid_to_ctx);
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
-#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
#include <lttng-events.h>
#include <lttng-tracer.h>
struct lttng_ctx_field *new_fields;
ctx->allocated_fields = max_t(size_t, 1, 2 * ctx->allocated_fields);
-	new_fields = lttng_kvzalloc(ctx->allocated_fields * sizeof(struct lttng_ctx_field), GFP_KERNEL);
+	new_fields = kvzalloc(ctx->allocated_fields * sizeof(struct lttng_ctx_field), GFP_KERNEL);
if (!new_fields)
return NULL;
if (ctx->fields)
memcpy(new_fields, ctx->fields, sizeof(*ctx->fields) * ctx->nr_fields);
- lttng_kvfree(ctx->fields);
+ kvfree(ctx->fields);
ctx->fields = new_fields;
}
field = &ctx->fields[ctx->nr_fields];
if (ctx->fields[i].destroy)
ctx->fields[i].destroy(&ctx->fields[i]);
}
- lttng_kvfree(ctx->fields);
+ kvfree(ctx->fields);
kfree(ctx);
}
#include <wrapper/file.h>
#include <linux/jhash.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/uuid.h>
#include <linux/dmi.h>
-#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
#include <wrapper/random.h>
#include <wrapper/tracepoint.h>
#include <wrapper/list.h>
int i;
mutex_lock(&sessions_mutex);
-	session = lttng_kvzalloc(sizeof(struct lttng_session), GFP_KERNEL);
+	session = kvzalloc(sizeof(struct lttng_session), GFP_KERNEL);
if (!session)
goto err;
INIT_LIST_HEAD(&session->chan);
err_free_cache:
kfree(metadata_cache);
err_free_session:
- lttng_kvfree(session);
+ kvfree(session);
err:
mutex_unlock(&sessions_mutex);
return NULL;
kref_put(&session->metadata_cache->refcount, metadata_cache_destroy);
list_del(&session->list);
mutex_unlock(&sessions_mutex);
- lttng_kvfree(session);
+ kvfree(session);
}
int lttng_session_statedump(struct lttng_session *session)
* @transport: transport structure
*
* Registers a transport which can be used as output to extract the data out of
- * LTTng. The module calling this registration function must ensure that no
- * trap-inducing code will be executed by the transport functions. E.g.
- * vmalloc_sync_all() must be called between a vmalloc and the moment the memory
- * is made visible to the transport function. This registration acts as a
- * vmalloc_sync_all. Therefore, only if the module allocates virtual memory
- * after its registration must it synchronize the TLBs.
+ * LTTng.
*/
void lttng_transport_register(struct lttng_transport *transport)
{
- /*
- * Make sure no page fault can be triggered by the module about to be
- * registered. We deal with this here so we don't have to call
- * vmalloc_sync_all() in each module's init.
- */
- wrapper_vmalloc_sync_all();
-
mutex_lock(&sessions_mutex);
list_add_tail(&transport->node, <tng_transport_list);
mutex_unlock(&sessions_mutex);
#include <linux/module.h>
#include <linux/types.h>
#include <lib/bitfield.h>
-#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
#include <wrapper/trace-clock.h>
#include <lttng-events.h>
#include <lttng-tracer.h>
static int __init lttng_ring_buffer_client_init(void)
{
- /*
- * This vmalloc sync all also takes care of the lib ring buffer
- * vmalloc'd module pages when it is built as a module into LTTng.
- */
- wrapper_vmalloc_sync_all();
lttng_transport_register(<tng_relay_transport);
return 0;
}
#include <linux/module.h>
#include <linux/types.h>
-#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
#include <lttng-events.h>
#include <lttng-tracer.h>
static int __init lttng_ring_buffer_client_init(void)
{
- /*
- * This vmalloc sync all also takes care of the lib ring buffer
- * vmalloc'd module pages when it is built as a module into LTTng.
- */
- wrapper_vmalloc_sync_all();
lttng_transport_register(<tng_relay_transport);
return 0;
}
struct lttng_kernel_event ev;
int ret;
- wrapper_vmalloc_sync_all();
-
if (!chan->sc_table) {
/* create syscall table mapping syscall to events */
chan->sc_table = kzalloc(sizeof(struct lttng_event *)
#include <wrapper/trace-clock.h>
#include <wrapper/compiler.h>
-#include <wrapper/vmalloc.h>
#include <lttng-tracer-core.h>
#include <lttng-events.h>
#include <linux/slab.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/irqflags.h>
#include <lttng-tracer.h>
#include <blacklist/kprobes.h>
event->u.kprobe.kp.offset = offset;
event->u.kprobe.kp.addr = (void *) (unsigned long) addr;
- /*
- * Ensure the memory we just allocated don't trigger page faults.
- * Well.. kprobes itself puts the page fault handler on the blacklist,
- * but we can never be too careful.
- */
- wrapper_vmalloc_sync_all();
-
ret = register_kprobe(&event->u.kprobe.kp);
if (ret)
goto register_error;
#include <linux/kref.h>
#include <lttng-events.h>
#include <wrapper/ringbuffer/frontend_types.h>
-#include <wrapper/vmalloc.h>
#include <wrapper/irqflags.h>
#include <lttng-tracer.h>
#include <blacklist/kprobes.h>
kref_init(<tng_krp->kref_register);
kref_get(<tng_krp->kref_register); /* inc refcount to 2, no overflow. */
- /*
- * Ensure the memory we just allocated don't trigger page faults.
- * Well.. kprobes itself puts the page fault handler on the blacklist,
- * but we can never be too careful.
- */
- wrapper_vmalloc_sync_all();
-
ret = register_kretprobe(<tng_krp->krp);
if (ret)
goto register_error;
#include <probes/lttng.h>
#include <probes/lttng-types.h>
#include <probes/lttng-probe-user.h>
-#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
#include <wrapper/ringbuffer/frontend_types.h>
#include <wrapper/ringbuffer/backend.h>
#include <wrapper/rcu.h>
#ifndef TP_MODULE_NOINIT
static int TP_ID(__lttng_events_init__, TRACE_SYSTEM)(void)
{
- wrapper_vmalloc_sync_all();
return lttng_probe_register(&TP_ID(__probe_desc___, TRACE_SYSTEM));
}
#include <wrapper/irqflags.h>
#include <wrapper/ringbuffer/frontend_types.h>
#include <wrapper/uprobes.h>
-#include <wrapper/vmalloc.h>
static
int lttng_uprobes_handler_pre(struct uprobe_consumer *uc, struct pt_regs *regs)
goto end;
}
- /* Ensure the memory we just allocated don't trigger page faults. */
- wrapper_vmalloc_sync_all();
-
uprobe_handler->event = event;
uprobe_handler->up_consumer.handler = lttng_uprobes_handler_pre;
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
-#include <wrapper/vmalloc.h>
#include <lttng-events.h>
#define TP_MODULE_NOAUTOLOAD
{
int ret = 0;
- wrapper_vmalloc_sync_all();
-
/* /dev/lttng-logger */
ret = misc_register(&logger_dev);
if (ret) {
int ret = 0;
(void) wrapper_lttng_fixup_sig(THIS_MODULE);
- wrapper_vmalloc_sync_all();
lttng_test_filter_event_dentry =
proc_create_data(LTTNG_TEST_FILTER_EVENT_FILE,
S_IRUGO | S_IWUGO, NULL,
+++ /dev/null
-/* SPDX-License-Identifier: (GPL-2.0-only or LGPL-2.1-only)
- *
- * wrapper/vmalloc.h
- *
- * wrapper around vmalloc_sync_all. Using KALLSYMS to get its address when
- * available, else we need to have a kernel that exports this function to GPL
- * modules.
- *
- * Copyright (C) 2011-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifndef _LTTNG_WRAPPER_VMALLOC_H
-#define _LTTNG_WRAPPER_VMALLOC_H
-
-#include <linux/version.h>
-#include <linux/vmalloc.h>
-#include <linux/mm.h>
-
-#ifdef CONFIG_KALLSYMS
-
-#include <linux/kallsyms.h>
-#include <wrapper/kallsyms.h>
-
-static inline
-void wrapper_vmalloc_sync_all(void)
-{
- void (*vmalloc_sync_all_sym)(void);
-
- vmalloc_sync_all_sym = (void *) kallsyms_lookup_funcptr("vmalloc_sync_all");
- if (vmalloc_sync_all_sym) {
- vmalloc_sync_all_sym();
- } else {
-#ifdef CONFIG_X86
- /*
- * Only x86 needs vmalloc_sync_all to make sure LTTng does not
- * trigger recursive page faults.
- */
- printk_once(KERN_WARNING "LTTng: vmalloc_sync_all symbol lookup failed.\n");
- printk_once(KERN_WARNING "Page fault handler and NMI tracing might trigger faults.\n");
-#endif
- }
-}
-#else
-
-static inline
-void wrapper_vmalloc_sync_all(void)
-{
- return vmalloc_sync_all();
-}
-#endif
-
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,12,0))
-static inline
-void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
-{
- void *ret;
-
- ret = kvmalloc_node(size, flags, node);
- if (is_vmalloc_addr(ret)) {
- /*
- * Make sure we don't trigger recursive page faults in the
- * tracing fast path.
- */
- wrapper_vmalloc_sync_all();
- }
- return ret;
-}
-
-static inline
-void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
-{
- return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
-}
-
-static inline
-void *lttng_kvmalloc(unsigned long size, gfp_t flags)
-{
- return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
-}
-
-static inline
-void *lttng_kvzalloc(unsigned long size, gfp_t flags)
-{
- return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
-}
-
-static inline
-void lttng_kvfree(const void *addr)
-{
- kvfree(addr);
-}
-
-#else
-
-#include <linux/slab.h>
-
-static inline
-void print_vmalloc_node_range_warning(void)
-{
- printk_once(KERN_WARNING "LTTng: __vmalloc_node_range symbol lookup failed.\n");
- printk_once(KERN_WARNING "Tracer performance will be degraded on NUMA systems.\n");
- printk_once(KERN_WARNING "Please rebuild your kernel with CONFIG_KALLSYMS enabled.\n");
-}
-
-/*
- * kallsyms wrapper of __vmalloc_node with a fallback to kmalloc_node.
- */
-static inline
-void *__lttng_vmalloc_node_range(unsigned long size, unsigned long align,
- unsigned long start, unsigned long end, gfp_t gfp_mask,
- pgprot_t prot, unsigned long vm_flags, int node,
- const void *caller)
-{
-#ifdef CONFIG_KALLSYMS
- /*
- * If we have KALLSYMS, get * __vmalloc_node_range which is not exported.
- */
- void *(*lttng__vmalloc_node_range)(unsigned long size, unsigned long align,
- unsigned long start, unsigned long end, gfp_t gfp_mask,
- pgprot_t prot, unsigned long vm_flags, int node,
- const void *caller);
-
- lttng__vmalloc_node_range = (void *) kallsyms_lookup_funcptr("__vmalloc_node_range");
- if (lttng__vmalloc_node_range)
- return lttng__vmalloc_node_range(size, align, start, end, gfp_mask, prot,
- vm_flags, node, caller);
-#endif
- if (node != NUMA_NO_NODE)
- print_vmalloc_node_range_warning();
- return __vmalloc(size, gfp_mask, prot);
-}
-
-/**
- * lttng_kvmalloc_node - attempt to allocate physically contiguous memory, but upon
- * failure, fall back to non-contiguous (vmalloc) allocation.
- * @size: size of the request.
- * @flags: gfp mask for the allocation - must be compatible with GFP_KERNEL.
- *
- * Uses kmalloc to get the memory but if the allocation fails then falls back
- * to the vmalloc allocator. Use lttng_kvfree to free the memory.
- *
- * Reclaim modifiers - __GFP_NORETRY, __GFP_REPEAT and __GFP_NOFAIL are not supported
- */
-static inline
-void *lttng_kvmalloc_node(unsigned long size, gfp_t flags, int node)
-{
- void *ret;
-
- /*
- * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
- * so the given set of flags has to be compatible.
- */
- WARN_ON_ONCE((flags & GFP_KERNEL) != GFP_KERNEL);
-
- /*
- * If the allocation fits in a single page, do not fallback.
- */
- if (size <= PAGE_SIZE) {
- return kmalloc_node(size, flags, node);
- }
-
- /*
- * Make sure that larger requests are not too disruptive - no OOM
- * killer and no allocation failure warnings as we have a fallback
- */
- ret = kmalloc_node(size, flags | __GFP_NOWARN | __GFP_NORETRY, node);
- if (!ret) {
- ret = __lttng_vmalloc_node_range(size, 1,
- VMALLOC_START, VMALLOC_END,
- flags | __GFP_HIGHMEM, PAGE_KERNEL, 0,
- node, __builtin_return_address(0));
- /*
- * Make sure we don't trigger recursive page faults in the
- * tracing fast path.
- */
- wrapper_vmalloc_sync_all();
- }
- return ret;
-}
-
-static inline
-void *lttng_kvzalloc_node(unsigned long size, gfp_t flags, int node)
-{
- return lttng_kvmalloc_node(size, flags | __GFP_ZERO, node);
-}
-
-static inline
-void *lttng_kvmalloc(unsigned long size, gfp_t flags)
-{
- return lttng_kvmalloc_node(size, flags, NUMA_NO_NODE);
-}
-
-static inline
-void *lttng_kvzalloc(unsigned long size, gfp_t flags)
-{
- return lttng_kvzalloc_node(size, flags, NUMA_NO_NODE);
-}
-
-static inline
-void lttng_kvfree(const void *addr)
-{
- if (is_vmalloc_addr(addr)) {
- vfree(addr);
- } else {
- kfree(addr);
- }
-}
-#endif
-
-#endif /* _LTTNG_WRAPPER_VMALLOC_H */