-/*
+/* SPDX-License-Identifier: (GPL-2.0 OR LGPL-2.1)
+ *
* ring_buffer_backend.c
*
* Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; only
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/stddef.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/slab.h>
+#include <linux/oom.h>
#include <linux/cpu.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
unsigned long i;
num_pages = size >> PAGE_SHIFT;
+
+ /*
+ * Verify that the number of pages requested for that buffer is smaller
+ * than the number of available pages on the system. si_mem_available()
+ * returns an _estimate_ of the number of available pages.
+ */
+ if (num_pages > si_mem_available())
+ goto not_enough_pages;
+
+ /*
+ * Set the current user thread as the first target of the OOM killer.
+ * If the estimate returned by si_mem_available() was off, and we do
+ * end up running out of memory because of this buffer allocation, we
+ * want to kill the offending app first.
+ */
+ set_current_oom_origin();
+
num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
subbuf_size = chanb->subbuf_size;
num_subbuf_alloc = num_subbuf;
if (unlikely(!pages))
goto pages_error;
- bufb->array = kmalloc_node(ALIGN(sizeof(*bufb->array)
+ bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
* num_subbuf_alloc,
1 << INTERNODE_CACHE_SHIFT),
GFP_KERNEL | __GFP_NOWARN,
/* Allocate backend pages array elements */
for (i = 0; i < num_subbuf_alloc; i++) {
bufb->array[i] =
- kzalloc_node(ALIGN(
+ lttng_kvzalloc_node(ALIGN(
sizeof(struct lib_ring_buffer_backend_pages) +
sizeof(struct lib_ring_buffer_backend_page)
* num_pages_per_subbuf,
}
/* Allocate write-side subbuffer table */
- bufb->buf_wsb = kzalloc_node(ALIGN(
+ bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
sizeof(struct lib_ring_buffer_backend_subbuffer)
* num_subbuf,
1 << INTERNODE_CACHE_SHIFT),
bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
/* Allocate subbuffer packet counter table */
- bufb->buf_cnt = kzalloc_node(ALIGN(
+ bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
sizeof(struct lib_ring_buffer_backend_counts)
* num_subbuf,
1 << INTERNODE_CACHE_SHIFT),
* will not fault.
*/
wrapper_vmalloc_sync_all();
+ clear_current_oom_origin();
vfree(pages);
return 0;
free_wsb:
- kfree(bufb->buf_wsb);
+ lttng_kvfree(bufb->buf_wsb);
free_array:
for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
- kfree(bufb->array[i]);
+ lttng_kvfree(bufb->array[i]);
depopulate:
/* Free all allocated pages */
for (i = 0; (i < num_pages && pages[i]); i++)
__free_page(pages[i]);
- kfree(bufb->array);
+ lttng_kvfree(bufb->array);
array_error:
vfree(pages);
pages_error:
+ clear_current_oom_origin();
+not_enough_pages:
return -ENOMEM;
}
if (chanb->extra_reader_sb)
num_subbuf_alloc++;
- kfree(bufb->buf_wsb);
- kfree(bufb->buf_cnt);
+ lttng_kvfree(bufb->buf_wsb);
+ lttng_kvfree(bufb->buf_cnt);
for (i = 0; i < num_subbuf_alloc; i++) {
for (j = 0; j < bufb->num_pages_per_subbuf; j++)
__free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
- kfree(bufb->array[i]);
+ lttng_kvfree(bufb->array[i]);
}
- kfree(bufb->array);
+ lttng_kvfree(bufb->array);
bufb->allocated = 0;
}
free_bufs:
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
- ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
- &chanb->cpuhp_prepare.node);
- WARN_ON(ret);
+ /*
+ * Teardown of lttng_rb_hp_prepare instance
+ * on "add" error is handled within cpu hotplug,
+ * no teardown to do from the caller.
+ */
#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
#ifdef CONFIG_HOTPLUG_CPU
put_online_cpus();