From 1f0ab1eb0409d23de5f67cc588c3ea4cee4d10e0 Mon Sep 17 00:00:00 2001
From: Francis Deslauriers
Date: Thu, 11 Oct 2018 17:37:00 -0400
Subject: [PATCH] Prevent allocation of buffers if exceeding available memory

Issue
=====
The running system can be rendered unusable by creating channel buffers
larger than the available memory of the system, resulting in random
processes being killed by the OOM-killer.

These simple commands trigger the crash on my laptop with 15G of RAM:
  lttng create
  lttng enable-channel -k --subbuf-size=16G --num-subbuf=1 chan0

Note that subbuf-size * num-subbuf is larger than the physical memory.

Solution
========
Get an estimate of the number of available pages and return ENOMEM if
there are not enough pages to cover the needs of the caller. Also, mark
the calling user thread as the first target for the OOM killer in case
the estimate of available pages was wrong.

This greatly reduces the attack surface of this issue as well as its
potential impact.

This approach is inspired by the one taken by the Linux kernel trace
ring buffer[1].

Drawback
========
This approach is imperfect because it is based on an estimate.

[1]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/kernel/trace/ring_buffer.c#n1172

Signed-off-by: Francis Deslauriers
Signed-off-by: Mathieu Desnoyers
---
 lib/ringbuffer/ring_buffer_backend.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

diff --git a/lib/ringbuffer/ring_buffer_backend.c b/lib/ringbuffer/ring_buffer_backend.c
index 9d199e51..8207f764 100644
--- a/lib/ringbuffer/ring_buffer_backend.c
+++ b/lib/ringbuffer/ring_buffer_backend.c
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -43,6 +44,23 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 	unsigned long i;
 
 	num_pages = size >> PAGE_SHIFT;
+
+	/*
+	 * Verify that the number of pages requested for that buffer is smaller
+	 * than the number of available pages on the system. si_mem_available()
+	 * returns an _estimate_ of the number of available pages.
+	 */
+	if (num_pages > si_mem_available())
+		goto not_enough_pages;
+
+	/*
+	 * Set the current user thread as the first target of the OOM killer.
+	 * If the estimate received by si_mem_available() was off, and we do
+	 * end up running out of memory because of this buffer allocation, we
+	 * want to kill the offending app first.
+	 */
+	set_current_oom_origin();
+
 	num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
 	subbuf_size = chanb->subbuf_size;
 	num_subbuf_alloc = num_subbuf;
@@ -137,6 +155,7 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
 	 * will not fault.
 	 */
 	wrapper_vmalloc_sync_all();
+	clear_current_oom_origin();
 	vfree(pages);
 	return 0;
 
@@ -153,6 +172,8 @@ depopulate:
 array_error:
 	vfree(pages);
 pages_error:
+	clear_current_oom_origin();
+not_enough_pages:
 	return -ENOMEM;
 }
 
-- 
2.34.1