X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=lib%2Fringbuffer%2Fring_buffer_backend.c;h=27554a1e46b447464b81f48c48d02eb87906a490;hb=df388b78db6422e49c9f628a8fa36d60092b35e1;hp=83a6e39b4b2df692c955c6930f04d432e4a2c88a;hpb=0112cb7bc68d85c9c98b496937bde7af50f13cc6;p=lttng-modules.git

diff --git a/lib/ringbuffer/ring_buffer_backend.c b/lib/ringbuffer/ring_buffer_backend.c
index 83a6e39b..27554a1e 100644
--- a/lib/ringbuffer/ring_buffer_backend.c
+++ b/lib/ringbuffer/ring_buffer_backend.c
@@ -27,11 +27,12 @@
 #include
 #include
 #include
+#include

-#include "../../wrapper/vmalloc.h" /* for wrapper_vmalloc_sync_all() */
-#include "../../wrapper/ringbuffer/config.h"
-#include "../../wrapper/ringbuffer/backend.h"
-#include "../../wrapper/ringbuffer/frontend.h"
+#include <wrapper/vmalloc.h> /* for wrapper_vmalloc_sync_all() */
+#include <wrapper/ringbuffer/config.h>
+#include <wrapper/ringbuffer/backend.h>
+#include <wrapper/ringbuffer/frontend.h>

 /**
  * lib_ring_buffer_backend_allocate - allocate a channel buffer
@@ -64,22 +65,23 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
                num_subbuf_alloc++;
        }

-       pages = kmalloc_node(ALIGN(sizeof(*pages) * num_pages,
+       pages = vmalloc_node(ALIGN(sizeof(*pages) * num_pages,
                                   1 << INTERNODE_CACHE_SHIFT),
-                       GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
+                       cpu_to_node(max(bufb->cpu, 0)));
        if (unlikely(!pages))
                goto pages_error;

        bufb->array = kmalloc_node(ALIGN(sizeof(*bufb->array)
                                         * num_subbuf_alloc,
                                  1 << INTERNODE_CACHE_SHIFT),
-                       GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
+                       GFP_KERNEL | __GFP_NOWARN,
+                       cpu_to_node(max(bufb->cpu, 0)));
        if (unlikely(!bufb->array))
                goto array_error;

        for (i = 0; i < num_pages; i++) {
                pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
-                                           GFP_KERNEL | __GFP_ZERO, 0);
+                                           GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
                if (unlikely(!pages[i]))
                        goto depopulate;
        }
@@ -93,7 +95,8 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
                                sizeof(struct lib_ring_buffer_backend_page)
                                * num_pages_per_subbuf,
                                1 << INTERNODE_CACHE_SHIFT),
-                               GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
+                               GFP_KERNEL | __GFP_NOWARN,
+                               cpu_to_node(max(bufb->cpu, 0)));
                if (!bufb->array[i])
                        goto free_array;
        }
@@ -103,7 +106,8 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
                                sizeof(struct lib_ring_buffer_backend_subbuffer)
                                * num_subbuf,
                                1 << INTERNODE_CACHE_SHIFT),
-                               GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
+                               GFP_KERNEL | __GFP_NOWARN,
+                               cpu_to_node(max(bufb->cpu, 0)));
        if (unlikely(!bufb->buf_wsb))
                goto free_array;

@@ -122,7 +126,8 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
                                sizeof(struct lib_ring_buffer_backend_counts)
                                * num_subbuf,
                                1 << INTERNODE_CACHE_SHIFT),
-                               GFP_KERNEL, cpu_to_node(max(bufb->cpu, 0)));
+                               GFP_KERNEL | __GFP_NOWARN,
+                               cpu_to_node(max(bufb->cpu, 0)));
        if (unlikely(!bufb->buf_cnt))
                goto free_wsb;

@@ -145,7 +150,7 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
         * will not fault.
         */
        wrapper_vmalloc_sync_all();
-       kfree(pages);
+       vfree(pages);
        return 0;

 free_wsb:
@@ -159,7 +164,7 @@ depopulate:
                __free_page(pages[i]);
        kfree(bufb->array);
 array_error:
-       kfree(pages);
+       vfree(pages);
 pages_error:
        return -ENOMEM;
 }
@@ -422,6 +427,7 @@ free_bufs:
                }
 #ifdef CONFIG_HOTPLUG_CPU
                put_online_cpus();
+               unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
 #endif
                free_percpu(chanb->buf);
        } else
@@ -693,8 +699,7 @@ void _lib_ring_buffer_copy_from_user_inatomic(struct lib_ring_buffer_backend *bu
                                                + (offset & ~PAGE_MASK),
                                                src, pagecpy) != 0;
                if (ret > 0) {
-                       offset += (pagecpy - ret);
-                       len -= (pagecpy - ret);
+                       /* Copy failed. */
                        _lib_ring_buffer_memset(bufb, offset, 0, len, 0);
                        break; /* stop copy */
                }