Move headers under include/
diff --git a/lib/ringbuffer/ring_buffer_backend.c b/lib/ringbuffer/ring_buffer_backend.c
index 468b0e9b86715809dbb7876c64a246a00f401cf0..ec819703f31255d95869d3319fe7600c2b7d33e5 100644
--- a/lib/ringbuffer/ring_buffer_backend.c
+++ b/lib/ringbuffer/ring_buffer_backend.c
 #include <linux/cpu.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
+#include <linux/oom.h>
 
-#include <wrapper/mm.h>
-#include <wrapper/vmalloc.h>   /* for wrapper_vmalloc_sync_all() */
-#include <wrapper/ringbuffer/config.h>
-#include <wrapper/ringbuffer/backend.h>
-#include <wrapper/ringbuffer/frontend.h>
+#include <ringbuffer/config.h>
+#include <ringbuffer/backend.h>
+#include <ringbuffer/frontend.h>
 
 /**
  * lib_ring_buffer_backend_allocate - allocate a channel buffer
@@ -52,7 +51,7 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
         * and returns if there should be enough free pages based on the
         * current estimate.
         */
-       if (!wrapper_check_enough_free_pages(num_pages))
+       if (num_pages >= si_mem_available())
                goto not_enough_pages;
 
        /*
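
The open-coded comparison replaces the old wrapper_check_enough_free_pages(), which existed only for kernels lacking si_mem_available(). The two are equivalent: the wrapper returned true when enough pages were available, and the new test is its negation. A minimal sketch of what the dropped wrapper amounted to (the helper name is illustrative, not LTTng's):

    #include <linux/mm.h>   /* si_mem_available() */

    static inline bool enough_free_pages(unsigned long num_pages)
    {
            /*
             * si_mem_available() estimates how many pages can be
             * allocated without pushing the system into swap; the
             * "num_pages >= si_mem_available()" check above is the
             * negation of this predicate.
             */
            return num_pages < si_mem_available();
    }
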
@@ -61,7 +60,7 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
         * end up running out of memory because of this buffer allocation, we
         * want to kill the offending app first.
         */
-       wrapper_set_current_oom_origin();
+       set_current_oom_origin();
 
        num_pages_per_subbuf = num_pages >> get_count_order(num_subbuf);
        subbuf_size = chanb->subbuf_size;
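
set_current_oom_origin() and clear_current_oom_origin() come straight from <linux/oom.h> (hence the new include above); they flag the current task as the preferred OOM victim while the large buffer allocation is in flight. The wrapper variants only existed for kernels predating that API. A hedged sketch of the bracket pattern, with illustrative names:

    #include <linux/oom.h>
    #include <linux/mm.h>

    static void *alloc_big_buffer(size_t size)
    {
            void *buf;

            set_current_oom_origin();   /* if the system OOMs now, kill us first */
            buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN);
            clear_current_oom_origin(); /* restore normal victim selection */
            return buf;
    }
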
@@ -78,14 +77,13 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
        if (unlikely(!pages))
                goto pages_error;
 
-       bufb->array = lttng_kvmalloc_node(ALIGN(sizeof(*bufb->array)
-                                        * num_subbuf_alloc,
-                                 1 << INTERNODE_CACHE_SHIFT),
+       bufb->array = kvmalloc_node(ALIGN(sizeof(*bufb->array)
+                                   * num_subbuf_alloc,
+                                   1 << INTERNODE_CACHE_SHIFT),
                        GFP_KERNEL | __GFP_NOWARN,
                        cpu_to_node(max(bufb->cpu, 0)));
        if (unlikely(!bufb->array))
                goto array_error;
-
        for (i = 0; i < num_pages; i++) {
                pages[i] = alloc_pages_node(cpu_to_node(max(bufb->cpu, 0)),
                                GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 0);
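
lttng_kvmalloc_node() was a compatibility shim emulating kvmalloc_node() on kernels that predate it; the kernel primitive is now called directly. kvmalloc_node() attempts a kmalloc first and transparently falls back to vmalloc for large or fragmented requests, and the matching kvfree() copes with either backing. The cpu_to_node(max(bufb->cpu, 0)) argument keeps the allocation NUMA-local to the buffer's CPU, presumably clamped because bufb->cpu can be -1 for global buffers. A small hedged illustration:

    #include <linux/mm.h>
    #include <linux/printk.h>

    static void kv_demo(void)
    {
            void *p = kvmalloc_node(1UL << 20, GFP_KERNEL | __GFP_NOWARN,
                                    NUMA_NO_NODE);

            if (p && is_vmalloc_addr(p))
                    pr_debug("large request fell back to vmalloc\n");
            kvfree(p);      /* handles both kmalloc and vmalloc backing */
    }
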
@@ -97,7 +95,7 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
        /* Allocate backend pages array elements */
        for (i = 0; i < num_subbuf_alloc; i++) {
                bufb->array[i] =
-                       lttng_kvzalloc_node(ALIGN(
+                       kvzalloc_node(ALIGN(
                                sizeof(struct lib_ring_buffer_backend_pages) +
                                sizeof(struct lib_ring_buffer_backend_page)
                                * num_pages_per_subbuf,
@@ -109,7 +107,7 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
        }
 
        /* Allocate write-side subbuffer table */
-       bufb->buf_wsb = lttng_kvzalloc_node(ALIGN(
+       bufb->buf_wsb = kvzalloc_node(ALIGN(
                                sizeof(struct lib_ring_buffer_backend_subbuffer)
                                * num_subbuf,
                                1 << INTERNODE_CACHE_SHIFT),
@@ -129,7 +127,7 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
                bufb->buf_rsb.id = subbuffer_id(config, 0, 1, 0);
 
        /* Allocate subbuffer packet counter table */
-       bufb->buf_cnt = lttng_kvzalloc_node(ALIGN(
+       bufb->buf_cnt = kvzalloc_node(ALIGN(
                                sizeof(struct lib_ring_buffer_backend_counts)
                                * num_subbuf,
                                1 << INTERNODE_CACHE_SHIFT),
@@ -152,29 +150,24 @@ int lib_ring_buffer_backend_allocate(const struct lib_ring_buffer_config *config
                }
        }
 
-       /*
-        * If kmalloc ever uses vmalloc underneath, make sure the buffer pages
-        * will not fault.
-        */
-       wrapper_vmalloc_sync_all();
-       wrapper_clear_current_oom_origin();
+       clear_current_oom_origin();
        vfree(pages);
        return 0;
 
 free_wsb:
-       lttng_kvfree(bufb->buf_wsb);
+       kvfree(bufb->buf_wsb);
 free_array:
        for (i = 0; (i < num_subbuf_alloc && bufb->array[i]); i++)
-               lttng_kvfree(bufb->array[i]);
+               kvfree(bufb->array[i]);
 depopulate:
        /* Free all allocated pages */
        for (i = 0; (i < num_pages && pages[i]); i++)
                __free_page(pages[i]);
-       lttng_kvfree(bufb->array);
+       kvfree(bufb->array);
 array_error:
        vfree(pages);
 pages_error:
-       wrapper_clear_current_oom_origin();
+       clear_current_oom_origin();
 not_enough_pages:
        return -ENOMEM;
 }
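
Note the dropped wrapper_vmalloc_sync_all() call before the success return, presumably because the kernels this tree now supports no longer rely on lazy vmalloc page-table faulting, so the module needs no explicit synchronization before the buffer is touched in tracing context. The OOM-origin bracket stays balanced on every path: clear_current_oom_origin() runs on success and again under pages_error. The error labels use the usual reverse-order partial unwind, sketched here with an illustrative helper:

    /* Free only what was successfully allocated, as the depopulate
     * label above does: a failed alloc_pages_node() stores NULL and
     * terminates the scan. */
    static void unwind_pages(struct page **pages, unsigned long num_pages)
    {
            unsigned long i;

            for (i = 0; i < num_pages && pages[i]; i++)
                    __free_page(pages[i]);
    }
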
@@ -201,14 +194,14 @@ void lib_ring_buffer_backend_free(struct lib_ring_buffer_backend *bufb)
        if (chanb->extra_reader_sb)
                num_subbuf_alloc++;
 
-       lttng_kvfree(bufb->buf_wsb);
-       lttng_kvfree(bufb->buf_cnt);
+       kvfree(bufb->buf_wsb);
+       kvfree(bufb->buf_cnt);
        for (i = 0; i < num_subbuf_alloc; i++) {
                for (j = 0; j < bufb->num_pages_per_subbuf; j++)
                        __free_page(pfn_to_page(bufb->array[i]->p[j].pfn));
-               lttng_kvfree(bufb->array[i]);
+               kvfree(bufb->array[i]);
        }
-       lttng_kvfree(bufb->array);
+       kvfree(bufb->array);
        bufb->allocated = 0;
 }
 
@@ -259,8 +252,6 @@ void channel_backend_reset(struct channel_backend *chanb)
        chanb->start_tsc = config->cb.ring_buffer_clock_read(chan);
 }
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
-
 /*
  * No need to implement a "dead" callback to do a buffer switch here,
  * because it will happen when tracing is stopped, or will be done by
@@ -291,58 +282,6 @@ int lttng_cpuhp_rb_backend_prepare(unsigned int cpu,
 }
 EXPORT_SYMBOL_GPL(lttng_cpuhp_rb_backend_prepare);
 
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-
-#ifdef CONFIG_HOTPLUG_CPU
-
-/**
- *     lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
- *     @nb: notifier block
- *     @action: hotplug action to take
- *     @hcpu: CPU number
- *
- *     Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
- */
-static
-int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
-                                             unsigned long action,
-                                             void *hcpu)
-{
-       unsigned int cpu = (unsigned long)hcpu;
-       struct channel_backend *chanb = container_of(nb, struct channel_backend,
-                                                    cpu_hp_notifier);
-       const struct lib_ring_buffer_config *config = &chanb->config;
-       struct lib_ring_buffer *buf;
-       int ret;
-
-       CHAN_WARN_ON(chanb, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
-
-       switch (action) {
-       case CPU_UP_PREPARE:
-       case CPU_UP_PREPARE_FROZEN:
-               buf = per_cpu_ptr(chanb->buf, cpu);
-               ret = lib_ring_buffer_create(buf, chanb, cpu);
-               if (ret) {
-                       printk(KERN_ERR
-                         "ring_buffer_cpu_hp_callback: cpu %d "
-                         "buffer creation failed\n", cpu);
-                       return NOTIFY_BAD;
-               }
-               break;
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-               /* No need to do a buffer switch here, because it will happen
-                * when tracing is stopped, or will be done by switch timer CPU
-                * DEAD callback. */
-               break;
-       }
-       return NOTIFY_OK;
-}
-
-#endif
-
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-
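
Everything under the pre-4.10 guard goes away: the notifier-based hotplug API (register_hotcpu_notifier(), CPU_UP_PREPARE, NOTIFY_OK/NOTIFY_BAD) was deprecated and later removed upstream in favor of the cpuhp state machine, so only the lttng_cpuhp_rb_backend_prepare() path survives. A hedged sketch of the multi-instance "prepare" pattern the file now relies on (names are illustrative, not LTTng's):

    #include <linux/cpuhotplug.h>

    static enum cpuhp_state my_hp_state;

    static int my_prepare_cb(unsigned int cpu, struct hlist_node *node)
    {
            /* Create the per-cpu buffer for @cpu; return 0 on success. */
            return 0;
    }

    static int __init my_init(void)
    {
            int ret;

            /* Dynamically allocate one "prepare" state shared by all
             * registered instances. */
            ret = cpuhp_setup_state_multi(CPUHP_BP_PREPARE_DYN,
                                          "mydrv:prepare",
                                          my_prepare_cb, NULL);
            if (ret < 0)
                    return ret;
            my_hp_state = ret;
            return 0;
    }
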
 /**
  * channel_backend_init - initialize a channel backend
  * @chanb: channel backend
@@ -419,50 +358,11 @@ int channel_backend_init(struct channel_backend *chanb,
                if (!chanb->buf)
                        goto free_cpumask;
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
                chanb->cpuhp_prepare.component = LTTNG_RING_BUFFER_BACKEND;
                ret = cpuhp_state_add_instance(lttng_rb_hp_prepare,
                        &chanb->cpuhp_prepare.node);
                if (ret)
                        goto free_bufs;
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-
-               {
-                       /*
-                        * In case of non-hotplug cpu, if the ring-buffer is allocated
-                        * in early initcall, it will not be notified of secondary cpus.
-                        * In that off case, we need to allocate for all possible cpus.
-                        */
-#ifdef CONFIG_HOTPLUG_CPU
-                       /*
-                        * buf->backend.allocated test takes care of concurrent CPU
-                        * hotplug.
-                        * Priority higher than frontend, so we create the ring buffer
-                        * before we start the timer.
-                        */
-                       chanb->cpu_hp_notifier.notifier_call =
-                                       lib_ring_buffer_cpu_hp_callback;
-                       chanb->cpu_hp_notifier.priority = 5;
-                       register_hotcpu_notifier(&chanb->cpu_hp_notifier);
-
-                       get_online_cpus();
-                       for_each_online_cpu(i) {
-                               ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
-                                                        chanb, i);
-                               if (ret)
-                                       goto free_bufs; /* cpu hotplug locked */
-                       }
-                       put_online_cpus();
-#else
-                       for_each_possible_cpu(i) {
-                               ret = lib_ring_buffer_create(per_cpu_ptr(chanb->buf, i),
-                                                        chanb, i);
-                               if (ret)
-                                       goto free_bufs;
-                       }
-#endif
-               }
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
        } else {
                chanb->buf = kzalloc(sizeof(struct lib_ring_buffer), GFP_KERNEL);
                if (!chanb->buf)
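
With the version guards gone, per-CPU channels always register through the cpuhp prepare state: cpuhp_state_add_instance() invokes the prepare callback for every CPU currently online and re-arms it for CPUs that come up later, which is exactly what the removed for_each_online_cpu()/for_each_possible_cpu() loops hand-rolled. Continuing the sketch above:

    struct my_chan {
            struct hlist_node cpuhp_node;
    };

    static int my_chan_init(struct my_chan *c)
    {
            /* Runs my_prepare_cb() on each online CPU now, and on any
             * CPU hotplugged in afterwards. */
            return cpuhp_state_add_instance(my_hp_state, &c->cpuhp_node);
    }
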
@@ -477,18 +377,11 @@ int channel_backend_init(struct channel_backend *chanb,
 
 free_bufs:
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
                /*
                 * Teardown of lttng_rb_hp_prepare instance
                 * on "add" error is handled within cpu hotplug,
                 * no teardown to do from the caller.
                 */
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-#ifdef CONFIG_HOTPLUG_CPU
-               put_online_cpus();
-               unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
-#endif
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
                for_each_possible_cpu(i) {
                        struct lib_ring_buffer *buf =
                                per_cpu_ptr(chanb->buf, i);
@@ -517,15 +410,11 @@ void channel_backend_unregister_notifiers(struct channel_backend *chanb)
        const struct lib_ring_buffer_config *config = &chanb->config;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0))
                int ret;
 
                ret = cpuhp_state_remove_instance(lttng_rb_hp_prepare,
                                &chanb->cpuhp_prepare.node);
                WARN_ON(ret);
-#else /* #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
-               unregister_hotcpu_notifier(&chanb->cpu_hp_notifier);
-#endif /* #else #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,10,0)) */
        }
 }
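
The matching teardown: cpuhp_state_remove_instance() unhooks the per-channel node from the prepare state so the callback no longer fires for this channel on future hotplug events. Completing the sketch:

    static void my_chan_exit(struct my_chan *c)
    {
            /* Stop receiving prepare callbacks for this instance. */
            WARN_ON(cpuhp_state_remove_instance(my_hp_state,
                                                &c->cpuhp_node));
    }
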
 