Cygwin: Pass file paths instead of file descriptors over UNIX sockets
[lttng-ust.git] / libringbuffer / ring_buffer_frontend.c
index a3f14f3d9926855b738d7e6e82c8a950844a12dc..c9de2923034231e2edd266b33d45cd554f5f3248 100644 (file)
@@ -1,7 +1,22 @@
 /*
  * ring_buffer_frontend.c
  *
- * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
  *
  * Ring buffer wait-free buffer synchronization. Producer-consumer and flight
  * recorder (overwrite) modes. See thesis:
  *   - splice one subbuffer worth of data to a pipe
  *   - splice the data from pipe to disk/network
  *   - put_subbuf
- *
- * Dual LGPL v2.1/GPL v2 license.
  */
 
-#include "config.h"
+#define _GNU_SOURCE
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <urcu/compiler.h>
+#include <urcu/ref.h>
+#include <helper.h>
+
+#include "smp.h"
+#include <lttng/ringbuffer-config.h>
+#include "vatomic.h"
 #include "backend.h"
 #include "frontend.h"
-#include "iterator.h"
-#include "nohz.h"
+#include "shm.h"
+#include "tlsfixup.h"
+
+#ifndef max
+#define max(a, b)      ((a) > (b) ? (a) : (b))
+#endif
+
+/* Print DBG() messages about events lost only every 1048576 hits */
+#define DBG_PRINT_NR_LOST      (1UL << 20)
+
+/*
+ * Use POSIX SHM: shm_open(3) and shm_unlink(3).
+ * close(2) to close the fd returned by shm_open.
+ * shm_unlink releases the shared memory object name.
+ * ftruncate(2) sets the size of the memory object.
+ * mmap/munmap map the shared memory object to a virtual address in the
+ * calling process (this should be done in both libust and the consumer).
+ * See shm_overview(7) for details.
+ * Pass file descriptor returned by shm_open(3) to ltt-sessiond through
+ * a UNIX socket.
+ *
+ * Since we don't need to access the object using its name, we can
+ * immediately shm_unlink(3) it, and only keep the handle with its file
+ * descriptor.
+ */
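The comment above compresses the whole shm life cycle; as a stand-alone illustration, the sequence looks roughly like the sketch below. The function name and error handling are made up for the example and this is not the actual shm.c code. The returned descriptor is what later travels to the consumer over a UNIX socket (via an SCM_RIGHTS control message where supported; the Cygwin case this commit addresses passes a path instead).

```c
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

/* Illustrative sketch of the shm life cycle described above. */
static void *create_shm_region(const char *name, size_t len, int *fd_out)
{
	void *ptr;
	int fd;

	fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, 0700);
	if (fd < 0)
		return NULL;
	/* Name no longer needed: all further access goes through the fd. */
	(void) shm_unlink(name);
	if (ftruncate(fd, len) < 0)
		goto error;
	/* Both libust and the consumer map the same object. */
	ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ptr == MAP_FAILED)
		goto error;
	*fd_out = fd;	/* to be passed over the UNIX socket */
	return ptr;

error:
	close(fd);
	return NULL;
}
```

Unlinking immediately means the kernel reclaims the object once the last descriptor and mapping are gone, even if a process dies before tearing it down explicitly.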
 
 /*
  * Internal structure representing offsets to use at a sub-buffer switch.
@@ -54,38 +101,17 @@ struct switch_offsets {
                     switch_old_end:1;
 };
 
-#ifdef CONFIG_NO_HZ
-enum tick_nohz_val {
-       TICK_NOHZ_STOP,
-       TICK_NOHZ_FLUSH,
-       TICK_NOHZ_RESTART,
-};
-
-static ATOMIC_NOTIFIER_HEAD(tick_nohz_notifier);
-#endif /* CONFIG_NO_HZ */
-
-static DEFINE_PER_CPU(spinlock_t, ring_buffer_nohz_lock);
-
-DEFINE_PER_CPU(unsigned int, lib_ring_buffer_nesting);
-EXPORT_PER_CPU_SYMBOL(lib_ring_buffer_nesting);
-
-static
-void lib_ring_buffer_print_errors(struct channel *chan,
-                                 struct lib_ring_buffer *buf, int cpu);
+__thread unsigned int lib_ring_buffer_nesting;
 
 /*
- * Must be called under cpu hotplug protection.
+ * TODO: this is unused. Errors are saved within the ring buffer.
+ * Eventually, allow consumerd to print these errors.
  */
-void lib_ring_buffer_free(struct lib_ring_buffer *buf)
-{
-       struct channel *chan = buf->backend.chan;
-
-       lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu);
-       kfree(buf->commit_hot);
-       kfree(buf->commit_cold);
-
-       lib_ring_buffer_backend_free(&buf->backend);
-}
+static
+void lib_ring_buffer_print_errors(struct channel *chan,
+                                 struct lttng_ust_lib_ring_buffer *buf, int cpu,
+                                 struct lttng_ust_shm_handle *handle)
+       __attribute__((unused));
 
 /**
  * lib_ring_buffer_reset - Reset ring buffer to initial values.
@@ -96,27 +122,27 @@ void lib_ring_buffer_free(struct lib_ring_buffer *buf)
  * should not be using the iterator concurrently with reset. The previous
  * current iterator record is reset.
  */
-void lib_ring_buffer_reset(struct lib_ring_buffer *buf)
+void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
+                          struct lttng_ust_shm_handle *handle)
 {
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       struct channel *chan = shmp(handle, buf->backend.chan);
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned int i;
 
        /*
         * Reset iterator first. It will put the subbuffer if it currently holds
         * it.
         */
-       lib_ring_buffer_iterator_reset(buf);
        v_set(config, &buf->offset, 0);
        for (i = 0; i < chan->backend.num_subbuf; i++) {
-               v_set(config, &buf->commit_hot[i].cc, 0);
-               v_set(config, &buf->commit_hot[i].seq, 0);
-               v_set(config, &buf->commit_cold[i].cc_sb, 0);
+               v_set(config, &shmp_index(handle, buf->commit_hot, i)->cc, 0);
+               v_set(config, &shmp_index(handle, buf->commit_hot, i)->seq, 0);
+               v_set(config, &shmp_index(handle, buf->commit_cold, i)->cc_sb, 0);
        }
-       atomic_long_set(&buf->consumed, 0);
-       atomic_set(&buf->record_disabled, 0);
+       uatomic_set(&buf->consumed, 0);
+       uatomic_set(&buf->record_disabled, 0);
        v_set(config, &buf->last_tsc, 0);
-       lib_ring_buffer_backend_reset(&buf->backend);
+       lib_ring_buffer_backend_reset(&buf->backend, handle);
        /* Don't reset number of active readers */
        v_set(config, &buf->records_lost_full, 0);
        v_set(config, &buf->records_lost_wrap, 0);
@@ -125,7 +151,6 @@ void lib_ring_buffer_reset(struct lib_ring_buffer *buf)
        v_set(config, &buf->records_overrun, 0);
        buf->finalized = 0;
 }
-EXPORT_SYMBOL_GPL(lib_ring_buffer_reset);
 
 /**
  * channel_reset - Reset channel to initial values.
@@ -141,444 +166,257 @@ void channel_reset(struct channel *chan)
        /*
         * Reset iterators first. Will put the subbuffer if held for reading.
         */
-       channel_iterator_reset(chan);
-       atomic_set(&chan->record_disabled, 0);
+       uatomic_set(&chan->record_disabled, 0);
        /* Don't reset commit_count_mask, still valid */
        channel_backend_reset(&chan->backend);
        /* Don't reset switch/read timer interval */
        /* Don't reset notifiers and notifier enable bits */
        /* Don't reset reader reference count */
 }
-EXPORT_SYMBOL_GPL(channel_reset);
 
 /*
  * Must be called under cpu hotplug protection.
  */
-int lib_ring_buffer_create(struct lib_ring_buffer *buf,
-                          struct channel_backend *chanb, int cpu)
+int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
+                          struct channel_backend *chanb, int cpu,
+                          struct lttng_ust_shm_handle *handle,
+                          struct shm_object *shmobj)
 {
-       const struct lib_ring_buffer_config *config = chanb->config;
-       struct channel *chan = container_of(chanb, struct channel, backend);
-       void *priv = chanb->priv;
-       unsigned int num_subbuf;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
+       struct channel *chan = caa_container_of(chanb, struct channel, backend);
+       void *priv = channel_get_private(chan);
        size_t subbuf_header_size;
-       u64 tsc;
+       uint64_t tsc;
        int ret;
 
        /* Test for cpu hotplug */
        if (buf->backend.allocated)
                return 0;
 
-       /*
-        * Paranoia: per cpu dynamic allocation is not officially documented as
-        * zeroing the memory, so let's do it here too, just in case.
-        */
-       memset(buf, 0, sizeof(*buf));
-
-       ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend, cpu);
+       ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend,
+                       cpu, handle, shmobj);
        if (ret)
                return ret;
 
-       buf->commit_hot =
-               kzalloc_node(ALIGN(sizeof(*buf->commit_hot)
-                                  * chan->backend.num_subbuf,
-                                  1 << INTERNODE_CACHE_SHIFT),
-                       GFP_KERNEL, cpu_to_node(max(cpu, 0)));
-       if (!buf->commit_hot) {
+       align_shm(shmobj, __alignof__(struct commit_counters_hot));
+       set_shmp(buf->commit_hot,
+                zalloc_shm(shmobj,
+                       sizeof(struct commit_counters_hot) * chan->backend.num_subbuf));
+       if (!shmp(handle, buf->commit_hot)) {
                ret = -ENOMEM;
                goto free_chanbuf;
        }
 
-       buf->commit_cold =
-               kzalloc_node(ALIGN(sizeof(*buf->commit_cold)
-                                  * chan->backend.num_subbuf,
-                                  1 << INTERNODE_CACHE_SHIFT),
-                       GFP_KERNEL, cpu_to_node(max(cpu, 0)));
-       if (!buf->commit_cold) {
+       align_shm(shmobj, __alignof__(struct commit_counters_cold));
+       set_shmp(buf->commit_cold,
+                zalloc_shm(shmobj,
+                       sizeof(struct commit_counters_cold) * chan->backend.num_subbuf));
+       if (!shmp(handle, buf->commit_cold)) {
                ret = -ENOMEM;
                goto free_commit;
        }
 
-       num_subbuf = chan->backend.num_subbuf;
-       init_waitqueue_head(&buf->read_wait);
-       raw_spin_lock_init(&buf->raw_tick_nohz_spinlock);
-
        /*
         * Write the subbuffer header for first subbuffer so we know the total
         * duration of data gathering.
         */
        subbuf_header_size = config->cb.subbuffer_header_size();
        v_set(config, &buf->offset, subbuf_header_size);
-       subbuffer_id_clear_noref(config, &buf->backend.buf_wsb[0].id);
-       tsc = config->cb.ring_buffer_clock_read(buf->backend.chan);
-       config->cb.buffer_begin(buf, tsc, 0);
-       v_add(config, subbuf_header_size, &buf->commit_hot[0].cc);
+       subbuffer_id_clear_noref(config, &shmp_index(handle, buf->backend.buf_wsb, 0)->id);
+       tsc = config->cb.ring_buffer_clock_read(shmp(handle, buf->backend.chan));
+       config->cb.buffer_begin(buf, tsc, 0, handle);
+       v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->cc);
 
        if (config->cb.buffer_create) {
-               ret = config->cb.buffer_create(buf, priv, cpu, chanb->name);
+               ret = config->cb.buffer_create(buf, priv, cpu, chanb->name, handle);
                if (ret)
                        goto free_init;
        }
-
-       /*
-        * Ensure the buffer is ready before setting it to allocated and setting
-        * the cpumask.
-        * Used for cpu hotplug vs cpumask iteration.
-        */
-       smp_wmb();
        buf->backend.allocated = 1;
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-               CHAN_WARN_ON(chan, cpumask_test_cpu(cpu,
-                            chan->backend.cpumask));
-               cpumask_set_cpu(cpu, chan->backend.cpumask);
-       }
-
        return 0;
 
        /* Error handling */
 free_init:
-       kfree(buf->commit_cold);
+       /* commit_cold will be freed by shm teardown */
 free_commit:
-       kfree(buf->commit_hot);
+       /* commit_hot will be freed by shm teardown */
 free_chanbuf:
-       lib_ring_buffer_backend_free(&buf->backend);
        return ret;
 }
 
+#if 0
 static void switch_buffer_timer(unsigned long data)
 {
-       struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       struct lttng_ust_lib_ring_buffer *buf = (struct lttng_ust_lib_ring_buffer *)data;
+       struct channel *chan = shmp(handle, buf->backend.chan);
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 
        /*
         * Only flush buffers periodically if readers are active.
         */
-       if (atomic_long_read(&buf->active_readers))
-               lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
-
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               mod_timer_pinned(&buf->switch_timer,
-                                jiffies + chan->switch_timer_interval);
-       else
-               mod_timer(&buf->switch_timer,
-                         jiffies + chan->switch_timer_interval);
+       if (uatomic_read(&buf->active_readers) || uatomic_read(&buf->active_shadow_readers))
+               lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE, handle);
+
+       //TODO timers
+       //if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+       //      mod_timer_pinned(&buf->switch_timer,
+       //                       jiffies + chan->switch_timer_interval);
+       //else
+       //      mod_timer(&buf->switch_timer,
+       //                jiffies + chan->switch_timer_interval);
 }
+#endif //0
 
-/*
- * Called with ring_buffer_nohz_lock held for per-cpu buffers.
- */
-static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_start_switch_timer(struct lttng_ust_lib_ring_buffer *buf,
+                          struct lttng_ust_shm_handle *handle)
 {
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       struct channel *chan = shmp(handle, buf->backend.chan);
+       //const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 
        if (!chan->switch_timer_interval || buf->switch_timer_enabled)
                return;
-       init_timer(&buf->switch_timer);
-       buf->switch_timer.function = switch_buffer_timer;
-       buf->switch_timer.expires = jiffies + chan->switch_timer_interval;
-       buf->switch_timer.data = (unsigned long)buf;
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               add_timer_on(&buf->switch_timer, buf->backend.cpu);
-       else
-               add_timer(&buf->switch_timer);
+       //TODO
+       //init_timer(&buf->switch_timer);
+       //buf->switch_timer.function = switch_buffer_timer;
+       //buf->switch_timer.expires = jiffies + chan->switch_timer_interval;
+       //buf->switch_timer.data = (unsigned long)buf;
+       //if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+       //      add_timer_on(&buf->switch_timer, buf->backend.cpu);
+       //else
+       //      add_timer(&buf->switch_timer);
        buf->switch_timer_enabled = 1;
 }
 
-/*
- * Called with ring_buffer_nohz_lock held for per-cpu buffers.
- */
-static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_stop_switch_timer(struct lttng_ust_lib_ring_buffer *buf,
+                          struct lttng_ust_shm_handle *handle)
 {
-       struct channel *chan = buf->backend.chan;
+       struct channel *chan = shmp(handle, buf->backend.chan);
 
        if (!chan->switch_timer_interval || !buf->switch_timer_enabled)
                return;
 
-       del_timer_sync(&buf->switch_timer);
+       //TODO
+       //del_timer_sync(&buf->switch_timer);
        buf->switch_timer_enabled = 0;
 }
 
+#if 0
 /*
  * Polling timer to check the channels for data.
  */
 static void read_buffer_timer(unsigned long data)
 {
-       struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       struct lttng_ust_lib_ring_buffer *buf = (struct lttng_ust_lib_ring_buffer *)data;
+       struct channel *chan = shmp(handle, buf->backend.chan);
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 
        CHAN_WARN_ON(chan, !buf->backend.allocated);
 
-       if (atomic_long_read(&buf->active_readers)
+       if ((uatomic_read(&buf->active_readers) || uatomic_read(&buf->active_shadow_readers))
            && lib_ring_buffer_poll_deliver(config, buf, chan)) {
-               wake_up_interruptible(&buf->read_wait);
-               wake_up_interruptible(&chan->read_wait);
+               //TODO
+               //wake_up_interruptible(&buf->read_wait);
+               //wake_up_interruptible(&chan->read_wait);
        }
 
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               mod_timer_pinned(&buf->read_timer,
-                                jiffies + chan->read_timer_interval);
-       else
-               mod_timer(&buf->read_timer,
-                         jiffies + chan->read_timer_interval);
+       //TODO
+       //if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+       //      mod_timer_pinned(&buf->read_timer,
+       //                       jiffies + chan->read_timer_interval);
+       //else
+       //      mod_timer(&buf->read_timer,
+       //                jiffies + chan->read_timer_interval);
 }
+#endif //0
 
-/*
- * Called with ring_buffer_nohz_lock held for per-cpu buffers.
- */
-static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_start_read_timer(struct lttng_ust_lib_ring_buffer *buf,
+                          struct lttng_ust_shm_handle *handle)
 {
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       struct channel *chan = shmp(handle, buf->backend.chan);
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 
        if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
            || !chan->read_timer_interval
            || buf->read_timer_enabled)
                return;
 
-       init_timer(&buf->read_timer);
-       buf->read_timer.function = read_buffer_timer;
-       buf->read_timer.expires = jiffies + chan->read_timer_interval;
-       buf->read_timer.data = (unsigned long)buf;
+       //TODO
+       //init_timer(&buf->read_timer);
+       //buf->read_timer.function = read_buffer_timer;
+       //buf->read_timer.expires = jiffies + chan->read_timer_interval;
+       //buf->read_timer.data = (unsigned long)buf;
 
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               add_timer_on(&buf->read_timer, buf->backend.cpu);
-       else
-               add_timer(&buf->read_timer);
+       //if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+       //      add_timer_on(&buf->read_timer, buf->backend.cpu);
+       //else
+       //      add_timer(&buf->read_timer);
        buf->read_timer_enabled = 1;
 }
 
-/*
- * Called with ring_buffer_nohz_lock held for per-cpu buffers.
- */
-static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_stop_read_timer(struct lttng_ust_lib_ring_buffer *buf,
+                          struct lttng_ust_shm_handle *handle)
 {
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       struct channel *chan = shmp(handle, buf->backend.chan);
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 
        if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
            || !chan->read_timer_interval
            || !buf->read_timer_enabled)
                return;
 
-       del_timer_sync(&buf->read_timer);
+       //TODO
+       //del_timer_sync(&buf->read_timer);
        /*
         * do one more check to catch data that has been written in the last
         * timer period.
         */
-       if (lib_ring_buffer_poll_deliver(config, buf, chan)) {
-               wake_up_interruptible(&buf->read_wait);
-               wake_up_interruptible(&chan->read_wait);
+       if (lib_ring_buffer_poll_deliver(config, buf, chan, handle)) {
+               //TODO
+               //wake_up_interruptible(&buf->read_wait);
+               //wake_up_interruptible(&chan->read_wait);
        }
        buf->read_timer_enabled = 0;
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-/**
- *     lib_ring_buffer_cpu_hp_callback - CPU hotplug callback
- *     @nb: notifier block
- *     @action: hotplug action to take
- *     @hcpu: CPU number
- *
- *     Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
- */
-static
-int __cpuinit lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
-                                             unsigned long action,
-                                             void *hcpu)
-{
-       unsigned int cpu = (unsigned long)hcpu;
-       struct channel *chan = container_of(nb, struct channel,
-                                           cpu_hp_notifier);
-       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf, cpu);
-       const struct lib_ring_buffer_config *config = chan->backend.config;
-
-       if (!chan->cpu_hp_enable)
-               return NOTIFY_DONE;
-
-       CHAN_WARN_ON(chan, config->alloc == RING_BUFFER_ALLOC_GLOBAL);
-
-       switch (action) {
-       case CPU_DOWN_FAILED:
-       case CPU_DOWN_FAILED_FROZEN:
-       case CPU_ONLINE:
-       case CPU_ONLINE_FROZEN:
-               wake_up_interruptible(&chan->hp_wait);
-               lib_ring_buffer_start_switch_timer(buf);
-               lib_ring_buffer_start_read_timer(buf);
-               return NOTIFY_OK;
-
-       case CPU_DOWN_PREPARE:
-       case CPU_DOWN_PREPARE_FROZEN:
-               lib_ring_buffer_stop_switch_timer(buf);
-               lib_ring_buffer_stop_read_timer(buf);
-               return NOTIFY_OK;
-
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-               /*
-                * Performing a buffer switch on a remote CPU. Performed by
-                * the CPU responsible for doing the hotunplug after the target
-                * CPU stopped running completely. Ensures that all data
-                * from that remote CPU is flushed.
-                */
-               lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
-               return NOTIFY_OK;
-
-       default:
-               return NOTIFY_DONE;
-       }
-}
-#endif
-
-#if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
-/*
- * For per-cpu buffers, call the reader wakeups before switching the buffer, so
- * that wake-up-tracing generated events are flushed before going idle (in
- * tick_nohz). We test if the spinlock is locked to deal with the race where
- * readers try to sample the ring buffer before we perform the switch. We let
- * the readers retry in that case. If there is data in the buffer, the wake up
- * is going to forbid the CPU running the reader thread from going idle.
- */
-static int notrace ring_buffer_tick_nohz_callback(struct notifier_block *nb,
-                                                 unsigned long val,
-                                                 void *data)
-{
-       struct channel *chan = container_of(nb, struct channel,
-                                           tick_nohz_notifier);
-       const struct lib_ring_buffer_config *config = chan->backend.config;
-       struct lib_ring_buffer *buf;
-       int cpu = smp_processor_id();
-
-       if (config->alloc != RING_BUFFER_ALLOC_PER_CPU) {
-               /*
-                * We don't support keeping the system idle with global buffers
-                * and streaming active. In order to do so, we would need to
-                * sample a non-nohz-cpumask racelessly with the nohz updates
-                * without adding synchronization overhead to nohz. Leave this
-                * use-case out for now.
-                */
-               return 0;
-       }
-
-       buf = channel_get_ring_buffer(config, chan, cpu);
-       switch (val) {
-       case TICK_NOHZ_FLUSH:
-               raw_spin_lock(&buf->raw_tick_nohz_spinlock);
-               if (config->wakeup == RING_BUFFER_WAKEUP_BY_TIMER
-                   && chan->read_timer_interval
-                   && atomic_long_read(&buf->active_readers)
-                   && (lib_ring_buffer_poll_deliver(config, buf, chan)
-                       || lib_ring_buffer_pending_data(config, buf, chan))) {
-                       wake_up_interruptible(&buf->read_wait);
-                       wake_up_interruptible(&chan->read_wait);
-               }
-               if (chan->switch_timer_interval)
-                       lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
-               raw_spin_unlock(&buf->raw_tick_nohz_spinlock);
-               break;
-       case TICK_NOHZ_STOP:
-               spin_lock(&__get_cpu_var(ring_buffer_nohz_lock));
-               lib_ring_buffer_stop_switch_timer(buf);
-               lib_ring_buffer_stop_read_timer(buf);
-               spin_unlock(&__get_cpu_var(ring_buffer_nohz_lock));
-               break;
-       case TICK_NOHZ_RESTART:
-               spin_lock(&__get_cpu_var(ring_buffer_nohz_lock));
-               lib_ring_buffer_start_read_timer(buf);
-               lib_ring_buffer_start_switch_timer(buf);
-               spin_unlock(&__get_cpu_var(ring_buffer_nohz_lock));
-               break;
-       }
-
-       return 0;
-}
-
-void notrace lib_ring_buffer_tick_nohz_flush(void)
-{
-       atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_FLUSH,
-                                  NULL);
-}
-
-void notrace lib_ring_buffer_tick_nohz_stop(void)
-{
-       atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_STOP,
-                                  NULL);
-}
-
-void notrace lib_ring_buffer_tick_nohz_restart(void)
-{
-       atomic_notifier_call_chain(&tick_nohz_notifier, TICK_NOHZ_RESTART,
-                                  NULL);
-}
-#endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */
-
-/*
- * Holds CPU hotplug.
- */
-static void channel_unregister_notifiers(struct channel *chan)
+static void channel_unregister_notifiers(struct channel *chan,
+                          struct lttng_ust_shm_handle *handle)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        int cpu;
 
-       channel_iterator_unregister_notifiers(chan);
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#ifdef CONFIG_NO_HZ
-               /*
-                * Remove the nohz notifier first, so we are certain we stop
-                * the timers.
-                */
-               atomic_notifier_chain_unregister(&tick_nohz_notifier,
-                                                &chan->tick_nohz_notifier);
-               /*
-                * ring_buffer_nohz_lock will not be needed below, because
-                * we just removed the notifiers, which were the only source of
-                * concurrency.
-                */
-#endif /* CONFIG_NO_HZ */
-#ifdef CONFIG_HOTPLUG_CPU
-               get_online_cpus();
-               chan->cpu_hp_enable = 0;
-               for_each_online_cpu(cpu) {
-                       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
-                                                             cpu);
-                       lib_ring_buffer_stop_switch_timer(buf);
-                       lib_ring_buffer_stop_read_timer(buf);
-               }
-               put_online_cpus();
-               unregister_cpu_notifier(&chan->cpu_hp_notifier);
-#else
                for_each_possible_cpu(cpu) {
-                       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
-                                                             cpu);
-                       lib_ring_buffer_stop_switch_timer(buf);
-                       lib_ring_buffer_stop_read_timer(buf);
+                       struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
+
+                       lib_ring_buffer_stop_switch_timer(buf, handle);
+                       lib_ring_buffer_stop_read_timer(buf, handle);
                }
-#endif
        } else {
-               struct lib_ring_buffer *buf = chan->backend.buf;
+               struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
 
-               lib_ring_buffer_stop_switch_timer(buf);
-               lib_ring_buffer_stop_read_timer(buf);
+               lib_ring_buffer_stop_switch_timer(buf, handle);
+               lib_ring_buffer_stop_read_timer(buf, handle);
        }
-       channel_backend_unregister_notifiers(&chan->backend);
+       //channel_backend_unregister_notifiers(&chan->backend);
 }
 
-static void channel_free(struct channel *chan)
+static void channel_free(struct channel *chan, struct lttng_ust_shm_handle *handle,
+               int shadow)
 {
-       channel_iterator_free(chan);
-       channel_backend_free(&chan->backend);
-       kfree(chan);
+       if (!shadow)
+               channel_backend_free(&chan->backend, handle);
+       /* chan is freed by shm teardown */
+       shm_object_table_destroy(handle->table);
+       free(handle);
 }
 
 /**
  * channel_create - Create channel.
  * @config: ring buffer instance configuration
  * @name: name of the channel
- * @priv: ring buffer client private data
+ * @priv_data: ring buffer client private data area pointer (output)
+ * @priv_data_size: length, in bytes, of the private data area.
+ * @priv_data_init: initialization data for private data.
  * @buf_addr: pointer to the beginning of the preallocated buffer contiguous
  *            address mapping. It is used only by RING_BUFFER_STATIC
  *            configuration. It can be set to NULL for other backends.
@@ -592,103 +430,167 @@ static void channel_free(struct channel *chan)
  * Holds cpu hotplug.
  * Returns NULL on failure.
  */
-struct channel *channel_create(const struct lib_ring_buffer_config *config,
-                  const char *name, void *priv, void *buf_addr,
-                  size_t subbuf_size,
+struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
+                  const char *name,
+                  void **priv_data,
+                  size_t priv_data_align,
+                  size_t priv_data_size,
+                  void *priv_data_init,
+                  void *buf_addr, size_t subbuf_size,
                   size_t num_subbuf, unsigned int switch_timer_interval,
-                  unsigned int read_timer_interval)
+                  unsigned int read_timer_interval,
+                  int **shm_fd, char **shm_path,
+                  int **wait_fd, char **wait_pipe_path, uint64_t **memory_map_size)
 {
        int ret, cpu;
+       size_t shmsize, chansize;
        struct channel *chan;
+       struct lttng_ust_shm_handle *handle;
+       struct shm_object *shmobj;
+       struct shm_ref *ref;
 
        if (lib_ring_buffer_check_config(config, switch_timer_interval,
                                         read_timer_interval))
                return NULL;
 
-       chan = kzalloc(sizeof(struct channel), GFP_KERNEL);
-       if (!chan)
+       handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
+       if (!handle)
                return NULL;
 
-       ret = channel_backend_init(&chan->backend, name, config, priv,
-                                  subbuf_size, num_subbuf);
-       if (ret)
-               goto error;
+       /* Allocate table for channel + per-cpu buffers */
+       handle->table = shm_object_table_create(1 + num_possible_cpus());
+       if (!handle->table)
+               goto error_table_alloc;
 
-       ret = channel_iterator_init(chan);
+       /* Calculate the shm allocation layout */
+       shmsize = sizeof(struct channel);
+       shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_shmp));
+       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+               shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp) * num_possible_cpus();
+       else
+               shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp);
+       chansize = shmsize;
+       shmsize += offset_align(shmsize, priv_data_align);
+       shmsize += priv_data_size;
+
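Each `offset_align()` call above adds just enough padding so the next allocation starts on its natural alignment. For reference, here is a sketch of that helper, assuming power-of-two alignments (the real macro is defined in the lttng-ust align headers, not in this file):

```c
/*
 * Sketch only: padding needed to bring 'off' up to an 'align'-byte
 * boundary, where 'align' is a power of two.
 */
#define offset_align(off, align)	(((align) - (off)) & ((align) - 1))

/* Example: off = 52, align = 8 gives (8 - 52) & 7 = 4; 52 + 4 = 56. */
```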
+       shmobj = shm_object_table_append(handle->table, shmsize);
+       if (!shmobj)
+               goto error_append;
+       /* struct channel is at object 0, offset 0 (hardcoded) */
+       set_shmp(handle->chan, zalloc_shm(shmobj, chansize));
+       assert(handle->chan._ref.index == 0);
+       assert(handle->chan._ref.offset == 0);
+       chan = shmp(handle, handle->chan);
+       if (!chan)
+               goto error_append;
+
+       /* space for private data */
+       if (priv_data_size) {
+               DECLARE_SHMP(void, priv_data_alloc);
+
+               align_shm(shmobj, priv_data_align);
+               chan->priv_data_offset = shmobj->allocated_len;
+               set_shmp(priv_data_alloc, zalloc_shm(shmobj, priv_data_size));
+               if (!shmp(handle, priv_data_alloc))
+                       goto error_append;
+               *priv_data = channel_get_private(chan);
+               memcpy(*priv_data, priv_data_init, priv_data_size);
+       } else {
+               chan->priv_data_offset = -1;
+               *priv_data = NULL;
+       }
+
+       ret = channel_backend_init(&chan->backend, name, config,
+                                  subbuf_size, num_subbuf, handle);
        if (ret)
-               goto error_free_backend;
+               goto error_backend_init;
 
        chan->commit_count_mask = (~0UL >> chan->backend.num_subbuf_order);
-       chan->switch_timer_interval = usecs_to_jiffies(switch_timer_interval);
-       chan->read_timer_interval = usecs_to_jiffies(read_timer_interval);
-       kref_init(&chan->ref);
-       init_waitqueue_head(&chan->read_wait);
-       init_waitqueue_head(&chan->hp_wait);
+       //TODO
+       //chan->switch_timer_interval = usecs_to_jiffies(switch_timer_interval);
+       //chan->read_timer_interval = usecs_to_jiffies(read_timer_interval);
+       //TODO
+       //init_waitqueue_head(&chan->read_wait);
+       //init_waitqueue_head(&chan->hp_wait);
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-#if defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER)
-               /* Only benefit from NO_HZ idle with per-cpu buffers for now. */
-               chan->tick_nohz_notifier.notifier_call =
-                       ring_buffer_tick_nohz_callback;
-               chan->tick_nohz_notifier.priority = ~0U;
-               atomic_notifier_chain_register(&tick_nohz_notifier,
-                                      &chan->tick_nohz_notifier);
-#endif /* defined(CONFIG_NO_HZ) && defined(CONFIG_LIB_RING_BUFFER) */
-
                /*
                 * In case of non-hotplug cpu, if the ring-buffer is allocated
                 * in early initcall, it will not be notified of secondary cpus.
                 * In that case, we need to allocate for all possible cpus.
                 */
-#ifdef CONFIG_HOTPLUG_CPU
-               chan->cpu_hp_notifier.notifier_call =
-                               lib_ring_buffer_cpu_hp_callback;
-               chan->cpu_hp_notifier.priority = 6;
-               register_cpu_notifier(&chan->cpu_hp_notifier);
-
-               get_online_cpus();
-               for_each_online_cpu(cpu) {
-                       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
-                                                              cpu);
-                       spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
-                       lib_ring_buffer_start_switch_timer(buf);
-                       lib_ring_buffer_start_read_timer(buf);
-                       spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
-               }
-               chan->cpu_hp_enable = 1;
-               put_online_cpus();
-#else
                for_each_possible_cpu(cpu) {
-                       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
-                                                             cpu);
-                       spin_lock(&per_cpu(ring_buffer_nohz_lock, cpu));
-                       lib_ring_buffer_start_switch_timer(buf);
-                       lib_ring_buffer_start_read_timer(buf);
-                       spin_unlock(&per_cpu(ring_buffer_nohz_lock, cpu));
+                       struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
+                       lib_ring_buffer_start_switch_timer(buf, handle);
+                       lib_ring_buffer_start_read_timer(buf, handle);
                }
-#endif
        } else {
-               struct lib_ring_buffer *buf = chan->backend.buf;
+               struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
 
-               lib_ring_buffer_start_switch_timer(buf);
-               lib_ring_buffer_start_read_timer(buf);
+               lib_ring_buffer_start_switch_timer(buf, handle);
+               lib_ring_buffer_start_read_timer(buf, handle);
        }
+       ref = &handle->chan._ref;
+       shm_get_object_data(handle, ref, shm_fd, shm_path, wait_fd, wait_pipe_path, memory_map_size);
+       return handle;
+
+error_backend_init:
+error_append:
+       shm_object_table_destroy(handle->table);
+error_table_alloc:
+       free(handle);
+       return NULL;
+}
+
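channel_create() stores the private area as an offset rather than a pointer because the same shm object is mapped at different addresses in each process. Below is a plausible sketch of the channel_get_private() accessor used above; its real definition lives in a header outside this diff:

```c
/* Sketch only: resolve the private data area carved out after struct channel. */
static inline void *channel_get_private(struct channel *chan)
{
	if (chan->priv_data_offset < 0)
		return NULL;	/* no private area was allocated */
	/*
	 * priv_data_offset is relative to the start of the shm object,
	 * and struct channel sits at offset 0 of that object (asserted
	 * in channel_create), so plain pointer arithmetic suffices.
	 */
	return (char *) chan + chan->priv_data_offset;
}
```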
+struct lttng_ust_shm_handle *channel_handle_create(int shm_fd, int wait_fd,
+                                       uint64_t memory_map_size)
+{
+       struct lttng_ust_shm_handle *handle;
+       struct shm_object *object;
 
-       return chan;
+       handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
+       if (!handle)
+               return NULL;
 
-error_free_backend:
-       channel_backend_free(&chan->backend);
-error:
-       kfree(chan);
+       /* Allocate table for channel + per-cpu buffers */
+       handle->table = shm_object_table_create(1 + num_possible_cpus());
+       if (!handle->table)
+               goto error_table_alloc;
+       /* Add channel object */
+       object = shm_object_table_append_shadow(handle->table,
+                       shm_fd, wait_fd, memory_map_size);
+       if (!object)
+               goto error_table_object;
+       /* struct channel is at object 0, offset 0 (hardcoded) */
+       handle->chan._ref.index = 0;
+       handle->chan._ref.offset = 0;
+       return handle;
+
+error_table_object:
+       shm_object_table_destroy(handle->table);
+error_table_alloc:
+       free(handle);
        return NULL;
 }
-EXPORT_SYMBOL_GPL(channel_create);
+
+int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
+               int shm_fd, int wait_fd, uint64_t memory_map_size)
+{
+       struct shm_object *object;
+
+       /* Add stream object */
+       object = shm_object_table_append_shadow(handle->table,
+                       shm_fd, wait_fd, memory_map_size);
+       if (!object)
+               return -1;
+       return 0;
+}
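On the consumer side, these two helpers are meant to be called with the descriptors received over the UNIX socket: once for the channel object, then once per stream. A hedged usage sketch follows; the wrapper function and its parameters are invented for illustration:

```c
/* Sketch: map a channel and its per-cpu streams from received fds. */
struct lttng_ust_shm_handle *map_channel(int chan_shm_fd, int chan_wait_fd,
				uint64_t chan_map_size,
				const int *stream_shm_fd, const int *stream_wait_fd,
				const uint64_t *stream_map_size, int nr_streams)
{
	struct lttng_ust_shm_handle *handle;
	int i;

	handle = channel_handle_create(chan_shm_fd, chan_wait_fd, chan_map_size);
	if (!handle)
		return NULL;
	for (i = 0; i < nr_streams; i++) {
		if (channel_handle_add_stream(handle, stream_shm_fd[i],
				stream_wait_fd[i], stream_map_size[i]) < 0)
			return NULL;	/* caller destroys the handle */
	}
	return handle;
}
```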
 
 static
-void channel_release(struct kref *kref)
+void channel_release(struct channel *chan, struct lttng_ust_shm_handle *handle,
+               int shadow)
 {
-       struct channel *chan = container_of(kref, struct channel, ref);
-       channel_free(chan);
+       channel_free(chan, handle, shadow);
 }
 
 /**
@@ -696,108 +598,90 @@ void channel_release(struct kref *kref)
  * @chan: channel to destroy
  *
  * Holds cpu hotplug.
- * Call "destroy" callback, finalize channels, wait for readers to release their
- * reference, then destroy ring buffer data. Note that when readers have
- * completed data consumption of finalized channels, get_subbuf() will return
- * -ENODATA. They should release their handle at that point.
- * Returns the private data pointer.
+ * Call "destroy" callback, finalize channels, decrement the channel
+ * reference count. Note that when readers have completed data
+ * consumption of finalized channels, get_subbuf() will return -ENODATA.
+ * They should release their handle at that point.
  */
-void *channel_destroy(struct channel *chan)
+void channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
+               int shadow)
 {
-       int cpu;
-       const struct lib_ring_buffer_config *config = chan->backend.config;
-       void *priv;
+       if (shadow) {
+               channel_release(chan, handle, shadow);
+               return;
+       }
 
-       channel_unregister_notifiers(chan);
+       channel_unregister_notifiers(chan, handle);
 
-       if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-               /*
-                * No need to hold cpu hotplug, because all notifiers have been
-                * unregistered.
-                */
-               for_each_channel_cpu(cpu, chan) {
-                       struct lib_ring_buffer *buf = per_cpu_ptr(chan->backend.buf,
-                                                             cpu);
-
-                       if (config->cb.buffer_finalize)
-                               config->cb.buffer_finalize(buf,
-                                                          chan->backend.priv,
-                                                          cpu);
-                       if (buf->backend.allocated)
-                               lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH);
-                       /*
-                        * Perform flush before writing to finalized.
-                        */
-                       smp_wmb();
-                       ACCESS_ONCE(buf->finalized) = 1;
-                       wake_up_interruptible(&buf->read_wait);
-               }
-       } else {
-               struct lib_ring_buffer *buf = chan->backend.buf;
+       /*
+        * Note: the consumer takes care of finalizing and switching the
+        * buffers.
+        */
 
-               if (config->cb.buffer_finalize)
-                       config->cb.buffer_finalize(buf, chan->backend.priv, -1);
-               if (buf->backend.allocated)
-                       lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH);
-               /*
-                * Perform flush before writing to finalized.
-                */
-               smp_wmb();
-               ACCESS_ONCE(buf->finalized) = 1;
-               wake_up_interruptible(&buf->read_wait);
-       }
-       ACCESS_ONCE(chan->finalized) = 1;
-       wake_up_interruptible(&chan->hp_wait);
-       wake_up_interruptible(&chan->read_wait);
-       kref_put(&chan->ref, channel_release);
-       priv = chan->backend.priv;
-       return priv;
+       /*
+        * sessiond/consumer are keeping a reference on the shm file
+        * descriptor directly. No need to refcount.
+        */
+       channel_release(chan, handle, shadow);
+       return;
 }
-EXPORT_SYMBOL_GPL(channel_destroy);
 
-struct lib_ring_buffer *channel_get_ring_buffer(
-                                       const struct lib_ring_buffer_config *config,
-                                       struct channel *chan, int cpu)
+struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
+                                       const struct lttng_ust_lib_ring_buffer_config *config,
+                                       struct channel *chan, int cpu,
+                                       struct lttng_ust_shm_handle *handle,
+                                       int **shm_fd, char **shm_path,
+                                       int **wait_fd, char **wait_pipe_path,
+                                       uint64_t **memory_map_size)
 {
-       if (config->alloc == RING_BUFFER_ALLOC_GLOBAL)
-               return chan->backend.buf;
-       else
-               return per_cpu_ptr(chan->backend.buf, cpu);
+       struct shm_ref *ref;
+
+       if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
+               ref = &chan->backend.buf[0].shmp._ref;
+               shm_get_object_data(handle, ref, shm_fd, shm_path,
+                                   wait_fd, wait_pipe_path, memory_map_size);
+               return shmp(handle, chan->backend.buf[0].shmp);
+       } else {
+               if (cpu >= num_possible_cpus())
+                       return NULL;
+               ref = &chan->backend.buf[cpu].shmp._ref;
+               shm_get_object_data(handle, ref, shm_fd, shm_path,
+                                   wait_fd, wait_pipe_path, memory_map_size);
+               return shmp(handle, chan->backend.buf[cpu].shmp);
+       }
 }
-EXPORT_SYMBOL_GPL(channel_get_ring_buffer);
 
-int lib_ring_buffer_open_read(struct lib_ring_buffer *buf)
+int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
+                             struct lttng_ust_shm_handle *handle,
+                             int shadow)
 {
-       struct channel *chan = buf->backend.chan;
-
-       if (!atomic_long_add_unless(&buf->active_readers, 1, 1))
+       if (shadow) {
+               if (uatomic_cmpxchg(&buf->active_shadow_readers, 0, 1) != 0)
+                       return -EBUSY;
+               cmm_smp_mb();
+               return 0;
+       }
+       if (uatomic_cmpxchg(&buf->active_readers, 0, 1) != 0)
                return -EBUSY;
-       kref_get(&chan->ref);
-       smp_mb__after_atomic_inc();
+       cmm_smp_mb();
        return 0;
 }
-EXPORT_SYMBOL_GPL(lib_ring_buffer_open_read);
 
-void lib_ring_buffer_release_read(struct lib_ring_buffer *buf)
+void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
+                                 struct lttng_ust_shm_handle *handle,
+                                 int shadow)
 {
-       struct channel *chan = buf->backend.chan;
-
-       CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
-       smp_mb__before_atomic_dec();
-       atomic_long_dec(&buf->active_readers);
-       kref_put(&chan->ref, channel_release);
-}
-EXPORT_SYMBOL_GPL(lib_ring_buffer_release_read);
+       struct channel *chan = shmp(handle, buf->backend.chan);
 
-/*
- * Promote compiler barrier to a smp_mb().
- * For the specific ring buffer case, this IPI call should be removed if the
- * architecture does not reorder writes.  This should eventually be provided by
- * a separate architecture-specific infrastructure.
- */
-static void remote_mb(void *info)
-{
-       smp_mb();
+       if (shadow) {
+               CHAN_WARN_ON(chan, uatomic_read(&buf->active_shadow_readers) != 1);
+               cmm_smp_mb();
+               uatomic_dec(&buf->active_shadow_readers);
+               return;
+       }
+       CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
+       cmm_smp_mb();
+       uatomic_dec(&buf->active_readers);
 }
 
 /**
@@ -808,24 +692,23 @@ static void remote_mb(void *info)
  *
  * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
  * data to read at consumed position, or 0 if the get operation succeeds.
- * Busy-loop trying to get data if the tick_nohz sequence lock is held.
  */
 
-int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
-                            unsigned long *consumed, unsigned long *produced)
+int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
+                            unsigned long *consumed, unsigned long *produced,
+                            struct lttng_ust_shm_handle *handle)
 {
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       struct channel *chan = shmp(handle, buf->backend.chan);
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long consumed_cur, write_offset;
        int finalized;
 
-retry:
-       finalized = ACCESS_ONCE(buf->finalized);
+       finalized = CMM_ACCESS_ONCE(buf->finalized);
        /*
         * Read finalized before counters.
         */
-       smp_rmb();
-       consumed_cur = atomic_long_read(&buf->consumed);
+       cmm_smp_rmb();
+       consumed_cur = uatomic_read(&buf->consumed);
        /*
         * No need to issue a memory barrier between consumed count read and
         * write offset read, because consumed count can only change
@@ -856,38 +739,36 @@ nodata:
         */
        if (finalized)
                return -ENODATA;
-       else if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
-               goto retry;
        else
                return -EAGAIN;
 }
-EXPORT_SYMBOL_GPL(lib_ring_buffer_snapshot);
 
 /**
  * lib_ring_buffer_put_snapshot - move consumed counter forward
  * @buf: ring buffer
  * @consumed_new: new consumed count value
  */
-void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
-                                  unsigned long consumed_new)
+void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
+                                  unsigned long consumed_new,
+                                  struct lttng_ust_shm_handle *handle)
 {
-       struct lib_ring_buffer_backend *bufb = &buf->backend;
-       struct channel *chan = bufb->chan;
+       struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
+       struct channel *chan = shmp(handle, bufb->chan);
        unsigned long consumed;
 
-       CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
+       CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1
+                       && uatomic_read(&buf->active_shadow_readers) != 1);
 
        /*
         * Only push the consumed value forward.
         * If the consumed cmpxchg fails, this is because we have been pushed by
         * the writer in flight recorder mode.
         */
-       consumed = atomic_long_read(&buf->consumed);
+       consumed = uatomic_read(&buf->consumed);
        while ((long) consumed - (long) consumed_new < 0)
-               consumed = atomic_long_cmpxchg(&buf->consumed, consumed,
-                                              consumed_new);
+               consumed = uatomic_cmpxchg(&buf->consumed, consumed,
+                                          consumed_new);
 }
-EXPORT_SYMBOL_GPL(lib_ring_buffer_move_consumer);
 
 /**
  * lib_ring_buffer_get_subbuf - get exclusive access to subbuffer for reading
@@ -896,88 +777,37 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_move_consumer);
  *
  * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
  * data to read at consumed position, or 0 if the get operation succeeds.
- * Busy-loop trying to get data if the tick_nohz sequence lock is held.
  */
-int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
-                              unsigned long consumed)
+int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+                              unsigned long consumed,
+                              struct lttng_ust_shm_handle *handle)
 {
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       struct channel *chan = shmp(handle, buf->backend.chan);
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
        int ret;
        int finalized;
 
 retry:
-       finalized = ACCESS_ONCE(buf->finalized);
+       finalized = CMM_ACCESS_ONCE(buf->finalized);
        /*
         * Read finalized before counters.
         */
-       smp_rmb();
-       consumed_cur = atomic_long_read(&buf->consumed);
+       cmm_smp_rmb();
+       consumed_cur = uatomic_read(&buf->consumed);
        consumed_idx = subbuf_index(consumed, chan);
-       commit_count = v_read(config, &buf->commit_cold[consumed_idx].cc_sb);
+       commit_count = v_read(config, &shmp_index(handle, buf->commit_cold, consumed_idx)->cc_sb);
        /*
         * Make sure we read the commit count before reading the buffer
         * data and the write offset. Correct consumed offset ordering
         * wrt commit count is ensured by the use of cmpxchg to update
         * the consumed offset.
-        * smp_call_function_single can fail if the remote CPU is offline,
-        * this is OK because then there is no wmb to execute there.
-        * If our thread is executing on the same CPU as the on the buffers
-        * belongs to, we don't have to synchronize it at all. If we are
-        * migrated, the scheduler will take care of the memory barriers.
-        * Normally, smp_call_function_single() should ensure program order when
-        * executing the remote function, which implies that it surrounds the
-        * function execution with :
-        * smp_mb()
-        * send IPI
-        * csd_lock_wait
-        *                recv IPI
-        *                smp_mb()
-        *                exec. function
-        *                smp_mb()
-        *                csd unlock
-        * smp_mb()
-        *
-        * However, smp_call_function_single() does not seem to clearly execute
-        * such barriers. It depends on spinlock semantic to provide the barrier
-        * before executing the IPI and, when busy-looping, csd_lock_wait only
-        * executes smp_mb() when it has to wait for the other CPU.
-        *
-        * I don't trust this code. Therefore, let's add the smp_mb() sequence
-        * required ourself, even if duplicated. It has no performance impact
-        * anyway.
-        *
-        * smp_mb() is needed because smp_rmb() and smp_wmb() only order read vs
-        * read and write vs write. They do not ensure core synchronization. We
-        * really have to ensure total order between the 3 barriers running on
-        * the 2 CPUs.
         */
-       if (config->ipi == RING_BUFFER_IPI_BARRIER) {
-               if (config->sync == RING_BUFFER_SYNC_PER_CPU
-                   && config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
-                       if (raw_smp_processor_id() != buf->backend.cpu) {
-                               /* Total order with IPI handler smp_mb() */
-                               smp_mb();
-                               smp_call_function_single(buf->backend.cpu,
-                                                        remote_mb, NULL, 1);
-                               /* Total order with IPI handler smp_mb() */
-                               smp_mb();
-                       }
-               } else {
-                       /* Total order with IPI handler smp_mb() */
-                       smp_mb();
-                       smp_call_function(remote_mb, NULL, 1);
-                       /* Total order with IPI handler smp_mb() */
-                       smp_mb();
-               }
-       } else {
-               /*
-                * Local rmb to match the remote wmb to read the commit count
-                * before the buffer data and the write offset.
-                */
-               smp_rmb();
-       }
+       /*
+        * Local rmb to match the remote wmb to read the commit count
+        * before the buffer data and the write offset.
+        */
+       cmm_smp_rmb();
 
        write_offset = v_read(config, &buf->offset);
 
@@ -1016,7 +846,8 @@ retry:
         * looking for matches the one contained in the subbuffer id.
         */
        ret = update_read_sb_index(config, &buf->backend, &chan->backend,
-                                  consumed_idx, buf_trunc_val(consumed, chan));
+                                  consumed_idx, buf_trunc_val(consumed, chan),
+                                  handle);
        if (ret)
                goto retry;
        subbuffer_id_clear_noref(config, &buf->backend.buf_rsb.id);
@@ -1033,25 +864,24 @@ nodata:
         */
        if (finalized)
                return -ENODATA;
-       else if (raw_spin_is_locked(&buf->raw_tick_nohz_spinlock))
-               goto retry;
        else
                return -EAGAIN;
 }
-EXPORT_SYMBOL_GPL(lib_ring_buffer_get_subbuf);
 
 /**
  * lib_ring_buffer_put_subbuf - release exclusive subbuffer access
  * @buf: ring buffer
  */
-void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf)
+void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+                               struct lttng_ust_shm_handle *handle)
 {
-       struct lib_ring_buffer_backend *bufb = &buf->backend;
-       struct channel *chan = bufb->chan;
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
+       struct channel *chan = shmp(handle, bufb->chan);
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long read_sb_bindex, consumed_idx, consumed;
 
-       CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
+       CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1
+                       && uatomic_read(&buf->active_shadow_readers) != 1);
 
        if (!buf->get_subbuf) {
                /*
@@ -1072,9 +902,9 @@ void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf)
         */
        read_sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
        v_add(config, v_read(config,
-                            &bufb->array[read_sb_bindex]->records_unread),
+                            &shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread),
              &bufb->records_read);
-       v_set(config, &bufb->array[read_sb_bindex]->records_unread, 0);
+       v_set(config, &shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread, 0);
        CHAN_WARN_ON(chan, config->mode == RING_BUFFER_OVERWRITE
                     && subbuffer_id_is_noref(config, bufb->buf_rsb.id));
        subbuffer_id_set_noref(config, &bufb->buf_rsb.id);
@@ -1089,34 +919,34 @@ void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf)
         */
        consumed_idx = subbuf_index(consumed, chan);
        update_read_sb_index(config, &buf->backend, &chan->backend,
-                            consumed_idx, buf_trunc_val(consumed, chan));
+                            consumed_idx, buf_trunc_val(consumed, chan),
+                            handle);
        /*
         * update_read_sb_index return value ignored. Don't exchange sub-buffer
         * if the writer concurrently updated it.
         */
 }
-EXPORT_SYMBOL_GPL(lib_ring_buffer_put_subbuf);
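The shmp()/shmp_index() accessors that pepper this patch replace plain pointer dereferences: the ring buffer now lives in POSIX shared memory mapped at different virtual addresses in the tracer and the consumer, so structures store mapping-relative references that each process translates on access. A deliberately simplified model of the idea (the real shm.h resolves references through an object table with bounds checking):

	#include <stddef.h>

	struct shm_ref_model {		/* stored in shared memory: no raw pointers */
		size_t offset;		/* offset from the start of the mapping */
	};

	struct shm_handle_model {	/* per-process view of the mapping */
		char *base;		/* address mmap() returned in this process */
	};

	static inline void *shmp_model(struct shm_handle_model *handle,
				       struct shm_ref_model ref)
	{
		/* The result is valid only in the process owning @handle. */
		return handle->base + ref.offset;
	}
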
 
 /*
  * cons_offset is an iterator on all subbuffer offsets between the reader
  * position and the writer position (inclusive).
  */
 static
-void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
+void lib_ring_buffer_print_subbuffer_errors(struct lttng_ust_lib_ring_buffer *buf,
                                            struct channel *chan,
                                            unsigned long cons_offset,
-                                           int cpu)
+                                           int cpu,
+                                           struct lttng_ust_shm_handle *handle)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long cons_idx, commit_count, commit_count_sb;
 
        cons_idx = subbuf_index(cons_offset, chan);
-       commit_count = v_read(config, &buf->commit_hot[cons_idx].cc);
-       commit_count_sb = v_read(config, &buf->commit_cold[cons_idx].cc_sb);
+       commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, cons_idx)->cc);
+       commit_count_sb = v_read(config, &shmp_index(handle, buf->commit_cold, cons_idx)->cc_sb);
 
        if (subbuf_offset(commit_count, chan) != 0)
-               printk(KERN_WARNING
-                      "ring buffer %s, cpu %d: "
+               DBG("ring buffer %s, cpu %d: "
                       "commit count in subbuffer %lu,\n"
                       "expecting multiples of %lu bytes\n"
                       "  [ %lu bytes committed, %lu bytes reader-visible ]\n",
@@ -1124,73 +954,74 @@ void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
                       chan->backend.subbuf_size,
                       commit_count, commit_count_sb);
 
-       printk(KERN_DEBUG "ring buffer: %s, cpu %d: %lu bytes committed\n",
+       DBG("ring buffer: %s, cpu %d: %lu bytes committed\n",
               chan->backend.name, cpu, commit_count);
 }
 
 static
-void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
+void lib_ring_buffer_print_buffer_errors(struct lttng_ust_lib_ring_buffer *buf,
                                         struct channel *chan,
-                                        void *priv, int cpu)
+                                        void *priv, int cpu,
+                                        struct lttng_ust_shm_handle *handle)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long write_offset, cons_offset;
 
-       /*
-        * Can be called in the error path of allocation when
-        * trans_channel_data is not yet set.
-        */
-       if (!chan)
-               return;
        /*
         * No need to order commit_count, write_offset and cons_offset reads
         * because we execute at teardown when no more writer nor reader
         * references are left.
         */
        write_offset = v_read(config, &buf->offset);
-       cons_offset = atomic_long_read(&buf->consumed);
+       cons_offset = uatomic_read(&buf->consumed);
        if (write_offset != cons_offset)
-               printk(KERN_WARNING
-                      "ring buffer %s, cpu %d: "
+               DBG("ring buffer %s, cpu %d: "
                       "non-consumed data\n"
                       "  [ %lu bytes written, %lu bytes read ]\n",
                       chan->backend.name, cpu, write_offset, cons_offset);
 
-       for (cons_offset = atomic_long_read(&buf->consumed);
+       for (cons_offset = uatomic_read(&buf->consumed);
             (long) (subbuf_trunc((unsigned long) v_read(config, &buf->offset),
                                  chan)
                     - cons_offset) > 0;
             cons_offset = subbuf_align(cons_offset, chan))
                lib_ring_buffer_print_subbuffer_errors(buf, chan, cons_offset,
-                                                      cpu);
+                                                      cpu, handle);
 }
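The loop above walks sub-buffer boundaries with subbuf_trunc()/subbuf_align(); the same power-of-two arithmetic underlies subbuf_index() and subbuf_offset() throughout this file. A self-contained sketch with a hypothetical 4 sub-buffers x 4 KiB layout (the real helpers take the channel and use its stored size orders):

	#include <stdio.h>

	#define SUBBUF_SIZE	4096UL	/* must be a power of two */
	#define NUM_SUBBUF	4UL	/* must be a power of two */
	#define BUF_SIZE	(SUBBUF_SIZE * NUM_SUBBUF)

	/* Simplified stand-ins for the channel-based helpers. */
	#define sb_offset(off)	((off) & (SUBBUF_SIZE - 1))
	#define sb_index(off)	(((off) & (BUF_SIZE - 1)) / SUBBUF_SIZE)
	#define sb_align(off)	(((off) + SUBBUF_SIZE) & ~(SUBBUF_SIZE - 1))
	#define sb_trunc(off)	((off) & ~(SUBBUF_SIZE - 1))

	int main(void)
	{
		unsigned long off = 5000;	/* 904 bytes into sub-buffer 1 */

		printf("index=%lu offset=%lu align=%lu trunc=%lu\n",
		       sb_index(off), sb_offset(off),
		       sb_align(off), sb_trunc(off));
		return 0;	/* prints: index=1 offset=904 align=8192 trunc=4096 */
	}
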
 
 static
 void lib_ring_buffer_print_errors(struct channel *chan,
-                                 struct lib_ring_buffer *buf, int cpu)
+                                 struct lttng_ust_lib_ring_buffer *buf, int cpu,
+                                 struct lttng_ust_shm_handle *handle)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
-       void *priv = chan->backend.priv;
-
-       printk(KERN_DEBUG "ring buffer %s, cpu %d: %lu records written, "
-                         "%lu records overrun\n",
-                         chan->backend.name, cpu,
-                         v_read(config, &buf->records_count),
-                         v_read(config, &buf->records_overrun));
-
-       if (v_read(config, &buf->records_lost_full)
-           || v_read(config, &buf->records_lost_wrap)
-           || v_read(config, &buf->records_lost_big))
-               printk(KERN_WARNING
-                      "ring buffer %s, cpu %d: records were lost. Caused by:\n"
-                      "  [ %lu buffer full, %lu nest buffer wrap-around, "
-                      "%lu event too big ]\n",
-                      chan->backend.name, cpu,
-                      v_read(config, &buf->records_lost_full),
-                      v_read(config, &buf->records_lost_wrap),
-                      v_read(config, &buf->records_lost_big));
-
-       lib_ring_buffer_print_buffer_errors(buf, chan, priv, cpu);
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+       void *priv = channel_get_private(chan);
+
+       if (!strcmp(chan->backend.name, "relay-metadata-mmap")) {
+               DBG("ring buffer %s: %lu records written, "
+                       "%lu records overrun\n",
+                       chan->backend.name,
+                       v_read(config, &buf->records_count),
+                       v_read(config, &buf->records_overrun));
+       } else {
+               DBG("ring buffer %s, cpu %d: %lu records written, "
+                       "%lu records overrun\n",
+                       chan->backend.name, cpu,
+                       v_read(config, &buf->records_count),
+                       v_read(config, &buf->records_overrun));
+
+               if (v_read(config, &buf->records_lost_full)
+                   || v_read(config, &buf->records_lost_wrap)
+                   || v_read(config, &buf->records_lost_big))
+                       DBG("ring buffer %s, cpu %d: records were lost. Caused by:\n"
+                               "  [ %lu buffer full, %lu nested buffer wrap-around, "
+                               "%lu event too big ]\n",
+                               chan->backend.name, cpu,
+                               v_read(config, &buf->records_lost_full),
+                               v_read(config, &buf->records_lost_wrap),
+                               v_read(config, &buf->records_lost_big));
+       }
+       lib_ring_buffer_print_buffer_errors(buf, chan, priv, cpu, handle);
 }
 
 /*
@@ -1199,39 +1030,33 @@ void lib_ring_buffer_print_errors(struct channel *chan,
  * Only executed when the buffer is finalized, in SWITCH_FLUSH.
  */
 static
-void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_old_start(struct lttng_ust_lib_ring_buffer *buf,
                                      struct channel *chan,
                                      struct switch_offsets *offsets,
-                                     u64 tsc)
+                                     uint64_t tsc,
+                                     struct lttng_ust_shm_handle *handle)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long oldidx = subbuf_index(offsets->old, chan);
        unsigned long commit_count;
 
-       config->cb.buffer_begin(buf, tsc, oldidx);
+       config->cb.buffer_begin(buf, tsc, oldidx, handle);
 
        /*
         * Order all writes to buffer before the commit count update that will
         * determine that the subbuffer is full.
         */
-       if (config->ipi == RING_BUFFER_IPI_BARRIER) {
-               /*
-                * Must write slot data before incrementing commit count.  This
-                * compiler barrier is upgraded into a smp_mb() by the IPI sent
-                * by get_subbuf().
-                */
-               barrier();
-       } else
-               smp_wmb();
+       cmm_smp_wmb();
        v_add(config, config->cb.subbuffer_header_size(),
-             &buf->commit_hot[oldidx].cc);
-       commit_count = v_read(config, &buf->commit_hot[oldidx].cc);
+             &shmp_index(handle, buf->commit_hot, oldidx)->cc);
+       commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
        /* Check if the written buffer has to be delivered */
        lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
-                                     commit_count, oldidx);
+                                     commit_count, oldidx, handle);
        lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
                                             offsets->old, commit_count,
-                                            config->cb.subbuffer_header_size());
+                                            config->cb.subbuffer_header_size(),
+                                            handle);
 }
 
 /*
@@ -1243,39 +1068,33 @@ void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
  * subbuffer.
  */
 static
-void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_old_end(struct lttng_ust_lib_ring_buffer *buf,
                                    struct channel *chan,
                                    struct switch_offsets *offsets,
-                                   u64 tsc)
+                                   uint64_t tsc,
+                                   struct lttng_ust_shm_handle *handle)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
        unsigned long commit_count, padding_size, data_size;
 
        data_size = subbuf_offset(offsets->old - 1, chan) + 1;
        padding_size = chan->backend.subbuf_size - data_size;
-       subbuffer_set_data_size(config, &buf->backend, oldidx, data_size);
+       subbuffer_set_data_size(config, &buf->backend, oldidx, data_size,
+                               handle);
 
        /*
         * Order all writes to buffer before the commit count update that will
         * determine that the subbuffer is full.
         */
-       if (config->ipi == RING_BUFFER_IPI_BARRIER) {
-               /*
-                * Must write slot data before incrementing commit count.  This
-                * compiler barrier is upgraded into a smp_mb() by the IPI sent
-                * by get_subbuf().
-                */
-               barrier();
-       } else
-               smp_wmb();
-       v_add(config, padding_size, &buf->commit_hot[oldidx].cc);
-       commit_count = v_read(config, &buf->commit_hot[oldidx].cc);
+       cmm_smp_wmb();
+       v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
+       commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
        lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
-                                     commit_count, oldidx);
+                                     commit_count, oldidx, handle);
        lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
                                             offsets->old, commit_count,
-                                            padding_size);
+                                            padding_size, handle);
 }
 
 /*
@@ -1286,39 +1105,33 @@ void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
  * that this code is executed before the deliver of this sub-buffer.
  */
 static
-void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_new_start(struct lttng_ust_lib_ring_buffer *buf,
                                      struct channel *chan,
                                      struct switch_offsets *offsets,
-                                     u64 tsc)
+                                     uint64_t tsc,
+                                     struct lttng_ust_shm_handle *handle)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long beginidx = subbuf_index(offsets->begin, chan);
        unsigned long commit_count;
 
-       config->cb.buffer_begin(buf, tsc, beginidx);
+       config->cb.buffer_begin(buf, tsc, beginidx, handle);
 
        /*
         * Order all writes to buffer before the commit count update that will
         * determine that the subbuffer is full.
         */
-       if (config->ipi == RING_BUFFER_IPI_BARRIER) {
-               /*
-                * Must write slot data before incrementing commit count.  This
-                * compiler barrier is upgraded into a smp_mb() by the IPI sent
-                * by get_subbuf().
-                */
-               barrier();
-       } else
-               smp_wmb();
+       cmm_smp_wmb();
        v_add(config, config->cb.subbuffer_header_size(),
-             &buf->commit_hot[beginidx].cc);
-       commit_count = v_read(config, &buf->commit_hot[beginidx].cc);
+             &shmp_index(handle, buf->commit_hot, beginidx)->cc);
+       commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, beginidx)->cc);
        /* Check if the written buffer has to be delivered */
        lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
-                                     commit_count, beginidx);
+                                     commit_count, beginidx, handle);
        lib_ring_buffer_write_commit_counter(config, buf, chan, beginidx,
                                             offsets->begin, commit_count,
-                                            config->cb.subbuffer_header_size());
+                                            config->cb.subbuffer_header_size(),
+                                            handle);
 }
 
 /*
@@ -1328,39 +1141,33 @@ void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
  * have to do the delivery themselves.
  */
 static
-void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
-                                           struct channel *chan,
-                                           struct switch_offsets *offsets,
-                                           u64 tsc)
+void lib_ring_buffer_switch_new_end(struct lttng_ust_lib_ring_buffer *buf,
+                                   struct channel *chan,
+                                   struct switch_offsets *offsets,
+                                   uint64_t tsc,
+                                   struct lttng_ust_shm_handle *handle)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long endidx = subbuf_index(offsets->end - 1, chan);
        unsigned long commit_count, padding_size, data_size;
 
        data_size = subbuf_offset(offsets->end - 1, chan) + 1;
        padding_size = chan->backend.subbuf_size - data_size;
-       subbuffer_set_data_size(config, &buf->backend, endidx, data_size);
+       subbuffer_set_data_size(config, &buf->backend, endidx, data_size,
+                               handle);
 
        /*
         * Order all writes to buffer before the commit count update that will
         * determine that the subbuffer is full.
         */
-       if (config->ipi == RING_BUFFER_IPI_BARRIER) {
-               /*
-                * Must write slot data before incrementing commit count.  This
-                * compiler barrier is upgraded into a smp_mb() by the IPI sent
-                * by get_subbuf().
-                */
-               barrier();
-       } else
-               smp_wmb();
-       v_add(config, padding_size, &buf->commit_hot[endidx].cc);
-       commit_count = v_read(config, &buf->commit_hot[endidx].cc);
+       cmm_smp_wmb();
+       v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, endidx)->cc);
+       commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
        lib_ring_buffer_check_deliver(config, buf, chan, offsets->end - 1,
-                                 commit_count, endidx);
+                                 commit_count, endidx, handle);
        lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
                                             offsets->end, commit_count,
-                                            padding_size);
+                                            padding_size, handle);
 }
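The data_size/padding_size computation above (and in switch_old_end) simply fills the gap up to the sub-buffer boundary, so the commit count still advances by a full sub-buffer multiple. Continuing the hypothetical 4 KiB layout from the earlier sketch:

	/* If offsets->end == 5000, the record ended 904 bytes into
	 * sub-buffer 1 (hypothetical 4 KiB sub-buffers):
	 *
	 *   data_size    = sb_offset(5000 - 1) + 1 = 903 + 1 = 904
	 *   padding_size = 4096 - 904              = 3192
	 *
	 * Committing the 3192 padding bytes lets the commit count reach
	 * an exact sub-buffer multiple, which is the completeness
	 * condition lib_ring_buffer_check_deliver() looks for.
	 */
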
 
 /*
@@ -1370,12 +1177,12 @@ void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
  */
 static
 int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
-                                   struct lib_ring_buffer *buf,
+                                   struct lttng_ust_lib_ring_buffer *buf,
                                    struct channel *chan,
                                    struct switch_offsets *offsets,
-                                   u64 *tsc)
+                                   uint64_t *tsc)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long off;
 
        offsets->begin = v_read(config, &buf->offset);
@@ -1396,13 +1203,13 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
         * The next record that reserves space will be responsible for
         * populating the following subbuffer header. We choose not to populate
         * the next subbuffer header here because we want to be able to use
-        * SWITCH_ACTIVE for periodical buffer flush and CPU tick_nohz stop
-        * buffer flush, which must guarantee that all the buffer content
-        * (records and header timestamps) are visible to the reader. This is
-        * required for quiescence guarantees for the fusion merge.
+        * SWITCH_ACTIVE for periodic buffer flush, which must
+        * guarantee that all the buffer content (records and header
+        * timestamps) is visible to the reader. This is required for
+        * quiescence guarantees for the fusion merge.
         */
        if (mode == SWITCH_FLUSH || off > 0) {
-               if (unlikely(off == 0)) {
+               if (caa_unlikely(off == 0)) {
                        /*
                         * The client does not save any header information.
                         * Don't switch empty subbuffer on finalize, because it
@@ -1431,13 +1238,14 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
  * operations, this function must be called from the CPU which owns the buffer
  * for an ACTIVE flush.
  */
-void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode)
+void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
+                                struct lttng_ust_shm_handle *handle)
 {
-       struct channel *chan = buf->backend.chan;
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       struct channel *chan = shmp(handle, buf->backend.chan);
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        struct switch_offsets offsets;
        unsigned long oldidx;
-       u64 tsc;
+       uint64_t tsc;
 
        offsets.size = 0;
 
@@ -1465,22 +1273,21 @@ void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode m
        lib_ring_buffer_reserve_push_reader(buf, chan, offsets.old);
 
        oldidx = subbuf_index(offsets.old, chan);
-       lib_ring_buffer_clear_noref(config, &buf->backend, oldidx);
+       lib_ring_buffer_clear_noref(config, &buf->backend, oldidx, handle);
 
        /*
         * May need to populate header start on SWITCH_FLUSH.
         */
        if (offsets.switch_old_start) {
-               lib_ring_buffer_switch_old_start(buf, chan, &offsets, tsc);
+               lib_ring_buffer_switch_old_start(buf, chan, &offsets, tsc, handle);
                offsets.old += config->cb.subbuffer_header_size();
        }
 
        /*
         * Switch old subbuffer.
         */
-       lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc);
+       lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc, handle);
 }
-EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_slow);
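lib_ring_buffer_switch_slow() is what a periodic flush timer or a teardown path calls to push out a partially filled sub-buffer. A sketch of the two call shapes, assuming buf and handle were obtained from the channel as in lib_ring_buffer_reserve_slow() below:

	/* Periodic flush, from the CPU owning the buffer: make the data
	 * written so far visible to the consumer without waiting for
	 * the sub-buffer to fill. */
	lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE, handle);

	/* Finalize/teardown: SWITCH_FLUSH may also push out a header-only
	 * sub-buffer (see lib_ring_buffer_switch_old_start() above). */
	lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH, handle);
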
 
 /*
  * Returns :
@@ -1490,12 +1297,13 @@ EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_slow);
  * -EIO if data cannot be written into the buffer for any other reason.
  */
 static
-int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
+int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
                                     struct channel *chan,
                                     struct switch_offsets *offsets,
-                                    struct lib_ring_buffer_ctx *ctx)
+                                    struct lttng_ust_lib_ring_buffer_ctx *ctx)
 {
-       const struct lib_ring_buffer_config *config = chan->backend.config;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+       struct lttng_ust_shm_handle *handle = ctx->handle;
        unsigned long reserve_commit_diff;
 
        offsets->begin = v_read(config, &buf->offset);
@@ -1512,7 +1320,7 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
        if (last_tsc_overflow(config, buf, ctx->tsc))
                ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 
-       if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
+       if (caa_unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
                offsets->switch_new_start = 1;          /* For offsets->begin */
        } else {
                offsets->size = config->cb.record_header_size(config, chan,
@@ -1523,19 +1331,19 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
                        lib_ring_buffer_align(offsets->begin + offsets->size,
                                              ctx->largest_align)
                        + ctx->data_size;
-               if (unlikely(subbuf_offset(offsets->begin, chan) +
+               if (caa_unlikely(subbuf_offset(offsets->begin, chan) +
                             offsets->size > chan->backend.subbuf_size)) {
                        offsets->switch_old_end = 1;    /* For offsets->old */
                        offsets->switch_new_start = 1;  /* For offsets->begin */
                }
        }
-       if (unlikely(offsets->switch_new_start)) {
+       if (caa_unlikely(offsets->switch_new_start)) {
                unsigned long sb_index;
 
                /*
                 * We are typically not filling the previous buffer completely.
                 */
-               if (likely(offsets->switch_old_end))
+               if (caa_likely(offsets->switch_old_end))
                        offsets->begin = subbuf_align(offsets->begin, chan);
                offsets->begin = offsets->begin
                                 + config->cb.subbuffer_header_size();
@@ -1545,20 +1353,28 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
                  (buf_trunc(offsets->begin, chan)
                   >> chan->backend.num_subbuf_order)
                  - ((unsigned long) v_read(config,
-                                           &buf->commit_cold[sb_index].cc_sb)
+                                           &shmp_index(handle, buf->commit_cold, sb_index)->cc_sb)
                     & chan->commit_count_mask);
-               if (likely(reserve_commit_diff == 0)) {
+               if (caa_likely(reserve_commit_diff == 0)) {
                        /* Next subbuffer not being written to. */
-                       if (unlikely(config->mode != RING_BUFFER_OVERWRITE &&
+                       if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
                                subbuf_trunc(offsets->begin, chan)
                                 - subbuf_trunc((unsigned long)
-                                    atomic_long_read(&buf->consumed), chan)
+                                    uatomic_read(&buf->consumed), chan)
                                >= chan->backend.buf_size)) {
+                               unsigned long nr_lost;
+
                                /*
                                 * We do not overwrite non-consumed buffers
                                 * and we are full: record is lost.
                                 */
+                               nr_lost = v_read(config, &buf->records_lost_full);
                                v_inc(config, &buf->records_lost_full);
+                               if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0) {
+                                       DBG("%lu or more records lost in (%s:%d) (buffer full)\n",
+                                               nr_lost + 1, chan->backend.name,
+                                               buf->backend.cpu);
+                               }
                                return -ENOBUFS;
                        } else {
                                /*
@@ -1569,13 +1385,21 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
                                 */
                        }
                } else {
+                       unsigned long nr_lost;
+
                        /*
                         * Next subbuffer reserve offset does not match the
                         * commit offset. Drop record in producer-consumer and
                         * overwrite mode. Caused by either a writer OOPS or too
                         * many nested writes over a reserve/commit pair.
                         */
+                       nr_lost = v_read(config, &buf->records_lost_wrap);
                        v_inc(config, &buf->records_lost_wrap);
+                       if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0) {
+                               DBG("%lu or more records lost in (%s:%d) (wrap-around)\n",
+                                       nr_lost + 1, chan->backend.name,
+                                       buf->backend.cpu);
+                       }
                        return -EIO;
                }
                offsets->size =
@@ -1587,13 +1411,22 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
                        lib_ring_buffer_align(offsets->begin + offsets->size,
                                              ctx->largest_align)
                        + ctx->data_size;
-               if (unlikely(subbuf_offset(offsets->begin, chan)
+               if (caa_unlikely(subbuf_offset(offsets->begin, chan)
                             + offsets->size > chan->backend.subbuf_size)) {
+                       unsigned long nr_lost;
+
                        /*
                         * Record too big for subbuffers, report error, don't
                         * complete the sub-buffer switch.
                         */
+                       nr_lost = v_read(config, &buf->records_lost_big);
                        v_inc(config, &buf->records_lost_big);
+                       if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0) {
+                               DBG("%lu or more records lost in (%s:%d) record size "
+                                       "of %zu bytes is too large for buffer\n",
+                                       nr_lost + 1, chan->backend.name,
+                                       buf->backend.cpu, offsets->size);
+                       }
                        return -ENOSPC;
                } else {
                        /*
@@ -1609,7 +1442,7 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
        }
        offsets->end = offsets->begin + offsets->size;
 
-       if (unlikely(subbuf_offset(offsets->end, chan) == 0)) {
+       if (caa_unlikely(subbuf_offset(offsets->end, chan) == 0)) {
                /*
                 * The offset_end will fall at the very beginning of the next
                 * subbuffer.
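The three lost-record paths above share a rate-limiting idiom: because DBG_PRINT_NR_LOST is a power of two, masking the counter with DBG_PRINT_NR_LOST - 1 yields zero only once every 2^20 increments, so at most one diagnostic per ~million lost records is emitted. The trick in isolation (names are illustrative):

	#include <stdio.h>

	#define DBG_PRINT_NR_LOST	(1UL << 20)

	static unsigned long records_lost_full;

	static void record_lost(const char *name, int cpu)
	{
		unsigned long nr_lost = records_lost_full++;

		/* True for nr_lost == 0, 2^20, 2 * 2^20, ... only. */
		if ((nr_lost & (DBG_PRINT_NR_LOST - 1)) == 0)
			fprintf(stderr, "%lu or more records lost in (%s:%d)\n",
				nr_lost + 1, name, cpu);
	}
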
@@ -1627,18 +1460,19 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
  * -EIO for other errors, else returns 0.
  * It will take care of sub-buffer switching.
  */
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
 {
        struct channel *chan = ctx->chan;
-       const struct lib_ring_buffer_config *config = chan->backend.config;
-       struct lib_ring_buffer *buf;
+       struct lttng_ust_shm_handle *handle = ctx->handle;
+       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+       struct lttng_ust_lib_ring_buffer *buf;
        struct switch_offsets offsets;
        int ret;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
+               buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
        else
-               buf = chan->backend.buf;
+               buf = shmp(handle, chan->backend.buf[0].shmp);
        ctx->buf = buf;
 
        offsets.size = 0;
@@ -1646,9 +1480,9 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
        do {
                ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
                                                       ctx);
-               if (unlikely(ret))
+               if (caa_unlikely(ret))
                        return ret;
-       } while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
+       } while (caa_unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
                                    offsets.end)
                          != offsets.old));
 
@@ -1669,29 +1503,38 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
         * Clear noref flag for this subbuffer.
         */
        lib_ring_buffer_clear_noref(config, &buf->backend,
-                                   subbuf_index(offsets.end - 1, chan));
+                                   subbuf_index(offsets.end - 1, chan),
+                                   handle);
 
        /*
         * Switch old subbuffer if needed.
         */
-       if (unlikely(offsets.switch_old_end)) {
+       if (caa_unlikely(offsets.switch_old_end)) {
                lib_ring_buffer_clear_noref(config, &buf->backend,
-                                           subbuf_index(offsets.old - 1, chan));
-               lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->tsc);
+                                           subbuf_index(offsets.old - 1, chan),
+                                           handle);
+               lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->tsc, handle);
        }
 
        /*
         * Populate new subbuffer.
         */
-       if (unlikely(offsets.switch_new_start))
-               lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc);
+       if (caa_unlikely(offsets.switch_new_start))
+               lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc, handle);
 
-       if (unlikely(offsets.switch_new_end))
-               lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc);
+       if (caa_unlikely(offsets.switch_new_end))
+               lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc, handle);
 
        ctx->slot_size = offsets.size;
        ctx->pre_offset = offsets.begin;
        ctx->buf_offset = offsets.begin + offsets.pre_header_padding;
        return 0;
 }
-EXPORT_SYMBOL_GPL(lib_ring_buffer_reserve_slow);
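Taken together with the fast path, a writer-side record now follows roughly the shape below. This is a sketch only: the ctx-init/reserve/write/commit helper names are assumed from this library's same-era frontend/backend API headers rather than shown in this diff, and my_event is an illustrative payload.

	/* Assumed helpers: lib_ring_buffer_ctx_init(), lib_ring_buffer_reserve()
	 * (whose slow path is the function above), lib_ring_buffer_write(),
	 * lib_ring_buffer_commit(). */
	struct lttng_ust_lib_ring_buffer_ctx ctx;
	uint32_t my_event = 42;		/* illustrative payload */

	lib_ring_buffer_ctx_init(&ctx, chan, NULL, sizeof(my_event),
				 sizeof(my_event) /* largest align */, cpu, handle);
	if (lib_ring_buffer_reserve(config, &ctx))
		return;			/* record lost: counted in the stats above */
	lib_ring_buffer_write(config, &ctx, &my_event, sizeof(my_event));
	lib_ring_buffer_commit(config, &ctx);
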
+
+/*
+ * Force a read (and thereby a TLS fixup for dlopen) of TLS variables.
+ */
+void lttng_fixup_ringbuffer_tls(void)
+{
+       asm volatile ("" : : "m" (lib_ring_buffer_nesting));
+}
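The empty asm with an "m" input forces the compiler to materialize the address of the __thread variable, so its TLS slot is resolved eagerly rather than lazily on first use deep inside a tracepoint. The same trick applied to any other TLS variable (a standalone sketch):

	static __thread unsigned int my_tls_counter;

	/* Touch the TLS variable without side effects: the "m" constraint
	 * makes the compiler compute its address, triggering the TLS
	 * allocation/relocation now rather than at first real access. */
	void my_fixup_tls(void)
	{
		asm volatile ("" : : "m" (my_tls_counter));
	}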