Implement shm object table
diff --git a/libringbuffer/frontend_api.h b/libringbuffer/frontend_api.h
index 391e59377905f846b8003d2fc94fd59508198751..1d8e29493655da9709cec47909167177a7c1bcc6 100644
--- a/libringbuffer/frontend_api.h
+++ b/libringbuffer/frontend_api.h
  * Dual LGPL v2.1/GPL v2 license.
  */
 
-#include "../../wrapper/ringbuffer/frontend.h"
-#include <linux/errno.h>
+#include "frontend.h"
+#include "ust/core.h"
+#include <urcu-bp.h>
+#include <urcu/compiler.h>
 
 /**
  * lib_ring_buffer_get_cpu - Precedes ring buffer reserve/commit.
  *
- * Disables preemption (acts as a RCU read-side critical section) and keeps a
- * ring buffer nesting count as supplementary safety net to ensure tracer client
- * code will never trigger an endless recursion. Returns the processor ID on
- * success, -EPERM on failure (nesting count too high).
+ * Grabs RCU read-side lock and keeps a ring buffer nesting count as
+ * supplementary safety net to ensure tracer client code will never
+ * trigger an endless recursion. Returns the processor ID on success,
+ * -EPERM on failure (nesting count too high).
  *
  * asm volatile and "memory" clobber prevent the compiler from moving
  * instructions out of the ring buffer nesting count. This is required to ensure
@@ -39,15 +41,15 @@ int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
 {
        int cpu, nesting;
 
-       rcu_read_lock_sched_notrace();
-       cpu = smp_processor_id();
-       nesting = ++per_cpu(lib_ring_buffer_nesting, cpu);
-       barrier();
+       rcu_read_lock();
+       cpu = ust_get_cpu();
+       nesting = ++lib_ring_buffer_nesting;    /* TLS */
+       cmm_barrier();
 
        if (unlikely(nesting > 4)) {
                WARN_ON_ONCE(1);
-               per_cpu(lib_ring_buffer_nesting, cpu)--;
-               rcu_read_unlock_sched_notrace();
+               lib_ring_buffer_nesting--;      /* TLS */
+               rcu_read_unlock();
                return -EPERM;
        } else
                return cpu;
@@ -59,9 +61,9 @@ int lib_ring_buffer_get_cpu(const struct lib_ring_buffer_config *config)
 static inline
 void lib_ring_buffer_put_cpu(const struct lib_ring_buffer_config *config)
 {
-       barrier();
-       __get_cpu_var(lib_ring_buffer_nesting)--;
-       rcu_read_unlock_sched_notrace();
+       cmm_barrier();
+       lib_ring_buffer_nesting--;              /* TLS */
+       rcu_read_unlock();
 }
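
A minimal usage sketch of the converted get/put pair above, assuming a __thread nesting counter and using sched_getcpu() as a stand-in for ust_get_cpu(); names here are illustrative, not lttng-ust's:

	#define _GNU_SOURCE		/* for sched_getcpu() on glibc */
	#include <urcu-bp.h>
	#include <urcu/compiler.h>
	#include <sched.h>
	#include <errno.h>

	/* Per-thread nesting counter, mirroring the TLS variable used above. */
	static __thread int ring_buffer_nesting;

	static int event_begin(void)
	{
		int cpu;

		rcu_read_lock();		/* urcu-bp: no explicit thread registration */
		cpu = sched_getcpu();		/* illustrative stand-in for ust_get_cpu() */
		ring_buffer_nesting++;
		cmm_barrier();			/* keep the counter update pinned here */
		if (caa_unlikely(ring_buffer_nesting > 4)) {
			ring_buffer_nesting--;	/* refuse runaway recursion */
			rcu_read_unlock();
			return -EPERM;
		}
		return cpu;
	}

	static void event_end(void)
	{
		cmm_barrier();
		ring_buffer_nesting--;
		rcu_read_unlock();
	}
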
 
 /*
@@ -90,7 +92,7 @@ int lib_ring_buffer_try_reserve(const struct lib_ring_buffer_config *config,
         * commit counter to increment it and commit seq value to compare it to
         * the commit counter.
         */
-       prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
+       //prefetch(&buf->commit_hot[subbuf_index(*o_begin, chan)]);
 
        if (last_tsc_overflow(config, buf, ctx->tsc))
                ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
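
The kernel's prefetch() helper has no equivalent pulled in here, so the hint is simply commented out above. If it ever proved worth keeping, GCC/Clang's __builtin_prefetch would be one way to express it; a sketch, not something this commit does:

	static inline void rb_prefetch_for_write(const void *addr)
	{
		/* rw=1: prefetch in anticipation of a write (the commit
		 * counter is incremented shortly after); locality 3: keep
		 * the line in as many cache levels as possible. */
		__builtin_prefetch(addr, 1, 3);
	}
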
@@ -144,18 +146,19 @@ int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
                            struct lib_ring_buffer_ctx *ctx)
 {
        struct channel *chan = ctx->chan;
+       struct shm_handle *handle = ctx->handle;
        struct lib_ring_buffer *buf;
        unsigned long o_begin, o_end, o_old;
        size_t before_hdr_pad = 0;
 
-       if (atomic_read(&chan->record_disabled))
+       if (uatomic_read(&chan->record_disabled))
                return -EAGAIN;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-               buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
+               buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
        else
-               buf = chan->backend.buf;
-       if (atomic_read(&buf->record_disabled))
+               buf = shmp(handle, chan->backend.buf[0].shmp);
+       if (uatomic_read(&buf->record_disabled))
                return -EAGAIN;
        ctx->buf = buf;
 
@@ -187,7 +190,7 @@ int lib_ring_buffer_reserve(const struct lib_ring_buffer_config *config,
         * Clear noref flag for this subbuffer.
         */
        lib_ring_buffer_clear_noref(config, &ctx->buf->backend,
-                               subbuf_index(o_end - 1, chan));
+                               subbuf_index(o_end - 1, chan), handle);
 
        ctx->pre_offset = o_begin;
        ctx->buf_offset = o_begin + before_hdr_pad;
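
The shmp() lookups above are the heart of this commit: buffer structures now live in shared memory, so cross-object references cannot be raw pointers, which differ per process. A simplified, hypothetical illustration of the idea follows; struct and function names are invented for clarity, and the actual object table lives elsewhere in this patch set:

	#include <stddef.h>

	/* A reference stored in shared memory: not a pointer, but a
	 * location-independent (table index, byte offset) pair. */
	struct shm_ref_sketch {
		size_t index;	/* which shm object in the handle's table */
		size_t offset;	/* byte offset within that object's mapping */
	};

	/* Each process maps the same objects at different addresses. */
	struct shm_object_sketch {
		char *memory_map;
		size_t memory_map_size;
	};

	struct shm_handle_sketch {
		struct shm_object_sketch *table;
		size_t table_size;
	};

	/* Resolve a shared-memory reference against this process's mappings. */
	static inline void *shm_deref(struct shm_handle_sketch *handle,
				      struct shm_ref_sketch ref)
	{
		struct shm_object_sketch *obj;

		if (ref.index >= handle->table_size)
			return NULL;			/* unknown object */
		obj = &handle->table[ref.index];
		if (ref.offset >= obj->memory_map_size)
			return NULL;			/* out-of-bounds offset */
		return obj->memory_map + ref.offset;
	}
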
@@ -212,9 +215,10 @@ slow_path:
  */
 static inline
 void lib_ring_buffer_switch(const struct lib_ring_buffer_config *config,
-                           struct lib_ring_buffer *buf, enum switch_mode mode)
+                           struct lib_ring_buffer *buf, enum switch_mode mode,
+                           struct shm_handle *handle)
 {
-       lib_ring_buffer_switch_slow(buf, mode);
+       lib_ring_buffer_switch_slow(buf, mode, handle);
 }
 
 /* See ring_buffer_frontend_api.h for lib_ring_buffer_reserve(). */
@@ -232,6 +236,7 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
                            const struct lib_ring_buffer_ctx *ctx)
 {
        struct channel *chan = ctx->chan;
+       struct shm_handle *handle = ctx->handle;
        struct lib_ring_buffer *buf = ctx->buf;
        unsigned long offset_end = ctx->buf_offset;
        unsigned long endidx = subbuf_index(offset_end - 1, chan);
@@ -240,23 +245,15 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
        /*
         * Must count record before incrementing the commit count.
         */
-       subbuffer_count_record(config, &buf->backend, endidx);
+       subbuffer_count_record(config, &buf->backend, endidx, handle);
 
        /*
         * Order all writes to buffer before the commit count update that will
         * determine that the subbuffer is full.
         */
-       if (config->ipi == RING_BUFFER_IPI_BARRIER) {
-               /*
-                * Must write slot data before incrementing commit count.  This
-                * compiler barrier is upgraded into a smp_mb() by the IPI sent
-                * by get_subbuf().
-                */
-               barrier();
-       } else
-               smp_wmb();
+       cmm_smp_wmb();
 
-       v_add(config, ctx->slot_size, &buf->commit_hot[endidx].cc);
+       v_add(config, ctx->slot_size, &shmp(handle, buf->commit_hot)[endidx].cc);
 
        /*
         * commit count read can race with concurrent OOO commit count updates.
@@ -276,17 +273,17 @@ void lib_ring_buffer_commit(const struct lib_ring_buffer_config *config,
         *   count reaches back the reserve offset for a specific sub-buffer,
         *   which is completely independent of the order.
         */
-       commit_count = v_read(config, &buf->commit_hot[endidx].cc);
+       commit_count = v_read(config, &shmp(handle, buf->commit_hot)[endidx].cc);
 
        lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
-                                     commit_count, endidx);
+                                     commit_count, endidx, handle);
        /*
         * Update used size at each commit. It's needed only for extracting
         * ring_buffer buffers from vmcore, after crash.
         */
        lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
                                             ctx->buf_offset, commit_count,
-                                        ctx->slot_size);
+                                            ctx->slot_size, handle);
 }
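
The kernel version could relax this write barrier to a compiler barrier because the IPI sent by get_subbuf() promoted it to smp_mb(); userspace has no way to fire IPIs at readers, so the commit path now pays an unconditional cmm_smp_wmb(). A minimal sketch of the publish/consume contract that barrier upholds, with assumed names and a fixed-size payload for brevity:

	#include <urcu/arch.h>		/* cmm_smp_wmb(), cmm_smp_rmb() */
	#include <urcu/system.h>	/* CMM_STORE_SHARED(), CMM_LOAD_SHARED() */
	#include <string.h>
	#include <stddef.h>

	struct slot {
		size_t committed;	/* publishes how many payload bytes are valid */
		char payload[64];
	};

	/* Writer: payload bytes must be visible before the count exposing them. */
	static void slot_publish(struct slot *s, const void *data, size_t len)
	{
		memcpy(s->payload, data, len);	/* assumes len <= sizeof(payload) */
		cmm_smp_wmb();			/* order payload before count */
		CMM_STORE_SHARED(s->committed, len);
	}

	/* Reader: pairs its read barrier with the writer's write barrier. */
	static size_t slot_consume(struct slot *s, void *out)
	{
		size_t len = CMM_LOAD_SHARED(s->committed);

		cmm_smp_rmb();			/* don't read payload early */
		memcpy(out, s->payload, len);
		return len;
	}
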
 
 /**
@@ -331,28 +328,28 @@ static inline
 void channel_record_disable(const struct lib_ring_buffer_config *config,
                            struct channel *chan)
 {
-       atomic_inc(&chan->record_disabled);
+       uatomic_inc(&chan->record_disabled);
 }
 
 static inline
 void channel_record_enable(const struct lib_ring_buffer_config *config,
                           struct channel *chan)
 {
-       atomic_dec(&chan->record_disabled);
+       uatomic_dec(&chan->record_disabled);
 }
 
 static inline
 void lib_ring_buffer_record_disable(const struct lib_ring_buffer_config *config,
                                    struct lib_ring_buffer *buf)
 {
-       atomic_inc(&buf->record_disabled);
+       uatomic_inc(&buf->record_disabled);
 }
 
 static inline
 void lib_ring_buffer_record_enable(const struct lib_ring_buffer_config *config,
                                   struct lib_ring_buffer *buf)
 {
-       atomic_dec(&buf->record_disabled);
+       uatomic_dec(&buf->record_disabled);
 }
 
 #endif /* _LINUX_RING_BUFFER_FRONTEND_API_H */
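
All of the atomic_t counters above become plain integers manipulated through liburcu's uatomic layer, which needs no wrapper type. A small sketch of the pattern, with illustrative struct and function names:

	#include <urcu/uatomic.h>

	struct counters_sketch {
		int record_disabled;	/* plain int, no atomic_t wrapper */
	};

	static void record_disable(struct counters_sketch *c)
	{
		uatomic_inc(&c->record_disabled);	/* nestable: pairs with dec */
	}

	static void record_enable(struct counters_sketch *c)
	{
		uatomic_dec(&c->record_disabled);
	}

	static int recording_blocked(struct counters_sketch *c)
	{
		return uatomic_read(&c->record_disabled) != 0;
	}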