UST synchronization fix
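In short: the channel bookkeeping paths drop the kernel-compat mutex wrappers in favour of direct pthread_mutex_lock()/pthread_mutex_unlock() calls, the compiler-only barrier() before each commit-count update is strengthened to a real smp_wmb(), the single-caller ltt_relay_buffer_flush() helper is inlined as ltt_force_switch(buf, FORCE_FLUSH), and ltt_reserve_slot_lockless_slow() and ltt_write_event_header_slow() get reworked parameter lists, the former now handing the reserved buffer back through a new ret_buf out-parameter. The <ust/kernelcompat.h> include is dropped and <ust/clock.h> is added.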
diff --git a/libust/buffers.c b/libust/buffers.c
index b7002dedf32c0c63353bd637f221a6f045de898a..c27b78877df625c33a23af1ff8f6af2f4277ad1b 100644
--- a/libust/buffers.c
+++ b/libust/buffers.c
 #include <sys/ipc.h>
 #include <sys/shm.h>
 #include <fcntl.h>
-#include <ust/kernelcompat.h>
 #include <stdlib.h>
+
+#include <ust/clock.h>
+
 #include "buffers.h"
 #include "channels.h"
 #include "tracer.h"
@@ -240,14 +242,14 @@ int ust_buffers_channel_open(struct ust_channel *chan, size_t subbuf_size, size_
 
        kref_init(&chan->kref);
 
-       mutex_lock(&ust_buffers_channels_mutex);
+       pthread_mutex_lock(&ust_buffers_channels_mutex);
        for(i=0; i<chan->n_cpus; i++) {
                result = ust_buffers_open_buf(chan, i);
                if (result == -1)
                        goto error;
        }
        list_add(&chan->list, &ust_buffers_channels);
-       mutex_unlock(&ust_buffers_channels_mutex);
+       pthread_mutex_unlock(&ust_buffers_channels_mutex);
 
        return 0;
 
@@ -260,7 +262,7 @@ error:
        }
 
        kref_put(&chan->kref, ust_buffers_destroy_channel);
-       mutex_unlock(&ust_buffers_channels_mutex);
+       pthread_mutex_unlock(&ust_buffers_channels_mutex);
        return -1;
 }
 
@@ -270,7 +272,7 @@ void ust_buffers_channel_close(struct ust_channel *chan)
        if(!chan)
                return;
 
-       mutex_lock(&ust_buffers_channels_mutex);
+       pthread_mutex_lock(&ust_buffers_channels_mutex);
        for(i=0; i<chan->n_cpus; i++) {
        /* FIXME: if we make it here, then all buffers were necessarily allocated. Moreover, we don't
         * initialize to NULL so we cannot use this check. Should we? */
@@ -280,7 +282,7 @@ void ust_buffers_channel_close(struct ust_channel *chan)
 
        list_del(&chan->list);
        kref_put(&chan->kref, ust_buffers_destroy_channel);
-       mutex_unlock(&ust_buffers_channels_mutex);
+       pthread_mutex_unlock(&ust_buffers_channels_mutex);
 }
 
 /*
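The locking hunks above replace the kernel-compat mutex wrappers with the pthread API. A minimal sketch of the resulting userspace pattern, assuming the mutex is statically initialized (the initializer and helper below are illustrative, not lifted from buffers.c):

	#include <pthread.h>

	static pthread_mutex_t ust_buffers_channels_mutex =
			PTHREAD_MUTEX_INITIALIZER;

	/* Illustrative: every update of the shared channel list is
	 * serialized on one process-wide pthread mutex, as
	 * ust_buffers_channel_open() and ust_buffers_channel_close()
	 * now do directly. */
	static void channel_list_update(void)
	{
		pthread_mutex_lock(&ust_buffers_channels_mutex);
		/* ... add to or remove from ust_buffers_channels ... */
		pthread_mutex_unlock(&ust_buffers_channels_mutex);
	}

Note that the error path in the first hunk still unlocks before returning -1, so the mutex is never held across the failure exit.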
@@ -769,26 +771,6 @@ error:
        return -1;
 }
 
-/*
- * LTTng channel flush function.
- *
- * Must be called when no tracing is active in the channel, because of
- * accesses across CPUs.
- */
-static notrace void ltt_relay_buffer_flush(struct ust_buffer *buf)
-{
-       int result;
-
-//ust//        buf->finalized = 1;
-       ltt_force_switch(buf, FORCE_FLUSH);
-
-       result = write(buf->data_ready_fd_write, "1", 1);
-       if(result == -1) {
-               PERROR("write (in ltt_relay_buffer_flush)");
-               ERR("this should never happen!");
-       }
-}
-
 static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel)
 {
 //ust//        unsigned int i;
@@ -811,7 +793,7 @@ static void ltt_relay_finish_buffer(struct ust_channel *channel, unsigned int cp
 
        if (channel->buf[cpu]) {
                struct ust_buffer *buf = channel->buf[cpu];
-               ltt_relay_buffer_flush(buf);
+               ltt_force_switch(buf, FORCE_FLUSH);
 //ust//                ltt_relay_wake_writers(ltt_buf);
                /* closing the pipe tells the consumer the buffer is finished */
                
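With ltt_relay_buffer_flush() removed, the explicit one-byte wakeup write on data_ready_fd_write disappears as well; as the comment above says, closing the pipe is what tells the consumer the buffer is finished, which a POSIX reader observes as a zero-byte read(). A sketch of that distinction on the consumer side (hypothetical code, not UST's actual consumer; data_ready_fd_read is an assumed name for the read end):

	#include <unistd.h>

	static void wait_buffer_event(int data_ready_fd_read)
	{
		char c;
		ssize_t n = read(data_ready_fd_read, &c, 1);

		if (n == 1) {
			/* normal wakeup byte: data is ready to consume */
		} else if (n == 0) {
			/* write end closed: the buffer is finished */
		}
	}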
@@ -876,7 +858,7 @@ static void ltt_reserve_switch_old_subbuf(
         * This compiler barrier is upgraded into a smp_wmb() by the IPI
         * sent by get_subbuf() when it does its smp_rmb().
         */
-       barrier();
+       smp_wmb();
        uatomic_add(&buf->commit_count[oldidx].cc, padding_size);
        commit_count = uatomic_read(&buf->commit_count[oldidx].cc);
        ltt_check_deliver(chan, buf, offsets->old - 1, commit_count, oldidx);
@@ -905,7 +887,7 @@ static void ltt_reserve_switch_new_subbuf(
         * This compiler barrier is upgraded into a smp_wmb() by the IPI
         * sent by get_subbuf() when it does its smp_rmb().
         */
-       barrier();
+       smp_wmb();
        uatomic_add(&buf->commit_count[beginidx].cc, ltt_subbuffer_header_size());
        commit_count = uatomic_read(&buf->commit_count[beginidx].cc);
        /* Check if the written buffer has to be delivered */
@@ -950,7 +932,7 @@ static void ltt_reserve_end_switch_current(
         * This compiler barrier is upgraded into a smp_wmb() by the IPI
         * sent by get_subbuf() when it does its smp_rmb().
         */
-       barrier();
+       smp_wmb();
        uatomic_add(&buf->commit_count[endidx].cc, padding_size);
        commit_count = uatomic_read(&buf->commit_count[endidx].cc);
        ltt_check_deliver(chan, buf,
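All three hunks make the same upgrade. A plain barrier() constrains only the compiler, so on weakly ordered CPUs nothing guaranteed that the sub-buffer writes were visible before the commit-count update that publishes them; smp_wmb() supplies the store ordering that the reader's smp_rmb() in get_subbuf() pairs with. A standalone C11 analogue of the pattern, using atomic fences as stand-ins for the tracer's macros (a sketch, not UST code):

	#include <stdatomic.h>

	static char slot[64];            /* stand-in for a sub-buffer slot */
	static atomic_long commit_count; /* stand-in for commit_count[idx].cc */

	static void writer(void)
	{
		slot[0] = 42;                              /* write the payload */
		atomic_thread_fence(memory_order_release); /* ~ smp_wmb() */
		atomic_fetch_add(&commit_count, 1);        /* publish the commit */
	}

	static char reader(void)
	{
		long cc = atomic_load(&commit_count);      /* see the commit... */
		atomic_thread_fence(memory_order_acquire); /* ~ smp_rmb() */
		return cc ? slot[0] : 0;   /* ...then the payload is visible */
	}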
@@ -1213,12 +1195,14 @@ static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffe
  * Return : -ENOSPC if not enough space, else returns 0.
  * It will take care of sub-buffer switching.
  */
-int ltt_reserve_slot_lockless_slow(struct ust_trace *trace,
-               struct ust_channel *chan, void **transport_data,
-               size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
-               unsigned int *rflags, int largest_align, int cpu)
+int ltt_reserve_slot_lockless_slow(struct ust_channel *chan,
+               struct ust_trace *trace, size_t data_size,
+               int largest_align, int cpu,
+               struct ust_buffer **ret_buf,
+               size_t *slot_size, long *buf_offset,
+               u64 *tsc, unsigned int *rflags)
 {
-       struct ust_buffer *buf = chan->buf[cpu];
+       struct ust_buffer *buf = *ret_buf = chan->buf[cpu];
        struct ltt_reserve_switch_offsets offsets;
 
        offsets.size = 0;
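The reordered prototype groups the inputs first and the outputs last, drops the transport_data parameter, and hands back the buffer the slot was reserved in through the new ret_buf argument instead of leaving callers to re-read chan->buf[cpu]. A hypothetical call site under the new signature (chan, trace, data_size, largest_align and cpu assumed in scope):

	struct ust_buffer *buf;
	size_t slot_size;
	long buf_offset;
	u64 tsc;
	unsigned int rflags = 0;
	int ret;

	ret = ltt_reserve_slot_lockless_slow(chan, trace, data_size,
			largest_align, cpu, &buf,
			&slot_size, &buf_offset, &tsc, &rflags);
	if (ret == -ENOSPC) {
		/* not enough space: the event is lost */
	}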
@@ -1296,8 +1280,7 @@ static void __attribute__((destructor)) ust_buffers_exit(void)
        ltt_transport_unregister(&ust_relay_transport);
 }
 
-size_t ltt_write_event_header_slow(struct ust_trace *trace,
-               struct ust_channel *channel,
+size_t ltt_write_event_header_slow(struct ust_channel *channel,
                struct ust_buffer *buf, long buf_offset,
                u16 eID, u32 event_size,
                u64 tsc, unsigned int rflags)