X-Git-Url: https://git.lttng.org/?a=blobdiff_plain;f=libust%2Fbuffers.c;h=c27b78877df625c33a23af1ff8f6af2f4277ad1b;hb=ee4c34673a261a035d224096db82e6b70ba085ca;hp=8cadeabf4207699473cbdadfc38c10da2c03d0f1;hpb=17bb07b445acdef0034194bdcebe113988a8db60;p=ust.git

diff --git a/libust/buffers.c b/libust/buffers.c
index 8cadeab..c27b788 100644
--- a/libust/buffers.c
+++ b/libust/buffers.c
@@ -25,8 +25,10 @@
 #include
 #include
 #include
-#include
 #include
+
+#include
+
 #include "buffers.h"
 #include "channels.h"
 #include "tracer.h"
@@ -240,14 +242,14 @@ int ust_buffers_channel_open(struct ust_channel *chan, size_t subbuf_size, size_
 
 	kref_init(&chan->kref);
 
-	mutex_lock(&ust_buffers_channels_mutex);
+	pthread_mutex_lock(&ust_buffers_channels_mutex);
 	for(i=0; i<chan->n_cpus; i++) {
 		result = ust_buffers_open_buf(chan, i);
 		if (result == -1)
 			goto error;
 	}
 	list_add(&chan->list, &ust_buffers_channels);
-	mutex_unlock(&ust_buffers_channels_mutex);
+	pthread_mutex_unlock(&ust_buffers_channels_mutex);
 
 	return 0;
 
@@ -260,7 +262,7 @@ error:
 	}
 
 	kref_put(&chan->kref, ust_buffers_destroy_channel);
-	mutex_unlock(&ust_buffers_channels_mutex);
+	pthread_mutex_unlock(&ust_buffers_channels_mutex);
 	return -1;
 }
 
@@ -270,7 +272,7 @@ void ust_buffers_channel_close(struct ust_channel *chan)
 	if(!chan)
 		return;
 
-	mutex_lock(&ust_buffers_channels_mutex);
+	pthread_mutex_lock(&ust_buffers_channels_mutex);
 	for(i=0; i<chan->n_cpus; i++) {
 	/* FIXME: if we make it here, then all buffers were necessarily allocated. Moreover, we don't
 	 * initialize to NULL so we cannot use this check. Should we? */
@@ -280,7 +282,7 @@ void ust_buffers_channel_close(struct ust_channel *chan)
 
 	list_del(&chan->list);
 	kref_put(&chan->kref, ust_buffers_destroy_channel);
-	mutex_unlock(&ust_buffers_channels_mutex);
+	pthread_mutex_unlock(&ust_buffers_channels_mutex);
 }
 
 /*
@@ -635,12 +637,6 @@ static int ust_buffers_init_buffer(struct ust_trace *trace,
 	buf->data_ready_fd_read = fds[0];
 	buf->data_ready_fd_write = fds[1];
 
-	/* FIXME: do we actually need this? */
-	result = fcntl(fds[0], F_SETFL, O_NONBLOCK);
-	if(result == -1) {
-		PERROR("fcntl");
-	}
-
 //ust//	buf->commit_seq = malloc(sizeof(buf->commit_seq) * n_subbufs);
 //ust//	if(!ltt_buf->commit_seq) {
 //ust//		return -1;
@@ -775,26 +771,6 @@ error:
 	return -1;
 }
 
-/*
- * LTTng channel flush function.
- *
- * Must be called when no tracing is active in the channel, because of
- * accesses across CPUs.
- */
-static notrace void ltt_relay_buffer_flush(struct ust_buffer *buf)
-{
-	int result;
-
-//ust//	buf->finalized = 1;
-	ltt_force_switch(buf, FORCE_FLUSH);
-
-	result = write(buf->data_ready_fd_write, "1", 1);
-	if(result == -1) {
-		PERROR("write (in ltt_relay_buffer_flush)");
-		ERR("this should never happen!");
-	}
-}
-
 static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel)
 {
 //ust//	unsigned int i;
@@ -817,7 +793,7 @@ static void ltt_relay_finish_buffer(struct ust_channel *channel, unsigned int cp
 
 	if (channel->buf[cpu]) {
 		struct ust_buffer *buf = channel->buf[cpu];
-		ltt_relay_buffer_flush(buf);
+		ltt_force_switch(buf, FORCE_FLUSH);
 //ust//		ltt_relay_wake_writers(ltt_buf);
 		/* closing the pipe tells the consumer the buffer is finished */
@@ -882,7 +858,7 @@ static void ltt_reserve_switch_old_subbuf(
 	 * This compiler barrier is upgraded into a smp_wmb() by the IPI
 	 * sent by get_subbuf() when it does its smp_rmb().
 	 */
-	barrier();
+	smp_wmb();
 	uatomic_add(&buf->commit_count[oldidx].cc, padding_size);
 	commit_count = uatomic_read(&buf->commit_count[oldidx].cc);
 	ltt_check_deliver(chan, buf, offsets->old - 1, commit_count, oldidx);
@@ -911,7 +887,7 @@ static void ltt_reserve_switch_new_subbuf(
 	 * This compiler barrier is upgraded into a smp_wmb() by the IPI
 	 * sent by get_subbuf() when it does its smp_rmb().
 	 */
-	barrier();
+	smp_wmb();
 	uatomic_add(&buf->commit_count[beginidx].cc, ltt_subbuffer_header_size());
 	commit_count = uatomic_read(&buf->commit_count[beginidx].cc);
 	/* Check if the written buffer has to be delivered */
@@ -956,7 +932,7 @@ static void ltt_reserve_end_switch_current(
 	 * This compiler barrier is upgraded into a smp_wmb() by the IPI
 	 * sent by get_subbuf() when it does its smp_rmb().
 	 */
-	barrier();
+	smp_wmb();
 	uatomic_add(&buf->commit_count[endidx].cc, padding_size);
 	commit_count = uatomic_read(&buf->commit_count[endidx].cc);
 	ltt_check_deliver(chan, buf,
@@ -1219,12 +1195,14 @@ static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffe
  * Return : -ENOSPC if not enough space, else returns 0.
  * It will take care of sub-buffer switching.
  */
-int ltt_reserve_slot_lockless_slow(struct ust_trace *trace,
-		struct ust_channel *chan, void **transport_data,
-		size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc,
-		unsigned int *rflags, int largest_align, int cpu)
+int ltt_reserve_slot_lockless_slow(struct ust_channel *chan,
+		struct ust_trace *trace, size_t data_size,
+		int largest_align, int cpu,
+		struct ust_buffer **ret_buf,
+		size_t *slot_size, long *buf_offset,
+		u64 *tsc, unsigned int *rflags)
 {
-	struct ust_buffer *buf = chan->buf[cpu];
+	struct ust_buffer *buf = *ret_buf = chan->buf[cpu];
 	struct ltt_reserve_switch_offsets offsets;
 
 	offsets.size = 0;
@@ -1302,8 +1280,7 @@ static void __attribute__((destructor)) ust_buffers_exit(void)
 	ltt_transport_unregister(&ust_relay_transport);
 }
 
-size_t ltt_write_event_header_slow(struct ust_trace *trace,
-		struct ust_channel *channel,
+size_t ltt_write_event_header_slow(struct ust_channel *channel,
 		struct ust_buffer *buf, long buf_offset,
 		u16 eID, u32 event_size,
 		u64 tsc, unsigned int rflags)
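
The core conversion above swaps the kernel-style mutex_lock()/mutex_unlock() wrappers for direct POSIX calls on ust_buffers_channels_mutex, including on the error path of ust_buffers_channel_open(). The following is a minimal standalone sketch of that pattern, not the actual UST code: channel_open(), open_one_buffer(), and n_open_channels are hypothetical stand-ins, while pthread_mutex_lock(), pthread_mutex_unlock(), and PTHREAD_MUTEX_INITIALIZER are the real POSIX APIs the patch uses.

/*
 * Sketch of the locking pattern adopted by the patch: a statically
 * initialized pthread mutex protecting shared channel state, released
 * on both the success and the error path. Names other than the
 * pthread_* calls are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t channels_mutex = PTHREAD_MUTEX_INITIALIZER;
static int n_open_channels;	/* stands in for the shared channel list */

static int open_one_buffer(int cpu)
{
	/* hypothetical per-cpu setup; returns -1 on failure */
	return cpu >= 0 ? 0 : -1;
}

int channel_open(int n_cpus)
{
	int i;

	pthread_mutex_lock(&channels_mutex);
	for (i = 0; i < n_cpus; i++) {
		if (open_one_buffer(i) == -1)
			goto error;
	}
	n_open_channels++;
	pthread_mutex_unlock(&channels_mutex);
	return 0;

error:
	/* the error path must drop the mutex too, as the patch does */
	pthread_mutex_unlock(&channels_mutex);
	return -1;
}

int main(void)
{
	printf("channel_open: %d\n", channel_open(4));
	return 0;
}

As in ust_buffers_channel_open() above, every return leaves the lock balanced, and the statically initialized mutex avoids initialization-order concerns in a library loaded via constructors.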