X-Git-Url: https://git.lttng.org/?a=blobdiff_plain;f=libust%2Fbuffers.c;h=2e4bb66057d7a610f2d0e1bb95d4c3e87e84aaad;hb=0222e1213f196b66cbc08cd29093aca4a28e9ffb;hp=b7002dedf32c0c63353bd637f221a6f045de898a;hpb=d73b06ee279339fd46d9fd5856da205ddd1c24fd;p=ust.git diff --git a/libust/buffers.c b/libust/buffers.c index b7002de..2e4bb66 100644 --- a/libust/buffers.c +++ b/libust/buffers.c @@ -25,8 +25,10 @@ #include #include #include -#include #include + +#include + #include "buffers.h" #include "channels.h" #include "tracer.h" @@ -41,7 +43,7 @@ struct ltt_reserve_switch_offsets { static DEFINE_MUTEX(ust_buffers_channels_mutex); -static LIST_HEAD(ust_buffers_channels); +static CDS_LIST_HEAD(ust_buffers_channels); static int get_n_cpus(void) { @@ -68,28 +70,68 @@ static int get_n_cpus(void) return n_cpus; } -/* _ust_buffers_write() +/** + * _ust_buffers_strncpy_fixup - Fix an incomplete string in a ltt_relay buffer. + * @buf : buffer + * @offset : offset within the buffer + * @len : length to write + * @copied: string actually copied + * @terminated: does string end with \0 * - * @buf: destination buffer - * @offset: offset in destination - * @src: source buffer - * @len: length of source - * @cpy: already copied + * Fills string with "X" if incomplete. */ - -void _ust_buffers_write(struct ust_buffer *buf, size_t offset, - const void *src, size_t len, ssize_t cpy) +void _ust_buffers_strncpy_fixup(struct ust_buffer *buf, size_t offset, + size_t len, size_t copied, int terminated) { - do { - len -= cpy; - src += cpy; - offset += cpy; + size_t buf_offset, cpy; + + if (copied == len) { + /* + * Deal with non-terminated string. + */ + assert(!terminated); + offset += copied - 1; + buf_offset = BUFFER_OFFSET(offset, buf->chan); + /* + * Underlying layer should never ask for writes across + * subbuffers. + */ + assert(buf_offset + < buf->chan->subbuf_size*buf->chan->subbuf_cnt); + ust_buffers_do_memset(buf->buf_data + buf_offset, '\0', 1); + return; + } + + /* + * Deal with incomplete string. + * Overwrite string's \0 with X too. + */ + cpy = copied - 1; + assert(terminated); + len -= cpy; + offset += cpy; + buf_offset = BUFFER_OFFSET(offset, buf->chan); + + /* + * Underlying layer should never ask for writes across subbuffers. + */ + assert(buf_offset + < buf->chan->subbuf_size*buf->chan->subbuf_cnt); - WARN_ON(offset >= buf->buf_size); + ust_buffers_do_memset(buf->buf_data + buf_offset, + 'X', len); - cpy = min_t(size_t, len, buf->buf_size - offset); - ust_buffers_do_copy(buf->buf_data + offset, src, cpy); - } while (unlikely(len != cpy)); + /* + * Overwrite last 'X' with '\0'. + */ + offset += len - 1; + buf_offset = BUFFER_OFFSET(offset, buf->chan); + /* + * Underlying layer should never ask for writes across subbuffers. 
+	 */
+	assert(buf_offset
+	       < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+	ust_buffers_do_memset(buf->buf_data + buf_offset, '\0', 1);
 }
 
 static int ust_buffers_init_buffer(struct ust_trace *trace,
@@ -161,7 +203,7 @@ int ust_buffers_create_buf(struct ust_channel *channel, int cpu)
 
 static void ust_buffers_destroy_channel(struct kref *kref)
 {
-	struct ust_channel *chan = container_of(kref, struct ust_channel, kref);
+	struct ust_channel *chan = _ust_container_of(kref, struct ust_channel, kref);
 	free(chan);
 }
 
@@ -183,7 +225,7 @@ static void ust_buffers_destroy_buf(struct ust_buffer *buf)
 /* called from kref_put */
 static void ust_buffers_remove_buf(struct kref *kref)
 {
-	struct ust_buffer *buf = container_of(kref, struct ust_buffer, kref);
+	struct ust_buffer *buf = _ust_container_of(kref, struct ust_buffer, kref);
 	ust_buffers_destroy_buf(buf);
 }
 
@@ -240,14 +282,14 @@ int ust_buffers_channel_open(struct ust_channel *chan, size_t subbuf_size, size_
 
 	kref_init(&chan->kref);
 
-	mutex_lock(&ust_buffers_channels_mutex);
+	pthread_mutex_lock(&ust_buffers_channels_mutex);
 	for(i=0; i<chan->n_cpus; i++) {
 		result = ust_buffers_open_buf(chan, i);
 		if (result == -1)
 			goto error;
 	}
-	list_add(&chan->list, &ust_buffers_channels);
-	mutex_unlock(&ust_buffers_channels_mutex);
+	cds_list_add(&chan->list, &ust_buffers_channels);
+	pthread_mutex_unlock(&ust_buffers_channels_mutex);
 
 	return 0;
 
@@ -260,7 +302,7 @@ error:
 	}
 
 	kref_put(&chan->kref, ust_buffers_destroy_channel);
-	mutex_unlock(&ust_buffers_channels_mutex);
+	pthread_mutex_unlock(&ust_buffers_channels_mutex);
 	return -1;
 }
 
@@ -270,7 +312,7 @@ void ust_buffers_channel_close(struct ust_channel *chan)
 	if(!chan)
 		return;
 
-	mutex_lock(&ust_buffers_channels_mutex);
+	pthread_mutex_lock(&ust_buffers_channels_mutex);
 	for(i=0; i<chan->n_cpus; i++) {
 	/* FIXME: if we make it here, then all buffers were necessarily allocated. Moreover, we don't
 	 * initialize to NULL so we cannot use this check. Should we? */
@@ -278,9 +320,9 @@ void ust_buffers_channel_close(struct ust_channel *chan)
 			ust_buffers_close_buf(chan->buf[i]);
 	}
 
-	list_del(&chan->list);
+	cds_list_del(&chan->list);
 	kref_put(&chan->kref, ust_buffers_destroy_channel);
-	mutex_unlock(&ust_buffers_channels_mutex);
+	pthread_mutex_unlock(&ust_buffers_channels_mutex);
 }
 
 /*
@@ -307,7 +349,7 @@ static void ltt_buffer_begin(struct ust_buffer *buf,
 	header->cycle_count_begin = tsc;
 	header->data_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
 	header->sb_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
-	/* FIXME: add memory barrier? */
+	/* FIXME: add memory cmm_barrier? */
 	ltt_write_trace_header(channel->trace, header);
 }
 
@@ -344,7 +386,7 @@ static notrace void ltt_buf_unfull(struct ust_buffer *buf,
 }
 
 /*
- * Promote compiler barrier to a smp_mb().
+ * Promote compiler cmm_barrier to a smp_mb().
  * For the specific LTTng case, this IPI call should be removed if the
  * architecture does not reorder writes. This should eventually be provided by
  * a separate architecture-specific infrastructure.
@@ -372,7 +414,7 @@ int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
 	 * this is OK because then there is no wmb to execute there.
 	 * If our thread is executing on the same CPU as the on the buffers
 	 * belongs to, we don't have to synchronize it at all. If we are
-	 * migrated, the scheduler will take care of the memory barriers.
+	 * migrated, the scheduler will take care of the memory cmm_barriers.
* Normally, smp_call_function_single() should ensure program order when * executing the remote function, which implies that it surrounds the * function execution with : @@ -387,7 +429,7 @@ int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed) * smp_mb() * * However, smp_call_function_single() does not seem to clearly execute - * such barriers. It depends on spinlock semantic to provide the barrier + * such cmm_barriers. It depends on spinlock semantic to provide the cmm_barrier * before executing the IPI and, when busy-looping, csd_lock_wait only * executes smp_mb() when it has to wait for the other CPU. * @@ -395,9 +437,9 @@ int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed) * required ourself, even if duplicated. It has no performance impact * anyway. * - * smp_mb() is needed because smp_rmb() and smp_wmb() only order read vs + * smp_mb() is needed because cmm_smp_rmb() and cmm_smp_wmb() only order read vs * read and write vs write. They do not ensure core synchronization. We - * really have to ensure total order between the 3 barriers running on + * really have to ensure total order between the 3 cmm_barriers running on * the 2 CPUs. */ //ust// #ifdef LTT_NO_IPI_BARRIER @@ -405,7 +447,7 @@ int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed) * Local rmb to match the remote wmb to read the commit count before the * buffer data and the write offset. */ - smp_rmb(); + cmm_smp_rmb(); //ust// #else //ust// if (raw_smp_processor_id() != buf->cpu) { //ust// smp_mb(); /* Total order with IPI handler smp_mb() */ @@ -550,7 +592,7 @@ static void ltt_relay_print_buffer_errors(struct ust_channel *channel, int cpu) static void ltt_relay_release_channel(struct kref *kref) { - struct ust_channel *ltt_chan = container_of(kref, + struct ust_channel *ltt_chan = _ust_container_of(kref, struct ust_channel, kref); free(ltt_chan->buf); } @@ -735,11 +777,11 @@ static int ust_buffers_create_channel(const char *trace_name, struct ust_trace * ltt_chan->commit_count_mask = (~0UL >> ltt_chan->n_subbufs_order); ltt_chan->n_cpus = get_n_cpus(); //ust// ltt_chan->buf = percpu_alloc_mask(sizeof(struct ltt_channel_buf_struct), GFP_KERNEL, cpu_possible_map); - ltt_chan->buf = (void *) malloc(ltt_chan->n_cpus * sizeof(void *)); + ltt_chan->buf = (void *) zmalloc(ltt_chan->n_cpus * sizeof(void *)); if(ltt_chan->buf == NULL) { goto error; } - ltt_chan->buf_struct_shmids = (int *) malloc(ltt_chan->n_cpus * sizeof(int)); + ltt_chan->buf_struct_shmids = (int *) zmalloc(ltt_chan->n_cpus * sizeof(int)); if(ltt_chan->buf_struct_shmids == NULL) goto free_buf; @@ -769,26 +811,6 @@ error: return -1; } -/* - * LTTng channel flush function. - * - * Must be called when no tracing is active in the channel, because of - * accesses across CPUs. 
- */ -static notrace void ltt_relay_buffer_flush(struct ust_buffer *buf) -{ - int result; - -//ust// buf->finalized = 1; - ltt_force_switch(buf, FORCE_FLUSH); - - result = write(buf->data_ready_fd_write, "1", 1); - if(result == -1) { - PERROR("write (in ltt_relay_buffer_flush)"); - ERR("this should never happen!"); - } -} - static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel) { //ust// unsigned int i; @@ -811,7 +833,7 @@ static void ltt_relay_finish_buffer(struct ust_channel *channel, unsigned int cp if (channel->buf[cpu]) { struct ust_buffer *buf = channel->buf[cpu]; - ltt_relay_buffer_flush(buf); + ltt_force_switch(buf, FORCE_FLUSH); //ust// ltt_relay_wake_writers(ltt_buf); /* closing the pipe tells the consumer the buffer is finished */ @@ -873,10 +895,10 @@ static void ltt_reserve_switch_old_subbuf( /* * Must write slot data before incrementing commit count. - * This compiler barrier is upgraded into a smp_wmb() by the IPI - * sent by get_subbuf() when it does its smp_rmb(). + * This compiler cmm_barrier is upgraded into a cmm_smp_wmb() by the IPI + * sent by get_subbuf() when it does its cmm_smp_rmb(). */ - barrier(); + cmm_smp_wmb(); uatomic_add(&buf->commit_count[oldidx].cc, padding_size); commit_count = uatomic_read(&buf->commit_count[oldidx].cc); ltt_check_deliver(chan, buf, offsets->old - 1, commit_count, oldidx); @@ -902,10 +924,10 @@ static void ltt_reserve_switch_new_subbuf( /* * Must write slot data before incrementing commit count. - * This compiler barrier is upgraded into a smp_wmb() by the IPI - * sent by get_subbuf() when it does its smp_rmb(). + * This compiler cmm_barrier is upgraded into a cmm_smp_wmb() by the IPI + * sent by get_subbuf() when it does its cmm_smp_rmb(). */ - barrier(); + cmm_smp_wmb(); uatomic_add(&buf->commit_count[beginidx].cc, ltt_subbuffer_header_size()); commit_count = uatomic_read(&buf->commit_count[beginidx].cc); /* Check if the written buffer has to be delivered */ @@ -947,10 +969,10 @@ static void ltt_reserve_end_switch_current( /* * Must write slot data before incrementing commit count. - * This compiler barrier is upgraded into a smp_wmb() by the IPI - * sent by get_subbuf() when it does its smp_rmb(). + * This compiler cmm_barrier is upgraded into a cmm_smp_wmb() by the IPI + * sent by get_subbuf() when it does its cmm_smp_rmb(). */ - barrier(); + cmm_smp_wmb(); uatomic_add(&buf->commit_count[endidx].cc, padding_size); commit_count = uatomic_read(&buf->commit_count[endidx].cc); ltt_check_deliver(chan, buf, @@ -1213,12 +1235,14 @@ static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffe * Return : -ENOSPC if not enough space, else returns 0. * It will take care of sub-buffer switching. 
*/ -int ltt_reserve_slot_lockless_slow(struct ust_trace *trace, - struct ust_channel *chan, void **transport_data, - size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc, - unsigned int *rflags, int largest_align, int cpu) +int ltt_reserve_slot_lockless_slow(struct ust_channel *chan, + struct ust_trace *trace, size_t data_size, + int largest_align, int cpu, + struct ust_buffer **ret_buf, + size_t *slot_size, long *buf_offset, + u64 *tsc, unsigned int *rflags) { - struct ust_buffer *buf = chan->buf[cpu]; + struct ust_buffer *buf = *ret_buf = chan->buf[cpu]; struct ltt_reserve_switch_offsets offsets; offsets.size = 0; @@ -1296,8 +1320,7 @@ static void __attribute__((destructor)) ust_buffers_exit(void) ltt_transport_unregister(&ust_relay_transport); } -size_t ltt_write_event_header_slow(struct ust_trace *trace, - struct ust_channel *channel, +size_t ltt_write_event_header_slow(struct ust_channel *channel, struct ust_buffer *buf, long buf_offset, u16 eID, u32 event_size, u64 tsc, unsigned int rflags)
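
Note (not part of the patch): the conversion above follows one pattern throughout — kernel-style primitives are swapped for their userspace equivalents: DEFINE_MUTEX/mutex_lock become pthread_mutex_t/pthread_mutex_lock, LIST_HEAD/list_add/list_del become liburcu's CDS_LIST_HEAD/cds_list_add/cds_list_del, container_of becomes _ust_container_of, and barrier()/smp_rmb()/smp_wmb() become the cmm_-prefixed macros. The fragment below is a minimal, self-contained sketch of that urcu-list-plus-pthread-mutex idiom, modelled on how ust_buffers_channel_open()/ust_buffers_channel_close() maintain the ust_buffers_channels registry; the channel_reg, reg_add and reg_remove names are made up for illustration and do not appear in ust.git.

/*
 * Standalone sketch, not part of libust/buffers.c. Shows the
 * CDS_LIST_HEAD + pthread mutex pattern this commit migrates to.
 * channel_reg/reg_add/reg_remove are hypothetical names.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <urcu/list.h>

struct channel_reg {
	char name[32];
	struct cds_list_head list;	/* node linked into the global list */
};

/* Global registry, protected by a pthread mutex (like ust_buffers_channels). */
static CDS_LIST_HEAD(registered_channels);
static pthread_mutex_t reg_mutex = PTHREAD_MUTEX_INITIALIZER;

static struct channel_reg *reg_add(const char *name)
{
	struct channel_reg *reg = malloc(sizeof(*reg));

	if (!reg)
		return NULL;
	strncpy(reg->name, name, sizeof(reg->name) - 1);
	reg->name[sizeof(reg->name) - 1] = '\0';

	pthread_mutex_lock(&reg_mutex);
	cds_list_add(&reg->list, &registered_channels);
	pthread_mutex_unlock(&reg_mutex);
	return reg;
}

static void reg_remove(struct channel_reg *reg)
{
	pthread_mutex_lock(&reg_mutex);
	cds_list_del(&reg->list);
	pthread_mutex_unlock(&reg_mutex);
	free(reg);
}

int main(void)
{
	struct channel_reg *a = reg_add("metadata");
	struct channel_reg *b = reg_add("ust");
	struct channel_reg *iter;

	pthread_mutex_lock(&reg_mutex);
	cds_list_for_each_entry(iter, &registered_channels, list)
		printf("registered: %s\n", iter->name);
	pthread_mutex_unlock(&reg_mutex);

	reg_remove(b);
	reg_remove(a);
	return 0;
}

Unlike the kernel variant, nothing here relies on RCU read-side sections; the patch only borrows liburcu's plain cds_list implementation, so the pthread mutex still serializes every access to the list.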