X-Git-Url: http://git.lttng.org/?p=ust.git;a=blobdiff_plain;f=libust%2Fbuffers.c;h=374ec613240fde5028fbac694b9a4d49a22c20c8;hp=206d5e7404f75a793ae3ae949dcdf23c6081022b;hb=e9b58dc0868620ba4c0e63b605602f2044a02251;hpb=719569e4967369c8a2e952346250f3bf97fec5f4

diff --git a/libust/buffers.c b/libust/buffers.c
index 206d5e7..374ec61 100644
--- a/libust/buffers.c
+++ b/libust/buffers.c
@@ -25,8 +25,10 @@
 #include 
 #include 
 #include 
-#include 
-#include 
+#include 
+
+#include 
+
 #include "buffers.h"
 #include "channels.h"
 #include "tracer.h"
@@ -48,50 +50,88 @@ static int get_n_cpus(void)
 	int result;
 	static int n_cpus = 0;
 
-	if(n_cpus) {
-		return n_cpus;
-	}
+	if(!n_cpus) {
+		/* On Linux, when some processors are offline
+		 * _SC_NPROCESSORS_CONF counts the offline
+		 * processors, whereas _SC_NPROCESSORS_ONLN
+		 * does not. If we used _SC_NPROCESSORS_ONLN,
+		 * getcpu() could return a value greater than
+		 * this sysconf, in which case the arrays
+		 * indexed by processor would overflow.
+		 */
+		result = sysconf(_SC_NPROCESSORS_CONF);
+		if(result == -1) {
+			return -1;
+		}
 
-	/* On Linux, when some processors are offline
-	 * _SC_NPROCESSORS_CONF counts the offline
-	 * processors, whereas _SC_NPROCESSORS_ONLN
-	 * does not. If we used _SC_NPROCESSORS_ONLN,
-	 * getcpu() could return a value greater than
-	 * this sysconf, in which case the arrays
-	 * indexed by processor would overflow.
-	 */
-	result = sysconf(_SC_NPROCESSORS_CONF);
-	if(result == -1) {
-		return -1;
+		n_cpus = result;
 	}
 
-	n_cpus = result;
-
-	return result;
+	return n_cpus;
 }
 
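The rationale in the comment above is easy to check in isolation: _SC_NPROCESSORS_CONF counts configured processors, including offline ones, while _SC_NPROCESSORS_ONLN counts only those currently online. A minimal standalone sketch, independent of the patch (file name and output format are illustrative only):

/* ncpus.c - print both sysconf CPU counts. sched_getcpu() can return
 * any id below the configured count, so arrays indexed by CPU id must
 * be sized with _SC_NPROCESSORS_CONF, not _SC_NPROCESSORS_ONLN. */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	long conf = sysconf(_SC_NPROCESSORS_CONF); /* includes offline CPUs */
	long onln = sysconf(_SC_NPROCESSORS_ONLN); /* online CPUs only */

	if (conf == -1 || onln == -1) {
		perror("sysconf");
		return 1;
	}
	printf("configured: %ld, online: %ld\n", conf, onln);
	return 0;
}
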
-/* _ust_buffers_write()
+/**
+ * _ust_buffers_strncpy_fixup - Fix an incomplete string in a ltt_relay buffer.
+ * @buf : buffer
+ * @offset : offset within the buffer
+ * @len : length to write
+ * @copied: string actually copied
+ * @terminated: does string end with \0
  *
- * @buf: destination buffer
- * @offset: offset in destination
- * @src: source buffer
- * @len: length of source
- * @cpy: already copied
+ * Fills string with "X" if incomplete.
  */
-
-void _ust_buffers_write(struct ust_buffer *buf, size_t offset,
-	const void *src, size_t len, ssize_t cpy)
+void _ust_buffers_strncpy_fixup(struct ust_buffer *buf, size_t offset,
+		size_t len, size_t copied, int terminated)
 {
-	do {
-		len -= cpy;
-		src += cpy;
-		offset += cpy;
+	size_t buf_offset, cpy;
+
+	if (copied == len) {
+		/*
+		 * Deal with non-terminated string.
+		 */
+		assert(!terminated);
+		offset += copied - 1;
+		buf_offset = BUFFER_OFFSET(offset, buf->chan);
+		/*
+		 * Underlying layer should never ask for writes across
+		 * subbuffers.
+		 */
+		assert(buf_offset
+		       < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+		ust_buffers_do_memset(buf->buf_data + buf_offset, '\0', 1);
+		return;
+	}
+
+	/*
+	 * Deal with incomplete string.
+	 * Overwrite string's \0 with X too.
+	 */
+	cpy = copied - 1;
+	assert(terminated);
+	len -= cpy;
+	offset += cpy;
+	buf_offset = BUFFER_OFFSET(offset, buf->chan);
 
-		WARN_ON(offset >= buf->buf_size);
+	/*
+	 * Underlying layer should never ask for writes across subbuffers.
+	 */
+	assert(buf_offset
+	       < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
 
-		cpy = min_t(size_t, len, buf->buf_size - offset);
-		ust_buffers_do_copy(buf->buf_data + offset, src, cpy);
-	} while (unlikely(len != cpy));
+	ust_buffers_do_memset(buf->buf_data + buf_offset,
+			      'X', len);
+
+	/*
+	 * Overwrite last 'X' with '\0'.
+	 */
+	offset += len - 1;
+	buf_offset = BUFFER_OFFSET(offset, buf->chan);
+	/*
+	 * Underlying layer should never ask for writes across subbuffers.
+	 */
+	assert(buf_offset
+	       < buf->chan->subbuf_size*buf->chan->subbuf_cnt);
+	ust_buffers_do_memset(buf->buf_data + buf_offset, '\0', 1);
 }
 
 static int ust_buffers_init_buffer(struct ust_trace *trace,
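The contract of _ust_buffers_strncpy_fixup() is easier to see on a flat array, away from the ring-buffer offset arithmetic. A sketch of the same fixup logic, assuming the reserved slot is exactly len bytes; slot_fixup() is a hypothetical stand-in, not part of the patch:

/* slot_fixup() mirrors _ust_buffers_strncpy_fixup() on a flat buffer:
 * a copy that filled the whole slot is force-terminated in place; a
 * short (terminated) copy is padded with 'X' and re-terminated. */
#include <assert.h>
#include <stdio.h>
#include <string.h>

static void slot_fixup(char *slot, size_t len, size_t copied, int terminated)
{
	if (copied == len) {
		assert(!terminated);		/* ran out of room */
		slot[len - 1] = '\0';		/* overwrite last byte with \0 */
		return;
	}
	assert(terminated);			/* short copy includes the \0 */
	/* pad from the copied \0 up to the end of the slot with 'X' */
	memset(slot + copied - 1, 'X', len - copied + 1);
	slot[len - 1] = '\0';			/* overwrite last 'X' with \0 */
}

int main(void)
{
	char slot[8];

	memcpy(slot, "ab\0?????", 8);		/* short copy: 3 bytes incl. \0 */
	slot_fixup(slot, 8, 3, 1);
	printf("%s\n", slot);			/* prints "abXXXXX" */

	memcpy(slot, "abcdefgh", 8);		/* slot filled, no \0 written */
	slot_fixup(slot, 8, 8, 0);
	printf("%s\n", slot);			/* prints "abcdefg" */
	return 0;
}
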
@@ -242,14 +282,14 @@ int ust_buffers_channel_open(struct ust_channel *chan, size_t subbuf_size, size_
 
 	kref_init(&chan->kref);
 
-	mutex_lock(&ust_buffers_channels_mutex);
+	pthread_mutex_lock(&ust_buffers_channels_mutex);
 	for(i=0; i<chan->n_cpus; i++) {
 		result = ust_buffers_open_buf(chan, i);
 		if (result == -1)
 			goto error;
 	}
 	list_add(&chan->list, &ust_buffers_channels);
-	mutex_unlock(&ust_buffers_channels_mutex);
+	pthread_mutex_unlock(&ust_buffers_channels_mutex);
 
 	return 0;
 
@@ -262,7 +302,7 @@ error:
 	}
 
 	kref_put(&chan->kref, ust_buffers_destroy_channel);
-	mutex_unlock(&ust_buffers_channels_mutex);
+	pthread_mutex_unlock(&ust_buffers_channels_mutex);
 	return -1;
 }
 
@@ -272,7 +312,7 @@ void ust_buffers_channel_close(struct ust_channel *chan)
 	if(!chan)
 		return;
 
-	mutex_lock(&ust_buffers_channels_mutex);
+	pthread_mutex_lock(&ust_buffers_channels_mutex);
 	for(i=0; i<chan->n_cpus; i++) {
 	/* FIXME: if we make it here, then all buffers were necessarily allocated. Moreover, we don't
 	 * initialize to NULL so we cannot use this check. Should we? */
@@ -282,7 +322,7 @@ void ust_buffers_channel_close(struct ust_channel *chan)
 
 	list_del(&chan->list);
 	kref_put(&chan->kref, ust_buffers_destroy_channel);
-	mutex_unlock(&ust_buffers_channels_mutex);
+	pthread_mutex_unlock(&ust_buffers_channels_mutex);
 }
 
 /*
@@ -329,8 +369,8 @@ static notrace void ltt_buffer_end(struct ust_buffer *buf,
 	header->data_size = data_size;
 	header->sb_size = PAGE_ALIGN(data_size);
 	header->cycle_count_end = tsc;
-	header->events_lost = local_read(&buf->events_lost);
-	header->subbuf_corrupt = local_read(&buf->corrupted_subbuffers);
+	header->events_lost = uatomic_read(&buf->events_lost);
+	header->subbuf_corrupt = uatomic_read(&buf->corrupted_subbuffers);
 	if(unlikely(header->events_lost > 0)) {
 		DBG("Some events (%d) were lost in %s_%d", header->events_lost, buf->chan->channel_name, buf->cpu);
 	}
@@ -362,9 +402,9 @@ int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
 	long consumed_old, consumed_idx, commit_count, write_offset;
 //ust//	int retval;
 
-	consumed_old = atomic_long_read(&buf->consumed);
+	consumed_old = uatomic_read(&buf->consumed);
 	consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
-	commit_count = local_read(&buf->commit_count[consumed_idx].cc_sb);
+	commit_count = uatomic_read(&buf->commit_count[consumed_idx].cc_sb);
 	/*
 	 * Make sure we read the commit count before reading the buffer
 	 * data and the write offset. Correct consumed offset ordering
@@ -416,7 +456,7 @@ int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
 //ust//	}
 //ust// #endif
 
-	write_offset = local_read(&buf->offset);
+	write_offset = uatomic_read(&buf->offset);
 	/*
 	 * Check that the subbuffer we are trying to consume has been
 	 * already fully committed.
@@ -452,13 +492,13 @@ int ust_buffers_put_subbuf(struct ust_buffer *buf, unsigned long uconsumed_old)
 {
 	long consumed_new, consumed_old;
 
-	consumed_old = atomic_long_read(&buf->consumed);
+	consumed_old = uatomic_read(&buf->consumed);
 	consumed_old = consumed_old & (~0xFFFFFFFFL);
 	consumed_old = consumed_old | uconsumed_old;
 	consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
 
 //ust//	spin_lock(&ltt_buf->full_lock);
-	if (atomic_long_cmpxchg(&buf->consumed, consumed_old,
+	if (uatomic_cmpxchg(&buf->consumed, consumed_old,
 				consumed_new)
 			!= consumed_old) {
 		/* We have been pushed by the writer : the last
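The cmpxchg in the hunk above is the whole consumer-side protocol: splice the 32-bit position handed back by the reader into the full counter, round up to the next sub-buffer boundary, and only commit the move if the writer has not pushed `consumed` in the meantime. A self-contained sketch using liburcu, with an assumed power-of-two sub-buffer size standing in for buf->chan:

#include <urcu/uatomic.h>

#define SUBBUF_SIZE 4096UL	/* assumed power-of-two sub-buffer size */

static long consumed;		/* stand-in for buf->consumed */

/* Returns 0 if the counter was advanced, -1 if the writer pushed us
 * (the sub-buffer just read must be considered corrupted). */
static int put_subbuf_sketch(unsigned long uconsumed_old)
{
	long consumed_old, consumed_new;

	consumed_old = uatomic_read(&consumed);
	/* keep the high bits, take the reader's low 32 bits */
	consumed_old = (consumed_old & ~0xFFFFFFFFL) | uconsumed_old;
	/* round up to the next sub-buffer boundary */
	consumed_new = (consumed_old + SUBBUF_SIZE) & ~(SUBBUF_SIZE - 1);

	if (uatomic_cmpxchg(&consumed, consumed_old, consumed_new)
			!= consumed_old)
		return -1;
	return 0;
}
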
@@ -478,93 +518,6 @@ int ust_buffers_put_subbuf(struct ust_buffer *buf, unsigned long uconsumed_old)
 	return 0;
 }
 
-//ust// static void switch_buffer(unsigned long data)
-//ust// {
-//ust//	struct ltt_channel_buf_struct *ltt_buf =
-//ust//		(struct ltt_channel_buf_struct *)data;
-//ust//	struct rchan_buf *buf = ltt_buf->rbuf;
-//ust//
-//ust//	if (buf)
-//ust//		ltt_force_switch(buf, FORCE_ACTIVE);
-//ust//
-//ust//	ltt_buf->switch_timer.expires += ltt_buf->switch_timer_interval;
-//ust//	add_timer_on(&ltt_buf->switch_timer, smp_processor_id());
-//ust// }
-//ust//
-//ust// static void start_switch_timer(struct ltt_channel_struct *ltt_channel)
-//ust// {
-//ust//	struct rchan *rchan = ltt_channel->trans_channel_data;
-//ust//	int cpu;
-//ust//
-//ust//	if (!ltt_channel->switch_timer_interval)
-//ust//		return;
-//ust//
-//ust//	// TODO : hotplug
-//ust//	for_each_online_cpu(cpu) {
-//ust//		struct ltt_channel_buf_struct *ltt_buf;
-//ust//		struct rchan_buf *buf;
-//ust//
-//ust//		buf = rchan->buf[cpu];
-//ust//		ltt_buf = buf->chan_private;
-//ust//		buf->random_access = 1;
-//ust//		ltt_buf->switch_timer_interval =
-//ust//			ltt_channel->switch_timer_interval;
-//ust//		init_timer(&ltt_buf->switch_timer);
-//ust//		ltt_buf->switch_timer.function = switch_buffer;
-//ust//		ltt_buf->switch_timer.expires = jiffies +
-//ust//			ltt_buf->switch_timer_interval;
-//ust//		ltt_buf->switch_timer.data = (unsigned long)ltt_buf;
-//ust//		add_timer_on(&ltt_buf->switch_timer, cpu);
-//ust//	}
-//ust// }
-//ust//
-//ust// /*
-//ust//  * Cannot use del_timer_sync with add_timer_on, so use an IPI to locally
-//ust//  * delete the timer.
-//ust//  */
-//ust// static void stop_switch_timer_ipi(void *info)
-//ust// {
-//ust//	struct ltt_channel_buf_struct *ltt_buf =
-//ust//		(struct ltt_channel_buf_struct *)info;
-//ust//
-//ust//	del_timer(&ltt_buf->switch_timer);
-//ust// }
-//ust//
-//ust// static void stop_switch_timer(struct ltt_channel_struct *ltt_channel)
-//ust// {
-//ust//	struct rchan *rchan = ltt_channel->trans_channel_data;
-//ust//	int cpu;
-//ust//
-//ust//	if (!ltt_channel->switch_timer_interval)
-//ust//		return;
-//ust//
-//ust//	// TODO : hotplug
-//ust//	for_each_online_cpu(cpu) {
-//ust//		struct ltt_channel_buf_struct *ltt_buf;
-//ust//		struct rchan_buf *buf;
-//ust//
-//ust//		buf = rchan->buf[cpu];
-//ust//		ltt_buf = buf->chan_private;
-//ust//		smp_call_function(stop_switch_timer_ipi, ltt_buf, 1);
-//ust//		buf->random_access = 0;
-//ust//	}
-//ust// }
-
-//ust// static void ust_buffers_print_written(struct ust_channel *chan,
-//ust//		long cons_off, unsigned int cpu)
-//ust// {
-//ust//	struct ust_buffer *buf = chan->buf[cpu];
-//ust//	long cons_idx, events_count;
-//ust//
-//ust//	cons_idx = SUBBUF_INDEX(cons_off, chan);
-//ust//	events_count = local_read(&buf->commit_count[cons_idx].events);
-//ust//
-//ust//	if (events_count)
-//ust//		printk(KERN_INFO
-//ust//			"channel %s: %lu events written (cpu %u, index %lu)\n",
-//ust//			chan->channel_name, events_count, cpu, cons_idx);
-//ust// }
-
 static void ltt_relay_print_subbuffer_errors(
 		struct ust_channel *channel,
 		long cons_off, int cpu)
@@ -573,14 +526,14 @@ static void ltt_relay_print_subbuffer_errors(
 	long cons_idx, commit_count, commit_count_sb, write_offset;
 
 	cons_idx = SUBBUF_INDEX(cons_off, channel);
-	commit_count = local_read(&ltt_buf->commit_count[cons_idx].cc);
-	commit_count_sb = local_read(&ltt_buf->commit_count[cons_idx].cc_sb);
+	commit_count = uatomic_read(&ltt_buf->commit_count[cons_idx].cc);
+	commit_count_sb = uatomic_read(&ltt_buf->commit_count[cons_idx].cc_sb);
 
 	/*
 	 * No need to order commit_count and write_offset reads because we
 	 * execute after trace is stopped when there are no readers left.
 	 */
-	write_offset = local_read(&ltt_buf->offset);
+	write_offset = uatomic_read(&ltt_buf->offset);
 	WARN( "LTT : unread channel %s offset is %ld "
 			"and cons_off : %ld (cpu %d)\n",
 			channel->channel_name, write_offset, cons_off, cpu);
@@ -612,8 +565,8 @@ static void ltt_relay_print_errors(struct ust_trace *trace,
 //ust//	for (cons_off = 0; cons_off < rchan->alloc_size;
 //ust//	     cons_off = SUBBUF_ALIGN(cons_off, rchan))
 //ust//		ust_buffers_print_written(ltt_chan, cons_off, cpu);
-	for (cons_off = atomic_long_read(&ltt_buf->consumed);
-			(SUBBUF_TRUNC(local_read(&ltt_buf->offset),
+	for (cons_off = uatomic_read(&ltt_buf->consumed);
+			(SUBBUF_TRUNC(uatomic_read(&ltt_buf->offset),
 				      channel)
 			 - cons_off) > 0;
 			cons_off = SUBBUF_ALIGN(cons_off, channel))
@@ -625,14 +578,14 @@ static void ltt_relay_print_buffer_errors(struct ust_channel *channel, int cpu)
 	struct ust_trace *trace = channel->trace;
 	struct ust_buffer *ltt_buf = channel->buf[cpu];
 
-	if (local_read(&ltt_buf->events_lost))
+	if (uatomic_read(&ltt_buf->events_lost))
 		ERR("channel %s: %ld events lost (cpu %d)",
 			channel->channel_name,
-			local_read(&ltt_buf->events_lost), cpu);
-	if (local_read(&ltt_buf->corrupted_subbuffers))
+			uatomic_read(&ltt_buf->events_lost), cpu);
+	if (uatomic_read(&ltt_buf->corrupted_subbuffers))
 		ERR("channel %s : %ld corrupted subbuffers (cpu %d)",
 			channel->channel_name,
-			local_read(&ltt_buf->corrupted_subbuffers), cpu);
+			uatomic_read(&ltt_buf->corrupted_subbuffers), cpu);
 
 	ltt_relay_print_errors(trace, channel, cpu);
 }
@@ -663,22 +616,22 @@ static void ltt_relay_release_channel(struct kref *kref)
 //ust//	kref_get(&trace->kref);
 //ust//	kref_get(&trace->ltt_transport_kref);
 //ust//	kref_get(&ltt_chan->kref);
-//ust//	local_set(&ltt_buf->offset, ltt_subbuffer_header_size());
-//ust//	atomic_long_set(&ltt_buf->consumed, 0);
-//ust//	atomic_long_set(&ltt_buf->active_readers, 0);
+//ust//	uatomic_set(&ltt_buf->offset, ltt_subbuffer_header_size());
+//ust//	uatomic_set(&ltt_buf->consumed, 0);
+//ust//	uatomic_set(&ltt_buf->active_readers, 0);
 //ust//
 //ust//	for (j = 0; j < n_subbufs; j++)
-//ust//		local_set(&ltt_buf->commit_count[j], 0);
+//ust//		uatomic_set(&ltt_buf->commit_count[j], 0);
 //ust//
 //ust//	init_waitqueue_head(&ltt_buf->write_wait);
-//ust//	atomic_set(&ltt_buf->wakeup_readers, 0);
+//ust//	uatomic_set(&ltt_buf->wakeup_readers, 0);
 //ust//	spin_lock_init(&ltt_buf->full_lock);
 //ust//
 //ust//	ltt_buffer_begin_callback(buf, trace->start_tsc, 0);
 //ust//	/* atomic_add made on local variable on data that belongs to
 //ust//	 * various CPUs : ok because tracing not started (for this cpu). */
-//ust//	local_add(ltt_subbuffer_header_size(), &ltt_buf->commit_count[0]);
+//ust//	uatomic_add(&ltt_buf->commit_count[0], ltt_subbuffer_header_size());
 //ust//
-//ust//	local_set(&ltt_buf->events_lost, 0);
-//ust//	local_set(&ltt_buf->corrupted_subbuffers, 0);
+//ust//	uatomic_set(&ltt_buf->events_lost, 0);
+//ust//	uatomic_set(&ltt_buf->corrupted_subbuffers, 0);
 //ust//
 //ust//	return 0;
 //ust// }
@@ -698,23 +651,23 @@ static int ust_buffers_init_buffer(struct ust_trace *trace,
 	kref_get(&trace->kref);
 	kref_get(&trace->ltt_transport_kref);
 	kref_get(&ltt_chan->kref);
-	local_set(&buf->offset, ltt_subbuffer_header_size());
-	atomic_long_set(&buf->consumed, 0);
-	atomic_long_set(&buf->active_readers, 0);
+	uatomic_set(&buf->offset, ltt_subbuffer_header_size());
+	uatomic_set(&buf->consumed, 0);
+	uatomic_set(&buf->active_readers, 0);
 
 	for (j = 0; j < n_subbufs; j++) {
-		local_set(&buf->commit_count[j].cc, 0);
-		local_set(&buf->commit_count[j].cc_sb, 0);
+		uatomic_set(&buf->commit_count[j].cc, 0);
+		uatomic_set(&buf->commit_count[j].cc_sb, 0);
 	}
 
 //ust//	init_waitqueue_head(&buf->write_wait);
-//ust//	atomic_set(&buf->wakeup_readers, 0);
+//ust//	uatomic_set(&buf->wakeup_readers, 0);
 //ust//	spin_lock_init(&buf->full_lock);
 
 	ltt_buffer_begin(buf, trace->start_tsc, 0);
-	local_add(ltt_subbuffer_header_size(), &buf->commit_count[0].cc);
+	uatomic_add(&buf->commit_count[0].cc, ltt_subbuffer_header_size());
 
-	local_set(&buf->events_lost, 0);
-	local_set(&buf->corrupted_subbuffers, 0);
+	uatomic_set(&buf->events_lost, 0);
+	uatomic_set(&buf->corrupted_subbuffers, 0);
 
 	result = pipe(fds);
 	if(result == -1) {
@@ -724,12 +677,6 @@ static int ust_buffers_init_buffer(struct ust_trace *trace,
 	buf->data_ready_fd_read = fds[0];
 	buf->data_ready_fd_write = fds[1];
 
-	/* FIXME: do we actually need this? */
-	result = fcntl(fds[0], F_SETFL, O_NONBLOCK);
-	if(result == -1) {
-		PERROR("fcntl");
-	}
-
 //ust//	buf->commit_seq = malloc(sizeof(buf->commit_seq) * n_subbufs);
 //ust//	if(!ltt_buf->commit_seq) {
 //ust//		return -1;
@@ -751,7 +698,7 @@ static void ust_buffers_destroy_buffer(struct ust_channel *ltt_chan, int cpu)
 			ltt_release_transport);
 	ltt_relay_print_buffer_errors(ltt_chan, cpu);
 //ust//	free(ltt_buf->commit_seq);
-	kfree(ltt_buf->commit_count);
+	free(ltt_buf->commit_count);
 	ltt_buf->commit_count = NULL;
 	kref_put(&ltt_chan->kref, ltt_relay_release_channel);
 	kref_put(&trace->kref, ltt_release_trace);
@@ -830,11 +777,11 @@ static int ust_buffers_create_channel(const char *trace_name, struct ust_trace
 	ltt_chan->commit_count_mask = (~0UL >> ltt_chan->n_subbufs_order);
 	ltt_chan->n_cpus = get_n_cpus();
 //ust//	ltt_chan->buf = percpu_alloc_mask(sizeof(struct ltt_channel_buf_struct), GFP_KERNEL, cpu_possible_map);
-	ltt_chan->buf = (void *) malloc(ltt_chan->n_cpus * sizeof(void *));
+	ltt_chan->buf = (void *) zmalloc(ltt_chan->n_cpus * sizeof(void *));
 	if(ltt_chan->buf == NULL) {
 		goto error;
 	}
-	ltt_chan->buf_struct_shmids = (int *) malloc(ltt_chan->n_cpus * sizeof(int));
+	ltt_chan->buf_struct_shmids = (int *) zmalloc(ltt_chan->n_cpus * sizeof(int));
 	if(ltt_chan->buf_struct_shmids == NULL)
 		goto free_buf;
 
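zmalloc() differs from the malloc() it replaces only in returning zeroed memory, which is what makes later NULL checks on the per-CPU buf[] slots meaningful. The real helper is defined elsewhere in ust; an assumed equivalent:

#include <stdlib.h>

/* assumed equivalent of ust's zmalloc(): a zeroing malloc */
static inline void *zmalloc(size_t len)
{
	return calloc(1, len);
}
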
@@ -864,26 +811,6 @@ error:
 	return -1;
 }
 
-/*
- * LTTng channel flush function.
- *
- * Must be called when no tracing is active in the channel, because of
- * accesses across CPUs.
- */
-static notrace void ltt_relay_buffer_flush(struct ust_buffer *buf)
-{
-	int result;
-
-//ust//	buf->finalized = 1;
-	ltt_force_switch(buf, FORCE_FLUSH);
-
-	result = write(buf->data_ready_fd_write, "1", 1);
-	if(result == -1) {
-		PERROR("write (in ltt_relay_buffer_flush)");
-		ERR("this should never happen!");
-	}
-}
-
 static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel)
 {
 //ust//	unsigned int i;
@@ -893,8 +820,8 @@ static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel)
 //ust//		struct ltt_channel_buf_struct *ltt_buf =
 //ust//			percpu_ptr(ltt_channel->buf, i);
 //ust//
-//ust//		if (atomic_read(&ltt_buf->wakeup_readers) == 1) {
-//ust//			atomic_set(&ltt_buf->wakeup_readers, 0);
+//ust//		if (uatomic_read(&ltt_buf->wakeup_readers) == 1) {
+//ust//			uatomic_set(&ltt_buf->wakeup_readers, 0);
 //ust//			wake_up_interruptible(&rchan->buf[i]->read_wait);
 //ust//		}
 //ust//	}
@@ -906,7 +833,7 @@ static void ltt_relay_finish_buffer(struct ust_channel *channel, unsigned int cp
 	if (channel->buf[cpu]) {
 		struct ust_buffer *buf = channel->buf[cpu];
-		ltt_relay_buffer_flush(buf);
+		ltt_force_switch(buf, FORCE_FLUSH);
 //ust//		ltt_relay_wake_writers(ltt_buf);
 
 		/* closing the pipe tells the consumer the buffer is finished */
@@ -935,392 +862,6 @@ static void ltt_relay_remove_channel(struct ust_channel *channel)
 	kref_put(&channel->kref, ltt_relay_release_channel);
 }
 
-//ust// /*
-//ust//  * Returns :
-//ust//  * 0 if ok
-//ust//  * !0 if execution must be aborted.
-//ust//  */
-//ust// static inline int ltt_relay_try_reserve(
-//ust//		struct ust_channel *channel, struct ust_buffer *buf,
-//ust//		struct ltt_reserve_switch_offsets *offsets, size_t data_size,
-//ust//		u64 *tsc, unsigned int *rflags, int largest_align)
-//ust// {
-//ust//	offsets->begin = local_read(&buf->offset);
-//ust//	offsets->old = offsets->begin;
-//ust//	offsets->begin_switch = 0;
-//ust//	offsets->end_switch_current = 0;
-//ust//	offsets->end_switch_old = 0;
-//ust//
-//ust//	*tsc = trace_clock_read64();
-//ust//	if (last_tsc_overflow(buf, *tsc))
-//ust//		*rflags = LTT_RFLAG_ID_SIZE_TSC;
-//ust//
-//ust//	if (SUBBUF_OFFSET(offsets->begin, buf->chan) == 0) {
-//ust//		offsets->begin_switch = 1;		/* For offsets->begin */
-//ust//	} else {
-//ust//		offsets->size = ust_get_header_size(channel,
-//ust//					offsets->begin, data_size,
-//ust//					&offsets->before_hdr_pad, *rflags);
-//ust//		offsets->size += ltt_align(offsets->begin + offsets->size,
-//ust//					   largest_align)
-//ust//				 + data_size;
-//ust//		if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size)
-//ust//				> buf->chan->subbuf_size) {
-//ust//			offsets->end_switch_old = 1;	/* For offsets->old */
-//ust//			offsets->begin_switch = 1;	/* For offsets->begin */
-//ust//		}
-//ust//	}
-//ust//	if (offsets->begin_switch) {
-//ust//		long subbuf_index;
-//ust//
-//ust//		if (offsets->end_switch_old)
-//ust//			offsets->begin = SUBBUF_ALIGN(offsets->begin,
-//ust//						      buf->chan);
-//ust//		offsets->begin = offsets->begin + ltt_subbuffer_header_size();
-//ust//		/* Test new buffer integrity */
-//ust//		subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan);
-//ust//		offsets->reserve_commit_diff =
-//ust//			(BUFFER_TRUNC(offsets->begin, buf->chan)
-//ust//			 >> channel->n_subbufs_order)
-//ust//			- (local_read(&buf->commit_count[subbuf_index])
-//ust//				& channel->commit_count_mask);
-//ust//		if (offsets->reserve_commit_diff == 0) {
-//ust//			long consumed;
-//ust//
-//ust//			consumed = atomic_long_read(&buf->consumed);
-//ust//
-//ust//			/* Next buffer not corrupted.
*/ -//ust// if (!channel->overwrite && -//ust// (SUBBUF_TRUNC(offsets->begin, buf->chan) -//ust// - SUBBUF_TRUNC(consumed, buf->chan)) -//ust// >= channel->alloc_size) { -//ust// -//ust// long consumed_idx = SUBBUF_INDEX(consumed, buf->chan); -//ust// long commit_count = local_read(&buf->commit_count[consumed_idx]); -//ust// if(((commit_count - buf->chan->subbuf_size) & channel->commit_count_mask) - (BUFFER_TRUNC(consumed, buf->chan) >> channel->n_subbufs_order) != 0) { -//ust// WARN("Event dropped. Caused by non-committed event."); -//ust// } -//ust// else { -//ust// WARN("Event dropped. Caused by non-consumed buffer."); -//ust// } -//ust// /* -//ust// * We do not overwrite non consumed buffers -//ust// * and we are full : event is lost. -//ust// */ -//ust// local_inc(&buf->events_lost); -//ust// return -1; -//ust// } else { -//ust// /* -//ust// * next buffer not corrupted, we are either in -//ust// * overwrite mode or the buffer is not full. -//ust// * It's safe to write in this new subbuffer. -//ust// */ -//ust// } -//ust// } else { -//ust// /* -//ust// * Next subbuffer corrupted. Force pushing reader even -//ust// * in normal mode. It's safe to write in this new -//ust// * subbuffer. -//ust// */ -//ust// } -//ust// offsets->size = ust_get_header_size(channel, -//ust// offsets->begin, data_size, -//ust// &offsets->before_hdr_pad, *rflags); -//ust// offsets->size += ltt_align(offsets->begin + offsets->size, -//ust// largest_align) -//ust// + data_size; -//ust// if ((SUBBUF_OFFSET(offsets->begin, buf->chan) + offsets->size) -//ust// > buf->chan->subbuf_size) { -//ust// /* -//ust// * Event too big for subbuffers, report error, don't -//ust// * complete the sub-buffer switch. -//ust// */ -//ust// local_inc(&buf->events_lost); -//ust// return -1; -//ust// } else { -//ust// /* -//ust// * We just made a successful buffer switch and the event -//ust// * fits in the new subbuffer. Let's write. -//ust// */ -//ust// } -//ust// } else { -//ust// /* -//ust// * Event fits in the current buffer and we are not on a switch -//ust// * boundary. It's safe to write. -//ust// */ -//ust// } -//ust// offsets->end = offsets->begin + offsets->size; -//ust// -//ust// if ((SUBBUF_OFFSET(offsets->end, buf->chan)) == 0) { -//ust// /* -//ust// * The offset_end will fall at the very beginning of the next -//ust// * subbuffer. -//ust// */ -//ust// offsets->end_switch_current = 1; /* For offsets->begin */ -//ust// } -//ust// return 0; -//ust// } -//ust// -//ust// /* -//ust// * Returns : -//ust// * 0 if ok -//ust// * !0 if execution must be aborted. -//ust// */ -//ust// static inline int ltt_relay_try_switch( -//ust// enum force_switch_mode mode, -//ust// struct ust_channel *channel, -//ust// struct ust_buffer *buf, -//ust// struct ltt_reserve_switch_offsets *offsets, -//ust// u64 *tsc) -//ust// { -//ust// long subbuf_index; -//ust// -//ust// offsets->begin = local_read(&buf->offset); -//ust// offsets->old = offsets->begin; -//ust// offsets->begin_switch = 0; -//ust// offsets->end_switch_old = 0; -//ust// -//ust// *tsc = trace_clock_read64(); -//ust// -//ust// if (SUBBUF_OFFSET(offsets->begin, buf->chan) != 0) { -//ust// offsets->begin = SUBBUF_ALIGN(offsets->begin, buf->chan); -//ust// offsets->end_switch_old = 1; -//ust// } else { -//ust// /* we do not have to switch : buffer is empty */ -//ust// return -1; -//ust// } -//ust// if (mode == FORCE_ACTIVE) -//ust// offsets->begin += ltt_subbuffer_header_size(); -//ust// /* -//ust// * Always begin_switch in FORCE_ACTIVE mode. 
-//ust// * Test new buffer integrity -//ust// */ -//ust// subbuf_index = SUBBUF_INDEX(offsets->begin, buf->chan); -//ust// offsets->reserve_commit_diff = -//ust// (BUFFER_TRUNC(offsets->begin, buf->chan) -//ust// >> channel->n_subbufs_order) -//ust// - (local_read(&buf->commit_count[subbuf_index]) -//ust// & channel->commit_count_mask); -//ust// if (offsets->reserve_commit_diff == 0) { -//ust// /* Next buffer not corrupted. */ -//ust// if (mode == FORCE_ACTIVE -//ust// && !channel->overwrite -//ust// && offsets->begin - atomic_long_read(&buf->consumed) -//ust// >= channel->alloc_size) { -//ust// /* -//ust// * We do not overwrite non consumed buffers and we are -//ust// * full : ignore switch while tracing is active. -//ust// */ -//ust// return -1; -//ust// } -//ust// } else { -//ust// /* -//ust// * Next subbuffer corrupted. Force pushing reader even in normal -//ust// * mode -//ust// */ -//ust// } -//ust// offsets->end = offsets->begin; -//ust// return 0; -//ust// } -//ust// -//ust// static inline void ltt_reserve_push_reader( -//ust// struct ust_channel *channel, -//ust// struct ust_buffer *buf, -//ust// struct ltt_reserve_switch_offsets *offsets) -//ust// { -//ust// long consumed_old, consumed_new; -//ust// -//ust// do { -//ust// consumed_old = atomic_long_read(&buf->consumed); -//ust// /* -//ust// * If buffer is in overwrite mode, push the reader consumed -//ust// * count if the write position has reached it and we are not -//ust// * at the first iteration (don't push the reader farther than -//ust// * the writer). This operation can be done concurrently by many -//ust// * writers in the same buffer, the writer being at the farthest -//ust// * write position sub-buffer index in the buffer being the one -//ust// * which will win this loop. -//ust// * If the buffer is not in overwrite mode, pushing the reader -//ust// * only happens if a sub-buffer is corrupted. -//ust// */ -//ust// if ((SUBBUF_TRUNC(offsets->end-1, buf->chan) -//ust// - SUBBUF_TRUNC(consumed_old, buf->chan)) -//ust// >= channel->alloc_size) -//ust// consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan); -//ust// else { -//ust// consumed_new = consumed_old; -//ust// break; -//ust// } -//ust// } while (atomic_long_cmpxchg(&buf->consumed, consumed_old, -//ust// consumed_new) != consumed_old); -//ust// -//ust// if (consumed_old != consumed_new) { -//ust// /* -//ust// * Reader pushed : we are the winner of the push, we can -//ust// * therefore reequilibrate reserve and commit. Atomic increment -//ust// * of the commit count permits other writers to play around -//ust// * with this variable before us. We keep track of -//ust// * corrupted_subbuffers even in overwrite mode : -//ust// * we never want to write over a non completely committed -//ust// * sub-buffer : possible causes : the buffer size is too low -//ust// * compared to the unordered data input, or there is a writer -//ust// * that died between the reserve and the commit. -//ust// */ -//ust// if (offsets->reserve_commit_diff) { -//ust// /* -//ust// * We have to alter the sub-buffer commit count. -//ust// * We do not deliver the previous subbuffer, given it -//ust// * was either corrupted or not consumed (overwrite -//ust// * mode). 
-//ust// */ -//ust// local_add(offsets->reserve_commit_diff, -//ust// &buf->commit_count[ -//ust// SUBBUF_INDEX(offsets->begin, -//ust// buf->chan)]); -//ust// if (!channel->overwrite -//ust// || offsets->reserve_commit_diff -//ust// != channel->subbuf_size) { -//ust// /* -//ust// * The reserve commit diff was not subbuf_size : -//ust// * it means the subbuffer was partly written to -//ust// * and is therefore corrupted. If it is multiple -//ust// * of subbuffer size and we are in flight -//ust// * recorder mode, we are skipping over a whole -//ust// * subbuffer. -//ust// */ -//ust// local_inc(&buf->corrupted_subbuffers); -//ust// } -//ust// } -//ust// } -//ust// } -//ust// -//ust// /** -//ust// * ltt_relay_reserve_slot - Atomic slot reservation in a LTTng buffer. -//ust// * @trace: the trace structure to log to. -//ust// * @ltt_channel: channel structure -//ust// * @transport_data: data structure specific to ltt relay -//ust// * @data_size: size of the variable length data to log. -//ust// * @slot_size: pointer to total size of the slot (out) -//ust// * @buf_offset : pointer to reserved buffer offset (out) -//ust// * @tsc: pointer to the tsc at the slot reservation (out) -//ust// * @cpu: cpuid -//ust// * -//ust// * Return : -ENOSPC if not enough space, else returns 0. -//ust// * It will take care of sub-buffer switching. -//ust// */ -//ust// static notrace int ltt_relay_reserve_slot(struct ust_trace *trace, -//ust// struct ust_channel *channel, void **transport_data, -//ust// size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc, -//ust// unsigned int *rflags, int largest_align, int cpu) -//ust// { -//ust// struct ust_buffer *buf = *transport_data = channel->buf[cpu]; -//ust// struct ltt_reserve_switch_offsets offsets; -//ust// -//ust// offsets.reserve_commit_diff = 0; -//ust// offsets.size = 0; -//ust// -//ust// /* -//ust// * Perform retryable operations. -//ust// */ -//ust// if (ltt_nesting > 4) { -//ust// local_inc(&buf->events_lost); -//ust// return -EPERM; -//ust// } -//ust// do { -//ust// if (ltt_relay_try_reserve(channel, buf, &offsets, data_size, tsc, rflags, -//ust// largest_align)) -//ust// return -ENOSPC; -//ust// } while (local_cmpxchg(&buf->offset, offsets.old, -//ust// offsets.end) != offsets.old); -//ust// -//ust// /* -//ust// * Atomically update last_tsc. This update races against concurrent -//ust// * atomic updates, but the race will always cause supplementary full TSC -//ust// * events, never the opposite (missing a full TSC event when it would be -//ust// * needed). -//ust// */ -//ust// save_last_tsc(buf, *tsc); -//ust// -//ust// /* -//ust// * Push the reader if necessary -//ust// */ -//ust// ltt_reserve_push_reader(channel, buf, &offsets); -//ust// -//ust// /* -//ust// * Switch old subbuffer if needed. -//ust// */ -//ust// if (offsets.end_switch_old) -//ust// ltt_reserve_switch_old_subbuf(channel, buf, &offsets, tsc); -//ust// -//ust// /* -//ust// * Populate new subbuffer. -//ust// */ -//ust// if (offsets.begin_switch) -//ust// ltt_reserve_switch_new_subbuf(channel, buf, &offsets, tsc); -//ust// -//ust// if (offsets.end_switch_current) -//ust// ltt_reserve_end_switch_current(channel, buf, &offsets, tsc); -//ust// -//ust// *slot_size = offsets.size; -//ust// *buf_offset = offsets.begin + offsets.before_hdr_pad; -//ust// return 0; -//ust// } -//ust// -//ust// /* -//ust// * Force a sub-buffer switch for a per-cpu buffer. This operation is -//ust// * completely reentrant : can be called while tracing is active with -//ust// * absolutely no lock held. 
-//ust// * -//ust// * Note, however, that as a local_cmpxchg is used for some atomic -//ust// * operations, this function must be called from the CPU which owns the buffer -//ust// * for a ACTIVE flush. -//ust// */ -//ust// static notrace void ltt_force_switch(struct ust_buffer *buf, -//ust// enum force_switch_mode mode) -//ust// { -//ust// struct ust_channel *channel = buf->chan; -//ust// struct ltt_reserve_switch_offsets offsets; -//ust// u64 tsc; -//ust// -//ust// offsets.reserve_commit_diff = 0; -//ust// offsets.size = 0; -//ust// -//ust// /* -//ust// * Perform retryable operations. -//ust// */ -//ust// do { -//ust// if (ltt_relay_try_switch(mode, channel, buf, &offsets, &tsc)) -//ust// return; -//ust// } while (local_cmpxchg(&buf->offset, offsets.old, -//ust// offsets.end) != offsets.old); -//ust// -//ust// /* -//ust// * Atomically update last_tsc. This update races against concurrent -//ust// * atomic updates, but the race will always cause supplementary full TSC -//ust// * events, never the opposite (missing a full TSC event when it would be -//ust// * needed). -//ust// */ -//ust// save_last_tsc(buf, tsc); -//ust// -//ust// /* -//ust// * Push the reader if necessary -//ust// */ -//ust// if (mode == FORCE_ACTIVE) -//ust// ltt_reserve_push_reader(channel, buf, &offsets); -//ust// -//ust// /* -//ust// * Switch old subbuffer if needed. -//ust// */ -//ust// if (offsets.end_switch_old) -//ust// ltt_reserve_switch_old_subbuf(channel, buf, &offsets, &tsc); -//ust// -//ust// /* -//ust// * Populate new subbuffer. -//ust// */ -//ust// if (mode == FORCE_ACTIVE) -//ust// ltt_reserve_switch_new_subbuf(channel, buf, &offsets, &tsc); -//ust// } - /* * ltt_reserve_switch_old_subbuf: switch old subbuffer * @@ -1357,10 +898,9 @@ static void ltt_reserve_switch_old_subbuf( * This compiler barrier is upgraded into a smp_wmb() by the IPI * sent by get_subbuf() when it does its smp_rmb(). */ - barrier(); - local_add(padding_size, - &buf->commit_count[oldidx].cc); - commit_count = local_read(&buf->commit_count[oldidx].cc); + smp_wmb(); + uatomic_add(&buf->commit_count[oldidx].cc, padding_size); + commit_count = uatomic_read(&buf->commit_count[oldidx].cc); ltt_check_deliver(chan, buf, offsets->old - 1, commit_count, oldidx); ltt_write_commit_counter(chan, buf, oldidx, offsets->old, commit_count, padding_size); @@ -1387,10 +927,9 @@ static void ltt_reserve_switch_new_subbuf( * This compiler barrier is upgraded into a smp_wmb() by the IPI * sent by get_subbuf() when it does its smp_rmb(). */ - barrier(); - local_add(ltt_subbuffer_header_size(), - &buf->commit_count[beginidx].cc); - commit_count = local_read(&buf->commit_count[beginidx].cc); + smp_wmb(); + uatomic_add(&buf->commit_count[beginidx].cc, ltt_subbuffer_header_size()); + commit_count = uatomic_read(&buf->commit_count[beginidx].cc); /* Check if the written buffer has to be delivered */ ltt_check_deliver(chan, buf, offsets->begin, commit_count, beginidx); ltt_write_commit_counter(chan, buf, beginidx, @@ -1433,10 +972,9 @@ static void ltt_reserve_end_switch_current( * This compiler barrier is upgraded into a smp_wmb() by the IPI * sent by get_subbuf() when it does its smp_rmb(). 
*/ - barrier(); - local_add(padding_size, - &buf->commit_count[endidx].cc); - commit_count = local_read(&buf->commit_count[endidx].cc); + smp_wmb(); + uatomic_add(&buf->commit_count[endidx].cc, padding_size); + commit_count = uatomic_read(&buf->commit_count[endidx].cc); ltt_check_deliver(chan, buf, offsets->end - 1, commit_count, endidx); ltt_write_commit_counter(chan, buf, endidx, @@ -1458,7 +996,7 @@ static int ltt_relay_try_switch_slow( long subbuf_index; long reserve_commit_diff; - offsets->begin = local_read(&buf->offset); + offsets->begin = uatomic_read(&buf->offset); offsets->old = offsets->begin; offsets->begin_switch = 0; offsets->end_switch_old = 0; @@ -1482,13 +1020,13 @@ static int ltt_relay_try_switch_slow( reserve_commit_diff = (BUFFER_TRUNC(offsets->begin, buf->chan) >> chan->n_subbufs_order) - - (local_read(&buf->commit_count[subbuf_index].cc_sb) + - (uatomic_read(&buf->commit_count[subbuf_index].cc_sb) & chan->commit_count_mask); if (reserve_commit_diff == 0) { /* Next buffer not corrupted. */ if (mode == FORCE_ACTIVE && !chan->overwrite - && offsets->begin - atomic_long_read(&buf->consumed) + && offsets->begin - uatomic_read(&buf->consumed) >= chan->alloc_size) { /* * We do not overwrite non consumed buffers and we are @@ -1510,10 +1048,6 @@ static int ltt_relay_try_switch_slow( * Force a sub-buffer switch for a per-cpu buffer. This operation is * completely reentrant : can be called while tracing is active with * absolutely no lock held. - * - * Note, however, that as a local_cmpxchg is used for some atomic - * operations, this function must be called from the CPU which owns the buffer - * for a ACTIVE flush. */ void ltt_force_switch_lockless_slow(struct ust_buffer *buf, enum force_switch_mode mode) @@ -1532,7 +1066,7 @@ void ltt_force_switch_lockless_slow(struct ust_buffer *buf, if (ltt_relay_try_switch_slow(mode, chan, buf, &offsets, &tsc)) return; - } while (local_cmpxchg(&buf->offset, offsets.old, + } while (uatomic_cmpxchg(&buf->offset, offsets.old, offsets.end) != offsets.old); /* @@ -1577,7 +1111,7 @@ static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffe { long reserve_commit_diff; - offsets->begin = local_read(&buf->offset); + offsets->begin = uatomic_read(&buf->offset); offsets->old = offsets->begin; offsets->begin_switch = 0; offsets->end_switch_current = 0; @@ -1617,13 +1151,13 @@ static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffe reserve_commit_diff = (BUFFER_TRUNC(offsets->begin, buf->chan) >> chan->n_subbufs_order) - - (local_read(&buf->commit_count[subbuf_index].cc_sb) + - (uatomic_read(&buf->commit_count[subbuf_index].cc_sb) & chan->commit_count_mask); if (likely(reserve_commit_diff == 0)) { /* Next buffer not corrupted. */ if (unlikely(!chan->overwrite && (SUBBUF_TRUNC(offsets->begin, buf->chan) - - SUBBUF_TRUNC(atomic_long_read( + - SUBBUF_TRUNC(uatomic_read( &buf->consumed), buf->chan)) >= chan->alloc_size)) { @@ -1631,7 +1165,7 @@ static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffe * We do not overwrite non consumed buffers * and we are full : event is lost. */ - local_inc(&buf->events_lost); + uatomic_inc(&buf->events_lost); return -1; } else { /* @@ -1646,7 +1180,7 @@ static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffe * overwrite mode. Caused by either a writer OOPS or * too many nested writes over a reserve/commit pair. 
*/ - local_inc(&buf->events_lost); + uatomic_inc(&buf->events_lost); return -1; } offsets->size = ust_get_header_size(chan, @@ -1661,7 +1195,7 @@ static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffe * Event too big for subbuffers, report error, don't * complete the sub-buffer switch. */ - local_inc(&buf->events_lost); + uatomic_inc(&buf->events_lost); return -1; } else { /* @@ -1701,12 +1235,14 @@ static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffe * Return : -ENOSPC if not enough space, else returns 0. * It will take care of sub-buffer switching. */ -int ltt_reserve_slot_lockless_slow(struct ust_trace *trace, - struct ust_channel *chan, void **transport_data, - size_t data_size, size_t *slot_size, long *buf_offset, u64 *tsc, - unsigned int *rflags, int largest_align, int cpu) +int ltt_reserve_slot_lockless_slow(struct ust_channel *chan, + struct ust_trace *trace, size_t data_size, + int largest_align, int cpu, + struct ust_buffer **ret_buf, + size_t *slot_size, long *buf_offset, + u64 *tsc, unsigned int *rflags) { - struct ust_buffer *buf = chan->buf[cpu]; + struct ust_buffer *buf = *ret_buf = chan->buf[cpu]; struct ltt_reserve_switch_offsets offsets; offsets.size = 0; @@ -1715,7 +1251,7 @@ int ltt_reserve_slot_lockless_slow(struct ust_trace *trace, if (unlikely(ltt_relay_try_reserve_slow(chan, buf, &offsets, data_size, tsc, rflags, largest_align))) return -ENOSPC; - } while (unlikely(local_cmpxchg(&buf->offset, offsets.old, + } while (unlikely(uatomic_cmpxchg(&buf->offset, offsets.old, offsets.end) != offsets.old)); /* @@ -1784,8 +1320,7 @@ static void __attribute__((destructor)) ust_buffers_exit(void) ltt_transport_unregister(&ust_relay_transport); } -size_t ltt_write_event_header_slow(struct ust_trace *trace, - struct ust_channel *channel, +size_t ltt_write_event_header_slow(struct ust_channel *channel, struct ust_buffer *buf, long buf_offset, u16 eID, u32 event_size, u64 tsc, unsigned int rflags)
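
Several hunks above replace a bare barrier() with smp_wmb() when bumping a commit count. The pattern being protected is the usual publish/consume pairing: on the writer side, data writes must become visible before the commit-count update; on the reader side, the count must be read before the data. A sketch using liburcu's portable names, where cmm_smp_wmb()/cmm_smp_rmb() stand in for the smp_wmb()/smp_rmb() used in the patch and the buffer layout is simplified to a single array:

#include <urcu/arch.h>		/* cmm_smp_wmb(), cmm_smp_rmb() */
#include <urcu/uatomic.h>

static char subbuf[4096];
static long commit_count;

static void writer_commit(size_t len)
{
	/* ... subbuf[0 .. len) filled here ... */
	cmm_smp_wmb();			/* order data before the count */
	uatomic_add(&commit_count, len);
}

static long reader_peek(void)
{
	long cc = uatomic_read(&commit_count);
	cmm_smp_rmb();			/* order the count before data reads */
	/* the first cc committed bytes of subbuf are now safe to read */
	return cc;
}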