static DEFINE_MUTEX(ust_buffers_channels_mutex);
-static LIST_HEAD(ust_buffers_channels);
+static CDS_LIST_HEAD(ust_buffers_channels);
static int get_n_cpus(void)
{
return n_cpus;
}
-/* _ust_buffers_write()
+/**
+ * _ust_buffers_strncpy_fixup - Fix an incomplete string in an ltt_relay buffer.
+ * @buf: buffer
+ * @offset: offset within the buffer
+ * @len: length to write
+ * @copied: number of bytes actually copied
+ * @terminated: whether the copied string ends with '\0'
*
- * @buf: destination buffer
- * @offset: offset in destination
- * @src: source buffer
- * @len: length of source
- * @cpy: already copied
+ * Pads an incomplete string with "X" and makes sure it is '\0'-terminated.
*/
-
-void _ust_buffers_write(struct ust_buffer *buf, size_t offset,
- const void *src, size_t len, ssize_t cpy)
+void _ust_buffers_strncpy_fixup(struct ust_buffer *buf, size_t offset,
+ size_t len, size_t copied, int terminated)
{
- do {
- len -= cpy;
- src += cpy;
- offset += cpy;
+ size_t buf_offset, cpy;
+
+ if (copied == len) {
+ /*
+ * Deal with non-terminated string.
+ */
+ assert(!terminated);
+ offset += copied - 1;
+ buf_offset = BUFFER_OFFSET(offset, buf->chan);
+ /*
+ * Underlying layer should never ask for writes across
+ * subbuffers.
+ */
+	assert(buf_offset < buf->chan->subbuf_size * buf->chan->subbuf_cnt);
+ ust_buffers_do_memset(buf->buf_data + buf_offset, '\0', 1);
+ return;
+ }
+
+ /*
+ * Deal with incomplete string.
+	 * Overwrite the string's '\0' with 'X' too; a new one is added below.
+ */
+ cpy = copied - 1;
+ assert(terminated);
+ len -= cpy;
+ offset += cpy;
+ buf_offset = BUFFER_OFFSET(offset, buf->chan);
+
+ /*
+ * Underlying layer should never ask for writes across subbuffers.
+ */
+	assert(buf_offset < buf->chan->subbuf_size * buf->chan->subbuf_cnt);
- WARN_ON(offset >= buf->buf_size);
+	ust_buffers_do_memset(buf->buf_data + buf_offset, 'X', len);
- cpy = min_t(size_t, len, buf->buf_size - offset);
- ust_buffers_do_copy(buf->buf_data + offset, src, cpy);
- } while (unlikely(len != cpy));
+ /*
+ * Overwrite last 'X' with '\0'.
+ */
+ offset += len - 1;
+ buf_offset = BUFFER_OFFSET(offset, buf->chan);
+ /*
+ * Underlying layer should never ask for writes across subbuffers.
+ */
+	assert(buf_offset < buf->chan->subbuf_size * buf->chan->subbuf_cnt);
+ ust_buffers_do_memset(buf->buf_data + buf_offset, '\0', 1);
}
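
For context, a minimal sketch (not from this patch) of the caller contract
_ust_buffers_strncpy_fixup() asserts: a strncpy-like primitive reports how
many bytes it wrote and whether the '\0' fit, so copied < len implies
terminated and copied == len implies truncation. The demo_ name is a
hypothetical illustration:

    #include <stddef.h>

    /* Copies at most len bytes of src, including the '\0' when it fits.
     * Returns bytes written and sets *terminated, matching the asserts
     * in the fixup above. */
    static size_t demo_strncpy(char *dest, const char *src, size_t len,
                               int *terminated)
    {
            size_t i;

            for (i = 0; i < len; i++) {
                    dest[i] = src[i];
                    if (src[i] == '\0') {
                            *terminated = 1;
                            return i + 1;   /* copied < len => terminated */
                    }
            }
            *terminated = 0;                /* source was cut short */
            return len;
    }

A caller would then invoke the fixup only on short or unterminated copies,
e.g. if (copied < len || !terminated) _ust_buffers_strncpy_fixup(buf,
offset, len, copied, terminated);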
static int ust_buffers_init_buffer(struct ust_trace *trace,
return -1;
buf->chan = channel;
- kref_get(&channel->kref);
+ urcu_ref_get(&channel->urcu_ref);
return 0;
}
-static void ust_buffers_destroy_channel(struct kref *kref)
+static void ust_buffers_destroy_channel(struct urcu_ref *urcu_ref)
{
- struct ust_channel *chan = container_of(kref, struct ust_channel, kref);
+ struct ust_channel *chan = _ust_container_of(urcu_ref, struct ust_channel, urcu_ref);
free(chan);
}
//ust// chan->buf[buf->cpu] = NULL;
free(buf);
- kref_put(&chan->kref, ust_buffers_destroy_channel);
+ urcu_ref_put(&chan->urcu_ref, ust_buffers_destroy_channel);
}
-/* called from kref_put */
-static void ust_buffers_remove_buf(struct kref *kref)
+/* called from urcu_ref_put */
+static void ust_buffers_remove_buf(struct urcu_ref *urcu_ref)
{
- struct ust_buffer *buf = container_of(kref, struct ust_buffer, kref);
+ struct ust_buffer *buf = _ust_container_of(urcu_ref, struct ust_buffer, urcu_ref);
ust_buffers_destroy_buf(buf);
}
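
The kref-to-urcu_ref conversion above follows liburcu's <urcu/ref.h>. A
minimal self-contained sketch of the pattern, with hypothetical demo_
names:

    #include <stdlib.h>
    #include <urcu/ref.h>           /* urcu_ref_init/get/put */
    #include <urcu/compiler.h>      /* caa_container_of */

    struct demo_obj {
            struct urcu_ref ref;
            /* ... payload ... */
    };

    static void demo_release(struct urcu_ref *ref)
    {
            struct demo_obj *obj = caa_container_of(ref, struct demo_obj, ref);

            free(obj);
    }

    static struct demo_obj *demo_new(void)
    {
            struct demo_obj *obj = calloc(1, sizeof(*obj));

            if (obj)
                    urcu_ref_init(&obj->ref);       /* count starts at 1 */
            return obj;
    }

    static void demo_use(struct demo_obj *obj)
    {
            urcu_ref_get(&obj->ref);                /* extra user */
            /* ... */
            urcu_ref_put(&obj->ref, demo_release);  /* last put frees */
    }

Note that ust wraps container_of as _ust_container_of, but the release
callback shape is the same.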
if (result == -1)
return -1;
- kref_init(&chan->buf[cpu]->kref);
+ urcu_ref_init(&chan->buf[cpu]->urcu_ref);
result = ust_buffers_init_buffer(chan->trace, chan, chan->buf[cpu], chan->subbuf_cnt);
if(result == -1)
*/
static void ust_buffers_close_buf(struct ust_buffer *buf)
{
- kref_put(&buf->kref, ust_buffers_remove_buf);
+ urcu_ref_put(&buf->urcu_ref, ust_buffers_remove_buf);
}
int ust_buffers_channel_open(struct ust_channel *chan, size_t subbuf_size, size_t subbuf_cnt)
chan->subbuf_size_order = get_count_order(subbuf_size);
chan->alloc_size = subbuf_size * subbuf_cnt;
- kref_init(&chan->kref);
+ urcu_ref_init(&chan->urcu_ref);
- mutex_lock(&ust_buffers_channels_mutex);
+ pthread_mutex_lock(&ust_buffers_channels_mutex);
for(i=0; i<chan->n_cpus; i++) {
result = ust_buffers_open_buf(chan, i);
if (result == -1)
goto error;
}
- list_add(&chan->list, &ust_buffers_channels);
- mutex_unlock(&ust_buffers_channels_mutex);
+ cds_list_add(&chan->list, &ust_buffers_channels);
+ pthread_mutex_unlock(&ust_buffers_channels_mutex);
return 0;
do {} while(0);
}
- kref_put(&chan->kref, ust_buffers_destroy_channel);
- mutex_unlock(&ust_buffers_channels_mutex);
+ urcu_ref_put(&chan->urcu_ref, ust_buffers_destroy_channel);
+ pthread_mutex_unlock(&ust_buffers_channels_mutex);
return -1;
}
if(!chan)
return;
- mutex_lock(&ust_buffers_channels_mutex);
+ pthread_mutex_lock(&ust_buffers_channels_mutex);
for(i=0; i<chan->n_cpus; i++) {
/* FIXME: if we make it here, then all buffers were necessarily allocated. Moreover, we don't
* initialize to NULL so we cannot use this check. Should we? */
ust_buffers_close_buf(chan->buf[i]);
}
- list_del(&chan->list);
- kref_put(&chan->kref, ust_buffers_destroy_channel);
- mutex_unlock(&ust_buffers_channels_mutex);
+ cds_list_del(&chan->list);
+ urcu_ref_put(&chan->urcu_ref, ust_buffers_destroy_channel);
+ pthread_mutex_unlock(&ust_buffers_channels_mutex);
}
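
Likewise, the channel list moves to liburcu's <urcu/list.h>, with a
pthread mutex serializing writers as above. A hedged sketch with
hypothetical demo_ names:

    #include <pthread.h>
    #include <urcu/list.h>

    static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;
    static CDS_LIST_HEAD(demo_channels);

    struct demo_channel {
            struct cds_list_head list;
    };

    static void demo_register(struct demo_channel *chan)
    {
            pthread_mutex_lock(&demo_mutex);
            cds_list_add(&chan->list, &demo_channels);
            pthread_mutex_unlock(&demo_mutex);
    }

    static void demo_unregister(struct demo_channel *chan)
    {
            pthread_mutex_lock(&demo_mutex);
            cds_list_del(&chan->list);
            pthread_mutex_unlock(&demo_mutex);
    }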
/*
header->cycle_count_begin = tsc;
header->data_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
header->sb_size = 0xFFFFFFFF; /* for recognizing crashed buffers */
	/* FIXME: add memory barrier? */
ltt_write_trace_header(channel->trace, header);
}
}
/*
* Promote compiler barrier to a smp_mb().
* For the specific LTTng case, this IPI call should be removed if the
* architecture does not reorder writes. This should eventually be provided by
* a separate architecture-specific infrastructure.
* this is OK because then there is no wmb to execute there.
* If our thread is executing on the same CPU as the on the buffers
* belongs to, we don't have to synchronize it at all. If we are
* migrated, the scheduler will take care of the memory barriers.
* Normally, smp_call_function_single() should ensure program order when
* executing the remote function, which implies that it surrounds the
* function execution with :
* smp_mb()
*
* However, smp_call_function_single() does not seem to clearly execute
* such barriers. It depends on spinlock semantics to provide the barrier
* before executing the IPI and, when busy-looping, csd_lock_wait only
* executes smp_mb() when it has to wait for the other CPU.
*
* required ourself, even if duplicated. It has no performance impact
* anyway.
*
- * smp_mb() is needed because smp_rmb() and smp_wmb() only order read vs
+ * smp_mb() is needed because cmm_smp_rmb() and cmm_smp_wmb() only order read vs
* read and write vs write. They do not ensure core synchronization. We
* really have to ensure total order between the 3 barriers running on
* the 2 CPUs.
*/
//ust// #ifdef LTT_NO_IPI_BARRIER
* Local rmb to match the remote wmb to read the commit count before the
* buffer data and the write offset.
*/
- smp_rmb();
+ cmm_smp_rmb();
//ust// #else
//ust// if (raw_smp_processor_id() != buf->cpu) {
//ust// smp_mb(); /* Total order with IPI handler smp_mb() */
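
Since userspace has no IPI to promote a compiler barrier, the patch relies
on explicit cmm_* barriers instead. A minimal sketch of the writer/reader
pairing, with hypothetical demo_ names, assuming <urcu/arch.h> and
<urcu/uatomic.h>:

    #include <urcu/arch.h>          /* cmm_smp_wmb(), cmm_smp_rmb() */
    #include <urcu/uatomic.h>       /* uatomic_set(), uatomic_read() */

    struct demo_slot {
            unsigned long payload;
            long commit;            /* stand-in for the commit count */
    };

    static void demo_publish(struct demo_slot *s, unsigned long v)
    {
            s->payload = v;
            cmm_smp_wmb();          /* write data before commit count */
            uatomic_set(&s->commit, 1);
    }

    static int demo_consume(struct demo_slot *s, unsigned long *v)
    {
            if (!uatomic_read(&s->commit))
                    return -1;
            cmm_smp_rmb();          /* read commit count before data */
            *v = s->payload;
            return 0;
    }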
ltt_relay_print_errors(trace, channel, cpu);
}
-static void ltt_relay_release_channel(struct kref *kref)
+static void ltt_relay_release_channel(struct urcu_ref *urcu_ref)
{
- struct ust_channel *ltt_chan = container_of(kref,
- struct ust_channel, kref);
+ struct ust_channel *ltt_chan = _ust_container_of(urcu_ref,
+ struct ust_channel, urcu_ref);
free(ltt_chan->buf);
}
zmalloc(sizeof(*buf->commit_count) * n_subbufs);
if (!buf->commit_count)
return -ENOMEM;
- kref_get(&trace->kref);
- kref_get(&trace->ltt_transport_kref);
- kref_get(<t_chan->kref);
+ urcu_ref_get(&trace->urcu_ref);
+ urcu_ref_get(&trace->ltt_transport_urcu_ref);
+ urcu_ref_get(<t_chan->urcu_ref);
uatomic_set(&buf->offset, ltt_subbuffer_header_size());
uatomic_set(&buf->consumed, 0);
uatomic_set(&buf->active_readers, 0);
struct ust_trace *trace = ltt_chan->trace;
struct ust_buffer *ltt_buf = ltt_chan->buf[cpu];
- kref_put(<t_chan->trace->ltt_transport_kref,
+ urcu_ref_put(<t_chan->trace->ltt_transport_urcu_ref,
ltt_release_transport);
ltt_relay_print_buffer_errors(ltt_chan, cpu);
//ust// free(ltt_buf->commit_seq);
free(ltt_buf->commit_count);
ltt_buf->commit_count = NULL;
- kref_put(<t_chan->kref, ltt_relay_release_channel);
- kref_put(&trace->kref, ltt_release_trace);
+ urcu_ref_put(<t_chan->urcu_ref, ltt_relay_release_channel);
+ urcu_ref_put(&trace->urcu_ref, ltt_release_trace);
//ust// wake_up_interruptible(&trace->kref_wq);
}
{
int result;
- kref_init(<t_chan->kref);
+ urcu_ref_init(<t_chan->urcu_ref);
ltt_chan->trace = trace;
ltt_chan->overwrite = overwrite;
ltt_chan->commit_count_mask = (~0UL >> ltt_chan->n_subbufs_order);
ltt_chan->n_cpus = get_n_cpus();
//ust// ltt_chan->buf = percpu_alloc_mask(sizeof(struct ltt_channel_buf_struct), GFP_KERNEL, cpu_possible_map);
- ltt_chan->buf = (void *) malloc(ltt_chan->n_cpus * sizeof(void *));
+	ltt_chan->buf = zmalloc(ltt_chan->n_cpus * sizeof(void *));
if(ltt_chan->buf == NULL) {
goto error;
}
- ltt_chan->buf_struct_shmids = (int *) malloc(ltt_chan->n_cpus * sizeof(int));
+	ltt_chan->buf_struct_shmids = zmalloc(ltt_chan->n_cpus * sizeof(int));
if(ltt_chan->buf_struct_shmids == NULL)
goto free_buf;
return -1;
}
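
The malloc-to-zmalloc switch above zero-fills the freshly allocated
arrays. Assuming ust's conventional definition (not introduced by this
patch), zmalloc() is simply a one-element calloc():

    /* Assumed ust convention: zero-filled allocation, so unset
     * per-cpu slots reliably read back as NULL/0. */
    #define zmalloc(size) calloc(1, (size))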
-/*
- * LTTng channel flush function.
- *
- * Must be called when no tracing is active in the channel, because of
- * accesses across CPUs.
- */
-static notrace void ltt_relay_buffer_flush(struct ust_buffer *buf)
-{
- int result;
-
-//ust// buf->finalized = 1;
- ltt_force_switch(buf, FORCE_FLUSH);
-
- result = write(buf->data_ready_fd_write, "1", 1);
- if(result == -1) {
- PERROR("write (in ltt_relay_buffer_flush)");
- ERR("this should never happen!");
- }
-}
-
static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel)
{
//ust// unsigned int i;
if (channel->buf[cpu]) {
struct ust_buffer *buf = channel->buf[cpu];
- ltt_relay_buffer_flush(buf);
+ ltt_force_switch(buf, FORCE_FLUSH);
//ust// ltt_relay_wake_writers(ltt_buf);
/* closing the pipe tells the consumer the buffer is finished */
static void ltt_relay_remove_channel(struct ust_channel *channel)
{
ust_buffers_channel_close(channel);
- kref_put(&channel->kref, ltt_relay_release_channel);
+ urcu_ref_put(&channel->urcu_ref, ltt_relay_release_channel);
}
/*
/*
* Must write slot data before incrementing commit count.
- * This compiler barrier is upgraded into a smp_wmb() by the IPI
- * sent by get_subbuf() when it does its smp_rmb().
+ * This cmm_smp_wmb() pairs with the cmm_smp_rmb() performed by the
+ * reader in get_subbuf(); in userspace no IPI promotes a plain
+ * compiler barrier, so a real write barrier is issued here.
*/
- barrier();
+ cmm_smp_wmb();
uatomic_add(&buf->commit_count[oldidx].cc, padding_size);
commit_count = uatomic_read(&buf->commit_count[oldidx].cc);
ltt_check_deliver(chan, buf, offsets->old - 1, commit_count, oldidx);
/*
* Must write slot data before incrementing commit count.
- * This compiler barrier is upgraded into a smp_wmb() by the IPI
- * sent by get_subbuf() when it does its smp_rmb().
+ * This cmm_smp_wmb() pairs with the cmm_smp_rmb() performed by the
+ * reader in get_subbuf(); in userspace no IPI promotes a plain
+ * compiler barrier, so a real write barrier is issued here.
*/
- barrier();
+ cmm_smp_wmb();
uatomic_add(&buf->commit_count[beginidx].cc, ltt_subbuffer_header_size());
commit_count = uatomic_read(&buf->commit_count[beginidx].cc);
/* Check if the written buffer has to be delivered */
/*
* Must write slot data before incrementing commit count.
- * This compiler barrier is upgraded into a smp_wmb() by the IPI
- * sent by get_subbuf() when it does its smp_rmb().
+ * This cmm_smp_wmb() pairs with the cmm_smp_rmb() performed by the
+ * reader in get_subbuf(); in userspace no IPI promotes a plain
+ * compiler barrier, so a real write barrier is issued here.
*/
- barrier();
+ cmm_smp_wmb();
uatomic_add(&buf->commit_count[endidx].cc, padding_size);
commit_count = uatomic_read(&buf->commit_count[endidx].cc);
ltt_check_deliver(chan, buf,