convert to uatomic ops
author     Pierre-Marc Fournier <pierre-marc.fournier@polymtl.ca>
           Thu, 4 Mar 2010 16:33:09 +0000 (11:33 -0500)
committer  Pierre-Marc Fournier <pierre-marc.fournier@polymtl.ca>
           Thu, 4 Mar 2010 16:33:09 +0000 (11:33 -0500)
libust/buffers.c
libust/buffers.h
libust/channels.c
libust/tracectl.c
ustd/lowlevel.c
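
This commit converts libust's ring-buffer bookkeeping from the kernel-style local_t / atomic_long_t accessors (local_read, local_add, local_cmpxchg, atomic_long_read, ...) to liburcu's uatomic operations on plain long fields. Unlike local_t, the uatomic primitives are true cross-CPU atomics, which is why the comments restricting ACTIVE flushes to the buffer-owning CPU are dropped below, and why ustd can read the same fields from its own process (see ustd/lowlevel.c). Note the reversed argument order: local_add(value, &addr) becomes uatomic_add(&addr, value). The sketch below only illustrates the target API and is not part of the patch; the struct and function are invented for the example, and the header name follows the include added to libust/tracectl.c (newer liburcu releases also install it as <urcu/uatomic.h>).

    #include <urcu/uatomic_arch.h>

    struct example_counters {
            long offset;                 /* accessed only through uatomic_* */
            long events_lost;
    };

    static void example(struct example_counters *c, long old, long end)
    {
            long cur;

            uatomic_set(&c->offset, 0);              /* atomic store */
            cur = uatomic_read(&c->offset);          /* atomic load */
            uatomic_add(&c->offset, 8);              /* note the (addr, value) order */
            uatomic_inc(&c->events_lost);
            /* uatomic_cmpxchg() returns the value *addr held before the swap */
            if (uatomic_cmpxchg(&c->offset, old, end) != old) {
                    /* another writer updated the offset first; caller retries */
            }
            (void)cur;                               /* silence unused-variable warning */
    }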

diff --git a/libust/buffers.c b/libust/buffers.c
index 206d5e7404f75a793ae3ae949dcdf23c6081022b..c487cbfdb57f978021c01cdd04ba2a63f8dd64c4 100644
--- a/libust/buffers.c
+++ b/libust/buffers.c
@@ -329,8 +329,8 @@ static notrace void ltt_buffer_end(struct ust_buffer *buf,
        header->data_size = data_size;
        header->sb_size = PAGE_ALIGN(data_size);
        header->cycle_count_end = tsc;
-       header->events_lost = local_read(&buf->events_lost);
-       header->subbuf_corrupt = local_read(&buf->corrupted_subbuffers);
+       header->events_lost = uatomic_read(&buf->events_lost);
+       header->subbuf_corrupt = uatomic_read(&buf->corrupted_subbuffers);
        if(unlikely(header->events_lost > 0)) {
                DBG("Some events (%d) were lost in %s_%d", header->events_lost, buf->chan->channel_name, buf->cpu);
        }
@@ -362,9 +362,9 @@ int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
        long consumed_old, consumed_idx, commit_count, write_offset;
 //ust//        int retval;
 
-       consumed_old = atomic_long_read(&buf->consumed);
+       consumed_old = uatomic_read(&buf->consumed);
        consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
-       commit_count = local_read(&buf->commit_count[consumed_idx].cc_sb);
+       commit_count = uatomic_read(&buf->commit_count[consumed_idx].cc_sb);
        /*
         * Make sure we read the commit count before reading the buffer
         * data and the write offset. Correct consumed offset ordering
@@ -416,7 +416,7 @@ int ust_buffers_get_subbuf(struct ust_buffer *buf, long *consumed)
 //ust//        }
 //ust// #endif
 
-       write_offset = local_read(&buf->offset);
+       write_offset = uatomic_read(&buf->offset);
        /*
         * Check that the subbuffer we are trying to consume has been
         * already fully committed.
@@ -452,13 +452,13 @@ int ust_buffers_put_subbuf(struct ust_buffer *buf, unsigned long uconsumed_old)
 {
        long consumed_new, consumed_old;
 
-       consumed_old = atomic_long_read(&buf->consumed);
+       consumed_old = uatomic_read(&buf->consumed);
        consumed_old = consumed_old & (~0xFFFFFFFFL);
        consumed_old = consumed_old | uconsumed_old;
        consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
 
 //ust//        spin_lock(&ltt_buf->full_lock);
-       if (atomic_long_cmpxchg(&buf->consumed, consumed_old,
+       if (uatomic_cmpxchg(&buf->consumed, consumed_old,
                                consumed_new)
            != consumed_old) {
                /* We have been pushed by the writer : the last
@@ -557,7 +557,7 @@ int ust_buffers_put_subbuf(struct ust_buffer *buf, unsigned long uconsumed_old)
 //ust//        long cons_idx, events_count;
 //ust//
 //ust//        cons_idx = SUBBUF_INDEX(cons_off, chan);
-//ust//        events_count = local_read(&buf->commit_count[cons_idx].events);
+//ust//        events_count = uatomic_read(&buf->commit_count[cons_idx].events);
 //ust//
 //ust//        if (events_count)
 //ust//                printk(KERN_INFO
@@ -573,14 +573,14 @@ static void ltt_relay_print_subbuffer_errors(
        long cons_idx, commit_count, commit_count_sb, write_offset;
 
        cons_idx = SUBBUF_INDEX(cons_off, channel);
-       commit_count = local_read(&ltt_buf->commit_count[cons_idx].cc);
-       commit_count_sb = local_read(&ltt_buf->commit_count[cons_idx].cc_sb);
+       commit_count = uatomic_read(&ltt_buf->commit_count[cons_idx].cc);
+       commit_count_sb = uatomic_read(&ltt_buf->commit_count[cons_idx].cc_sb);
 
        /*
         * No need to order commit_count and write_offset reads because we
         * execute after trace is stopped when there are no readers left.
         */
-       write_offset = local_read(&ltt_buf->offset);
+       write_offset = uatomic_read(&ltt_buf->offset);
        WARN( "LTT : unread channel %s offset is %ld "
                "and cons_off : %ld (cpu %d)\n",
                channel->channel_name, write_offset, cons_off, cpu);
@@ -612,8 +612,8 @@ static void ltt_relay_print_errors(struct ust_trace *trace,
 //ust//        for (cons_off = 0; cons_off < rchan->alloc_size;
 //ust//             cons_off = SUBBUF_ALIGN(cons_off, rchan))
 //ust//                ust_buffers_print_written(ltt_chan, cons_off, cpu);
-       for (cons_off = atomic_long_read(&ltt_buf->consumed);
-                       (SUBBUF_TRUNC(local_read(&ltt_buf->offset),
+       for (cons_off = uatomic_read(&ltt_buf->consumed);
+                       (SUBBUF_TRUNC(uatomic_read(&ltt_buf->offset),
                                      channel)
                         - cons_off) > 0;
                        cons_off = SUBBUF_ALIGN(cons_off, channel))
@@ -625,14 +625,14 @@ static void ltt_relay_print_buffer_errors(struct ust_channel *channel, int cpu)
        struct ust_trace *trace = channel->trace;
        struct ust_buffer *ltt_buf = channel->buf[cpu];
 
-       if (local_read(&ltt_buf->events_lost))
+       if (uatomic_read(&ltt_buf->events_lost))
                ERR("channel %s: %ld events lost (cpu %d)",
                        channel->channel_name,
-                       local_read(&ltt_buf->events_lost), cpu);
-       if (local_read(&ltt_buf->corrupted_subbuffers))
+                       uatomic_read(&ltt_buf->events_lost), cpu);
+       if (uatomic_read(&ltt_buf->corrupted_subbuffers))
                ERR("channel %s : %ld corrupted subbuffers (cpu %d)",
                        channel->channel_name,
-                       local_read(&ltt_buf->corrupted_subbuffers), cpu);
+                       uatomic_read(&ltt_buf->corrupted_subbuffers), cpu);
 
        ltt_relay_print_errors(trace, channel, cpu);
 }
@@ -663,22 +663,22 @@ static void ltt_relay_release_channel(struct kref *kref)
 //ust//        kref_get(&trace->kref);
 //ust//        kref_get(&trace->ltt_transport_kref);
 //ust//        kref_get(&ltt_chan->kref);
-//ust//        local_set(&ltt_buf->offset, ltt_subbuffer_header_size());
-//ust//        atomic_long_set(&ltt_buf->consumed, 0);
-//ust//        atomic_long_set(&ltt_buf->active_readers, 0);
+//ust//        uatomic_set(&ltt_buf->offset, ltt_subbuffer_header_size());
+//ust//        uatomic_set(&ltt_buf->consumed, 0);
+//ust//        uatomic_set(&ltt_buf->active_readers, 0);
 //ust//        for (j = 0; j < n_subbufs; j++)
-//ust//                local_set(&ltt_buf->commit_count[j], 0);
+//ust//                uatomic_set(&ltt_buf->commit_count[j], 0);
 //ust//        init_waitqueue_head(&ltt_buf->write_wait);
-//ust//        atomic_set(&ltt_buf->wakeup_readers, 0);
+//ust//        uatomic_set(&ltt_buf->wakeup_readers, 0);
 //ust//        spin_lock_init(&ltt_buf->full_lock);
 //ust//
 //ust//        ltt_buffer_begin_callback(buf, trace->start_tsc, 0);
 //ust//        /* atomic_add made on local variable on data that belongs to
 //ust//         * various CPUs : ok because tracing not started (for this cpu). */
-//ust//        local_add(ltt_subbuffer_header_size(), &ltt_buf->commit_count[0]);
+//ust//        uatomic_add(&ltt_buf->commit_count[0], ltt_subbuffer_header_size());
 //ust//
-//ust//        local_set(&ltt_buf->events_lost, 0);
-//ust//        local_set(&ltt_buf->corrupted_subbuffers, 0);
+//ust//        uatomic_set(&ltt_buf->events_lost, 0);
+//ust//        uatomic_set(&ltt_buf->corrupted_subbuffers, 0);
 //ust//
 //ust//        return 0;
 //ust// }
@@ -698,23 +698,23 @@ static int ust_buffers_init_buffer(struct ust_trace *trace,
        kref_get(&trace->kref);
        kref_get(&trace->ltt_transport_kref);
        kref_get(&ltt_chan->kref);
-       local_set(&buf->offset, ltt_subbuffer_header_size());
-       atomic_long_set(&buf->consumed, 0);
-       atomic_long_set(&buf->active_readers, 0);
+       uatomic_set(&buf->offset, ltt_subbuffer_header_size());
+       uatomic_set(&buf->consumed, 0);
+       uatomic_set(&buf->active_readers, 0);
        for (j = 0; j < n_subbufs; j++) {
-               local_set(&buf->commit_count[j].cc, 0);
-               local_set(&buf->commit_count[j].cc_sb, 0);
+               uatomic_set(&buf->commit_count[j].cc, 0);
+               uatomic_set(&buf->commit_count[j].cc_sb, 0);
        }
 //ust//        init_waitqueue_head(&buf->write_wait);
-//ust//        atomic_set(&buf->wakeup_readers, 0);
+//ust//        uatomic_set(&buf->wakeup_readers, 0);
 //ust//        spin_lock_init(&buf->full_lock);
 
        ltt_buffer_begin(buf, trace->start_tsc, 0);
 
-       local_add(ltt_subbuffer_header_size(), &buf->commit_count[0].cc);
+       uatomic_add(&buf->commit_count[0].cc, ltt_subbuffer_header_size());
 
-       local_set(&buf->events_lost, 0);
-       local_set(&buf->corrupted_subbuffers, 0);
+       uatomic_set(&buf->events_lost, 0);
+       uatomic_set(&buf->corrupted_subbuffers, 0);
 
        result = pipe(fds);
        if(result == -1) {
@@ -893,8 +893,8 @@ static void ltt_relay_async_wakeup_chan(struct ust_channel *ltt_channel)
 //ust//                struct ltt_channel_buf_struct *ltt_buf =
 //ust//                        percpu_ptr(ltt_channel->buf, i);
 //ust//
-//ust//                if (atomic_read(&ltt_buf->wakeup_readers) == 1) {
-//ust//                        atomic_set(&ltt_buf->wakeup_readers, 0);
+//ust//                if (uatomic_read(&ltt_buf->wakeup_readers) == 1) {
+//ust//                        uatomic_set(&ltt_buf->wakeup_readers, 0);
 //ust//                        wake_up_interruptible(&rchan->buf[i]->read_wait);
 //ust//                }
 //ust//        }
@@ -945,7 +945,7 @@ static void ltt_relay_remove_channel(struct ust_channel *channel)
 //ust//                struct ltt_reserve_switch_offsets *offsets, size_t data_size,
 //ust//                u64 *tsc, unsigned int *rflags, int largest_align)
 //ust// {
-//ust//        offsets->begin = local_read(&buf->offset);
+//ust//        offsets->begin = uatomic_read(&buf->offset);
 //ust//        offsets->old = offsets->begin;
 //ust//        offsets->begin_switch = 0;
 //ust//        offsets->end_switch_current = 0;
@@ -982,12 +982,12 @@ static void ltt_relay_remove_channel(struct ust_channel *channel)
 //ust//                offsets->reserve_commit_diff =
 //ust//                        (BUFFER_TRUNC(offsets->begin, buf->chan)
 //ust//                         >> channel->n_subbufs_order)
-//ust//                        - (local_read(&buf->commit_count[subbuf_index])
+//ust//                        - (uatomic_read(&buf->commit_count[subbuf_index])
 //ust//                                & channel->commit_count_mask);
 //ust//                if (offsets->reserve_commit_diff == 0) {
 //ust//                        long consumed;
 //ust//
-//ust//                        consumed = atomic_long_read(&buf->consumed);
+//ust//                        consumed = uatomic_read(&buf->consumed);
 //ust//
 //ust//                        /* Next buffer not corrupted. */
 //ust//                        if (!channel->overwrite &&
@@ -996,7 +996,7 @@ static void ltt_relay_remove_channel(struct ust_channel *channel)
 //ust//                                >= channel->alloc_size) {
 //ust//
 //ust//                                long consumed_idx = SUBBUF_INDEX(consumed, buf->chan);
-//ust//                                long commit_count = local_read(&buf->commit_count[consumed_idx]);
+//ust//                                long commit_count = uatomic_read(&buf->commit_count[consumed_idx]);
 //ust//                                if(((commit_count - buf->chan->subbuf_size) & channel->commit_count_mask) - (BUFFER_TRUNC(consumed, buf->chan) >> channel->n_subbufs_order) != 0) {
 //ust//                                        WARN("Event dropped. Caused by non-committed event.");
 //ust//                                }
@@ -1007,7 +1007,7 @@ static void ltt_relay_remove_channel(struct ust_channel *channel)
 //ust//                                 * We do not overwrite non consumed buffers
 //ust//                                 * and we are full : event is lost.
 //ust//                                 */
-//ust//                                local_inc(&buf->events_lost);
+//ust//                                uatomic_inc(&buf->events_lost);
 //ust//                                return -1;
 //ust//                        } else {
 //ust//                                /*
@@ -1035,7 +1035,7 @@ static void ltt_relay_remove_channel(struct ust_channel *channel)
 //ust//                         * Event too big for subbuffers, report error, don't
 //ust//                         * complete the sub-buffer switch.
 //ust//                         */
-//ust//                        local_inc(&buf->events_lost);
+//ust//                        uatomic_inc(&buf->events_lost);
 //ust//                        return -1;
 //ust//                } else {
 //ust//                        /*
@@ -1075,7 +1075,7 @@ static void ltt_relay_remove_channel(struct ust_channel *channel)
 //ust// {
 //ust//        long subbuf_index;
 //ust//
-//ust//        offsets->begin = local_read(&buf->offset);
+//ust//        offsets->begin = uatomic_read(&buf->offset);
 //ust//        offsets->old = offsets->begin;
 //ust//        offsets->begin_switch = 0;
 //ust//        offsets->end_switch_old = 0;
@@ -1099,13 +1099,13 @@ static void ltt_relay_remove_channel(struct ust_channel *channel)
 //ust//        offsets->reserve_commit_diff =
 //ust//                (BUFFER_TRUNC(offsets->begin, buf->chan)
 //ust//                 >> channel->n_subbufs_order)
-//ust//                - (local_read(&buf->commit_count[subbuf_index])
+//ust//                - (uatomic_read(&buf->commit_count[subbuf_index])
 //ust//                        & channel->commit_count_mask);
 //ust//        if (offsets->reserve_commit_diff == 0) {
 //ust//                /* Next buffer not corrupted. */
 //ust//                if (mode == FORCE_ACTIVE
 //ust//                    && !channel->overwrite
-//ust//                    && offsets->begin - atomic_long_read(&buf->consumed)
+//ust//                    && offsets->begin - uatomic_read(&buf->consumed)
 //ust//                       >= channel->alloc_size) {
 //ust//                        /*
 //ust//                         * We do not overwrite non consumed buffers and we are
@@ -1131,7 +1131,7 @@ static void ltt_relay_remove_channel(struct ust_channel *channel)
 //ust//        long consumed_old, consumed_new;
 //ust//
 //ust//        do {
-//ust//                consumed_old = atomic_long_read(&buf->consumed);
+//ust//                consumed_old = uatomic_read(&buf->consumed);
 //ust//                /*
 //ust//                 * If buffer is in overwrite mode, push the reader consumed
 //ust//                 * count if the write position has reached it and we are not
@@ -1151,7 +1151,7 @@ static void ltt_relay_remove_channel(struct ust_channel *channel)
 //ust//                        consumed_new = consumed_old;
 //ust//                        break;
 //ust//                }
-//ust//        } while (atomic_long_cmpxchg(&buf->consumed, consumed_old,
+//ust//        } while (uatomic_cmpxchg(&buf->consumed, consumed_old,
 //ust//                        consumed_new) != consumed_old);
 //ust//
 //ust//        if (consumed_old != consumed_new) {
@@ -1173,10 +1173,8 @@ static void ltt_relay_remove_channel(struct ust_channel *channel)
 //ust//                         * was either corrupted or not consumed (overwrite
 //ust//                         * mode).
 //ust//                         */
-//ust//                        local_add(offsets->reserve_commit_diff,
-//ust//                                  &buf->commit_count[
-//ust//                                        SUBBUF_INDEX(offsets->begin,
-//ust//                                                     buf->chan)]);
+//ust//                        uatomic_add(&buf->commit_count[SUBBUF_INDEX(offsets->begin, buf->chan)],
+//ust//                                        offsets->reserve_commit_diff);
 //ust//                        if (!channel->overwrite
 //ust//                            || offsets->reserve_commit_diff
 //ust//                               != channel->subbuf_size) {
@@ -1188,7 +1186,7 @@ static void ltt_relay_remove_channel(struct ust_channel *channel)
 //ust//                                 * recorder mode, we are skipping over a whole
 //ust//                                 * subbuffer.
 //ust//                                 */
-//ust//                                local_inc(&buf->corrupted_subbuffers);
+//ust//                                uatomic_inc(&buf->corrupted_subbuffers);
 //ust//                        }
 //ust//                }
 //ust//        }
@@ -1223,14 +1221,14 @@ static void ltt_relay_remove_channel(struct ust_channel *channel)
 //ust//         * Perform retryable operations.
 //ust//         */
 //ust//        if (ltt_nesting > 4) {
-//ust//                local_inc(&buf->events_lost);
+//ust//                uatomic_inc(&buf->events_lost);
 //ust//                return -EPERM;
 //ust//        }
 //ust//        do {
 //ust//                if (ltt_relay_try_reserve(channel, buf, &offsets, data_size, tsc, rflags,
 //ust//                                largest_align))
 //ust//                        return -ENOSPC;
-//ust//        } while (local_cmpxchg(&buf->offset, offsets.old,
+//ust//        } while (uatomic_cmpxchg(&buf->offset, offsets.old,
 //ust//                        offsets.end) != offsets.old);
 //ust//
 //ust//        /*
@@ -1270,10 +1268,6 @@ static void ltt_relay_remove_channel(struct ust_channel *channel)
 //ust//  * Force a sub-buffer switch for a per-cpu buffer. This operation is
 //ust//  * completely reentrant : can be called while tracing is active with
 //ust//  * absolutely no lock held.
-//ust//  *
-//ust//  * Note, however, that as a local_cmpxchg is used for some atomic
-//ust//  * operations, this function must be called from the CPU which owns the buffer
-//ust//  * for a ACTIVE flush.
 //ust//  */
 //ust// static notrace void ltt_force_switch(struct ust_buffer *buf,
 //ust//                enum force_switch_mode mode)
@@ -1291,7 +1285,7 @@ static void ltt_relay_remove_channel(struct ust_channel *channel)
 //ust//        do {
 //ust//                if (ltt_relay_try_switch(mode, channel, buf, &offsets, &tsc))
 //ust//                        return;
-//ust//        } while (local_cmpxchg(&buf->offset, offsets.old,
+//ust//        } while (uatomic_cmpxchg(&buf->offset, offsets.old,
 //ust//                        offsets.end) != offsets.old);
 //ust//
 //ust//        /*
@@ -1358,9 +1352,8 @@ static void ltt_reserve_switch_old_subbuf(
         * sent by get_subbuf() when it does its smp_rmb().
         */
        barrier();
-       local_add(padding_size,
-                 &buf->commit_count[oldidx].cc);
-       commit_count = local_read(&buf->commit_count[oldidx].cc);
+       uatomic_add(&buf->commit_count[oldidx].cc, padding_size);
+       commit_count = uatomic_read(&buf->commit_count[oldidx].cc);
        ltt_check_deliver(chan, buf, offsets->old - 1, commit_count, oldidx);
        ltt_write_commit_counter(chan, buf, oldidx,
                offsets->old, commit_count, padding_size);
@@ -1388,9 +1381,8 @@ static void ltt_reserve_switch_new_subbuf(
         * sent by get_subbuf() when it does its smp_rmb().
         */
        barrier();
-       local_add(ltt_subbuffer_header_size(),
-                 &buf->commit_count[beginidx].cc);
-       commit_count = local_read(&buf->commit_count[beginidx].cc);
+       uatomic_add(&buf->commit_count[beginidx].cc, ltt_subbuffer_header_size());
+       commit_count = uatomic_read(&buf->commit_count[beginidx].cc);
        /* Check if the written buffer has to be delivered */
        ltt_check_deliver(chan, buf, offsets->begin, commit_count, beginidx);
        ltt_write_commit_counter(chan, buf, beginidx,
@@ -1434,9 +1426,8 @@ static void ltt_reserve_end_switch_current(
         * sent by get_subbuf() when it does its smp_rmb().
         */
        barrier();
-       local_add(padding_size,
-                 &buf->commit_count[endidx].cc);
-       commit_count = local_read(&buf->commit_count[endidx].cc);
+       uatomic_add(&buf->commit_count[endidx].cc, padding_size);
+       commit_count = uatomic_read(&buf->commit_count[endidx].cc);
        ltt_check_deliver(chan, buf,
                offsets->end - 1, commit_count, endidx);
        ltt_write_commit_counter(chan, buf, endidx,
@@ -1458,7 +1449,7 @@ static int ltt_relay_try_switch_slow(
        long subbuf_index;
        long reserve_commit_diff;
 
-       offsets->begin = local_read(&buf->offset);
+       offsets->begin = uatomic_read(&buf->offset);
        offsets->old = offsets->begin;
        offsets->begin_switch = 0;
        offsets->end_switch_old = 0;
@@ -1482,13 +1473,13 @@ static int ltt_relay_try_switch_slow(
        reserve_commit_diff =
                (BUFFER_TRUNC(offsets->begin, buf->chan)
                 >> chan->n_subbufs_order)
-               - (local_read(&buf->commit_count[subbuf_index].cc_sb)
+               - (uatomic_read(&buf->commit_count[subbuf_index].cc_sb)
                        & chan->commit_count_mask);
        if (reserve_commit_diff == 0) {
                /* Next buffer not corrupted. */
                if (mode == FORCE_ACTIVE
                    && !chan->overwrite
-                   && offsets->begin - atomic_long_read(&buf->consumed)
+                   && offsets->begin - uatomic_read(&buf->consumed)
                       >= chan->alloc_size) {
                        /*
                         * We do not overwrite non consumed buffers and we are
@@ -1510,10 +1501,6 @@ static int ltt_relay_try_switch_slow(
  * Force a sub-buffer switch for a per-cpu buffer. This operation is
  * completely reentrant : can be called while tracing is active with
  * absolutely no lock held.
- *
- * Note, however, that as a local_cmpxchg is used for some atomic
- * operations, this function must be called from the CPU which owns the buffer
- * for a ACTIVE flush.
  */
 void ltt_force_switch_lockless_slow(struct ust_buffer *buf,
                enum force_switch_mode mode)
@@ -1532,7 +1519,7 @@ void ltt_force_switch_lockless_slow(struct ust_buffer *buf,
                if (ltt_relay_try_switch_slow(mode, chan, buf,
                                &offsets, &tsc))
                        return;
-       } while (local_cmpxchg(&buf->offset, offsets.old,
+       } while (uatomic_cmpxchg(&buf->offset, offsets.old,
                        offsets.end) != offsets.old);
 
        /*
@@ -1577,7 +1564,7 @@ static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffe
 {
        long reserve_commit_diff;
 
-       offsets->begin = local_read(&buf->offset);
+       offsets->begin = uatomic_read(&buf->offset);
        offsets->old = offsets->begin;
        offsets->begin_switch = 0;
        offsets->end_switch_current = 0;
@@ -1617,13 +1604,13 @@ static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffe
                reserve_commit_diff =
                  (BUFFER_TRUNC(offsets->begin, buf->chan)
                   >> chan->n_subbufs_order)
-                 - (local_read(&buf->commit_count[subbuf_index].cc_sb)
+                 - (uatomic_read(&buf->commit_count[subbuf_index].cc_sb)
                                & chan->commit_count_mask);
                if (likely(reserve_commit_diff == 0)) {
                        /* Next buffer not corrupted. */
                        if (unlikely(!chan->overwrite &&
                                (SUBBUF_TRUNC(offsets->begin, buf->chan)
-                                - SUBBUF_TRUNC(atomic_long_read(
+                                - SUBBUF_TRUNC(uatomic_read(
                                                        &buf->consumed),
                                                buf->chan))
                                >= chan->alloc_size)) {
@@ -1631,7 +1618,7 @@ static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffe
                                 * We do not overwrite non consumed buffers
                                 * and we are full : event is lost.
                                 */
-                               local_inc(&buf->events_lost);
+                               uatomic_inc(&buf->events_lost);
                                return -1;
                        } else {
                                /*
@@ -1646,7 +1633,7 @@ static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffe
                         * overwrite mode. Caused by either a writer OOPS or
                         * too many nested writes over a reserve/commit pair.
                         */
-                       local_inc(&buf->events_lost);
+                       uatomic_inc(&buf->events_lost);
                        return -1;
                }
                offsets->size = ust_get_header_size(chan,
@@ -1661,7 +1648,7 @@ static int ltt_relay_try_reserve_slow(struct ust_channel *chan, struct ust_buffe
                         * Event too big for subbuffers, report error, don't
                         * complete the sub-buffer switch.
                         */
-                       local_inc(&buf->events_lost);
+                       uatomic_inc(&buf->events_lost);
                        return -1;
                } else {
                        /*
@@ -1715,7 +1702,7 @@ int ltt_reserve_slot_lockless_slow(struct ust_trace *trace,
                if (unlikely(ltt_relay_try_reserve_slow(chan, buf, &offsets,
                                data_size, tsc, rflags, largest_align)))
                        return -ENOSPC;
-       } while (unlikely(local_cmpxchg(&buf->offset, offsets.old,
+       } while (unlikely(uatomic_cmpxchg(&buf->offset, offsets.old,
                        offsets.end) != offsets.old));
 
        /*
diff --git a/libust/buffers.h b/libust/buffers.h
index a2f17a9f78d01e83f3e1ee2a5d4e7eb40e92f081..90ca5d92f99f7b0166393701d5c7d6233e74e8ae 100644
--- a/libust/buffers.h
+++ b/libust/buffers.h
 /**************************************/
 
 struct commit_counters {
-       local_t cc;
-       local_t cc_sb;                  /* Incremented _once_ at sb switch */
+       long cc;                        /* ATOMIC */
+       long cc_sb;                     /* ATOMIC - Incremented _once_ at sb switch */
 };
 
 struct ust_buffer {
        /* First 32 bytes cache-hot cacheline */
-       local_t offset;                 /* Current offset in the buffer */
+       long offset;                    /* Current offset in the buffer *atomic* */
        struct commit_counters *commit_count;   /* Commit count per sub-buffer */
-       atomic_long_t consumed;         /*
-                                        * Current offset in the buffer
-                                        * standard atomic access (shared)
-                                        */
+       long consumed;                  /* Current offset in the buffer *atomic* access (shared) */
        unsigned long last_tsc;         /*
                                         * Last timestamp written in the buffer.
                                         */
        /* End of first 32 bytes cacheline */
-       atomic_long_t active_readers;   /*
-                                        * Active readers count
-                                        * standard atomic access (shared)
-                                        */
-       local_t events_lost;
-       local_t corrupted_subbuffers;
+       long active_readers;    /* ATOMIC - Active readers count standard atomic access (shared) */
+       long events_lost;       /* ATOMIC */
+       long corrupted_subbuffers; /* *ATOMIC* */
        /* one byte is written to this pipe when data is available, in order
            to wake the consumer */
        /* portability: Single byte writes must be as quick as possible. The kernel-side
@@ -86,7 +80,7 @@ struct ust_buffer {
        unsigned int cpu;
 
        /* commit count per subbuffer; must be at end of struct */
-       local_t commit_seq[0] ____cacheline_aligned;
+       long commit_seq[0] ____cacheline_aligned; /* ATOMIC */
 } ____cacheline_aligned;
 
 /*
@@ -184,7 +178,7 @@ static __inline__ void ltt_reserve_push_reader(
        long consumed_old, consumed_new;
 
        do {
-               consumed_old = atomic_long_read(&buf->consumed);
+               consumed_old = uatomic_read(&buf->consumed);
                /*
                 * If buffer is in overwrite mode, push the reader consumed
                 * count if the write position has reached it and we are not
@@ -202,7 +196,7 @@ static __inline__ void ltt_reserve_push_reader(
                        consumed_new = SUBBUF_ALIGN(consumed_old, buf->chan);
                else
                        return;
-       } while (unlikely(atomic_long_cmpxchg(&buf->consumed, consumed_old,
+       } while (unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
                        consumed_new) != consumed_old));
 }
 
@@ -210,7 +204,7 @@ static __inline__ void ltt_vmcore_check_deliver(
                struct ust_buffer *buf,
                long commit_count, long idx)
 {
-       local_set(&buf->commit_seq[idx], commit_count);
+       uatomic_set(&buf->commit_seq[idx], commit_count);
 }
 
 static __inline__ void ltt_check_deliver(struct ust_channel *chan,
@@ -230,7 +224,7 @@ static __inline__ void ltt_check_deliver(struct ust_channel *chan,
                 * value without adding a add_return atomic operation to the
                 * fast path.
                 */
-               if (likely(local_cmpxchg(&buf->commit_count[idx].cc_sb,
+               if (likely(uatomic_cmpxchg(&buf->commit_count[idx].cc_sb,
                                         old_commit_count, commit_count)
                           == old_commit_count)) {
                        int result;
@@ -255,16 +249,16 @@ static __inline__ int ltt_poll_deliver(struct ust_channel *chan, struct ust_buff
 {
        long consumed_old, consumed_idx, commit_count, write_offset;
 
-       consumed_old = atomic_long_read(&buf->consumed);
+       consumed_old = uatomic_read(&buf->consumed);
        consumed_idx = SUBBUF_INDEX(consumed_old, buf->chan);
-       commit_count = local_read(&buf->commit_count[consumed_idx].cc_sb);
+       commit_count = uatomic_read(&buf->commit_count[consumed_idx].cc_sb);
        /*
         * No memory barrier here, since we are only interested
         * in a statistically correct polling result. The next poll will
         * get the data if we are racing. The mb() that ensures correct
         * memory order is in get_subbuf.
         */
-       write_offset = local_read(&buf->offset);
+       write_offset = uatomic_read(&buf->offset);
 
        /*
         * Check that the subbuffer we are trying to consume has been
@@ -302,7 +296,7 @@ static __inline__ int ltt_relay_try_reserve(
                long *o_begin, long *o_end, long *o_old,
                size_t *before_hdr_pad, size_t *size)
 {
-       *o_begin = local_read(&buf->offset);
+       *o_begin = uatomic_read(&buf->offset);
        *o_old = *o_begin;
 
        *tsc = trace_clock_read64();
@@ -358,7 +352,7 @@ static __inline__ int ltt_reserve_slot(struct ust_trace *trace,
        /* FIXME: make this really per cpu? */
        if (unlikely(LOAD_SHARED(ltt_nesting) > 4)) {
                DBG("Dropping event because nesting is too deep.");
-               local_inc(&buf->events_lost);
+               uatomic_inc(&buf->events_lost);
                return -EPERM;
        }
 
@@ -368,7 +362,7 @@ static __inline__ int ltt_reserve_slot(struct ust_trace *trace,
                        &before_hdr_pad, slot_size)))
                goto slow_path;
 
-       if (unlikely(local_cmpxchg(&buf->offset, o_old, o_end) != o_old))
+       if (unlikely(uatomic_cmpxchg(&buf->offset, o_old, o_end) != o_old))
                goto slow_path;
 
        /*
@@ -401,10 +395,6 @@ slow_path:
  * Force a sub-buffer switch for a per-cpu buffer. This operation is
  * completely reentrant : can be called while tracing is active with
  * absolutely no lock held.
- *
- * Note, however, that as a local_cmpxchg is used for some atomic
- * operations, this function must be called from the CPU which owns the buffer
- * for a ACTIVE flush.
  */
 static __inline__ void ltt_force_switch(struct ust_buffer *buf,
                enum force_switch_mode mode)
@@ -437,9 +427,9 @@ static __inline__ void ltt_write_commit_counter(struct ust_channel *chan,
        if (unlikely(SUBBUF_OFFSET(offset - commit_count, buf->chan)))
                return;
 
-       commit_seq_old = local_read(&buf->commit_seq[idx]);
+       commit_seq_old = uatomic_read(&buf->commit_seq[idx]);
        while (commit_seq_old < commit_count)
-               commit_seq_old = local_cmpxchg(&buf->commit_seq[idx],
+               commit_seq_old = uatomic_cmpxchg(&buf->commit_seq[idx],
                                         commit_seq_old, commit_count);
 
        DBG("commit_seq for channel %s_%d, subbuf %ld is now %ld", buf->chan->channel_name, buf->cpu, idx, commit_count);
@@ -482,7 +472,7 @@ static __inline__ void ltt_commit_slot(
         */
        barrier();
 #endif
-       local_add(slot_size, &buf->commit_count[endidx].cc);
+       uatomic_add(&buf->commit_count[endidx].cc, slot_size);
        /*
         * commit count read can race with concurrent OOO commit count updates.
         * This is only needed for ltt_check_deliver (for non-polling delivery
@@ -492,7 +482,7 @@ static __inline__ void ltt_commit_slot(
         * - Multiple delivery for the same sub-buffer (which is handled
         *   gracefully by the reader code) if the value is for a full
         *   sub-buffer. It's important that we can never miss a sub-buffer
-        *   delivery. Re-reading the value after the local_add ensures this.
+        *   delivery. Re-reading the value after the uatomic_add ensures this.
         * - Reading a commit_count with a higher value that what was actually
         *   added to it for the ltt_write_commit_counter call (again caused by
         *   a concurrent committer). It does not matter, because this function
@@ -500,7 +490,7 @@ static __inline__ void ltt_commit_slot(
         *   reserve offset for a specific sub-buffer, which is completely
         *   independent of the order.
         */
-       commit_count = local_read(&buf->commit_count[endidx].cc);
+       commit_count = uatomic_read(&buf->commit_count[endidx].cc);
 
        ltt_check_deliver(chan, buf, offset_end - 1, commit_count, endidx);
        /*
diff --git a/libust/channels.c b/libust/channels.c
index c90733cbbc863061684adb6495bfec232736d8e7..b7f35439a86e189413e7fa8ef81374c30a945d12 100644
--- a/libust/channels.c
+++ b/libust/channels.c
@@ -72,8 +72,8 @@ static void release_channel_setting(struct kref *kref)
                struct ltt_channel_setting, kref);
        struct ltt_channel_setting *iter;
 
-       if (atomic_read(&index_kref.refcount) == 0
-           && atomic_read(&setting->kref.refcount) == 0) {
+       if (uatomic_read(&index_kref.refcount) == 0
+           && uatomic_read(&setting->kref.refcount) == 0) {
                list_del(&setting->list);
                kfree(setting);
 
@@ -113,7 +113,7 @@ int ltt_channels_register(const char *name)
        mutex_lock(&ltt_channel_mutex);
        setting = lookup_channel(name);
        if (setting) {
-               if (atomic_read(&setting->kref.refcount) == 0)
+               if (uatomic_read(&setting->kref.refcount) == 0)
                        goto init_kref;
                else {
                        kref_get(&setting->kref);
@@ -149,7 +149,7 @@ int ltt_channels_unregister(const char *name)
 
        mutex_lock(&ltt_channel_mutex);
        setting = lookup_channel(name);
-       if (!setting || atomic_read(&setting->kref.refcount) == 0) {
+       if (!setting || uatomic_read(&setting->kref.refcount) == 0) {
                ret = -ENOENT;
                goto end;
        }
@@ -175,7 +175,7 @@ int ltt_channels_set_default(const char *name,
 
        mutex_lock(&ltt_channel_mutex);
        setting = lookup_channel(name);
-       if (!setting || atomic_read(&setting->kref.refcount) == 0) {
+       if (!setting || uatomic_read(&setting->kref.refcount) == 0) {
                ret = -ENOENT;
                goto end;
        }
@@ -199,7 +199,7 @@ const char *ltt_channels_get_name_from_index(unsigned int index)
        struct ltt_channel_setting *iter;
 
        list_for_each_entry(iter, &ltt_channels, list)
-               if (iter->index == index && atomic_read(&iter->kref.refcount))
+               if (iter->index == index && uatomic_read(&iter->kref.refcount))
                        return iter->name;
        return NULL;
 }
@@ -212,7 +212,7 @@ ltt_channels_get_setting_from_name(const char *name)
 
        list_for_each_entry(iter, &ltt_channels, list)
                if (!strcmp(iter->name, name)
-                   && atomic_read(&iter->kref.refcount))
+                   && uatomic_read(&iter->kref.refcount))
                        return iter;
        return NULL;
 }
@@ -259,7 +259,7 @@ struct ust_channel *ltt_channels_trace_alloc(unsigned int *nr_channels,
                WARN("ltt_channels_trace_alloc: no free_index; are there any probes connected?");
                goto end;
        }
-       if (!atomic_read(&index_kref.refcount))
+       if (!uatomic_read(&index_kref.refcount))
                kref_init(&index_kref);
        else
                kref_get(&index_kref);
@@ -271,7 +271,7 @@ struct ust_channel *ltt_channels_trace_alloc(unsigned int *nr_channels,
                goto end;
        }
        list_for_each_entry(iter, &ltt_channels, list) {
-               if (!atomic_read(&iter->kref.refcount))
+               if (!uatomic_read(&iter->kref.refcount))
                        continue;
                channel[iter->index].subbuf_size = iter->subbuf_size;
                channel[iter->index].subbuf_cnt = iter->subbuf_cnt;
diff --git a/libust/tracectl.c b/libust/tracectl.c
index 57218a6bd62765b217a27338104b1cbb88a21061..25e96450ca70492ad1edfb1a644a759f47f35252 100644
--- a/libust/tracectl.c
+++ b/libust/tracectl.c
@@ -26,6 +26,7 @@
 #include <fcntl.h>
 #include <poll.h>
 #include <regex.h>
+#include <urcu/uatomic_arch.h>
 
 #include <ust/marker.h>
 #include <ust/tracectl.h>
@@ -684,7 +685,7 @@ static int do_cmd_get_subbuffer(const char *recvbuf, struct ustcomm_source *src)
                        /* Being here is the proof the daemon has mapped the buffer in its
                         * memory. We may now decrement buffers_to_export.
                         */
-                       if(atomic_long_read(&buf->consumed) == 0) {
+                       if(uatomic_read(&buf->consumed) == 0) {
                                DBG("decrementing buffers_to_export");
                                buffers_to_export--;
                        }
diff --git a/ustd/lowlevel.c b/ustd/lowlevel.c
index 4c179c5cbe1317b71b6724f276fcd920cdee4ac8..57a9f6631c1ba53ce035a17237a4718b2e7bd802 100644
--- a/ustd/lowlevel.c
+++ b/ustd/lowlevel.c
@@ -65,8 +65,8 @@ void finish_consuming_dead_subbuffer(struct buffer_info *buf)
 {
        struct ust_buffer *ustbuf = buf->bufstruct_mem;
 
-       long write_offset = local_read(&ustbuf->offset);
-       long consumed_offset = atomic_long_read(&ustbuf->consumed);
+       long write_offset = uatomic_read(&ustbuf->offset);
+       long consumed_offset = uatomic_read(&ustbuf->consumed);
 
        long i_subbuf;
 
@@ -95,7 +95,7 @@ void finish_consuming_dead_subbuffer(struct buffer_info *buf)
                void *tmp;
                /* commit_seq is the offset in the buffer of the end of the last sequential commit.
                 * Bytes beyond this limit cannot be recovered. This is a free-running counter. */
-               long commit_seq = local_read(&ustbuf->commit_seq[i_subbuf]);
+               long commit_seq = uatomic_read(&ustbuf->commit_seq[i_subbuf]);
 
                unsigned long valid_length = buf->subbuf_size;
                long n_subbufs_order = get_count_order(buf->n_subbufs);
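
Throughout the patch the writer-side reservation keeps the same lock-free shape; only the primitive changes. Each writer computes a tentative [old, end) range, publishes it by compare-and-swapping buf->offset, and retries if another writer won the race. A condensed, illustrative sketch of that loop (the real code additionally handles sub-buffer boundaries and switches in ltt_relay_try_reserve_slow()):

    do {
            o_old = uatomic_read(&buf->offset);    /* current write position */
            o_end = o_old + slot_size;             /* tentative end of the new slot */
    } while (uatomic_cmpxchg(&buf->offset, o_old, o_end) != o_old);
    /* [o_old, o_end) now belongs to this writer; the commit side later
     * accounts for it with uatomic_add() on the matching commit_count[].cc. */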