-#ifndef _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H
-#define _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H
+#ifndef _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
+#define _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H
/*
- * linux/ringbuffer/frontend_internal.h
- *
- * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ * libringbuffer/frontend_internal.h
*
* Ring Buffer Library Synchronization Header (internal helpers).
*
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ *
* Author:
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
*/
#include <urcu/compiler.h>
+#include <urcu/tls-compat.h>
+#include <signal.h>
+#include <pthread.h>
-#include <ust/ringbuffer-config.h>
+#include <lttng/ringbuffer-config.h>
#include "backend_types.h"
#include "frontend_types.h"
#include "shm.h"
#if (CAA_BITS_PER_LONG == 32)
static inline
-void save_last_tsc(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, u64 tsc)
+void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
if (config->tsc_bits == 0 || config->tsc_bits == 64)
return;
}
static inline
-int last_tsc_overflow(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, u64 tsc)
+int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
unsigned long tsc_shifted;
return 0;
tsc_shifted = (unsigned long)(tsc >> config->tsc_bits);
- if (unlikely(tsc_shifted
+ if (caa_unlikely(tsc_shifted
- (unsigned long)v_read(config, &buf->last_tsc)))
return 1;
else
}
#else
static inline
-void save_last_tsc(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, u64 tsc)
+void save_last_tsc(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
if (config->tsc_bits == 0 || config->tsc_bits == 64)
return;
}
static inline
-int last_tsc_overflow(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf, u64 tsc)
+int last_tsc_overflow(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf, uint64_t tsc)
{
if (config->tsc_bits == 0 || config->tsc_bits == 64)
return 0;
- if (unlikely((tsc - v_read(config, &buf->last_tsc))
+ if (caa_unlikely((tsc - v_read(config, &buf->last_tsc))
>> config->tsc_bits))
return 1;
else
#endif
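+
+/*
+ * Illustrative sketch, not part of the original change (values are
+ * hypothetical): with config->tsc_bits == 32, the 64-bit variant above
+ * flags an overflow whenever the delta since the last recorded
+ * timestamp no longer fits in the compact 32-bit header field:
+ *
+ *	save_last_tsc(config, buf, 0x100000000ULL);
+ *	last_tsc_overflow(config, buf, 0x17fffffffULL);
+ *		returns 0: delta 0x7fffffff fits, keep compact header
+ *	last_tsc_overflow(config, buf, 0x200000001ULL);
+ *		returns 1: delta 0x100000001 overflows, record the
+ *		full 64-bit TSC in the event header
+ *
+ * The 32-bit variant only keeps tsc >> tsc_bits in last_tsc, so it is
+ * slightly more conservative: it also returns 1 when a small delta
+ * happens to cross a 2^32 boundary.
+ */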
extern
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx);
+int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+ void *client_ctx);
extern
-void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf,
enum switch_mode mode,
- struct shm_handle *handle);
+ struct lttng_ust_shm_handle *handle);
+
+extern
+void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
+ struct channel *chan,
+ unsigned long offset,
+ unsigned long commit_count,
+ unsigned long idx,
+ struct lttng_ust_shm_handle *handle,
+ uint64_t tsc);
/* Buffer write helpers */
static inline
-void lib_ring_buffer_reserve_push_reader(struct lib_ring_buffer *buf,
+void lib_ring_buffer_reserve_push_reader(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
unsigned long offset)
{
* write position sub-buffer index in the buffer being the one
* which will win this loop.
*/
- if (unlikely(subbuf_trunc(offset, chan)
+ if (caa_unlikely(subbuf_trunc(offset, chan)
- subbuf_trunc(consumed_old, chan)
>= chan->backend.buf_size))
consumed_new = subbuf_align(consumed_old, chan);
else
return;
- } while (unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
+ } while (caa_unlikely(uatomic_cmpxchg(&buf->consumed, consumed_old,
consumed_new) != consumed_old));
}
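+
+/*
+ * Illustrative worked example, not part of the original change
+ * (hypothetical geometry): buf_size 8192, subbuf_size 4096 (two
+ * sub-buffers), overwrite mode, reader sitting at consumed_old 4096
+ * while the writer reserves at offset 12288:
+ *
+ *	subbuf_trunc(12288) - subbuf_trunc(4096) == 8192 >= buf_size
+ *		so the writer is about to reuse the reader's sub-buffer:
+ *	consumed_new = subbuf_align(4096, chan) == 8192
+ *
+ * The uatomic_cmpxchg() loop retries when a concurrent reader moved
+ * buf->consumed in the meantime, so the reader is never left inside a
+ * sub-buffer the writer owns.
+ */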
static inline
-void lib_ring_buffer_vmcore_check_deliver(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
- unsigned long commit_count,
- unsigned long idx,
- struct shm_handle *handle)
-{
- if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
- v_set(config, &shmp(handle, buf->commit_hot)[idx].seq, commit_count);
-}
-
-static inline
-int lib_ring_buffer_poll_deliver(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
- struct channel *chan,
- struct shm_handle *handle)
-{
- unsigned long consumed_old, consumed_idx, commit_count, write_offset;
-
- consumed_old = uatomic_read(&buf->consumed);
- consumed_idx = subbuf_index(consumed_old, chan);
- commit_count = v_read(config, &shmp(handle, buf->commit_cold)[consumed_idx].cc_sb);
- /*
- * No memory barrier here, since we are only interested
- * in a statistically correct polling result. The next poll will
- * get the data is we are racing. The mb() that ensures correct
- * memory order is in get_subbuf.
- */
- write_offset = v_read(config, &buf->offset);
-
- /*
- * Check that the subbuffer we are trying to consume has been
- * already fully committed.
- */
-
- if (((commit_count - chan->backend.subbuf_size)
- & chan->commit_count_mask)
- - (buf_trunc(consumed_old, chan)
- >> chan->backend.num_subbuf_order)
- != 0)
- return 0;
-
- /*
- * Check that we are not about to read the same subbuffer in
- * which the writer head is.
- */
- if (subbuf_trunc(write_offset, chan) - subbuf_trunc(consumed_old, chan)
- == 0)
- return 0;
-
- return 1;
-
-}
-
-static inline
-int lib_ring_buffer_pending_data(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+int lib_ring_buffer_pending_data(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan)
{
return !!subbuf_offset(v_read(config, &buf->offset), chan);
}
static inline
-unsigned long lib_ring_buffer_get_data_size(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+unsigned long lib_ring_buffer_get_data_size(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
unsigned long idx,
- struct shm_handle *handle)
+ struct lttng_ust_shm_handle *handle)
{
return subbuffer_get_data_size(config, &buf->backend, idx, handle);
}
* This is a very specific ftrace use-case, so we keep this as "internal" API.
*/
static inline
-int lib_ring_buffer_reserve_committed(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+int lib_ring_buffer_reserve_committed(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
- struct shm_handle *handle)
+ struct lttng_ust_shm_handle *handle)
{
	unsigned long offset, idx, commit_count;
+	struct commit_counters_hot *cc_hot;
	CHAN_WARN_ON(chan, config->alloc != RING_BUFFER_ALLOC_PER_CPU);
	CHAN_WARN_ON(chan, config->sync != RING_BUFFER_SYNC_PER_CPU);
	/*
	 * Read offset and commit count in a loop so they are both read
	 * atomically wrt interrupts. We deal with interrupt concurrency by
	do {
		offset = v_read(config, &buf->offset);
		idx = subbuf_index(offset, chan);
+		cc_hot = shmp_index(handle, buf->commit_hot, idx);
+		if (caa_unlikely(!cc_hot))
+			return 0;
-		commit_count = v_read(config, &shmp(handle, buf->commit_hot)[idx].cc);
+		commit_count = v_read(config, &cc_hot->cc);
} while (offset != v_read(config, &buf->offset));
return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
- (commit_count & chan->commit_count_mask) == 0);
}
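+
+/*
+ * Note on the retry loop above: it is a seqlock-style snapshot, shown
+ * here as a minimal sketch with hypothetical names. If a nested writer
+ * (e.g. one running from a signal handler) pushes buf->offset between
+ * the two reads of offset, the pair is re-read, so offset and
+ * commit_count always describe the same sub-buffer state:
+ *
+ *	do {
+ *		snap_offset = v_read(config, &buf->offset);
+ *		snap_cc = v_read(config, &cc_hot->cc);
+ *	} while (snap_offset != v_read(config, &buf->offset));
+ */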
+/*
+ * Receive the end-of-subbuffer TSC as a parameter. It has been read in
+ * the space reservation loop of either reserve or switch, which ensures
+ * it progresses monotonically with event records in the buffer.
+ * Therefore, the end timestamp of a subbuffer is always <= the begin
+ * timestamp of the following subbuffer.
+ */
static inline
-void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+void lib_ring_buffer_check_deliver(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
unsigned long offset,
unsigned long commit_count,
unsigned long idx,
- struct shm_handle *handle)
+ struct lttng_ust_shm_handle *handle,
+ uint64_t tsc)
{
unsigned long old_commit_count = commit_count
- chan->backend.subbuf_size;
- u64 tsc;
/* Check if all commits have been done */
- if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
- - (old_commit_count & chan->commit_count_mask) == 0)) {
- /*
- * If we succeeded at updating cc_sb below, we are the subbuffer
- * writer delivering the subbuffer. Deals with concurrent
- * updates of the "cc" value without adding a add_return atomic
- * operation to the fast path.
- *
- * We are doing the delivery in two steps:
- * - First, we cmpxchg() cc_sb to the new value
- * old_commit_count + 1. This ensures that we are the only
- * subbuffer user successfully filling the subbuffer, but we
- * do _not_ set the cc_sb value to "commit_count" yet.
- * Therefore, other writers that would wrap around the ring
- * buffer and try to start writing to our subbuffer would
- * have to drop records, because it would appear as
- * non-filled.
- * We therefore have exclusive access to the subbuffer control
- * structures. This mutual exclusion with other writers is
- * crucially important to perform record overruns count in
- * flight recorder mode locklessly.
- * - When we are ready to release the subbuffer (either for
- * reading or for overrun by other writers), we simply set the
- * cc_sb value to "commit_count" and perform delivery.
- *
- * The subbuffer size is least 2 bytes (minimum size: 1 page).
- * This guarantees that old_commit_count + 1 != commit_count.
- */
- if (likely(v_cmpxchg(config, &shmp(handle, buf->commit_cold)[idx].cc_sb,
- old_commit_count, old_commit_count + 1)
- == old_commit_count)) {
- /*
- * Start of exclusive subbuffer access. We are
- * guaranteed to be the last writer in this subbuffer
- * and any other writer trying to access this subbuffer
- * in this state is required to drop records.
- */
- tsc = config->cb.ring_buffer_clock_read(chan);
- v_add(config,
- subbuffer_get_records_count(config,
- &buf->backend,
- idx, handle),
- &buf->records_count);
- v_add(config,
- subbuffer_count_records_overrun(config,
- &buf->backend,
- idx, handle),
- &buf->records_overrun);
- config->cb.buffer_end(buf, tsc, idx,
- lib_ring_buffer_get_data_size(config,
- buf,
- idx,
- handle),
- handle);
-
- /*
- * Set noref flag and offset for this subbuffer id.
- * Contains a memory barrier that ensures counter stores
- * are ordered before set noref and offset.
- */
- lib_ring_buffer_set_noref_offset(config, &buf->backend, idx,
- buf_trunc_val(offset, chan), handle);
-
- /*
- * Order set_noref and record counter updates before the
- * end of subbuffer exclusive access. Orders with
- * respect to writers coming into the subbuffer after
- * wrap around, and also order wrt concurrent readers.
- */
- cmm_smp_mb();
- /* End of exclusive subbuffer access */
- v_set(config, &shmp(handle, buf->commit_cold)[idx].cc_sb,
- commit_count);
- lib_ring_buffer_vmcore_check_deliver(config, buf,
- commit_count, idx, handle);
-
- /*
- * RING_BUFFER_WAKEUP_BY_WRITER wakeup is not lock-free.
- */
- if (config->wakeup == RING_BUFFER_WAKEUP_BY_WRITER
- && uatomic_read(&buf->active_readers)
- && lib_ring_buffer_poll_deliver(config, buf, chan, handle)) {
- int wakeup_fd = shm_get_wakeup_fd(handle, &buf->self._ref);
-
- if (wakeup_fd >= 0) {
- int ret;
- /*
- * Wake-up the other end by
- * writing a null byte in the
- * pipe (non-blocking).
- */
- do {
- ret = write(wakeup_fd, "", 1);
- } while (ret == -1L && errno == EINTR);
- }
- }
-
- }
- }
+ if (caa_unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
+ - (old_commit_count & chan->commit_count_mask) == 0))
+ lib_ring_buffer_check_deliver_slow(config, buf, chan, offset,
+ commit_count, idx, handle, tsc);
}
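+
+/*
+ * Illustrative worked example for the fast-path test above, not part
+ * of the original change (hypothetical geometry): subbuf_size 4096,
+ * two sub-buffers (num_subbuf_order 1). The commit filling sub-buffer
+ * 0 for the second time (offsets 8192..12287) brings commit_count to
+ * 8192, hence old_commit_count == 4096:
+ *
+ *	buf_trunc(offset, chan) >> num_subbuf_order == 8192 >> 1 == 4096
+ *	old_commit_count & chan->commit_count_mask == 4096
+ *		difference is 0: every slot is committed, so this writer
+ *		takes lib_ring_buffer_check_deliver_slow() and delivers.
+ *
+ * While any slot is still uncommitted, commit_count stays short of a
+ * subbuf_size multiple, the difference is non-zero, and delivery is
+ * left to whichever writer performs the last commit.
+ */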
/*
* useful for crash dump.
*/
static inline
-void lib_ring_buffer_write_commit_counter(const struct lib_ring_buffer_config *config,
- struct lib_ring_buffer *buf,
+void lib_ring_buffer_write_commit_counter(const struct lttng_ust_lib_ring_buffer_config *config,
+ struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
- unsigned long idx,
unsigned long buf_offset,
unsigned long commit_count,
- size_t slot_size,
- struct shm_handle *handle)
+ struct lttng_ust_shm_handle *handle,
+ struct commit_counters_hot *cc_hot)
{
- unsigned long offset, commit_seq_old;
+ unsigned long commit_seq_old;
if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
return;
- offset = buf_offset + slot_size;
-
/*
* subbuf_offset includes commit_count_mask. We can simply
* compare the offsets within the subbuffer without caring about
* buffer full/empty mismatch because offset is never zero here
* (subbuffer header and record headers have non-zero length).
*/
- if (unlikely(subbuf_offset(offset - commit_count, chan)))
+ if (caa_unlikely(subbuf_offset(buf_offset - commit_count, chan)))
return;
- commit_seq_old = v_read(config, &shmp(handle, buf->commit_hot)[idx].seq);
- while ((long) (commit_seq_old - commit_count) < 0)
- commit_seq_old = v_cmpxchg(config, &shmp(handle, buf->commit_hot)[idx].seq,
- commit_seq_old, commit_count);
+ commit_seq_old = v_read(config, &cc_hot->seq);
+ if (caa_likely((long) (commit_seq_old - commit_count) < 0))
+ v_set(config, &cc_hot->seq, commit_count);
}
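+
+/*
+ * Illustrative worked example for the coherence test above, not part
+ * of the original change (hypothetical values): subbuf_size 4096 and a
+ * record ending at buf_offset 4296, i.e. 200 bytes into sub-buffer 1:
+ *
+ *	commit_count == 200 (all bytes before us are committed):
+ *		subbuf_offset(4296 - 200) == subbuf_offset(4096) == 0
+ *		so cc_hot->seq advances to 200 for crash-dump readers
+ *	commit_count == 136 (a 64-byte slot is still uncommitted):
+ *		subbuf_offset(4296 - 136) == 64 != 0, so seq is left
+ *		alone; a later commit moves it once the hole is filled
+ *
+ * The (long) difference makes the staleness check wrap-safe: seq is
+ * only written when it lags the freshly computed commit_count.
+ */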
-extern int lib_ring_buffer_create(struct lib_ring_buffer *buf,
+extern int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
struct channel_backend *chanb, int cpu,
- struct shm_handle *handle,
+ struct lttng_ust_shm_handle *handle,
struct shm_object *shmobj);
-extern void lib_ring_buffer_free(struct lib_ring_buffer *buf,
- struct shm_handle *handle);
+extern void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle);
/* Keep track of trap nesting inside ring buffer code */
-extern __thread unsigned int lib_ring_buffer_nesting;
+extern DECLARE_URCU_TLS(unsigned int, lib_ring_buffer_nesting);
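+
+/*
+ * Note: urcu/tls-compat.h hides the difference between compiler TLS
+ * (__thread) and pthread_key_t based storage. A minimal usage sketch
+ * (hypothetical .c file, not part of this change):
+ *
+ *	DEFINE_URCU_TLS(unsigned int, lib_ring_buffer_nesting);
+ *
+ *	static void ring_buffer_nesting_inc(void)
+ *	{
+ *		URCU_TLS(lib_ring_buffer_nesting)++;
+ *	}
+ */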
-#endif /* _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H */
+#endif /* _LTTNG_RING_BUFFER_FRONTEND_INTERNAL_H */