-#ifndef _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H
-#define _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H
+#ifndef _LIB_RING_BUFFER_FRONTEND_INTERNAL_H
+#define _LIB_RING_BUFFER_FRONTEND_INTERNAL_H
/*
- * linux/ringbuffer/frontend_internal.h
+ * lib/ringbuffer/frontend_internal.h
*
- * (C) Copyright 2005-2010 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- *
* Ring Buffer Library Synchronization Header (internal helpers).
*
+ * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; only
+ * version 2.1 of the License.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
* Author:
* Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*
* See ring_buffer_frontend.c for more information on wait-free algorithms.
- *
- * Dual LGPL v2.1/GPL v2 license.
*/
#include "../../wrapper/ringbuffer/config.h"
void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf,
enum switch_mode mode);
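+
+/*
+ * Descriptive note (ours, inferred from the function name and from
+ * ring_buffer_frontend.c, not from text in this header): the remote
+ * switch below forces a subbuffer switch on a buffer that may belong
+ * to another CPU, e.g. to flush its current subbuffer.
+ */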
+extern
+void lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf);
+
/* Buffer write helpers */
static inline
	return ((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
		     - (commit_count & chan->commit_count_mask) == 0);
}
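+
+/*
+ * Note (inferred from the return expression above rather than stated in
+ * the original): this helper compares the reserve offset against the
+ * commit counter and returns non-zero when all reserved space has been
+ * committed.
+ */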
+/*
+ * Receive the end-of-subbuffer TSC as a parameter. It has been read in
+ * the space reservation loop of either reserve or switch, which ensures
+ * that it progresses monotonically with the event records in the
+ * buffer. Therefore, it ensures that the end timestamp of a subbuffer
+ * is <= the begin timestamp of the following subbuffer.
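+ *
+ * Caller sketch (illustrative; offset_end, endidx and ctx->tsc are
+ * assumed names modeled on the commit fast path, not defined in this
+ * header):
+ *
+ *	lib_ring_buffer_check_deliver(config, buf, chan, offset_end - 1,
+ *				      commit_count, endidx, ctx->tsc);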
+ */
static inline
void lib_ring_buffer_check_deliver(const struct lib_ring_buffer_config *config,
struct lib_ring_buffer *buf,
struct channel *chan,
unsigned long offset,
unsigned long commit_count,
- unsigned long idx)
+ unsigned long idx,
+ u64 tsc)
{
	unsigned long old_commit_count = commit_count
					 - chan->backend.subbuf_size;
- u64 tsc;
/* Check if all commits have been done */
if (unlikely((buf_trunc(offset, chan) >> chan->backend.num_subbuf_order)
-	 * The subbuffer size is least 2 bytes (minimum size: 1 page).
+	 * The subbuffer size is at least 2 bytes (minimum size: 1 page).
* This guarantees that old_commit_count + 1 != commit_count.
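+	 * (commit_count == old_commit_count + subbuf_size here, so the
+	 * two values always differ by at least 2; this lets
+	 * old_commit_count + 1 serve as a distinct marker for exclusive
+	 * subbuffer access.)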
*/
+
+ /*
+ * Order prior updates to reserve count prior to the
+ * commit_cold cc_sb update.
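+	 * (Presumably this pairs with a read barrier on the consumer
+	 * side when it samples cc_sb; the pairing site is our
+	 * assumption, as it is not shown in this patch.)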
+ */
+ smp_wmb();
if (likely(v_cmpxchg(config, &buf->commit_cold[idx].cc_sb,
old_commit_count, old_commit_count + 1)
== old_commit_count)) {
* and any other writer trying to access this subbuffer
* in this state is required to drop records.
*/
- tsc = config->cb.ring_buffer_clock_read(chan);
v_add(config,
subbuffer_get_records_count(config,
&buf->backend, idx),
/* End of exclusive subbuffer access */
v_set(config, &buf->commit_cold[idx].cc_sb,
commit_count);
+ /*
+ * Order later updates to reserve count after
+ * the commit_cold cc_sb update.
+ */
+ smp_wmb();
lib_ring_buffer_vmcore_check_deliver(config, buf,
commit_count, idx);
struct channel *chan,
unsigned long idx,
unsigned long buf_offset,
- unsigned long commit_count,
- size_t slot_size)
+ unsigned long commit_count)
{
- unsigned long offset, commit_seq_old;
+ unsigned long commit_seq_old;
if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
return;
- offset = buf_offset + slot_size;
-
-	/*
-	 * subbuf_offset includes commit_count_mask. We can simply
-	 * compare the offsets within the subbuffer without caring about
-	 * buffer full/empty mismatch because offset is never zero here
-	 * (subbuffer header and record headers have non-zero length).
-	 */
+	/*
+	 * The mask applied by subbuf_offset() is included in
+	 * commit_count_mask, so we can simply compare the offsets within
+	 * the subbuffer without caring about buffer full/empty mismatch,
+	 * because buf_offset is never zero here (subbuffer header and
+	 * record headers have non-zero length).
+	 */
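+	/*
+	 * Illustrative example (numbers are ours, not from the
+	 * original): with a 4096-byte subbuffer, buf_offset == 8192 and
+	 * commit_count == 8192 yield subbuf_offset(0, chan) == 0, so the
+	 * commit_seq update below proceeds. With buf_offset == 8300 and
+	 * the same commit_count, subbuf_offset(108, chan) is non-zero:
+	 * the space up to buf_offset has not been fully committed yet,
+	 * so we return without moving commit_seq forward.
+	 */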
- if (unlikely(subbuf_offset(offset - commit_count, chan)))
+ if (unlikely(subbuf_offset(buf_offset - commit_count, chan)))
return;
commit_seq_old = v_read(config, &buf->commit_hot[idx].seq);
/* Keep track of trap nesting inside ring buffer code */
DECLARE_PER_CPU(unsigned int, lib_ring_buffer_nesting);
-#endif /* _LINUX_RING_BUFFER_FRONTEND_INTERNAL_H */
+#endif /* _LIB_RING_BUFFER_FRONTEND_INTERNAL_H */