struct switch_offsets {
unsigned long begin, end, old;
size_t pre_header_padding, size;
- unsigned int switch_new_start:1, switch_new_end:1, switch_old_start:1,
- switch_old_end:1;
+ unsigned int switch_new_start:1, switch_old_start:1, switch_old_end:1;
};
#ifdef CONFIG_HOTPLUG_CPU
* Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
*/
static
-int __cpuinit lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
+int lib_ring_buffer_cpu_hp_callback(struct notifier_block *nb,
unsigned long action,
void *hcpu)
{
int ret;
int finalized;
+ if (buf->get_subbuf) {
+ /*
+ * Reader is trying to get a subbuffer twice.
+ */
+ CHAN_WARN_ON(chan, 1);
+ return -EBUSY;
+ }
retry:
finalized = ACCESS_ONCE(buf->finalized);
/*
config->cb.subbuffer_header_size());
}
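
As context for the -EBUSY guard added in get_subbuf() above: it rejects a reader that acquires a second sub-buffer before releasing the first. A minimal consumer-side sketch of the intended pairing follows; the prototypes (lib_ring_buffer_snapshot, lib_ring_buffer_get_subbuf, lib_ring_buffer_put_subbuf) are assumed from the frontend header and may differ across versions.

	unsigned long consumed, produced;

	/* Hedged sketch; prototypes assumed, not taken from this patch. */
	if (!lib_ring_buffer_snapshot(buf, &consumed, &produced)) {
		while (consumed < produced) {
			if (lib_ring_buffer_get_subbuf(buf, consumed))
				break;	/* an unbalanced second get is the -EBUSY case */
			/* ... copy records out of the acquired sub-buffer ... */
			lib_ring_buffer_put_subbuf(buf);
			consumed += chan->backend.subbuf_size;
		}
	}
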
-/*
- * lib_ring_buffer_switch_new_end: finish switching current subbuffer
- *
- * The only remaining threads could be the ones with pending commits. They will
- * have to do the deliver themselves.
- */
-static
-void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
- struct channel *chan,
- struct switch_offsets *offsets,
- u64 tsc)
-{
- const struct lib_ring_buffer_config *config = &chan->backend.config;
- unsigned long endidx = subbuf_index(offsets->end - 1, chan);
- unsigned long commit_count, padding_size, data_size;
-
- data_size = subbuf_offset(offsets->end - 1, chan) + 1;
- padding_size = chan->backend.subbuf_size - data_size;
- subbuffer_set_data_size(config, &buf->backend, endidx, data_size);
-
- /*
- * Order all writes to buffer before the commit count update that will
- * determine that the subbuffer is full.
- */
- if (config->ipi == RING_BUFFER_IPI_BARRIER) {
- /*
- * Must write slot data before incrementing commit count. This
- * compiler barrier is upgraded into a smp_mb() by the IPI sent
- * by get_subbuf().
- */
- barrier();
- } else
- smp_wmb();
- v_add(config, padding_size, &buf->commit_hot[endidx].cc);
- commit_count = v_read(config, &buf->commit_hot[endidx].cc);
- lib_ring_buffer_check_deliver(config, buf, chan, offsets->end - 1,
- commit_count, endidx);
- lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
- offsets->end, commit_count,
- padding_size);
-}
-
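
The ordering comment in the function removed above still matters elsewhere in this file: the commit count is the reader-visible signal that slot data is complete, so the data stores must be ordered before the count update (smp_wmb(), or a plain barrier() that the IPI sent by get_subbuf() upgrades to smp_mb() in RING_BUFFER_IPI_BARRIER mode). A generic sketch of that publish pattern, using a hypothetical publish_slot() helper rather than this file's v_add()/commit_hot machinery:

#include <linux/atomic.h>
#include <linux/string.h>

/* Hypothetical illustration of write-then-publish ordering. */
static void publish_slot(char *slot, const char *payload, size_t len,
			 atomic_long_t *commit_count)
{
	memcpy(slot, payload, len);		/* 1) write the slot data */
	smp_wmb();				/* 2) order data before the count */
	atomic_long_add(len, commit_count);	/* 3) publish; readers pair with smp_rmb() */
}
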
/*
* Returns :
 * 0 if ok
 * !0 if execution must be aborted.
 */
if (mode == SWITCH_FLUSH || off > 0) {
if (unlikely(off == 0)) {
+ /*
+ * A final flush that encounters an empty
+ * sub-buffer cannot switch the buffer if a
+ * reader is located within this sub-buffer.
+ * In any case, the purpose of a final flush at
+ * sub-buffer offset 0 is to handle the case of
+ * an entirely empty stream.
+ */
+ if (unlikely(subbuf_trunc(offsets->begin, chan)
+ - subbuf_trunc((unsigned long)
+ atomic_long_read(&buf->consumed), chan)
+ >= chan->backend.buf_size))
+ return -1;
/*
* The client does not save any header information.
* Don't switch empty subbuffer on finalize, because it
* is invalid to deliver a completely empty subbuffer.
*/
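
A worked example of the wrap-around guard added above, assuming a hypothetical layout of two 4096-byte sub-buffers (buf_size = 8192), with subbuf_trunc() rounding an offset down to the start of its sub-buffer:

/*
 * Assumed: subbuf_size = 4096, 2 sub-buffers, buf_size = 8192.
 *   offsets->begin = 8192 -> subbuf_trunc(begin)    = 8192
 *   consumed       = 100  -> subbuf_trunc(consumed) = 0
 * 8192 - 0 >= 8192: the writer has wrapped back onto the sub-buffer the
 * reader still holds, so the empty flush refuses to switch (return -1).
 */
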
offsets->begin = v_read(config, &buf->offset);
offsets->old = offsets->begin;
offsets->switch_new_start = 0;
- offsets->switch_new_end = 0;
offsets->switch_old_end = 0;
offsets->pre_header_padding = 0;
*/
}
offsets->end = offsets->begin + offsets->size;
-
- if (unlikely(subbuf_offset(offsets->end, chan) == 0)) {
- /*
- * The offset_end will fall at the very beginning of the next
- * subbuffer.
- */
- offsets->switch_new_end = 1; /* For offsets->begin */
- }
return 0;
}
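
The detection deleted above fired only when a reservation ended exactly on a sub-buffer boundary. For illustration, with an assumed subbuf_size of 4096:

/*
 * Assumed subbuf_size = 4096: a slot with offsets->end = 4096 gives
 * subbuf_offset(4096, chan) == 0, i.e. the record ends on the first byte
 * of the next sub-buffer; that is the case switch_new_end used to flag.
 */
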
if (unlikely(offsets.switch_new_start))
lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc);
- if (unlikely(offsets.switch_new_end))
- lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc);
-
ctx->slot_size = offsets.size;
ctx->pre_offset = offsets.begin;
ctx->buf_offset = offsets.begin + offsets.pre_header_padding;