chan->commit_count_mask = (~0UL >> chan->backend.num_subbuf_order);
chan->switch_timer_interval = usecs_to_jiffies(switch_timer_interval);
chan->read_timer_interval = usecs_to_jiffies(read_timer_interval);
+ kref_init(&chan->ref);
init_waitqueue_head(&chan->read_wait);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
}
EXPORT_SYMBOL_GPL(channel_create);
+static
+void channel_release(struct kref *kref)
+{
+ struct channel *chan = container_of(kref, struct channel, ref);
+ channel_free(chan);
+}
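
For context, channel_release() above follows the standard kref release-callback pattern: the object embeds a struct kref, kref_init() starts the count at 1, kref_get()/kref_put() adjust it, and the callback handed to kref_put() runs exactly once when the count drops to zero. A minimal, self-contained sketch of the same pattern, illustrative only and not part of this patch; the foo/foo_release/foo_create names are hypothetical:

#include <linux/kref.h>
#include <linux/slab.h>

struct foo {
	struct kref ref;
	/* ... payload ... */
};

static void foo_release(struct kref *kref)
{
	/* Recover the enclosing object from its embedded kref. */
	struct foo *f = container_of(kref, struct foo, ref);

	kfree(f);
}

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;
	kref_init(&f->ref);	/* creator holds the initial reference */
	return f;
}

Each additional user then does kref_get(&f->ref) while it needs the object and kref_put(&f->ref, foo_release) when done; the last put frees it.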
+
/**
* channel_destroy - Finalize, wait for q.s. and destroy channel.
* @chan: channel to destroy
wake_up_interruptible(&buf->read_wait);
}
wake_up_interruptible(&chan->read_wait);
-
- while (atomic_long_read(&chan->read_ref) > 0)
- msleep(100);
- /* Finish waiting for refcount before free */
- smp_mb();
priv = chan->backend.priv;
+ kref_put(&chan->ref, channel_release);
- channel_free(chan);
return priv;
}
EXPORT_SYMBOL_GPL(channel_destroy);
if (!atomic_long_add_unless(&buf->active_readers, 1, 1))
return -EBUSY;
- atomic_long_inc(&chan->read_ref);
+ kref_get(&chan->ref);
smp_mb__after_atomic_inc();
return 0;
}
CHAN_WARN_ON(chan, atomic_long_read(&buf->active_readers) != 1);
smp_mb__before_atomic_dec();
- atomic_long_dec(&chan->read_ref);
atomic_long_dec(&buf->active_readers);
+ kref_put(&chan->ref, channel_release);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_release_read);
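
The net effect of the two reader-side hunks above: lib_ring_buffer_open_read() now takes a channel reference alongside the active_readers flag, and lib_ring_buffer_release_read() drops both, so channel_destroy() no longer has to poll for readers and the channel is freed by whichever kref_put() runs last. A hedged sketch of the expected reader pairing (the caller-side error handling is illustrative only):

	/* Attach as the single reader; also pins the channel via chan->ref. */
	ret = lib_ring_buffer_open_read(buf);
	if (ret)
		return ret;	/* -EBUSY: another reader already attached */

	/* ... consume sub-buffers ... */

	/*
	 * Drops active_readers and the channel reference; if channel_destroy()
	 * already ran, this put is the one that frees the channel.
	 */
	lib_ring_buffer_release_read(buf);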
/*
* Returns :
* 0 if ok
- * !0 if execution must be aborted.
+ * -ENOSPC if event size is too large for packet.
+ * -ENOBUFS if there is currently not enough space in buffer for the event.
+ * -EIO if data cannot be written into the buffer for any other reason.
*/
static
int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
offsets->pre_header_padding = 0;
ctx->tsc = config->cb.ring_buffer_clock_read(chan);
+ if ((int64_t) ctx->tsc == -EIO)
+ return -EIO;
if (last_tsc_overflow(config, buf, ctx->tsc))
- ctx->rflags = RING_BUFFER_RFLAG_FULL_TSC;
+ ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
offsets->switch_new_start = 1; /* For offsets->begin */
} else {
offsets->size = config->cb.record_header_size(config, chan,
offsets->begin,
- ctx->data_size,
&offsets->pre_header_padding,
- ctx->rflags, ctx);
+ ctx);
offsets->size +=
lib_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
* and we are full : record is lost.
*/
v_inc(config, &buf->records_lost_full);
- return -1;
+ return -ENOBUFS;
} else {
/*
* Next subbuffer not being written to, and we
* many nested writes over a reserve/commit pair.
*/
v_inc(config, &buf->records_lost_wrap);
- return -1;
+ return -EIO;
}
offsets->size =
config->cb.record_header_size(config, chan,
offsets->begin,
- ctx->data_size,
&offsets->pre_header_padding,
- ctx->rflags, ctx);
+ ctx);
offsets->size +=
lib_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
* complete the sub-buffer switch.
*/
v_inc(config, &buf->records_lost_big);
- return -1;
+ return -ENOSPC;
} else {
/*
* We just made a successful buffer switch and the
* lib_ring_buffer_reserve_slow - Atomic slot reservation in a buffer.
* @ctx: ring buffer context.
*
- * Return : -ENOSPC if not enough space, else returns 0.
+ * Return : -ENOBUFS if not enough space, -ENOSPC if event size too large,
+ * -EIO for other errors, else returns 0.
* It will take care of sub-buffer switching.
*/
int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
const struct lib_ring_buffer_config *config = chan->backend.config;
struct lib_ring_buffer *buf;
struct switch_offsets offsets;
+ int ret;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
buf = per_cpu_ptr(chan->backend.buf, ctx->cpu);
offsets.size = 0;
do {
- if (unlikely(lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
- ctx)))
- return -ENOSPC;
+ ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
+ ctx);
+ if (unlikely(ret))
+ return ret;
} while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
offsets.end)
!= offsets.old));
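
Since lib_ring_buffer_reserve_slow() now propagates the distinct codes from the try-reserve step, a writer can tell the failure modes apart instead of collapsing everything into a generic lost record. A hedged sketch of caller-side dispatch; the my_event_reserve() helper is hypothetical, and the lost-record counters are already bumped inside lib_ring_buffer_try_reserve_slow():

static int my_event_reserve(struct lib_ring_buffer_ctx *ctx)
{
	int ret = lib_ring_buffer_reserve_slow(ctx);

	if (likely(!ret))
		return 0;	/* slot reserved: write payload, then commit */
	if (ret == -ENOBUFS)
		return ret;	/* buffer full (non-overwrite mode): record dropped */
	if (ret == -ENOSPC)
		return ret;	/* record larger than a sub-buffer: can never fit */
	return ret;		/* -EIO: clock read error or nested-write corruption */
}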