#include <wrapper/atomic.h>
#include <wrapper/kref.h>
#include <wrapper/percpu-defs.h>
+#include <wrapper/timer.h>
/*
* Internal structure representing offsets to use at a sub-buffer switch.
lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- mod_timer_pinned(&buf->switch_timer,
+ lttng_mod_timer_pinned(&buf->switch_timer,
jiffies + chan->switch_timer_interval);
else
mod_timer(&buf->switch_timer,
if (!chan->switch_timer_interval || buf->switch_timer_enabled)
return;
- init_timer(&buf->switch_timer);
+
+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+ lttng_init_timer_pinned(&buf->switch_timer);
+ else
+ init_timer(&buf->switch_timer);
+
buf->switch_timer.function = switch_buffer_timer;
buf->switch_timer.expires = jiffies + chan->switch_timer_interval;
buf->switch_timer.data = (unsigned long)buf;
}
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- mod_timer_pinned(&buf->read_timer,
+ lttng_mod_timer_pinned(&buf->read_timer,
jiffies + chan->read_timer_interval);
else
mod_timer(&buf->read_timer,
|| buf->read_timer_enabled)
return;
- init_timer(&buf->read_timer);
+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+ lttng_init_timer_pinned(&buf->read_timer);
+ else
+ init_timer(&buf->read_timer);
+
buf->read_timer.function = read_buffer_timer;
buf->read_timer.expires = jiffies + chan->read_timer_interval;
buf->read_timer.data = (unsigned long)buf;
unsigned long consumed_cur, write_offset;
int finalized;
- /*
- * First, ensure we perform a "final" flush onto the stream. This will
- * ensure we create a packet of padding if we encounter an empty
- * packet. This ensures the time-stamps right before the snapshot is
- * used as end of packet timestamp.
- */
- if (!buf->quiescent)
- _lib_ring_buffer_switch_remote(buf, SWITCH_FLUSH);
-
retry:
finalized = ACCESS_ONCE(buf->finalized);
/*
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_slow);
+/*
+ * Arguments for remote_switch(), passed through the info pointer of
+ * smp_call_function_single() so the target CPU knows which buffer to
+ * switch and which switch mode (e.g. SWITCH_ACTIVE, SWITCH_FLUSH) to use.
+ */
+struct switch_param {
+ struct lib_ring_buffer *buf;
+ enum switch_mode mode;
+};
+
static void remote_switch(void *info)
{
- struct lib_ring_buffer *buf = info;
+ /* info carries a struct switch_param set up by the initiating CPU. */
+ struct switch_param *param = info;
+ struct lib_ring_buffer *buf = param->buf;
- lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+ /* Honor the requested mode instead of hard-coding SWITCH_ACTIVE. */
+ lib_ring_buffer_switch_slow(buf, param->mode);
}
static void _lib_ring_buffer_switch_remote(struct lib_ring_buffer *buf,
struct channel *chan = buf->backend.chan;
const struct lib_ring_buffer_config *config = &chan->backend.config;
int ret;
+ struct switch_param param;
/*
* With global synchronization we don't need to use the IPI scheme.
* switch.
*/
get_online_cpus();
+ param.buf = buf;
+ param.mode = mode;
ret = smp_call_function_single(buf->backend.cpu,
- remote_switch, buf, 1);
+ remote_switch, &param, 1);
if (ret) {
/* Remote CPU is offline, do it ourself. */
lib_ring_buffer_switch_slow(buf, mode);
}
EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_remote);
+/*
+ * Switch sub-buffer even if the current sub-buffer is empty: forces a
+ * SWITCH_FLUSH through the remote-switch IPI path so the buffer's
+ * content is made visible to readers.
+ * NOTE(review): whether an empty sub-buffer produces a padding-only
+ * packet depends on lib_ring_buffer_switch_slow() — confirm there.
+ */
+void lib_ring_buffer_switch_remote_empty(struct lib_ring_buffer *buf)
+{
+ _lib_ring_buffer_switch_remote(buf, SWITCH_FLUSH);
+}
+EXPORT_SYMBOL_GPL(lib_ring_buffer_switch_remote_empty);
+
/*
* Returns :
* 0 if ok