#include <fcntl.h>
#include <urcu/compiler.h>
#include <urcu/ref.h>
+#include <helper.h>
#include "smp.h"
-#include <ust/ringbuffer-config.h>
+#include <lttng/ringbuffer-config.h>
+#include "vatomic.h"
#include "backend.h"
#include "frontend.h"
#include "shm.h"
__thread unsigned int lib_ring_buffer_nesting;
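+/*
+ * Throughout this file, shared-memory references are dereferenced
+ * through a struct lttng_ust_shm_handle: shmp(handle, ref) translates
+ * a (table index, offset) pair into an address valid in the current
+ * mapping, and shmp_index(handle, ref, i) does the same for element i
+ * of an array.
+ */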
-static
-void lib_ring_buffer_print_errors(struct channel *chan,
- struct lib_ring_buffer *buf, int cpu);
-
/*
- * Must be called under cpu hotplug protection.
+ * TODO: this is unused. Errors are saved within the ring buffer.
+ * Eventually, allow consumerd to print these errors.
*/
-void lib_ring_buffer_free(struct lib_ring_buffer *buf)
-{
- struct channel *chan = shmp(buf->backend.chan);
-
- lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu);
- /* buf->commit_hot will be freed by shm teardown */
- /* buf->commit_cold will be freed by shm teardown */
-
- lib_ring_buffer_backend_free(&buf->backend);
-}
+static
+void lib_ring_buffer_print_errors(struct channel *chan,
+ struct lttng_ust_lib_ring_buffer *buf, int cpu,
+ struct lttng_ust_shm_handle *handle);
/**
* lib_ring_buffer_reset - Reset ring buffer to initial values.
* should not be using the iterator concurrently with reset. The previous
* current iterator record is reset.
*/
-void lib_ring_buffer_reset(struct lib_ring_buffer *buf)
+void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
{
- struct channel *chan = shmp(buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ struct channel *chan = shmp(handle, buf->backend.chan);
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned int i;
/*
*/
v_set(config, &buf->offset, 0);
for (i = 0; i < chan->backend.num_subbuf; i++) {
- v_set(config, &shmp(buf->commit_hot)[i].cc, 0);
- v_set(config, &shmp(buf->commit_hot)[i].seq, 0);
- v_set(config, &shmp(buf->commit_cold)[i].cc_sb, 0);
+ v_set(config, &shmp_index(handle, buf->commit_hot, i)->cc, 0);
+ v_set(config, &shmp_index(handle, buf->commit_hot, i)->seq, 0);
+ v_set(config, &shmp_index(handle, buf->commit_cold, i)->cc_sb, 0);
}
uatomic_set(&buf->consumed, 0);
uatomic_set(&buf->record_disabled, 0);
v_set(config, &buf->last_tsc, 0);
- lib_ring_buffer_backend_reset(&buf->backend);
+ lib_ring_buffer_backend_reset(&buf->backend, handle);
/* Don't reset number of active readers */
v_set(config, &buf->records_lost_full, 0);
v_set(config, &buf->records_lost_wrap, 0);
/*
* Must be called under cpu hotplug protection.
*/
-int lib_ring_buffer_create(struct lib_ring_buffer *buf,
+int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
struct channel_backend *chanb, int cpu,
- struct shm_header *shm_header)
+ struct lttng_ust_shm_handle *handle,
+ struct shm_object *shmobj)
{
- const struct lib_ring_buffer_config *config = chanb->config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
struct channel *chan = caa_container_of(chanb, struct channel, backend);
- void *priv = chanb->priv;
- unsigned int num_subbuf;
+ void *priv = channel_get_private(chan);
size_t subbuf_header_size;
- u64 tsc;
+ uint64_t tsc;
int ret;
/* Test for cpu hotplug */
return 0;
ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend,
- cpu, shm_header);
+ cpu, handle, shmobj);
if (ret)
return ret;
- align_shm(shm_header,
- max(__alignof__(struct commit_counters_hot),
- __alignof__(struct commit_counters_cold)));
- set_shmp(&buf->commit_hot,
- zalloc_shm(shm_header,
- sizeof(*buf->commit_hot) * chan->backend.num_subbuf));
- if (!shmp(buf->commit_hot)) {
+ align_shm(shmobj, __alignof__(struct commit_counters_hot));
+ set_shmp(buf->commit_hot,
+ zalloc_shm(shmobj,
+ sizeof(struct commit_counters_hot) * chan->backend.num_subbuf));
+ if (!shmp(handle, buf->commit_hot)) {
ret = -ENOMEM;
goto free_chanbuf;
}
- align_shm(shm_header, __alignof__(struct commit_counters_cold));
- set_shmp(&buf->commit_cold,
- zalloc_shm(shm_header,
- sizeof(*buf->commit_cold) * chan->backend.num_subbuf));
- if (!shmp(buf->commit_cold)) {
+ align_shm(shmobj, __alignof__(struct commit_counters_cold));
+ set_shmp(buf->commit_cold,
+ zalloc_shm(shmobj,
+ sizeof(struct commit_counters_cold) * chan->backend.num_subbuf));
+ if (!shmp(handle, buf->commit_cold)) {
ret = -ENOMEM;
goto free_commit;
}
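+ /*
+ * The hot and cold commit counters are allocated and aligned
+ * separately: commit_hot is updated on the writer fast path, while
+ * commit_cold (cc_sb) is read on the consumer side, which presumably
+ * limits false sharing between the two paths.
+ */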
- num_subbuf = chan->backend.num_subbuf;
- //init_waitqueue_head(&buf->read_wait);
-
/*
* Write the subbuffer header for first subbuffer so we know the total
* duration of data gathering.
*/
subbuf_header_size = config->cb.subbuffer_header_size();
v_set(config, &buf->offset, subbuf_header_size);
- subbuffer_id_clear_noref(config, &shmp(buf->backend.buf_wsb)[0].id);
- tsc = config->cb.ring_buffer_clock_read(shmp(buf->backend.chan));
- config->cb.buffer_begin(buf, tsc, 0);
- v_add(config, subbuf_header_size, &shmp(buf->commit_hot)[0].cc);
+ subbuffer_id_clear_noref(config, &shmp_index(handle, buf->backend.buf_wsb, 0)->id);
+ tsc = config->cb.ring_buffer_clock_read(shmp(handle, buf->backend.chan));
+ config->cb.buffer_begin(buf, tsc, 0, handle);
+ v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->cc);
if (config->cb.buffer_create) {
- ret = config->cb.buffer_create(buf, priv, cpu, chanb->name);
+ ret = config->cb.buffer_create(buf, priv, cpu, chanb->name, handle);
if (ret)
goto free_init;
}
free_commit:
/* commit_hot will be freed by shm teardown */
free_chanbuf:
- lib_ring_buffer_backend_free(&buf->backend);
return ret;
}
+#if 0
static void switch_buffer_timer(unsigned long data)
{
- struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
- struct channel *chan = shmp(buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ struct lttng_ust_lib_ring_buffer *buf = (struct lttng_ust_lib_ring_buffer *)data;
+ struct channel *chan = shmp(handle, buf->backend.chan);
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
/*
* Only flush buffers periodically if readers are active.
*/
- if (uatomic_read(&buf->active_readers))
- lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+ if (uatomic_read(&buf->active_readers) || uatomic_read(&buf->active_shadow_readers))
+ lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE, handle);
//TODO timers
//if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
// mod_timer(&buf->switch_timer,
// jiffies + chan->switch_timer_interval);
}
+#endif //0
-static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_start_switch_timer(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
{
- struct channel *chan = shmp(buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ struct channel *chan = shmp(handle, buf->backend.chan);
+ //const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
if (!chan->switch_timer_interval || buf->switch_timer_enabled)
return;
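+ /*
+ * Timers are not wired up yet in UST (see the #if 0 blocks above);
+ * only the enabled state is tracked for now.
+ */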
buf->switch_timer_enabled = 1;
}
-static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_stop_switch_timer(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
{
- struct channel *chan = shmp(buf->backend.chan);
+ struct channel *chan = shmp(handle, buf->backend.chan);
if (!chan->switch_timer_interval || !buf->switch_timer_enabled)
return;
buf->switch_timer_enabled = 0;
}
+#if 0
/*
* Polling timer to check the channels for data.
*/
static void read_buffer_timer(unsigned long data)
{
- struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
- struct channel *chan = shmp(buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ struct lttng_ust_lib_ring_buffer *buf = (struct lttng_ust_lib_ring_buffer *)data;
+ struct channel *chan = shmp(handle, buf->backend.chan);
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
CHAN_WARN_ON(chan, !buf->backend.allocated);
- if (uatomic_read(&buf->active_readers)
+ if ((uatomic_read(&buf->active_readers) || uatomic_read(&buf->active_shadow_readers))
&& lib_ring_buffer_poll_deliver(config, buf, chan)) {
//TODO
//wake_up_interruptible(&buf->read_wait);
// mod_timer(&buf->read_timer,
// jiffies + chan->read_timer_interval);
}
+#endif //0
-static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_start_read_timer(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
{
- struct channel *chan = shmp(buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ struct channel *chan = shmp(handle, buf->backend.chan);
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
|| !chan->read_timer_interval
buf->read_timer_enabled = 1;
}
-static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_stop_read_timer(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
{
- struct channel *chan = shmp(buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ struct channel *chan = shmp(handle, buf->backend.chan);
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
|| !chan->read_timer_interval
* do one more check to catch data that has been written in the last
* timer period.
*/
- if (lib_ring_buffer_poll_deliver(config, buf, chan)) {
+ if (lib_ring_buffer_poll_deliver(config, buf, chan, handle)) {
//TODO
//wake_up_interruptible(&buf->read_wait);
//wake_up_interruptible(&chan->read_wait);
buf->read_timer_enabled = 0;
}
-static void channel_unregister_notifiers(struct channel *chan)
+static void channel_unregister_notifiers(struct channel *chan,
+ struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
int cpu;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
for_each_possible_cpu(cpu) {
- struct lib_ring_buffer *buf = &shmp(chan->backend.buf)[cpu];
+ struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
- lib_ring_buffer_stop_switch_timer(buf);
- lib_ring_buffer_stop_read_timer(buf);
+ lib_ring_buffer_stop_switch_timer(buf, handle);
+ lib_ring_buffer_stop_read_timer(buf, handle);
}
} else {
- struct lib_ring_buffer *buf = shmp(chan->backend.buf);
+ struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
- lib_ring_buffer_stop_switch_timer(buf);
- lib_ring_buffer_stop_read_timer(buf);
+ lib_ring_buffer_stop_switch_timer(buf, handle);
+ lib_ring_buffer_stop_read_timer(buf, handle);
}
//channel_backend_unregister_notifiers(&chan->backend);
}
-static void channel_free(struct shm_handle *handle)
+static void channel_free(struct channel *chan, struct lttng_ust_shm_handle *handle,
+ int shadow)
{
- struct shm_header *header = handle->header;
- struct channel *chan = shmp(header->chan);
- int ret;
-
- channel_backend_free(&chan->backend);
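+ /*
+ * A shadow handle is a consumer-side mapping of the channel's shm
+ * objects: it does not own the backend, so only the owner handle
+ * tears the backend down.
+ */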
+ if (!shadow)
+ channel_backend_free(&chan->backend, handle);
/* chan is freed by shm teardown */
- ret = munmap(header, header->shm_size);
- if (ret) {
- PERROR("umnmap");
- assert(0);
- }
- ret = close(handle->shmfd);
- if (ret) {
- PERROR("close");
- assert(0);
- }
+ shm_object_table_destroy(handle->table);
+ free(handle);
}
/**
* channel_create - Create channel.
* @config: ring buffer instance configuration
* @name: name of the channel
- * @priv: ring buffer client private data
+ * @priv_data: ring buffer client private data area pointer (output)
+ * @priv_data_size: length, in bytes, of the private data area.
+ * @priv_data_init: initialization data for private data.
 * @buf_addr: pointer to the beginning of the preallocated buffer contiguous
* address mapping. It is used only by RING_BUFFER_STATIC
* configuration. It can be set to NULL for other backends.
* Holds cpu hotplug.
* Returns NULL on failure.
*/
-struct shm_handle *channel_create(const struct lib_ring_buffer_config *config,
- const char *name, void *priv, void *buf_addr,
- size_t subbuf_size,
+struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
+ const char *name,
+ void **priv_data,
+ size_t priv_data_align,
+ size_t priv_data_size,
+ void *priv_data_init,
+ void *buf_addr, size_t subbuf_size,
size_t num_subbuf, unsigned int switch_timer_interval,
- unsigned int read_timer_interval)
+ unsigned int read_timer_interval,
+ int **shm_fd, int **wait_fd, uint64_t **memory_map_size)
{
- int ret, cpu, shmfd;
+ int ret, cpu;
+ size_t shmsize, chansize;
struct channel *chan;
- size_t shmsize, bufshmsize, bufshmalign;
- struct shm_header *shm_header;
- unsigned long num_subbuf_alloc;
- struct shm_handle *handle;
+ struct lttng_ust_shm_handle *handle;
+ struct shm_object *shmobj;
+ struct shm_ref *ref;
if (lib_ring_buffer_check_config(config, switch_timer_interval,
read_timer_interval))
return NULL;
- handle = zmalloc(sizeof(struct shm_handle));
+ handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
if (!handle)
return NULL;
- /* Calculate the shm allocation layout */
- shmsize = sizeof(struct shm_header);
- shmsize += offset_align(shmsize, __alignof__(struct channel));
- shmsize += sizeof(struct channel);
-
- /* Per-cpu buffer size: control (prior to backend) */
- shmsize += offset_align(shmsize, __alignof__(struct lib_ring_buffer));
- bufshmsize = sizeof(struct lib_ring_buffer);
- shmsize += bufshmsize * num_possible_cpus();
-
- /* Per-cpu buffer size: backend */
- shmsize += offset_align(shmsize, PAGE_SIZE);
- /* num_subbuf + 1 is the worse case */
- num_subbuf_alloc = num_subbuf + 1;
- bufshmsize = sizeof(struct lib_ring_buffer_backend_pages *) * num_subbuf_alloc;
- bufshmsize += offset_align(bufshmsize, PAGE_SIZE);
- bufshmsize += subbuf_size * num_subbuf_alloc;
- bufshmsize += offset_align(bufshmsize, __alignof__(struct lib_ring_buffer_backend_pages));
- bufshmsize += sizeof(struct lib_ring_buffer_backend_pages) * num_subbuf_alloc;
- bufshmsize += offset_align(bufshmsize, __alignof__(struct lib_ring_buffer_backend_subbuffer));
- bufshmsize += sizeof(struct lib_ring_buffer_backend_subbuffer) * num_subbuf;
- bufshmsize += offset_align(bufshmsize, PAGE_SIZE);
- shmsize += bufshmsize * num_possible_cpus();
-
- /* Per-cpu buffer size: control (after backend) */
- shmsize += offset_align(shmsize,
- max(__alignof__(struct commit_counters_hot),
- __alignof__(struct commit_counters_cold)));
- bufshmsize = sizeof(struct commit_counters_hot) * num_subbuf;
- bufshmsize += offset_align(bufshmsize, __alignof__(struct commit_counters_cold));
- bufshmsize += sizeof(struct commit_counters_cold) * num_subbuf;
- shmsize += bufshmsize * num_possible_cpus();
+ /* Allocate table for channel + per-cpu buffers */
+ handle->table = shm_object_table_create(1 + num_possible_cpus());
+ if (!handle->table)
+ goto error_table_alloc;
- /*
- * Allocate shm, and immediately unlink its shm oject, keeping
- * only the file descriptor as a reference to the object. If it
- * already exists (caused by short race window during which the
- * global object exists in a concurrent shm_open), simply retry.
- */
- do {
- shmfd = shm_open("/ust-shm-tmp",
- O_CREAT | O_EXCL | O_RDWR, 0700);
- } while (shmfd < 0 && errno == EEXIST);
- if (shmfd < 0) {
- PERROR("shm_open");
- goto error_shm_open;
- }
- ret = shm_unlink("/ust-shm-tmp");
- if (ret) {
- PERROR("shm_unlink");
- goto error_unlink;
- }
- ret = ftruncate(shmfd, shmsize);
- if (ret) {
- PERROR("ftruncate");
- goto error_ftruncate;
- }
-
- shm_header = mmap(NULL, shmsize, PROT_READ | PROT_WRITE,
- MAP_SHARED, shmfd, 0);
- if (shm_header == MAP_FAILED) {
- PERROR("mmap");
- goto error_mmap;
- }
-
- shm_header->magic = SHM_MAGIC;
- shm_header->major = SHM_MAJOR;
- shm_header->major = SHM_MINOR;
- shm_header->bits_per_long = CAA_BITS_PER_LONG;
- shm_header->shm_size = shmsize;
- shm_header->shm_allocated = sizeof(struct shm_header);
-
- align_shm(shm_header, __alignof__(struct channel));
- chan = zalloc_shm(shm_header, sizeof(struct channel));
+ /* Calculate the shm allocation layout */
+ shmsize = sizeof(struct channel);
+ shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_shmp));
+ if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp) * num_possible_cpus();
+ else
+ shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp);
+ chansize = shmsize;
+ shmsize += offset_align(shmsize, priv_data_align);
+ shmsize += priv_data_size;
+
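+ /*
+ * Object 0 therefore holds, in order: struct channel, the per-buffer
+ * shm pointer array, then the client private data area at its
+ * requested alignment.
+ */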
+ shmobj = shm_object_table_append(handle->table, shmsize);
+ if (!shmobj)
+ goto error_append;
+ /* struct channel is at object 0, offset 0 (hardcoded) */
+ set_shmp(handle->chan, zalloc_shm(shmobj, chansize));
+ assert(handle->chan._ref.index == 0);
+ assert(handle->chan._ref.offset == 0);
+ chan = shmp(handle, handle->chan);
if (!chan)
- goto destroy_shmem;
- set_shmp(shm_header->chan, chan);
+ goto error_append;
+
+ /* space for private data */
+ if (priv_data_size) {
+ DECLARE_SHMP(void, priv_data_alloc);
+
+ align_shm(shmobj, priv_data_align);
+ chan->priv_data_offset = shmobj->allocated_len;
+ set_shmp(priv_data_alloc, zalloc_shm(shmobj, priv_data_size));
+ if (!shmp(handle, priv_data_alloc))
+ goto error_append;
+ *priv_data = channel_get_private(chan);
+ memcpy(*priv_data, priv_data_init, priv_data_size);
+ } else {
+ chan->priv_data_offset = -1;
+ *priv_data = NULL;
+ }
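+ /*
+ * An offset is recorded rather than a pointer so that
+ * channel_get_private() can recompute the private data address in any
+ * process mapping the channel shm object.
+ */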
- ret = channel_backend_init(&chan->backend, name, config, priv,
- subbuf_size, num_subbuf, shm_header);
+ ret = channel_backend_init(&chan->backend, name, config,
+ subbuf_size, num_subbuf, handle);
if (ret)
- goto destroy_shmem;
+ goto error_backend_init;
chan->commit_count_mask = (~0UL >> chan->backend.num_subbuf_order);
//TODO
* In that off case, we need to allocate for all possible cpus.
*/
for_each_possible_cpu(cpu) {
- struct lib_ring_buffer *buf = &shmp(chan->backend.buf)[cpu];
- lib_ring_buffer_start_switch_timer(buf);
- lib_ring_buffer_start_read_timer(buf);
+ struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
+ lib_ring_buffer_start_switch_timer(buf, handle);
+ lib_ring_buffer_start_read_timer(buf, handle);
}
} else {
- struct lib_ring_buffer *buf = shmp(chan->backend.buf);
+ struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
- lib_ring_buffer_start_switch_timer(buf);
- lib_ring_buffer_start_read_timer(buf);
+ lib_ring_buffer_start_switch_timer(buf, handle);
+ lib_ring_buffer_start_read_timer(buf, handle);
}
+ ref = &handle->chan._ref;
+ shm_get_object_data(handle, ref, shm_fd, wait_fd, memory_map_size);
+ return handle;
+
+error_backend_init:
+error_append:
+ shm_object_table_destroy(handle->table);
+error_table_alloc:
+ free(handle);
+ return NULL;
+}
+
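+/*
+ * Usage sketch (all "my_*" names below are placeholders): a client
+ * would typically create a channel along these lines:
+ *
+ *	struct my_priv init = { 0 };
+ *	void *priv;
+ *	int *shm_fd, *wait_fd;
+ *	uint64_t *memory_map_size;
+ *	struct lttng_ust_shm_handle *handle;
+ *
+ *	handle = channel_create(&my_config, "my-chan", &priv,
+ *			__alignof__(struct my_priv),
+ *			sizeof(struct my_priv), &init,
+ *			NULL, subbuf_size, num_subbuf, 0, 0,
+ *			&shm_fd, &wait_fd, &memory_map_size);
+ */
+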
+struct lttng_ust_shm_handle *channel_handle_create(int shm_fd, int wait_fd,
+ uint64_t memory_map_size)
+{
+ struct lttng_ust_shm_handle *handle;
+ struct shm_object *object;
- handle->header = shm_header;
- handle->shmfd = shmfd;
+ handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
+ if (!handle)
+ return NULL;
+
+ /* Allocate table for channel + per-cpu buffers */
+ handle->table = shm_object_table_create(1 + num_possible_cpus());
+ if (!handle->table)
+ goto error_table_alloc;
+ /* Add channel object */
+ object = shm_object_table_append_shadow(handle->table,
+ shm_fd, wait_fd, memory_map_size);
+ if (!object)
+ goto error_table_object;
+ /* struct channel is at object 0, offset 0 (hardcoded) */
+ handle->chan._ref.index = 0;
+ handle->chan._ref.offset = 0;
return handle;
-destroy_shmem:
- ret = munmap(shm_header, shmsize);
- if (ret) {
- PERROR("umnmap");
- assert(0);
- }
-error_mmap:
-error_ftruncate:
-error_unlink:
- ret = close(shmfd);
- if (ret) {
- PERROR("close");
- assert(0);
- }
-error_shm_open:
+error_table_object:
+ shm_object_table_destroy(handle->table);
+error_table_alloc:
free(handle);
return NULL;
}
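+
+/*
+ * Add a stream shm object to a shadow handle. Streams presumably need
+ * to be added in the order their objects were created, because shm
+ * references address objects by their table index.
+ */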
+int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
+ int shm_fd, int wait_fd, uint64_t memory_map_size)
+{
+ struct shm_object *object;
+
+ /* Add stream object */
+ object = shm_object_table_append_shadow(handle->table,
+ shm_fd, wait_fd, memory_map_size);
+ if (!object)
+ return -1;
+ return 0;
+}
+
static
-void channel_release(struct shm_handle *handle)
+void channel_release(struct channel *chan, struct lttng_ust_shm_handle *handle,
+ int shadow)
{
- channel_free(handle);
+ channel_free(chan, handle, shadow);
}
/**
* Call "destroy" callback, finalize channels, decrement the channel
* reference count. Note that when readers have completed data
* consumption of finalized channels, get_subbuf() will return -ENODATA.
- * They should release their handle at that point. Returns the private
- * data pointer.
+ * They should release their handle at that point.
*/
-void *channel_destroy(struct shm_handle *handle)
+void channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
+ int shadow)
{
- struct shm_header *header = handle->header;
- struct channel *chan = shmp(header->chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
- void *priv;
- int cpu;
+ if (shadow) {
+ channel_release(chan, handle, shadow);
+ return;
+ }
- channel_unregister_notifiers(chan);
+ channel_unregister_notifiers(chan, handle);
- if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
- for_each_channel_cpu(cpu, chan) {
- struct lib_ring_buffer *buf = &shmp(chan->backend.buf)[cpu];
-
- if (config->cb.buffer_finalize)
- config->cb.buffer_finalize(buf,
- chan->backend.priv,
- cpu);
- if (buf->backend.allocated)
- lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH);
- /*
- * Perform flush before writing to finalized.
- */
- cmm_smp_wmb();
- CMM_ACCESS_ONCE(buf->finalized) = 1;
- //wake_up_interruptible(&buf->read_wait);
- }
- } else {
- struct lib_ring_buffer *buf = shmp(chan->backend.buf);
+ /*
+ * Note: the consumer takes care of finalizing and switching the
+ * buffers.
+ */
- if (config->cb.buffer_finalize)
- config->cb.buffer_finalize(buf, chan->backend.priv, -1);
- if (buf->backend.allocated)
- lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH);
- /*
- * Perform flush before writing to finalized.
- */
- cmm_smp_wmb();
- CMM_ACCESS_ONCE(buf->finalized) = 1;
- //wake_up_interruptible(&buf->read_wait);
- }
- CMM_ACCESS_ONCE(chan->finalized) = 1;
- //wake_up_interruptible(&chan->hp_wait);
- //wake_up_interruptible(&chan->read_wait);
/*
* sessiond/consumer are keeping a reference on the shm file
* descriptor directly. No need to refcount.
*/
- channel_release(handle);
- priv = chan->backend.priv;
- return priv;
+ channel_release(chan, handle, shadow);
+ return;
}
-struct lib_ring_buffer *channel_get_ring_buffer(
- const struct lib_ring_buffer_config *config,
- struct channel *chan, int cpu)
+struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
+ const struct lttng_ust_lib_ring_buffer_config *config,
+ struct channel *chan, int cpu,
+ struct lttng_ust_shm_handle *handle,
+ int **shm_fd, int **wait_fd,
+ uint64_t **memory_map_size)
{
- if (config->alloc == RING_BUFFER_ALLOC_GLOBAL)
- return shmp(chan->backend.buf);
- else
- return &shmp(chan->backend.buf)[cpu];
+ struct shm_ref *ref;
+
+ if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
+ ref = &chan->backend.buf[0].shmp._ref;
+ shm_get_object_data(handle, ref, shm_fd, wait_fd,
+ memory_map_size);
+ return shmp(handle, chan->backend.buf[0].shmp);
+ } else {
+ if (cpu >= num_possible_cpus())
+ return NULL;
+ ref = &chan->backend.buf[cpu].shmp._ref;
+ shm_get_object_data(handle, ref, shm_fd, wait_fd,
+ memory_map_size);
+ return shmp(handle, chan->backend.buf[cpu].shmp);
+ }
}
-int lib_ring_buffer_open_read(struct lib_ring_buffer *buf)
+int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle,
+ int shadow)
{
- struct channel *chan = shmp(buf->backend.chan);
-
+ if (shadow) {
+ if (uatomic_cmpxchg(&buf->active_shadow_readers, 0, 1) != 0)
+ return -EBUSY;
+ cmm_smp_mb();
+ return 0;
+ }
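+ /*
+ * The cmpxchg allows a single concurrent reader per buffer; the
+ * barrier pairs with the one in lib_ring_buffer_release_read().
+ */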
if (uatomic_cmpxchg(&buf->active_readers, 0, 1) != 0)
return -EBUSY;
cmm_smp_mb();
return 0;
}
-void lib_ring_buffer_release_read(struct lib_ring_buffer *buf)
+void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle,
+ int shadow)
{
- struct channel *chan = shmp(buf->backend.chan);
+ struct channel *chan = shmp(handle, buf->backend.chan);
+ if (shadow) {
+ CHAN_WARN_ON(chan, uatomic_read(&buf->active_shadow_readers) != 1);
+ cmm_smp_mb();
+ uatomic_dec(&buf->active_shadow_readers);
+ return;
+ }
CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
cmm_smp_mb();
uatomic_dec(&buf->active_readers);
* data to read at consumed position, or 0 if the get operation succeeds.
*/
-int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
- unsigned long *consumed, unsigned long *produced)
+int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
+ unsigned long *consumed, unsigned long *produced,
+ struct lttng_ust_shm_handle *handle)
{
- struct channel *chan = shmp(buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ struct channel *chan = shmp(handle, buf->backend.chan);
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, write_offset;
int finalized;
* @buf: ring buffer
* @consumed_new: new consumed count value
*/
-void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
- unsigned long consumed_new)
+void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
+ unsigned long consumed_new,
+ struct lttng_ust_shm_handle *handle)
{
- struct lib_ring_buffer_backend *bufb = &buf->backend;
- struct channel *chan = shmp(bufb->chan);
+ struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
+ struct channel *chan = shmp(handle, bufb->chan);
unsigned long consumed;
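+ /* Either a local or a shadow reader must be holding the buffer. */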
- CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
+ CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1
+ && uatomic_read(&buf->active_shadow_readers) != 1);
/*
* Only push the consumed value forward.
* Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
* data to read at consumed position, or 0 if the get operation succeeds.
*/
-int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
- unsigned long consumed)
+int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+ unsigned long consumed,
+ struct lttng_ust_shm_handle *handle)
{
- struct channel *chan = shmp(buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ struct channel *chan = shmp(handle, buf->backend.chan);
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
int ret;
int finalized;
cmm_smp_rmb();
consumed_cur = uatomic_read(&buf->consumed);
consumed_idx = subbuf_index(consumed, chan);
- commit_count = v_read(config, &shmp(buf->commit_cold)[consumed_idx].cc_sb);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_cold, consumed_idx)->cc_sb);
/*
* Make sure we read the commit count before reading the buffer
* data and the write offset. Correct consumed offset ordering
* looking for matches the one contained in the subbuffer id.
*/
ret = update_read_sb_index(config, &buf->backend, &chan->backend,
- consumed_idx, buf_trunc_val(consumed, chan));
+ consumed_idx, buf_trunc_val(consumed, chan),
+ handle);
if (ret)
goto retry;
subbuffer_id_clear_noref(config, &buf->backend.buf_rsb.id);
* lib_ring_buffer_put_subbuf - release exclusive subbuffer access
* @buf: ring buffer
*/
-void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf)
+void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
+ struct lttng_ust_shm_handle *handle)
{
- struct lib_ring_buffer_backend *bufb = &buf->backend;
- struct channel *chan = shmp(bufb->chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
+ struct channel *chan = shmp(handle, bufb->chan);
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long read_sb_bindex, consumed_idx, consumed;
- CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
+ CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1
+ && uatomic_read(&buf->active_shadow_readers) != 1);
if (!buf->get_subbuf) {
/*
*/
read_sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
v_add(config, v_read(config,
- &shmp(bufb->array)[read_sb_bindex]->records_unread),
+ &shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread),
&bufb->records_read);
- v_set(config, &shmp(bufb->array)[read_sb_bindex]->records_unread, 0);
+ v_set(config, &shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread, 0);
CHAN_WARN_ON(chan, config->mode == RING_BUFFER_OVERWRITE
&& subbuffer_id_is_noref(config, bufb->buf_rsb.id));
subbuffer_id_set_noref(config, &bufb->buf_rsb.id);
*/
consumed_idx = subbuf_index(consumed, chan);
update_read_sb_index(config, &buf->backend, &chan->backend,
- consumed_idx, buf_trunc_val(consumed, chan));
+ consumed_idx, buf_trunc_val(consumed, chan),
+ handle);
/*
* update_read_sb_index return value ignored. Don't exchange sub-buffer
* if the writer concurrently updated it.
* position and the writer position. (inclusive)
*/
static
-void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
+void lib_ring_buffer_print_subbuffer_errors(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
unsigned long cons_offset,
- int cpu)
+ int cpu,
+ struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long cons_idx, commit_count, commit_count_sb;
cons_idx = subbuf_index(cons_offset, chan);
- commit_count = v_read(config, &shmp(buf->commit_hot)[cons_idx].cc);
- commit_count_sb = v_read(config, &shmp(buf->commit_cold)[cons_idx].cc_sb);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, cons_idx)->cc);
+ commit_count_sb = v_read(config, &shmp_index(handle, buf->commit_cold, cons_idx)->cc_sb);
if (subbuf_offset(commit_count, chan) != 0)
- ERRMSG("ring buffer %s, cpu %d: "
+ DBG("ring buffer %s, cpu %d: "
"commit count in subbuffer %lu,\n"
"expecting multiples of %lu bytes\n"
" [ %lu bytes committed, %lu bytes reader-visible ]\n",
chan->backend.subbuf_size,
commit_count, commit_count_sb);
- ERRMSG("ring buffer: %s, cpu %d: %lu bytes committed\n",
+ DBG("ring buffer: %s, cpu %d: %lu bytes committed\n",
chan->backend.name, cpu, commit_count);
}
static
-void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
+void lib_ring_buffer_print_buffer_errors(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
- void *priv, int cpu)
+ void *priv, int cpu,
+ struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long write_offset, cons_offset;
- /*
- * Can be called in the error path of allocation when
- * trans_channel_data is not yet set.
- */
- if (!chan)
- return;
/*
* No need to order commit_count, write_offset and cons_offset reads
* because we execute at teardown when no more writer nor reader
write_offset = v_read(config, &buf->offset);
cons_offset = uatomic_read(&buf->consumed);
if (write_offset != cons_offset)
- ERRMSG("ring buffer %s, cpu %d: "
+ DBG("ring buffer %s, cpu %d: "
"non-consumed data\n"
" [ %lu bytes written, %lu bytes read ]\n",
chan->backend.name, cpu, write_offset, cons_offset);
- cons_offset) > 0;
cons_offset = subbuf_align(cons_offset, chan))
lib_ring_buffer_print_subbuffer_errors(buf, chan, cons_offset,
- cpu);
+ cpu, handle);
}
static
void lib_ring_buffer_print_errors(struct channel *chan,
- struct lib_ring_buffer *buf, int cpu)
+ struct lttng_ust_lib_ring_buffer *buf, int cpu,
+ struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
- void *priv = chan->backend.priv;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ void *priv = channel_get_private(chan);
- ERRMSG("ring buffer %s, cpu %d: %lu records written, "
+ DBG("ring buffer %s, cpu %d: %lu records written, "
"%lu records overrun\n",
chan->backend.name, cpu,
v_read(config, &buf->records_count),
if (v_read(config, &buf->records_lost_full)
|| v_read(config, &buf->records_lost_wrap)
|| v_read(config, &buf->records_lost_big))
- ERRMSG("ring buffer %s, cpu %d: records were lost. Caused by:\n"
+ DBG("ring buffer %s, cpu %d: records were lost. Caused by:\n"
" [ %lu buffer full, %lu nest buffer wrap-around, "
"%lu event too big ]\n",
chan->backend.name, cpu,
v_read(config, &buf->records_lost_wrap),
v_read(config, &buf->records_lost_big));
- lib_ring_buffer_print_buffer_errors(buf, chan, priv, cpu);
+ lib_ring_buffer_print_buffer_errors(buf, chan, priv, cpu, handle);
}
/*
* Only executed when the buffer is finalized, in SWITCH_FLUSH.
*/
static
-void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_old_start(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
- u64 tsc)
+ uint64_t tsc,
+ struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long oldidx = subbuf_index(offsets->old, chan);
unsigned long commit_count;
- config->cb.buffer_begin(buf, tsc, oldidx);
+ config->cb.buffer_begin(buf, tsc, oldidx, handle);
/*
* Order all writes to buffer before the commit count update that will
*/
cmm_smp_wmb();
v_add(config, config->cb.subbuffer_header_size(),
- &shmp(buf->commit_hot)[oldidx].cc);
- commit_count = v_read(config, &shmp(buf->commit_hot)[oldidx].cc);
+ &shmp_index(handle, buf->commit_hot, oldidx)->cc);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
/* Check if the written buffer has to be delivered */
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
- commit_count, oldidx);
+ commit_count, oldidx, handle);
lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
offsets->old, commit_count,
- config->cb.subbuffer_header_size());
+ config->cb.subbuffer_header_size(),
+ handle);
}
/*
* subbuffer.
*/
static
-void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_old_end(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
- u64 tsc)
+ uint64_t tsc,
+ struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
unsigned long commit_count, padding_size, data_size;
data_size = subbuf_offset(offsets->old - 1, chan) + 1;
padding_size = chan->backend.subbuf_size - data_size;
- subbuffer_set_data_size(config, &buf->backend, oldidx, data_size);
+ subbuffer_set_data_size(config, &buf->backend, oldidx, data_size,
+ handle);
/*
* Order all writes to buffer before the commit count update that will
* determine that the subbuffer is full.
*/
cmm_smp_wmb();
- v_add(config, padding_size, &shmp(buf->commit_hot)[oldidx].cc);
- commit_count = v_read(config, &shmp(buf->commit_hot)[oldidx].cc);
+ v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
- commit_count, oldidx);
+ commit_count, oldidx, handle);
lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
offsets->old, commit_count,
- padding_size);
+ padding_size, handle);
}
/*
* that this code is executed before the deliver of this sub-buffer.
*/
static
-void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_new_start(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
- u64 tsc)
+ uint64_t tsc,
+ struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long beginidx = subbuf_index(offsets->begin, chan);
unsigned long commit_count;
- config->cb.buffer_begin(buf, tsc, beginidx);
+ config->cb.buffer_begin(buf, tsc, beginidx, handle);
/*
* Order all writes to buffer before the commit count update that will
*/
cmm_smp_wmb();
v_add(config, config->cb.subbuffer_header_size(),
- &shmp(buf->commit_hot)[beginidx].cc);
- commit_count = v_read(config, &shmp(buf->commit_hot)[beginidx].cc);
+ &shmp_index(handle, buf->commit_hot, beginidx)->cc);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, beginidx)->cc);
/* Check if the written buffer has to be delivered */
lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
- commit_count, beginidx);
+ commit_count, beginidx, handle);
lib_ring_buffer_write_commit_counter(config, buf, chan, beginidx,
offsets->begin, commit_count,
- config->cb.subbuffer_header_size());
+ config->cb.subbuffer_header_size(),
+ handle);
}
/*
* have to do the deliver themselves.
*/
static
-void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
- struct channel *chan,
- struct switch_offsets *offsets,
- u64 tsc)
+void lib_ring_buffer_switch_new_end(struct lttng_ust_lib_ring_buffer *buf,
+ struct channel *chan,
+ struct switch_offsets *offsets,
+ uint64_t tsc,
+ struct lttng_ust_shm_handle *handle)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long endidx = subbuf_index(offsets->end - 1, chan);
unsigned long commit_count, padding_size, data_size;
data_size = subbuf_offset(offsets->end - 1, chan) + 1;
padding_size = chan->backend.subbuf_size - data_size;
- subbuffer_set_data_size(config, &buf->backend, endidx, data_size);
+ subbuffer_set_data_size(config, &buf->backend, endidx, data_size,
+ handle);
/*
* Order all writes to buffer before the commit count update that will
* determine that the subbuffer is full.
*/
cmm_smp_wmb();
- v_add(config, padding_size, &shmp(buf->commit_hot)[endidx].cc);
- commit_count = v_read(config, &shmp(buf->commit_hot)[endidx].cc);
+ v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, endidx)->cc);
+ commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
lib_ring_buffer_check_deliver(config, buf, chan, offsets->end - 1,
- commit_count, endidx);
+ commit_count, endidx, handle);
lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
offsets->end, commit_count,
- padding_size);
+ padding_size, handle);
}
/*
*/
static
int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
- struct lib_ring_buffer *buf,
+ struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
- u64 *tsc)
+ uint64_t *tsc)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
unsigned long off;
offsets->begin = v_read(config, &buf->offset);
* quiescence guarantees for the fusion merge.
*/
if (mode == SWITCH_FLUSH || off > 0) {
- if (unlikely(off == 0)) {
+ if (caa_unlikely(off == 0)) {
/*
* The client does not save any header information.
* Don't switch empty subbuffer on finalize, because it
* operations, this function must be called from the CPU which owns the buffer
* for a ACTIVE flush.
*/
-void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode)
+void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
+ struct lttng_ust_shm_handle *handle)
{
- struct channel *chan = shmp(buf->backend.chan);
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ struct channel *chan = shmp(handle, buf->backend.chan);
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
struct switch_offsets offsets;
unsigned long oldidx;
- u64 tsc;
+ uint64_t tsc;
offsets.size = 0;
lib_ring_buffer_reserve_push_reader(buf, chan, offsets.old);
oldidx = subbuf_index(offsets.old, chan);
- lib_ring_buffer_clear_noref(config, &buf->backend, oldidx);
+ lib_ring_buffer_clear_noref(config, &buf->backend, oldidx, handle);
/*
* May need to populate header start on SWITCH_FLUSH.
*/
if (offsets.switch_old_start) {
- lib_ring_buffer_switch_old_start(buf, chan, &offsets, tsc);
+ lib_ring_buffer_switch_old_start(buf, chan, &offsets, tsc, handle);
offsets.old += config->cb.subbuffer_header_size();
}
/*
* Switch old subbuffer.
*/
- lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc);
+ lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc, handle);
}
/*
* -EIO if data cannot be written into the buffer for any other reason.
*/
static
-int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
+int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
struct channel *chan,
struct switch_offsets *offsets,
- struct lib_ring_buffer_ctx *ctx)
+ struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
- const struct lib_ring_buffer_config *config = chan->backend.config;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_ust_shm_handle *handle = ctx->handle;
unsigned long reserve_commit_diff;
offsets->begin = v_read(config, &buf->offset);
if (last_tsc_overflow(config, buf, ctx->tsc))
ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
- if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
+ if (caa_unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
offsets->switch_new_start = 1; /* For offsets->begin */
} else {
offsets->size = config->cb.record_header_size(config, chan,
lib_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
+ ctx->data_size;
- if (unlikely(subbuf_offset(offsets->begin, chan) +
+ if (caa_unlikely(subbuf_offset(offsets->begin, chan) +
offsets->size > chan->backend.subbuf_size)) {
offsets->switch_old_end = 1; /* For offsets->old */
offsets->switch_new_start = 1; /* For offsets->begin */
}
}
- if (unlikely(offsets->switch_new_start)) {
+ if (caa_unlikely(offsets->switch_new_start)) {
unsigned long sb_index;
/*
* We are typically not filling the previous buffer completely.
*/
- if (likely(offsets->switch_old_end))
+ if (caa_likely(offsets->switch_old_end))
offsets->begin = subbuf_align(offsets->begin, chan);
offsets->begin = offsets->begin
+ config->cb.subbuffer_header_size();
(buf_trunc(offsets->begin, chan)
>> chan->backend.num_subbuf_order)
- ((unsigned long) v_read(config,
- &shmp(buf->commit_cold)[sb_index].cc_sb)
+ &shmp_index(handle, buf->commit_cold, sb_index)->cc_sb)
& chan->commit_count_mask);
- if (likely(reserve_commit_diff == 0)) {
+ if (caa_likely(reserve_commit_diff == 0)) {
/* Next subbuffer not being written to. */
- if (unlikely(config->mode != RING_BUFFER_OVERWRITE &&
+ if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
subbuf_trunc(offsets->begin, chan)
- subbuf_trunc((unsigned long)
uatomic_read(&buf->consumed), chan)
lib_ring_buffer_align(offsets->begin + offsets->size,
ctx->largest_align)
+ ctx->data_size;
- if (unlikely(subbuf_offset(offsets->begin, chan)
+ if (caa_unlikely(subbuf_offset(offsets->begin, chan)
+ offsets->size > chan->backend.subbuf_size)) {
/*
* Record too big for subbuffers, report error, don't
}
offsets->end = offsets->begin + offsets->size;
- if (unlikely(subbuf_offset(offsets->end, chan) == 0)) {
+ if (caa_unlikely(subbuf_offset(offsets->end, chan) == 0)) {
/*
* The offset_end will fall at the very beginning of the next
* subbuffer.
* -EIO for other errors, else returns 0.
* It will take care of sub-buffer switching.
*/
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
{
struct channel *chan = ctx->chan;
- const struct lib_ring_buffer_config *config = chan->backend.config;
- struct lib_ring_buffer *buf;
+ struct lttng_ust_shm_handle *handle = ctx->handle;
+ const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+ struct lttng_ust_lib_ring_buffer *buf;
struct switch_offsets offsets;
int ret;
if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
- buf = &shmp(chan->backend.buf)[ctx->cpu];
+ buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
else
- buf = shmp(chan->backend.buf);
+ buf = shmp(handle, chan->backend.buf[0].shmp);
ctx->buf = buf;
offsets.size = 0;
do {
ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
ctx);
- if (unlikely(ret))
+ if (caa_unlikely(ret))
return ret;
- } while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
+ } while (caa_unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
offsets.end)
!= offsets.old));
* Clear noref flag for this subbuffer.
*/
lib_ring_buffer_clear_noref(config, &buf->backend,
- subbuf_index(offsets.end - 1, chan));
+ subbuf_index(offsets.end - 1, chan),
+ handle);
/*
* Switch old subbuffer if needed.
*/
- if (unlikely(offsets.switch_old_end)) {
+ if (caa_unlikely(offsets.switch_old_end)) {
lib_ring_buffer_clear_noref(config, &buf->backend,
- subbuf_index(offsets.old - 1, chan));
- lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->tsc);
+ subbuf_index(offsets.old - 1, chan),
+ handle);
+ lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->tsc, handle);
}
/*
* Populate new subbuffer.
*/
- if (unlikely(offsets.switch_new_start))
- lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc);
+ if (caa_unlikely(offsets.switch_new_start))
+ lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc, handle);
- if (unlikely(offsets.switch_new_end))
- lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc);
+ if (caa_unlikely(offsets.switch_new_end))
+ lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc, handle);
ctx->slot_size = offsets.size;
ctx->pre_offset = offsets.begin;