X-Git-Url: http://git.lttng.org/?a=blobdiff_plain;f=libringbuffer%2Fring_buffer_frontend.c;h=310e0b93e51a435b74e6d0b9d0c6b4ebc6226abf;hb=4318ae1be57eb7983ab4857a7a8eeb4a030a8216;hp=2b4ecc24b1c44769afe455f40773b6c6db5233c8;hpb=a6352fd40a2090fd883a6c369144bf405c9e9ec4;p=lttng-ust.git

diff --git a/libringbuffer/ring_buffer_frontend.c b/libringbuffer/ring_buffer_frontend.c
index 2b4ecc24..310e0b93 100644
--- a/libringbuffer/ring_buffer_frontend.c
+++ b/libringbuffer/ring_buffer_frontend.c
@@ -39,16 +39,38 @@
  */
 
 #include
-#include
+#include
+#include
+#include
 #include
 #include
 
 #include "smp.h"
-#include "config.h"
+#include
 #include "backend.h"
 #include "frontend.h"
 #include "shm.h"
 
+#ifndef max
+#define max(a, b)	((a) > (b) ? (a) : (b))
+#endif
+
+/*
+ * Use POSIX SHM: shm_open(3) and shm_unlink(3).
+ * close(2) to close the fd returned by shm_open.
+ * shm_unlink releases the shared memory object name.
+ * ftruncate(2) sets the size of the memory object.
+ * mmap/munmap maps the shared memory obj to a virtual address in the
+ * calling process (should be done both in libust and consumer).
+ * See shm_overview(7) for details.
+ * Pass file descriptor returned by shm_open(3) to ltt-sessiond through
+ * a UNIX socket.
+ *
+ * Since we don't need to access the object using its name, we can
+ * immediately shm_unlink(3) it, and only keep the handle with its file
+ * descriptor.
+ */
+
 /*
  * Internal structure representing offsets to use at a sub-buffer switch.
  */
@@ -63,18 +85,20 @@ __thread unsigned int lib_ring_buffer_nesting;
 
 static
 void lib_ring_buffer_print_errors(struct channel *chan,
-				  struct lib_ring_buffer *buf, int cpu);
+				  struct lttng_ust_lib_ring_buffer *buf, int cpu,
+				  struct lttng_ust_shm_handle *handle);
 
 /*
  * Must be called under cpu hotplug protection.
 */
-void lib_ring_buffer_free(struct lib_ring_buffer *buf)
+void lib_ring_buffer_free(struct lttng_ust_lib_ring_buffer *buf,
+			  struct lttng_ust_shm_handle *handle)
 {
-	struct channel *chan = shmp(buf->backend.chan);
+	struct channel *chan = shmp(handle, buf->backend.chan);
 
-	lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu);
-	free(shmp(buf->commit_hot));
-	free(shmp(buf->commit_cold));
+	lib_ring_buffer_print_errors(chan, buf, buf->backend.cpu, handle);
+	/* buf->commit_hot will be freed by shm teardown */
+	/* buf->commit_cold will be freed by shm teardown */
 	lib_ring_buffer_backend_free(&buf->backend);
 }
 
@@ -88,10 +112,11 @@ void lib_ring_buffer_free(struct lib_ring_buffer *buf)
  * should not be using the iterator concurrently with reset. The previous
  * current iterator record is reset.
  */
-void lib_ring_buffer_reset(struct lib_ring_buffer *buf)
+void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
+			   struct lttng_ust_shm_handle *handle)
 {
-	struct channel *chan = shmp(buf->backend.chan);
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	struct channel *chan = shmp(handle, buf->backend.chan);
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned int i;
 
 	/*
@@ -100,14 +125,14 @@ void lib_ring_buffer_reset(struct lib_ring_buffer *buf)
 	 */
 	v_set(config, &buf->offset, 0);
 	for (i = 0; i < chan->backend.num_subbuf; i++) {
-		v_set(config, &shmp(buf->commit_hot)[i].cc, 0);
-		v_set(config, &shmp(buf->commit_hot)[i].seq, 0);
-		v_set(config, &shmp(buf->commit_cold)[i].cc_sb, 0);
+		v_set(config, &shmp_index(handle, buf->commit_hot, i)->cc, 0);
+		v_set(config, &shmp_index(handle, buf->commit_hot, i)->seq, 0);
+		v_set(config, &shmp_index(handle, buf->commit_cold, i)->cc_sb, 0);
 	}
 	uatomic_set(&buf->consumed, 0);
 	uatomic_set(&buf->record_disabled, 0);
 	v_set(config, &buf->last_tsc, 0);
-	lib_ring_buffer_backend_reset(&buf->backend);
+	lib_ring_buffer_backend_reset(&buf->backend, handle);
 	/* Don't reset number of active readers */
 	v_set(config, &buf->records_lost_full, 0);
 	v_set(config, &buf->records_lost_wrap, 0);
@@ -142,11 +167,12 @@ void channel_reset(struct channel *chan)
 /*
  * Must be called under cpu hotplug protection.
  */
-int lib_ring_buffer_create(struct lib_ring_buffer *buf,
+int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
 			   struct channel_backend *chanb, int cpu,
-			   struct shm_header *shm_header)
+			   struct lttng_ust_shm_handle *handle,
+			   struct shm_object *shmobj)
 {
-	const struct lib_ring_buffer_config *config = chanb->config;
+	const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
 	struct channel *chan = caa_container_of(chanb, struct channel, backend);
 	void *priv = chanb->priv;
 	unsigned int num_subbuf;
@@ -159,22 +185,24 @@ int lib_ring_buffer_create(struct lib_ring_buffer *buf,
 		return 0;
 
 	ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend,
-					     cpu, shm_header);
+					     cpu, handle, shmobj);
 	if (ret)
 		return ret;
 
-	set_shmp(&buf->commit_hot,
-		 zalloc_shm(shm_header,
-			sizeof(*buf->commit_hot) * chan->backend.num_subbuf));
-	if (!shmp(buf->commit_hot)) {
+	align_shm(shmobj, __alignof__(struct commit_counters_hot));
+	set_shmp(buf->commit_hot,
+		 zalloc_shm(shmobj,
+			sizeof(struct commit_counters_hot) * chan->backend.num_subbuf));
+	if (!shmp(handle, buf->commit_hot)) {
 		ret = -ENOMEM;
 		goto free_chanbuf;
 	}
 
-	set_shmp(&buf->commit_cold,
-		 zalloc_shm(shm_header,
-			sizeof(*buf->commit_cold) * chan->backend.num_subbuf));
-	if (!shmp(buf->commit_cold)) {
+	align_shm(shmobj, __alignof__(struct commit_counters_cold));
+	set_shmp(buf->commit_cold,
+		 zalloc_shm(shmobj,
+			sizeof(struct commit_counters_cold) * chan->backend.num_subbuf));
+	if (!shmp(handle, buf->commit_cold)) {
 		ret = -ENOMEM;
 		goto free_commit;
 	}
 
@@ -188,13 +216,13 @@ int lib_ring_buffer_create(struct lib_ring_buffer *buf,
 	 */
 	subbuf_header_size = config->cb.subbuffer_header_size();
 	v_set(config, &buf->offset, subbuf_header_size);
-	subbuffer_id_clear_noref(config, &shmp(buf->backend.buf_wsb)[0].id);
-	tsc = config->cb.ring_buffer_clock_read(shmp(buf->backend.chan));
-	config->cb.buffer_begin(buf, tsc, 0);
-	v_add(config, subbuf_header_size, &shmp(buf->commit_hot)[0].cc);
+	subbuffer_id_clear_noref(config, &shmp_index(handle, buf->backend.buf_wsb, 0)->id);
+	tsc = config->cb.ring_buffer_clock_read(shmp(handle,
+			buf->backend.chan));
+	config->cb.buffer_begin(buf, tsc, 0, handle);
+	v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->cc);
 
 	if (config->cb.buffer_create) {
-		ret = config->cb.buffer_create(buf, priv, cpu, chanb->name);
+		ret = config->cb.buffer_create(buf, priv, cpu, chanb->name, handle);
 		if (ret)
 			goto free_init;
 	}
@@ -211,17 +239,18 @@ free_chanbuf:
 	return ret;
 }
 
+#if 0
 static void switch_buffer_timer(unsigned long data)
 {
-	struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
-	struct channel *chan = shmp(buf->backend.chan);
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	struct lttng_ust_lib_ring_buffer *buf = (struct lttng_ust_lib_ring_buffer *)data;
+	struct channel *chan = shmp(handle, buf->backend.chan);
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 
 	/*
 	 * Only flush buffers periodically if readers are active.
 	 */
-	if (uatomic_read(&buf->active_readers))
-		lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE);
+	if (uatomic_read(&buf->active_readers) || uatomic_read(&buf->active_shadow_readers))
+		lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE, handle);
 
 	//TODO timers
 	//if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
@@ -231,11 +260,13 @@ static void switch_buffer_timer(unsigned long data)
 	//	mod_timer(&buf->switch_timer,
 	//	      jiffies + chan->switch_timer_interval);
 }
+#endif //0
 
-static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_start_switch_timer(struct lttng_ust_lib_ring_buffer *buf,
+			   struct lttng_ust_shm_handle *handle)
 {
-	struct channel *chan = shmp(buf->backend.chan);
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	struct channel *chan = shmp(handle, buf->backend.chan);
+	//const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 
 	if (!chan->switch_timer_interval || buf->switch_timer_enabled)
 		return;
@@ -251,9 +282,10 @@ static void lib_ring_buffer_start_switch_timer(struct lib_ring_buffer *buf)
 	buf->switch_timer_enabled = 1;
 }
 
-static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_stop_switch_timer(struct lttng_ust_lib_ring_buffer *buf,
+			   struct lttng_ust_shm_handle *handle)
 {
-	struct channel *chan = shmp(buf->backend.chan);
+	struct channel *chan = shmp(handle, buf->backend.chan);
 
 	if (!chan->switch_timer_interval || !buf->switch_timer_enabled)
 		return;
@@ -263,18 +295,19 @@ static void lib_ring_buffer_stop_switch_timer(struct lib_ring_buffer *buf)
 	buf->switch_timer_enabled = 0;
 }
 
+#if 0
 /*
  * Polling timer to check the channels for data.
  */
 static void read_buffer_timer(unsigned long data)
 {
-	struct lib_ring_buffer *buf = (struct lib_ring_buffer *)data;
-	struct channel *chan = shmp(buf->backend.chan);
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	struct lttng_ust_lib_ring_buffer *buf = (struct lttng_ust_lib_ring_buffer *)data;
+	struct channel *chan = shmp(handle, buf->backend.chan);
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 
 	CHAN_WARN_ON(chan, !buf->backend.allocated);
 
-	if (uatomic_read(&buf->active_readers)
+	if (uatomic_read(&buf->active_readers) || uatomic_read(&buf->active_shadow_readers))
 	    && lib_ring_buffer_poll_deliver(config, buf, chan)) {
 		//TODO
 		//wake_up_interruptible(&buf->read_wait);
@@ -289,11 +322,13 @@ static void read_buffer_timer(unsigned long data)
 	//	mod_timer(&buf->read_timer,
 	//	      jiffies + chan->read_timer_interval);
 }
+#endif //0
 
-static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_start_read_timer(struct lttng_ust_lib_ring_buffer *buf,
+			   struct lttng_ust_shm_handle *handle)
 {
-	struct channel *chan = shmp(buf->backend.chan);
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	struct channel *chan = shmp(handle, buf->backend.chan);
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 
 	if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
 	    || !chan->read_timer_interval
@@ -313,10 +348,11 @@ static void lib_ring_buffer_start_read_timer(struct lib_ring_buffer *buf)
 	buf->read_timer_enabled = 1;
 }
 
-static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
+static void lib_ring_buffer_stop_read_timer(struct lttng_ust_lib_ring_buffer *buf,
+			   struct lttng_ust_shm_handle *handle)
 {
-	struct channel *chan = shmp(buf->backend.chan);
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	struct channel *chan = shmp(handle, buf->backend.chan);
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 
 	if (config->wakeup != RING_BUFFER_WAKEUP_BY_TIMER
 	    || !chan->read_timer_interval
@@ -329,7 +365,7 @@ static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
 	 * do one more check to catch data that has been written in the last
 	 * timer period.
 	 */
-	if (lib_ring_buffer_poll_deliver(config, buf, chan)) {
+	if (lib_ring_buffer_poll_deliver(config, buf, chan, handle)) {
 		//TODO
 		//wake_up_interruptible(&buf->read_wait);
 		//wake_up_interruptible(&chan->read_wait);
@@ -337,31 +373,36 @@ static void lib_ring_buffer_stop_read_timer(struct lib_ring_buffer *buf)
 	buf->read_timer_enabled = 0;
 }
 
-static void channel_unregister_notifiers(struct channel *chan)
+static void channel_unregister_notifiers(struct channel *chan,
+			   struct lttng_ust_shm_handle *handle)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 	int cpu;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
 		for_each_possible_cpu(cpu) {
-			struct lib_ring_buffer *buf = &shmp(chan->backend.buf)[cpu];
+			struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
 
-			lib_ring_buffer_stop_switch_timer(buf);
-			lib_ring_buffer_stop_read_timer(buf);
+			lib_ring_buffer_stop_switch_timer(buf, handle);
+			lib_ring_buffer_stop_read_timer(buf, handle);
 		}
 	} else {
-		struct lib_ring_buffer *buf = shmp(chan->backend.buf);
+		struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
 
-		lib_ring_buffer_stop_switch_timer(buf);
-		lib_ring_buffer_stop_read_timer(buf);
+		lib_ring_buffer_stop_switch_timer(buf, handle);
+		lib_ring_buffer_stop_read_timer(buf, handle);
 	}
-	channel_backend_unregister_notifiers(&chan->backend);
+	//channel_backend_unregister_notifiers(&chan->backend);
 }
 
-static void channel_free(struct channel *chan)
+static void channel_free(struct channel *chan, struct lttng_ust_shm_handle *handle,
+			 int shadow)
 {
-	channel_backend_free(&chan->backend);
-	free(chan);
+	if (!shadow)
+		channel_backend_free(&chan->backend, handle);
+	/* chan is freed by shm teardown */
+	shm_object_table_destroy(handle->table);
+	free(handle);
 }
 
 /**
@@ -378,96 +419,64 @@ static void channel_free(struct channel *chan)
  *                         padding to let readers get those sub-buffers.
  *                         Used for live streaming.
  * @read_timer_interval: Time interval (in us) to wake up pending readers.
- * @shmid: shared memory ID (output)
  *
 * Holds cpu hotplug.
 * Returns NULL on failure.
 */
-struct channel *channel_create(const struct lib_ring_buffer_config *config,
+struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buffer_config *config,
 		   const char *name, void *priv, void *buf_addr,
 		   size_t subbuf_size,
 		   size_t num_subbuf, unsigned int switch_timer_interval,
 		   unsigned int read_timer_interval,
-		   int *shmid)
+		   int *shm_fd, int *wait_fd, uint64_t *memory_map_size)
 {
 	int ret, cpu;
+	size_t shmsize;
 	struct channel *chan;
-	size_t shmsize, bufshmsize;
-	struct shm_header *shm_header;
-	unsigned long num_subbuf_alloc;
+	struct lttng_ust_shm_handle *handle;
+	struct shm_object *shmobj;
+	struct shm_ref *ref;
 
 	if (lib_ring_buffer_check_config(config, switch_timer_interval,
 					 read_timer_interval))
 		return NULL;
 
-	/* Calculate the shm allocation layout */
-	shmsize = sizeof(struct shm_header);
-	shmsize += sizeof(struct channel);
-
-	/* Per-cpu buffer size: control (prior to backend) */
-	bufshmsize = sizeof(struct lib_ring_buffer);
-	shmsize += bufshmsize * num_possible_cpus();
-
-	/* Per-cpu buffer size: backend */
-	/* num_subbuf + 1 is the worse case */
-	num_subbuf_alloc = num_subbuf + 1;
-	bufshmsize = sizeof(struct lib_ring_buffer_backend_pages *) * num_subbuf_alloc;
-	bufshmsize += subbuf_size * (num_subbuf_alloc);
-	bufshmsize += (sizeof(struct lib_ring_buffer_backend_pages) + subbuf_size) * num_subbuf_alloc;
-	bufshmsize += sizeof(struct lib_ring_buffer_backend_subbuffer) * num_subbuf;
-	shmsize += bufshmsize * num_possible_cpus();
-
-	/* Per-cpu buffer size: control (after backend) */
-	bufshmsize += sizeof(struct commit_counters_hot) * num_subbuf;
-	bufshmsize += sizeof(struct commit_counters_cold) * num_subbuf;
-
-	/* Allocate shm */
-	*shmid = shmget(getpid(), shmsize, IPC_CREAT | IPC_EXCL | 0700);
-	if (*shmid < 0) {
-		if (errno == EINVAL)
-			ERR("shmget() returned EINVAL; maybe /proc/sys/kernel/shmmax should be increased.");
-		else
-			PERROR("shmget");
+	handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
+	if (!handle)
 		return NULL;
-	}
-
-	shm_header = shmat(*shmid, NULL, 0);
-	if (shm_header == (void *) -1) {
-		perror("shmat");
-		goto destroy_shmem;
-	}
-
-	/* Already mark the shared memory for destruction. This will occur only
-	 * when all users have detached.
-	 */
-	ret = shmctl(*shmid, IPC_RMID, NULL);
-	if (ret == -1) {
-		perror("shmctl");
-		goto destroy_shmem;
-	}
-	shm_header->magic = SHM_MAGIC;
-	shm_header->major = SHM_MAJOR;
-	shm_header->major = SHM_MINOR;
-	shm_header->bits_per_long = CAA_BITS_PER_LONG;
-	shm_header->shm_size = shmsize;
-	shm_header->shm_allocated = sizeof(struct shm_header);
+	/* Allocate table for channel + per-cpu buffers */
+	handle->table = shm_object_table_create(1 + num_possible_cpus());
+	if (!handle->table)
+		goto error_table_alloc;
 
-	chan = zalloc_shm(shm_header, sizeof(struct channel));
+	/* Calculate the shm allocation layout */
+	shmsize = sizeof(struct channel);
+	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
+		shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp) * num_possible_cpus();
+	else
+		shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp);
+
+	shmobj = shm_object_table_append(handle->table, shmsize);
+	if (!shmobj)
+		goto error_append;
+	/* struct channel is at object 0, offset 0 (hardcoded) */
+	set_shmp(handle->chan, zalloc_shm(shmobj, sizeof(struct channel)));
+	assert(handle->chan._ref.index == 0);
+	assert(handle->chan._ref.offset == 0);
+	chan = shmp(handle, handle->chan);
 	if (!chan)
-		goto destroy_shmem;
-	set_shmp(shm_header->chan, chan);
+		goto error_append;
 
 	ret = channel_backend_init(&chan->backend, name, config, priv,
-				   subbuf_size, num_subbuf, shm_header);
+				   subbuf_size, num_subbuf, handle);
 	if (ret)
-		goto destroy_shmem;
+		goto error_backend_init;
 
 	chan->commit_count_mask = (~0UL >> chan->backend.num_subbuf_order);
 	//TODO
 	//chan->switch_timer_interval = usecs_to_jiffies(switch_timer_interval);
 	//chan->read_timer_interval = usecs_to_jiffies(read_timer_interval);
-	urcu_ref_init(&chan->ref);
 	//TODO
 	//init_waitqueue_head(&chan->read_wait);
 	//init_waitqueue_head(&chan->hp_wait);
@@ -479,32 +488,77 @@ struct channel *channel_create(const struct lib_ring_buffer_config *config,
 	 * In that off case, we need to allocate for all possible cpus.
 	 */
 		for_each_possible_cpu(cpu) {
-			struct lib_ring_buffer *buf = &shmp(chan->backend.buf)[cpu];
-			lib_ring_buffer_start_switch_timer(buf);
-			lib_ring_buffer_start_read_timer(buf);
+			struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
+			lib_ring_buffer_start_switch_timer(buf, handle);
+			lib_ring_buffer_start_read_timer(buf, handle);
 		}
 	} else {
-		struct lib_ring_buffer *buf = shmp(chan->backend.buf);
+		struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
 
-		lib_ring_buffer_start_switch_timer(buf);
-		lib_ring_buffer_start_read_timer(buf);
+		lib_ring_buffer_start_switch_timer(buf, handle);
+		lib_ring_buffer_start_read_timer(buf, handle);
 	}
+	ref = &handle->chan._ref;
+	shm_get_object_data(handle, ref, shm_fd, wait_fd, memory_map_size);
+	return handle;
+
+error_backend_init:
+error_append:
+	shm_object_table_destroy(handle->table);
+error_table_alloc:
+	free(handle);
+	return NULL;
+}
+
+struct lttng_ust_shm_handle *channel_handle_create(int shm_fd, int wait_fd,
+					uint64_t memory_map_size)
+{
+	struct lttng_ust_shm_handle *handle;
+	struct shm_object *object;
 
-	return chan;
+	handle = zmalloc(sizeof(struct lttng_ust_shm_handle));
+	if (!handle)
+		return NULL;
 
-destroy_shmem:
-	ret = shmctl(*shmid, IPC_RMID, NULL);
-	if (ret == -1) {
-		perror("shmctl");
-	}
+	/* Allocate table for channel + per-cpu buffers */
+	handle->table = shm_object_table_create(1 + num_possible_cpus());
+	if (!handle->table)
+		goto error_table_alloc;
+	/* Add channel object */
+	object = shm_object_table_append_shadow(handle->table,
+			shm_fd, wait_fd, memory_map_size);
+	if (!object)
+		goto error_table_object;
+	/* struct channel is at object 0, offset 0 (hardcoded) */
+	handle->chan._ref.index = 0;
+	handle->chan._ref.offset = 0;
+	return handle;
+
+error_table_object:
+	shm_object_table_destroy(handle->table);
+error_table_alloc:
+	free(handle);
 	return NULL;
 }
 
+int channel_handle_add_stream(struct lttng_ust_shm_handle *handle,
+			int shm_fd, int wait_fd, uint64_t memory_map_size)
+{
+	struct shm_object *object;
+
+	/* Add stream object */
+	object = shm_object_table_append_shadow(handle->table,
+			shm_fd, wait_fd, memory_map_size);
+	if (!object)
+		return -1;
+	return 0;
+}
+
 static
-void channel_release(struct urcu_ref *ref)
+void channel_release(struct channel *chan, struct lttng_ust_shm_handle *handle,
+		     int shadow)
 {
-	struct channel *chan = caa_container_of(ref, struct channel, ref);
-	channel_free(chan);
+	channel_free(chan, handle, shadow);
 }
 
 /**
@@ -512,30 +566,37 @@ void channel_release(struct urcu_ref *ref)
  * @chan: channel to destroy
 *
 * Holds cpu hotplug.
- * Call "destroy" callback, finalize channels, wait for readers to release their
- * reference, then destroy ring buffer data. Note that when readers have
- * completed data consumption of finalized channels, get_subbuf() will return
- * -ENODATA. They should release their handle at that point.
- * Returns the private data pointer.
+ * Call "destroy" callback, finalize channels, decrement the channel
+ * reference count. Note that when readers have completed data
+ * consumption of finalized channels, get_subbuf() will return -ENODATA.
+ * They should release their handle at that point. Returns the private
+ * data pointer.
 */
-void *channel_destroy(struct channel *chan)
+void *channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
+		int shadow)
 {
-	int cpu;
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 	void *priv;
+	int cpu;
+
+	if (shadow) {
+		channel_release(chan, handle, shadow);
+		return NULL;
+	}
 
-	channel_unregister_notifiers(chan);
+	channel_unregister_notifiers(chan, handle);
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU) {
 		for_each_channel_cpu(cpu, chan) {
-			struct lib_ring_buffer *buf = &shmp(chan->backend.buf)[cpu];
+			struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[cpu].shmp);
 
 			if (config->cb.buffer_finalize)
 				config->cb.buffer_finalize(buf,
 							   chan->backend.priv,
-							   cpu);
+							   cpu, handle);
 			if (buf->backend.allocated)
-				lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH);
+				lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH,
+						handle);
 			/*
 			 * Perform flush before writing to finalized.
 			 */
@@ -544,12 +605,13 @@ void *channel_destroy(struct channel *chan)
 			//wake_up_interruptible(&buf->read_wait);
 		}
 	} else {
-		struct lib_ring_buffer *buf = shmp(chan->backend.buf);
+		struct lttng_ust_lib_ring_buffer *buf = shmp(handle, chan->backend.buf[0].shmp);
 
 		if (config->cb.buffer_finalize)
-			config->cb.buffer_finalize(buf, chan->backend.priv, -1);
+			config->cb.buffer_finalize(buf, chan->backend.priv, -1, handle);
 		if (buf->backend.allocated)
-			lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH);
+			lib_ring_buffer_switch_slow(buf, SWITCH_FLUSH,
+					handle);
 		/*
 		 * Perform flush before writing to finalized.
 		 */
@@ -560,40 +622,70 @@ void *channel_destroy(struct channel *chan)
 	CMM_ACCESS_ONCE(chan->finalized) = 1;
 	//wake_up_interruptible(&chan->hp_wait);
 	//wake_up_interruptible(&chan->read_wait);
-	urcu_ref_put(&chan->ref, channel_release);
+	/*
+	 * sessiond/consumer are keeping a reference on the shm file
+	 * descriptor directly. No need to refcount.
+	 */
 	priv = chan->backend.priv;
+	channel_release(chan, handle, shadow);
 	return priv;
 }
 
-struct lib_ring_buffer *channel_get_ring_buffer(
-				const struct lib_ring_buffer_config *config,
-				struct channel *chan, int cpu)
+struct lttng_ust_lib_ring_buffer *channel_get_ring_buffer(
+				const struct lttng_ust_lib_ring_buffer_config *config,
+				struct channel *chan, int cpu,
+				struct lttng_ust_shm_handle *handle,
+				int *shm_fd, int *wait_fd,
+				uint64_t *memory_map_size)
 {
-	if (config->alloc == RING_BUFFER_ALLOC_GLOBAL)
-		return shmp(chan->backend.buf);
-	else
-		return &shmp(chan->backend.buf)[cpu];
+	struct shm_ref *ref;
+
+	if (config->alloc == RING_BUFFER_ALLOC_GLOBAL) {
+		ref = &chan->backend.buf[0].shmp._ref;
+		shm_get_object_data(handle, ref, shm_fd, wait_fd,
+			memory_map_size);
+		return shmp(handle, chan->backend.buf[0].shmp);
+	} else {
+		if (cpu >= num_possible_cpus())
+			return NULL;
+		ref = &chan->backend.buf[cpu].shmp._ref;
+		shm_get_object_data(handle, ref, shm_fd, wait_fd,
			memory_map_size);
+		return shmp(handle, chan->backend.buf[cpu].shmp);
+	}
 }
 
-int lib_ring_buffer_open_read(struct lib_ring_buffer *buf)
+int lib_ring_buffer_open_read(struct lttng_ust_lib_ring_buffer *buf,
			      struct lttng_ust_shm_handle *handle,
			      int shadow)
 {
-	struct channel *chan = shmp(buf->backend.chan);
-
+	if (shadow) {
+		if (uatomic_cmpxchg(&buf->active_shadow_readers, 0, 1) != 0)
+			return -EBUSY;
+		cmm_smp_mb();
+		return 0;
+	}
 	if (uatomic_cmpxchg(&buf->active_readers, 0, 1) != 0)
 		return -EBUSY;
-	urcu_ref_get(&chan->ref);
 	cmm_smp_mb();
 	return 0;
 }
 
-void lib_ring_buffer_release_read(struct lib_ring_buffer *buf)
+void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
				  struct lttng_ust_shm_handle *handle,
				  int shadow)
 {
-	struct channel *chan = shmp(buf->backend.chan);
+	struct channel *chan = shmp(handle, buf->backend.chan);
 
+	if (shadow) {
+		CHAN_WARN_ON(chan, uatomic_read(&buf->active_shadow_readers) != 1);
+		cmm_smp_mb();
+		uatomic_dec(&buf->active_shadow_readers);
+		return;
+	}
 	CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
 	cmm_smp_mb();
 	uatomic_dec(&buf->active_readers);
-	urcu_ref_put(&chan->ref, channel_release);
 }
 
 /**
@@ -606,11 +698,12 @@ void lib_ring_buffer_release_read(struct lib_ring_buffer *buf)
 * data to read at consumed position, or 0 if the get operation succeeds.
 */
-int lib_ring_buffer_snapshot(struct lib_ring_buffer *buf,
-			     unsigned long *consumed, unsigned long *produced)
+int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
			     unsigned long *consumed, unsigned long *produced,
			     struct lttng_ust_shm_handle *handle)
 {
-	struct channel *chan = shmp(buf->backend.chan);
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	struct channel *chan = shmp(handle, buf->backend.chan);
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long consumed_cur, write_offset;
 	int finalized;
 
@@ -659,14 +752,16 @@ nodata:
  * @buf: ring buffer
  * @consumed_new: new consumed count value
  */
-void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
-				   unsigned long consumed_new)
+void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
				   unsigned long consumed_new,
				   struct lttng_ust_shm_handle *handle)
 {
-	struct lib_ring_buffer_backend *bufb = &buf->backend;
-	struct channel *chan = shmp(bufb->chan);
+	struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
+	struct channel *chan = shmp(handle, bufb->chan);
 	unsigned long consumed;
 
-	CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
+	CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1
		   && uatomic_read(&buf->active_shadow_readers) != 1);
 
 	/*
 	 * Only push the consumed value forward.
@@ -687,11 +782,12 @@ void lib_ring_buffer_move_consumer(struct lib_ring_buffer *buf,
 * Returns -ENODATA if buffer is finalized, -EAGAIN if there is currently no
 * data to read at consumed position, or 0 if the get operation succeeds.
 */
-int lib_ring_buffer_get_subbuf(struct lib_ring_buffer *buf,
-			       unsigned long consumed)
+int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
			       unsigned long consumed,
			       struct lttng_ust_shm_handle *handle)
 {
-	struct channel *chan = shmp(buf->backend.chan);
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	struct channel *chan = shmp(handle, buf->backend.chan);
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
 	int ret;
 	int finalized;
@@ -704,7 +800,7 @@ retry:
 	cmm_smp_rmb();
 	consumed_cur = uatomic_read(&buf->consumed);
 	consumed_idx = subbuf_index(consumed, chan);
-	commit_count = v_read(config, &shmp(buf->commit_cold)[consumed_idx].cc_sb);
+	commit_count = v_read(config, &shmp_index(handle, buf->commit_cold, consumed_idx)->cc_sb);
 	/*
 	 * Make sure we read the commit count before reading the buffer
 	 * data and the write offset. Correct consumed offset ordering
@@ -754,7 +850,8 @@ retry:
 	 * looking for matches the one contained in the subbuffer id.
 	 */
 	ret = update_read_sb_index(config, &buf->backend, &chan->backend,
-				   consumed_idx, buf_trunc_val(consumed, chan));
+				   consumed_idx, buf_trunc_val(consumed, chan),
+				   handle);
 	if (ret)
 		goto retry;
 	subbuffer_id_clear_noref(config, &buf->backend.buf_rsb.id);
@@ -779,14 +876,16 @@ nodata:
 * lib_ring_buffer_put_subbuf - release exclusive subbuffer access
 * @buf: ring buffer
 */
-void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf)
+void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
				struct lttng_ust_shm_handle *handle)
 {
-	struct lib_ring_buffer_backend *bufb = &buf->backend;
-	struct channel *chan = shmp(bufb->chan);
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
+	struct channel *chan = shmp(handle, bufb->chan);
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long read_sb_bindex, consumed_idx, consumed;
 
-	CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
+	CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1
		   && uatomic_read(&buf->active_shadow_readers) != 1);
 
 	if (!buf->get_subbuf) {
 		/*
@@ -807,9 +906,9 @@ void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf)
 	 */
 	read_sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
 	v_add(config, v_read(config,
-			     &shmp(bufb->array)[read_sb_bindex]->records_unread),
+			     &shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread),
 	      &bufb->records_read);
-	v_set(config, &shmp(bufb->array)[read_sb_bindex]->records_unread, 0);
+	v_set(config, &shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread, 0);
 	CHAN_WARN_ON(chan, config->mode == RING_BUFFER_OVERWRITE
 		     && subbuffer_id_is_noref(config, bufb->buf_rsb.id));
 	subbuffer_id_set_noref(config, &bufb->buf_rsb.id);
@@ -824,7 +923,8 @@ void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf)
 	 */
 	consumed_idx = subbuf_index(consumed, chan);
 	update_read_sb_index(config, &buf->backend, &chan->backend,
-			     consumed_idx, buf_trunc_val(consumed, chan));
+			     consumed_idx, buf_trunc_val(consumed, chan),
+			     handle);
 	/*
 	 * update_read_sb_index return value ignored. Don't exchange sub-buffer
 	 * if the writer concurrently updated it.
@@ -836,17 +936,18 @@ void lib_ring_buffer_put_subbuf(struct lib_ring_buffer *buf)
 * position and the writer position. (inclusive)
 */
 static
-void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
+void lib_ring_buffer_print_subbuffer_errors(struct lttng_ust_lib_ring_buffer *buf,
					    struct channel *chan,
					    unsigned long cons_offset,
-					    int cpu)
+					    int cpu,
+					    struct lttng_ust_shm_handle *handle)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long cons_idx, commit_count, commit_count_sb;
 
 	cons_idx = subbuf_index(cons_offset, chan);
-	commit_count = v_read(config, &shmp(buf->commit_hot)[cons_idx].cc);
-	commit_count_sb = v_read(config, &shmp(buf->commit_cold)[cons_idx].cc_sb);
+	commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, cons_idx)->cc);
+	commit_count_sb = v_read(config, &shmp_index(handle, buf->commit_cold, cons_idx)->cc_sb);
 
 	if (subbuf_offset(commit_count, chan) != 0)
 		ERRMSG("ring buffer %s, cpu %d: "
@@ -862,11 +963,12 @@ void lib_ring_buffer_print_subbuffer_errors(struct lib_ring_buffer *buf,
 }
 
 static
-void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
+void lib_ring_buffer_print_buffer_errors(struct lttng_ust_lib_ring_buffer *buf,
					 struct channel *chan,
-					 void *priv, int cpu)
+					 void *priv, int cpu,
+					 struct lttng_ust_shm_handle *handle)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long write_offset, cons_offset;
 
 	/*
@@ -894,14 +996,15 @@ void lib_ring_buffer_print_buffer_errors(struct lib_ring_buffer *buf,
		     - cons_offset) > 0;
	     cons_offset = subbuf_align(cons_offset, chan))
		lib_ring_buffer_print_subbuffer_errors(buf, chan, cons_offset,
-						       cpu);
+						       cpu, handle);
 }
 
 static
 void lib_ring_buffer_print_errors(struct channel *chan,
-				  struct lib_ring_buffer *buf, int cpu)
+				  struct lttng_ust_lib_ring_buffer *buf, int cpu,
+				  struct lttng_ust_shm_handle *handle)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 	void *priv = chan->backend.priv;
 
 	ERRMSG("ring buffer %s, cpu %d: %lu records written, "
@@ -921,7 +1024,7 @@ void lib_ring_buffer_print_errors(struct channel *chan,
	       v_read(config, &buf->records_lost_wrap),
	       v_read(config, &buf->records_lost_big));
 
-	lib_ring_buffer_print_buffer_errors(buf, chan, priv, cpu);
+	lib_ring_buffer_print_buffer_errors(buf, chan, priv, cpu, handle);
 }
 
 /*
@@ -930,16 +1033,17 @@ void lib_ring_buffer_print_errors(struct channel *chan,
 * Only executed when the buffer is finalized, in SWITCH_FLUSH.
 */
 static
-void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_old_start(struct lttng_ust_lib_ring_buffer *buf,
				      struct channel *chan,
				      struct switch_offsets *offsets,
-				      u64 tsc)
+				      u64 tsc,
+				      struct lttng_ust_shm_handle *handle)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long oldidx = subbuf_index(offsets->old, chan);
 	unsigned long commit_count;
 
-	config->cb.buffer_begin(buf, tsc, oldidx);
+	config->cb.buffer_begin(buf, tsc, oldidx, handle);
 
 	/*
	 * Order all writes to buffer before the commit count update that will
@@ -947,14 +1051,15 @@ void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
	 */
 	cmm_smp_wmb();
 	v_add(config, config->cb.subbuffer_header_size(),
-	      &shmp(buf->commit_hot)[oldidx].cc);
-	commit_count = v_read(config, &shmp(buf->commit_hot)[oldidx].cc);
+	      &shmp_index(handle, buf->commit_hot, oldidx)->cc);
+	commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
 	/* Check if the written buffer has to be delivered */
 	lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
-				      commit_count, oldidx);
+				      commit_count, oldidx, handle);
 	lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
					     offsets->old, commit_count,
-					     config->cb.subbuffer_header_size());
+					     config->cb.subbuffer_header_size(),
+					     handle);
 }
 
 /*
@@ -966,31 +1071,33 @@ void lib_ring_buffer_switch_old_start(struct lib_ring_buffer *buf,
 * subbuffer.
 */
 static
-void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_old_end(struct lttng_ust_lib_ring_buffer *buf,
				    struct channel *chan,
				    struct switch_offsets *offsets,
-				    u64 tsc)
+				    u64 tsc,
+				    struct lttng_ust_shm_handle *handle)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
 	unsigned long commit_count, padding_size, data_size;
 
 	data_size = subbuf_offset(offsets->old - 1, chan) + 1;
 	padding_size = chan->backend.subbuf_size - data_size;
-	subbuffer_set_data_size(config, &buf->backend, oldidx, data_size);
+	subbuffer_set_data_size(config, &buf->backend, oldidx, data_size,
				handle);
 
	/*
	 * Order all writes to buffer before the commit count update that will
	 * determine that the subbuffer is full.
	 */
 	cmm_smp_wmb();
-	v_add(config, padding_size, &shmp(buf->commit_hot)[oldidx].cc);
-	commit_count = v_read(config, &shmp(buf->commit_hot)[oldidx].cc);
+	v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
+	commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
 	lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
-				      commit_count, oldidx);
+				      commit_count, oldidx, handle);
 	lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
					     offsets->old, commit_count,
-					     padding_size);
+					     padding_size, handle);
 }
 
 /*
@@ -1001,16 +1108,17 @@ void lib_ring_buffer_switch_old_end(struct lib_ring_buffer *buf,
 * that this code is executed before the deliver of this sub-buffer.
 */
 static
-void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
+void lib_ring_buffer_switch_new_start(struct lttng_ust_lib_ring_buffer *buf,
				      struct channel *chan,
				      struct switch_offsets *offsets,
-				      u64 tsc)
+				      u64 tsc,
+				      struct lttng_ust_shm_handle *handle)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long beginidx = subbuf_index(offsets->begin, chan);
 	unsigned long commit_count;
 
-	config->cb.buffer_begin(buf, tsc, beginidx);
+	config->cb.buffer_begin(buf, tsc, beginidx, handle);
 
 	/*
	 * Order all writes to buffer before the commit count update that will
@@ -1018,14 +1126,15 @@ void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
	 */
 	cmm_smp_wmb();
 	v_add(config, config->cb.subbuffer_header_size(),
-	      &shmp(buf->commit_hot)[beginidx].cc);
-	commit_count = v_read(config, &shmp(buf->commit_hot)[beginidx].cc);
+	      &shmp_index(handle, buf->commit_hot, beginidx)->cc);
+	commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, beginidx)->cc);
 	/* Check if the written buffer has to be delivered */
 	lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
-				      commit_count, beginidx);
+				      commit_count, beginidx, handle);
 	lib_ring_buffer_write_commit_counter(config, buf, chan, beginidx,
					     offsets->begin, commit_count,
-					     config->cb.subbuffer_header_size());
+					     config->cb.subbuffer_header_size(),
+					     handle);
 }
 
 /*
@@ -1035,31 +1144,33 @@ void lib_ring_buffer_switch_new_start(struct lib_ring_buffer *buf,
 * have to do the deliver themselves.
 */
 static
-void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
-					    struct channel *chan,
-					    struct switch_offsets *offsets,
-					    u64 tsc)
+void lib_ring_buffer_switch_new_end(struct lttng_ust_lib_ring_buffer *buf,
				    struct channel *chan,
				    struct switch_offsets *offsets,
				    u64 tsc,
				    struct lttng_ust_shm_handle *handle)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long endidx = subbuf_index(offsets->end - 1, chan);
 	unsigned long commit_count, padding_size, data_size;
 
 	data_size = subbuf_offset(offsets->end - 1, chan) + 1;
 	padding_size = chan->backend.subbuf_size - data_size;
-	subbuffer_set_data_size(config, &buf->backend, endidx, data_size);
+	subbuffer_set_data_size(config, &buf->backend, endidx, data_size,
				handle);
 
	/*
	 * Order all writes to buffer before the commit count update that will
	 * determine that the subbuffer is full.
	 */
 	cmm_smp_wmb();
-	v_add(config, padding_size, &shmp(buf->commit_hot)[endidx].cc);
-	commit_count = v_read(config, &shmp(buf->commit_hot)[endidx].cc);
+	v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, endidx)->cc);
+	commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, endidx)->cc);
 	lib_ring_buffer_check_deliver(config, buf, chan, offsets->end - 1,
-				      commit_count, endidx);
+				      commit_count, endidx, handle);
 	lib_ring_buffer_write_commit_counter(config, buf, chan, endidx,
					     offsets->end, commit_count,
-					     padding_size);
+					     padding_size, handle);
 }
 
 /*
@@ -1069,12 +1180,12 @@ void lib_ring_buffer_switch_new_end(struct lib_ring_buffer *buf,
 */
 static
 int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
-				    struct lib_ring_buffer *buf,
+				    struct lttng_ust_lib_ring_buffer *buf,
				    struct channel *chan,
				    struct switch_offsets *offsets,
				    u64 *tsc)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 	unsigned long off;
 
 	offsets->begin = v_read(config, &buf->offset);
@@ -1101,7 +1212,7 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
	 * quiescence guarantees for the fusion merge.
	 */
 	if (mode == SWITCH_FLUSH || off > 0) {
-		if (unlikely(off == 0)) {
+		if (caa_unlikely(off == 0)) {
			/*
			 * The client does not save any header information.
			 * Don't switch empty subbuffer on finalize, because it
@@ -1130,10 +1241,11 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
 * operations, this function must be called from the CPU which owns the buffer
 * for a ACTIVE flush.
 */
-void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode mode)
+void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
				 struct lttng_ust_shm_handle *handle)
 {
-	struct channel *chan = shmp(buf->backend.chan);
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	struct channel *chan = shmp(handle, buf->backend.chan);
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
 	struct switch_offsets offsets;
 	unsigned long oldidx;
 	u64 tsc;
@@ -1164,20 +1276,20 @@ void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode m
 	lib_ring_buffer_reserve_push_reader(buf, chan, offsets.old);
 
 	oldidx = subbuf_index(offsets.old, chan);
-	lib_ring_buffer_clear_noref(config, &buf->backend, oldidx);
+	lib_ring_buffer_clear_noref(config, &buf->backend, oldidx, handle);
 
	/*
	 * May need to populate header start on SWITCH_FLUSH.
	 */
 	if (offsets.switch_old_start) {
-		lib_ring_buffer_switch_old_start(buf, chan, &offsets, tsc);
+		lib_ring_buffer_switch_old_start(buf, chan, &offsets, tsc, handle);
		offsets.old += config->cb.subbuffer_header_size();
	}
 
	/*
	 * Switch old subbuffer.
	 */
-	lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc);
+	lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc, handle);
 }
 
 /*
@@ -1188,12 +1300,13 @@ void lib_ring_buffer_switch_slow(struct lib_ring_buffer *buf, enum switch_mode m
 * -EIO if data cannot be written into the buffer for any other reason.
 */
 static
-int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
+int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
				     struct channel *chan,
				     struct switch_offsets *offsets,
-				     struct lib_ring_buffer_ctx *ctx)
+				     struct lttng_ust_lib_ring_buffer_ctx *ctx)
 {
-	const struct lib_ring_buffer_config *config = chan->backend.config;
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+	struct lttng_ust_shm_handle *handle = ctx->handle;
 	unsigned long reserve_commit_diff;
 
 	offsets->begin = v_read(config, &buf->offset);
@@ -1210,7 +1323,7 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
 	if (last_tsc_overflow(config, buf, ctx->tsc))
		ctx->rflags |= RING_BUFFER_RFLAG_FULL_TSC;
 
-	if (unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
+	if (caa_unlikely(subbuf_offset(offsets->begin, ctx->chan) == 0)) {
		offsets->switch_new_start = 1;		/* For offsets->begin */
	} else {
		offsets->size = config->cb.record_header_size(config, chan,
@@ -1221,19 +1334,19 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
			lib_ring_buffer_align(offsets->begin + offsets->size,
					      ctx->largest_align)
			+ ctx->data_size;
-		if (unlikely(subbuf_offset(offsets->begin, chan) +
+		if (caa_unlikely(subbuf_offset(offsets->begin, chan) +
			     offsets->size > chan->backend.subbuf_size)) {
			offsets->switch_old_end = 1;	/* For offsets->old */
			offsets->switch_new_start = 1;	/* For offsets->begin */
		}
	}
-	if (unlikely(offsets->switch_new_start)) {
+	if (caa_unlikely(offsets->switch_new_start)) {
		unsigned long sb_index;
 
		/*
		 * We are typically not filling the previous buffer completely.
		 */
-		if (likely(offsets->switch_old_end))
+		if (caa_likely(offsets->switch_old_end))
			offsets->begin = subbuf_align(offsets->begin, chan);
		offsets->begin = offsets->begin
				 + config->cb.subbuffer_header_size();
@@ -1243,11 +1356,11 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
		  (buf_trunc(offsets->begin, chan)
		   >> chan->backend.num_subbuf_order)
		  - ((unsigned long) v_read(config,
-					    &shmp(buf->commit_cold)[sb_index].cc_sb)
+					    &shmp_index(handle, buf->commit_cold, sb_index)->cc_sb)
		     & chan->commit_count_mask);
-		if (likely(reserve_commit_diff == 0)) {
+		if (caa_likely(reserve_commit_diff == 0)) {
			/* Next subbuffer not being written to. */
-			if (unlikely(config->mode != RING_BUFFER_OVERWRITE &&
+			if (caa_unlikely(config->mode != RING_BUFFER_OVERWRITE &&
				     subbuf_trunc(offsets->begin, chan)
				   - subbuf_trunc((unsigned long)
				     uatomic_read(&buf->consumed), chan)
@@ -1285,7 +1398,7 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
			lib_ring_buffer_align(offsets->begin + offsets->size,
					      ctx->largest_align)
			+ ctx->data_size;
-		if (unlikely(subbuf_offset(offsets->begin, chan)
+		if (caa_unlikely(subbuf_offset(offsets->begin, chan)
			     + offsets->size > chan->backend.subbuf_size)) {
			/*
			 * Record too big for subbuffers, report error, don't
@@ -1307,7 +1420,7 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
	}
 
 	offsets->end = offsets->begin + offsets->size;
-	if (unlikely(subbuf_offset(offsets->end, chan) == 0)) {
+	if (caa_unlikely(subbuf_offset(offsets->end, chan) == 0)) {
		/*
		 * The offset_end will fall at the very beginning of the next
		 * subbuffer.
@@ -1325,18 +1438,19 @@ int lib_ring_buffer_try_reserve_slow(struct lib_ring_buffer *buf,
 * -EIO for other errors, else returns 0.
 * It will take care of sub-buffer switching.
 */
-int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
 {
 	struct channel *chan = ctx->chan;
-	const struct lib_ring_buffer_config *config = chan->backend.config;
-	struct lib_ring_buffer *buf;
+	struct lttng_ust_shm_handle *handle = ctx->handle;
+	const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+	struct lttng_ust_lib_ring_buffer *buf;
 	struct switch_offsets offsets;
 	int ret;
 
 	if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
-		buf = &shmp(chan->backend.buf)[ctx->cpu];
+		buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
 	else
-		buf = shmp(chan->backend.buf);
+		buf = shmp(handle, chan->backend.buf[0].shmp);
 	ctx->buf = buf;
 
 	offsets.size = 0;
@@ -1344,9 +1458,9 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
 	do {
		ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets, ctx);
-		if (unlikely(ret))
+		if (caa_unlikely(ret))
			return ret;
-	} while (unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
+	} while (caa_unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
			  offsets.end)
			  != offsets.old));
 
@@ -1367,25 +1481,27 @@ int lib_ring_buffer_reserve_slow(struct lib_ring_buffer_ctx *ctx)
	 * Clear noref flag for this subbuffer.
	 */
 	lib_ring_buffer_clear_noref(config, &buf->backend,
-				    subbuf_index(offsets.end - 1, chan));
+				    subbuf_index(offsets.end - 1, chan),
+				    handle);
 
	/*
	 * Switch old subbuffer if needed.
	 */
-	if (unlikely(offsets.switch_old_end)) {
+	if (caa_unlikely(offsets.switch_old_end)) {
		lib_ring_buffer_clear_noref(config, &buf->backend,
-					    subbuf_index(offsets.old - 1, chan));
-		lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->tsc);
+					    subbuf_index(offsets.old - 1, chan),
+					    handle);
+		lib_ring_buffer_switch_old_end(buf, chan, &offsets, ctx->tsc, handle);
	}
 
	/*
	 * Populate new subbuffer.
	 */
-	if (unlikely(offsets.switch_new_start))
-		lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc);
+	if (caa_unlikely(offsets.switch_new_start))
+		lib_ring_buffer_switch_new_start(buf, chan, &offsets, ctx->tsc, handle);
 
-	if (unlikely(offsets.switch_new_end))
-		lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc);
+	if (caa_unlikely(offsets.switch_new_end))
+		lib_ring_buffer_switch_new_end(buf, chan, &offsets, ctx->tsc, handle);
 
 	ctx->slot_size = offsets.size;
 	ctx->pre_offset = offsets.begin;
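
The POSIX SHM comment introduced at the top of this patch summarizes the new buffer-allocation handshake: create a shared memory object, size it with ftruncate(2), mmap(2) it, hand the file descriptor to the consumer over a UNIX socket, and shm_unlink(3) the name right away since only the descriptor is kept. The sketch below illustrates that sequence with plain POSIX calls only; the object name, size handling and error paths are illustrative assumptions, not code from this patch or from the lttng-ust shm helpers.

/*
 * Illustrative sketch of the shm_open/shm_unlink/ftruncate/mmap pattern
 * described in the comment above.  Object name and error handling are
 * hypothetical.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

static int create_shm_object(size_t len, void **memory_map)
{
	const char *name = "/example-ust-shm";	/* hypothetical object name */
	int fd;

	fd = shm_open(name, O_CREAT | O_EXCL | O_RDWR, 0700);
	if (fd < 0) {
		perror("shm_open");
		return -1;
	}
	/* The name is only needed to obtain a descriptor; drop it right away. */
	if (shm_unlink(name) < 0)
		perror("shm_unlink");
	/* Size the object before mapping it. */
	if (ftruncate(fd, (off_t) len) < 0) {
		perror("ftruncate");
		close(fd);
		return -1;
	}
	/*
	 * Map it locally; a consumer process would mmap the same descriptor
	 * after receiving it over a UNIX socket.
	 */
	*memory_map = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
			   fd, 0);
	if (*memory_map == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return -1;
	}
	return fd;	/* descriptor to pass to the consumer */
}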