Move to kernel style SPDX license identifiers
diff --git a/libringbuffer/ring_buffer_frontend.c b/libringbuffer/ring_buffer_frontend.c
index 14ae8eb334b5352c5d831f398a30ad5849acf7fe..90459f006491e2e63512f6d1af3e407f6fca4ba5 100644
@@ -1,23 +1,8 @@
 /*
- * ring_buffer_frontend.c
+ * SPDX-License-Identifier: LGPL-2.1-only
  *
  * Copyright (C) 2005-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; only
- * version 2.1 of the License.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- *
- *
  * Ring buffer wait-free buffer synchronization. Producer-consumer and flight
  * recorder (overwrite) modes. See thesis:
  *
@@ -51,7 +36,6 @@
  *   - put_subbuf
  */
 
-#define _GNU_SOURCE
 #define _LGPL_SOURCE
 #include <sys/types.h>
 #include <sys/mman.h>
@@ -60,6 +44,8 @@
 #include <fcntl.h>
 #include <signal.h>
 #include <time.h>
+#include <stdbool.h>
+#include <stdint.h>
 #include <urcu/compiler.h>
 #include <urcu/ref.h>
 #include <urcu/tls-compat.h>
@@ -72,7 +58,7 @@
 #include "backend.h"
 #include "frontend.h"
 #include "shm.h"
-#include "tlsfixup.h"
+#include "rb-init.h"
 #include "../liblttng-ust/compat.h"    /* For ENODATA */
 
 /* Print DBG() messages about events lost only every 1048576 hits */
@@ -84,6 +70,7 @@
 #define CLOCKID                CLOCK_MONOTONIC
 #define LTTNG_UST_RING_BUFFER_GET_RETRY                10
 #define LTTNG_UST_RING_BUFFER_RETRY_DELAY_MS   10
+#define RETRY_DELAY_MS                         100     /* 100 ms. */
 
 /*
  * Non-static to ensure the compiler does not optimize away the xor.
@@ -149,6 +136,21 @@ static struct timer_signal_data timer_signal = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
 };
 
+static bool lttng_ust_allow_blocking;
+
+void lttng_ust_ringbuffer_set_allow_blocking(void)
+{
+       lttng_ust_allow_blocking = true;
+}
+
+/* Get blocking timeout, in ms */
+static int lttng_ust_ringbuffer_get_timeout(struct channel *chan)
+{
+       if (!lttng_ust_allow_blocking)
+               return 0;
+       return chan->u.s.blocking_timeout_ms;
+}
+
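/*
 * A minimal sketch of how these two hooks fit together; the actual call
 * site lives in liblttng-ust init code and is assumed here to key off
 * the LTTNG_UST_ALLOW_BLOCKING environment variable (the real
 * initializer uses a setuid-safe getenv wrapper). Illustrative only:
 */
#include <stdlib.h>

static void apply_blocking_env_sketch(void)
{
	/* Opt in to blocking writes only when the user explicitly asks. */
	if (getenv("LTTNG_UST_ALLOW_BLOCKING"))
		lttng_ust_ringbuffer_set_allow_blocking();
}
/*
 * With blocking disallowed, lttng_ust_ringbuffer_get_timeout() returns 0
 * and a full buffer discards the record immediately; a per-channel
 * timeout of -1 means block without bound (see handle_blocking_retry()
 * further down).
 */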
 /**
  * lib_ring_buffer_reset - Reset ring buffer to initial values.
  * @buf: Ring buffer.
@@ -167,7 +169,7 @@ void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
 
        chan = shmp(handle, buf->backend.chan);
        if (!chan)
-               abort();
+               return;
        config = &chan->backend.config;
        /*
         * Reset iterator first. It will put the subbuffer if it currently holds
@@ -175,9 +177,23 @@ void lib_ring_buffer_reset(struct lttng_ust_lib_ring_buffer *buf,
         */
        v_set(config, &buf->offset, 0);
        for (i = 0; i < chan->backend.num_subbuf; i++) {
-               v_set(config, &shmp_index(handle, buf->commit_hot, i)->cc, 0);
-               v_set(config, &shmp_index(handle, buf->commit_hot, i)->seq, 0);
-               v_set(config, &shmp_index(handle, buf->commit_cold, i)->cc_sb, 0);
+               struct commit_counters_hot *cc_hot;
+               struct commit_counters_cold *cc_cold;
+               uint64_t *ts_end;
+
+               cc_hot = shmp_index(handle, buf->commit_hot, i);
+               if (!cc_hot)
+                       return;
+               cc_cold = shmp_index(handle, buf->commit_cold, i);
+               if (!cc_cold)
+                       return;
+               ts_end = shmp_index(handle, buf->ts_end, i);
+               if (!ts_end)
+                       return;
+               v_set(config, &cc_hot->cc, 0);
+               v_set(config, &cc_hot->seq, 0);
+               v_set(config, &cc_cold->cc_sb, 0);
+               *ts_end = 0;
        }
        uatomic_set(&buf->consumed, 0);
        uatomic_set(&buf->record_disabled, 0);
@@ -313,6 +329,9 @@ int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
 {
        const struct lttng_ust_lib_ring_buffer_config *config = &chanb->config;
        struct channel *chan = caa_container_of(chanb, struct channel, backend);
+       struct lttng_ust_lib_ring_buffer_backend_subbuffer *wsb;
+       struct channel *shmp_chan;
+       struct commit_counters_hot *cc_hot;
        void *priv = channel_get_private(chan);
        size_t subbuf_header_size;
        uint64_t tsc;
@@ -339,6 +358,16 @@ int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
                goto free_commit;
        }
 
+       align_shm(shmobj, __alignof__(uint64_t));
+       set_shmp(buf->ts_end,
+                zalloc_shm(shmobj,
+                       sizeof(uint64_t) * chan->backend.num_subbuf));
+       if (!shmp(handle, buf->ts_end)) {
+               ret = -ENOMEM;
+               goto free_commit_cold;
+       }
+
        ret = lib_ring_buffer_backend_create(&buf->backend, &chan->backend,
                        cpu, handle, shmobj);
        if (ret) {
@@ -351,11 +380,26 @@ int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
         */
        subbuf_header_size = config->cb.subbuffer_header_size();
        v_set(config, &buf->offset, subbuf_header_size);
-       subbuffer_id_clear_noref(config, &shmp_index(handle, buf->backend.buf_wsb, 0)->id);
-       tsc = config->cb.ring_buffer_clock_read(shmp(handle, buf->backend.chan));
+       wsb = shmp_index(handle, buf->backend.buf_wsb, 0);
+       if (!wsb) {
+               ret = -EPERM;
+               goto free_chanbuf;
+       }
+       subbuffer_id_clear_noref(config, &wsb->id);
+       shmp_chan = shmp(handle, buf->backend.chan);
+       if (!shmp_chan) {
+               ret = -EPERM;
+               goto free_chanbuf;
+       }
+       tsc = config->cb.ring_buffer_clock_read(shmp_chan);
        config->cb.buffer_begin(buf, tsc, 0, handle);
-       v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->cc);
-       v_add(config, subbuf_header_size, &shmp_index(handle, buf->commit_hot, 0)->seq);
+       cc_hot = shmp_index(handle, buf->commit_hot, 0);
+       if (!cc_hot) {
+               ret = -EPERM;
+               goto free_chanbuf;
+       }
+       v_add(config, subbuf_header_size, &cc_hot->cc);
+       v_add(config, subbuf_header_size, &cc_hot->seq);
 
        if (config->cb.buffer_create) {
                ret = config->cb.buffer_create(buf, priv, cpu, chanb->name, handle);
@@ -370,6 +414,8 @@ int lib_ring_buffer_create(struct lttng_ust_lib_ring_buffer *buf,
 
        /* Error handling */
 free_init:
+       /* ts_end will be freed by shm teardown */
+free_commit_cold:
        /* commit_cold will be freed by shm teardown */
 free_commit:
        /* commit_hot will be freed by shm teardown */
@@ -403,7 +449,7 @@ void lib_ring_buffer_channel_switch_timer(int sig, siginfo_t *si, void *uc)
                                shmp(handle, chan->backend.buf[cpu].shmp);
 
                        if (!buf)
-                               abort();
+                               goto end;
                        if (uatomic_read(&buf->active_readers))
                                lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE,
                                        chan->handle);
@@ -413,11 +459,12 @@ void lib_ring_buffer_channel_switch_timer(int sig, siginfo_t *si, void *uc)
                        shmp(handle, chan->backend.buf[0].shmp);
 
                if (!buf)
-                       abort();
+                       goto end;
                if (uatomic_read(&buf->active_readers))
                        lib_ring_buffer_switch_slow(buf, SWITCH_ACTIVE,
                                chan->handle);
        }
+end:
        pthread_mutex_unlock(&wakeup_fd_mutex);
        return;
 }
@@ -429,10 +476,14 @@ int lib_ring_buffer_poll_deliver(const struct lttng_ust_lib_ring_buffer_config *
                                 struct lttng_ust_shm_handle *handle)
 {
        unsigned long consumed_old, consumed_idx, commit_count, write_offset;
+       struct commit_counters_cold *cc_cold;
 
        consumed_old = uatomic_read(&buf->consumed);
        consumed_idx = subbuf_index(consumed_old, chan);
-       commit_count = v_read(config, &shmp_index(handle, buf->commit_cold, consumed_idx)->cc_sb);
+       cc_cold = shmp_index(handle, buf->commit_cold, consumed_idx);
+       if (!cc_cold)
+               return 0;
+       commit_count = v_read(config, &cc_cold->cc_sb);
        /*
         * No memory barrier here, since we are only interested
         * in a statistically correct polling result. The next poll will
@@ -549,7 +600,7 @@ void lib_ring_buffer_channel_do_read(struct channel *chan)
                                shmp(handle, chan->backend.buf[cpu].shmp);
 
                        if (!buf)
-                               abort();
+                               goto end;
                        if (uatomic_read(&buf->active_readers)
                            && lib_ring_buffer_poll_deliver(config, buf,
                                        chan, handle)) {
@@ -561,13 +612,14 @@ void lib_ring_buffer_channel_do_read(struct channel *chan)
                        shmp(handle, chan->backend.buf[0].shmp);
 
                if (!buf)
-                       abort();
+                       goto end;
                if (uatomic_read(&buf->active_readers)
                    && lib_ring_buffer_poll_deliver(config, buf,
                                chan, handle)) {
                        lib_ring_buffer_wakeup(buf, handle);
                }
        }
+end:
        pthread_mutex_unlock(&wakeup_fd_mutex);
 }
 
@@ -737,6 +789,7 @@ void lib_ring_buffer_channel_switch_timer_start(struct channel *chan)
 
        lib_ring_buffer_setup_timer_thread();
 
+       memset(&sev, 0, sizeof(sev));
        sev.sigev_notify = SIGEV_SIGNAL;
        sev.sigev_signo = LTTNG_UST_RB_SIG_FLUSH;
        sev.sigev_value.sival_ptr = chan;
@@ -855,22 +908,25 @@ static void channel_print_errors(struct channel *chan,
                for_each_possible_cpu(cpu) {
                        struct lttng_ust_lib_ring_buffer *buf =
                                shmp(handle, chan->backend.buf[cpu].shmp);
-                       lib_ring_buffer_print_errors(chan, buf, cpu, handle);
+                       if (buf)
+                               lib_ring_buffer_print_errors(chan, buf, cpu, handle);
                }
        } else {
                struct lttng_ust_lib_ring_buffer *buf =
                        shmp(handle, chan->backend.buf[0].shmp);
 
-               lib_ring_buffer_print_errors(chan, buf, -1, handle);
+               if (buf)
+                       lib_ring_buffer_print_errors(chan, buf, -1, handle);
        }
 }
 
 static void channel_free(struct channel *chan,
-               struct lttng_ust_shm_handle *handle)
+               struct lttng_ust_shm_handle *handle,
+               int consumer)
 {
        channel_backend_free(&chan->backend, handle);
        /* chan is freed by shm teardown */
-       shm_object_table_destroy(handle->table);
+       shm_object_table_destroy(handle->table, consumer);
        free(handle);
 }
 
@@ -905,7 +961,8 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buff
                   void *buf_addr, size_t subbuf_size,
                   size_t num_subbuf, unsigned int switch_timer_interval,
                   unsigned int read_timer_interval,
-                  const int *stream_fds, int nr_stream_fds)
+                  const int *stream_fds, int nr_stream_fds,
+                  int64_t blocking_timeout)
 {
        int ret;
        size_t shmsize, chansize;
@@ -913,6 +970,7 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buff
        struct lttng_ust_shm_handle *handle;
        struct shm_object *shmobj;
        unsigned int nr_streams;
+       int64_t blocking_timeout_ms;
 
        if (config->alloc == RING_BUFFER_ALLOC_PER_CPU)
                nr_streams = num_possible_cpus();
@@ -922,6 +980,19 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buff
        if (nr_stream_fds != nr_streams)
                return NULL;
 
+       if (blocking_timeout < -1) {
+               return NULL;
+       }
+       /* usec to msec */
+       if (blocking_timeout == -1) {
+               blocking_timeout_ms = -1;
+       } else {
+               blocking_timeout_ms = blocking_timeout / 1000;
+               if (blocking_timeout_ms != (int32_t) blocking_timeout_ms) {
+                       return NULL;
+               }
+       }
+
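/*
 * A worked example of the range check above (sketch; values and the
 * helper name are illustrative): blocking_timeout arrives in usec, is
 * converted to msec, and must fit the 32-bit
 * chan->u.s.blocking_timeout_ms field.
 */
#include <stdint.h>

static int blocking_timeout_fits_sketch(int64_t timeout_us)
{
	int64_t ms;

	if (timeout_us < -1)
		return 0;		/* Negative values other than -1 are rejected. */
	if (timeout_us == -1)
		return 1;		/* "Block forever" sentinel, stored as-is. */
	ms = timeout_us / 1000;
	return ms == (int32_t) ms;	/* Round-trip cast detects 32-bit overflow. */
}
/* e.g. 10000000 us -> 10000 ms fits; INT64_MAX us overflows, so channel_create() returns NULL. */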
        if (lib_ring_buffer_check_config(config, switch_timer_interval,
                                         read_timer_interval))
                return NULL;
@@ -937,16 +1008,16 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buff
 
        /* Calculate the shm allocation layout */
        shmsize = sizeof(struct channel);
-       shmsize += offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_shmp));
+       shmsize += lttng_ust_offset_align(shmsize, __alignof__(struct lttng_ust_lib_ring_buffer_shmp));
        shmsize += sizeof(struct lttng_ust_lib_ring_buffer_shmp) * nr_streams;
        chansize = shmsize;
        if (priv_data_align)
-               shmsize += offset_align(shmsize, priv_data_align);
+               shmsize += lttng_ust_offset_align(shmsize, priv_data_align);
        shmsize += priv_data_size;
 
        /* Allocate normal memory for channel (not shared) */
        shmobj = shm_object_table_alloc(handle->table, shmsize, SHM_OBJECT_MEM,
-                       -1);
+                       -1, -1);
        if (!shmobj)
                goto error_append;
        /* struct channel is at object 0, offset 0 (hardcoded) */
@@ -975,6 +1046,8 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buff
                        *priv_data = NULL;
        }
 
+       chan->u.s.blocking_timeout_ms = (int32_t) blocking_timeout_ms;
+
        ret = channel_backend_init(&chan->backend, name, config,
                                   subbuf_size, num_subbuf, handle,
                                   stream_fds);
@@ -993,7 +1066,7 @@ struct lttng_ust_shm_handle *channel_create(const struct lttng_ust_lib_ring_buff
 
 error_backend_init:
 error_append:
-       shm_object_table_destroy(handle->table);
+       shm_object_table_destroy(handle->table, 1);
 error_table_alloc:
        free(handle);
        return NULL;
@@ -1025,7 +1098,7 @@ struct lttng_ust_shm_handle *channel_handle_create(void *data,
        return handle;
 
 error_table_object:
-       shm_object_table_destroy(handle->table);
+       shm_object_table_destroy(handle->table, 0);
 error_table_alloc:
        free(handle);
        return NULL;
@@ -1053,9 +1126,10 @@ unsigned int channel_handle_get_nr_streams(struct lttng_ust_shm_handle *handle)
 }
 
 static
-void channel_release(struct channel *chan, struct lttng_ust_shm_handle *handle)
+void channel_release(struct channel *chan, struct lttng_ust_shm_handle *handle,
+               int consumer)
 {
-       channel_free(chan, handle);
+       channel_free(chan, handle, consumer);
 }
 
 /**
@@ -1066,7 +1140,7 @@ void channel_release(struct channel *chan, struct lttng_ust_shm_handle *handle)
  * Call "destroy" callback, finalize channels, decrement the channel
  * reference count. Note that when readers have completed data
  * consumption of finalized channels, get_subbuf() will return -ENODATA.
- * They should release their handle at that point. 
+ * They should release their handle at that point.
  */
 void channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
                int consumer)
@@ -1087,7 +1161,7 @@ void channel_destroy(struct channel *chan, struct lttng_ust_shm_handle *handle,
         * sessiond/consumer are keeping a reference on the shm file
         * descriptor directly. No need to refcount.
         */
-       channel_release(chan, handle);
+       channel_release(chan, handle, consumer);
        return;
 }
 
@@ -1188,6 +1262,8 @@ void lib_ring_buffer_release_read(struct lttng_ust_lib_ring_buffer *buf,
 {
        struct channel *chan = shmp(handle, buf->backend.chan);
 
+       if (!chan)
+               return;
        CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
        cmm_smp_mb();
        uatomic_dec(&buf->active_readers);
@@ -1207,11 +1283,15 @@ int lib_ring_buffer_snapshot(struct lttng_ust_lib_ring_buffer *buf,
                             unsigned long *consumed, unsigned long *produced,
                             struct lttng_ust_shm_handle *handle)
 {
-       struct channel *chan = shmp(handle, buf->backend.chan);
-       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+       struct channel *chan;
+       const struct lttng_ust_lib_ring_buffer_config *config;
        unsigned long consumed_cur, write_offset;
        int finalized;
 
+       chan = shmp(handle, buf->backend.chan);
+       if (!chan)
+               return -EPERM;
+       config = &chan->backend.config;
        finalized = CMM_ACCESS_ONCE(buf->finalized);
        /*
         * Read finalized before counters.
@@ -1252,6 +1332,43 @@ nodata:
                return -EAGAIN;
 }
 
+/**
+ * Performs the same function as lib_ring_buffer_snapshot(), but the positions
+ * are saved regardless of whether the consumed and produced positions are
+ * in the same subbuffer.
+ * @buf: ring buffer
+ * @consumed: consumed byte count indicating the last position read
+ * @produced: produced byte count indicating the last position written
+ *
+ * This function is meant to provide information on the exact producer and
+ * consumer positions without regard for the "snapshot" feature.
+ */
+int lib_ring_buffer_snapshot_sample_positions(
+                            struct lttng_ust_lib_ring_buffer *buf,
+                            unsigned long *consumed, unsigned long *produced,
+                            struct lttng_ust_shm_handle *handle)
+{
+       struct channel *chan;
+       const struct lttng_ust_lib_ring_buffer_config *config;
+
+       chan = shmp(handle, buf->backend.chan);
+       if (!chan)
+               return -EPERM;
+       config = &chan->backend.config;
+       cmm_smp_rmb();
+       *consumed = uatomic_read(&buf->consumed);
+       /*
+        * No need to issue a memory barrier between consumed count read and
+        * write offset read, because consumed count can only change
+        * concurrently in overwrite mode, and we keep a sequence counter
+        * identifier derived from the write offset to check we are getting
+        * the same sub-buffer we are expecting (the sub-buffers are atomically
+        * "tagged" upon writes, tags are checked upon read).
+        */
+       *produced = v_read(config, &buf->offset);
+       return 0;
+}
+
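/*
 * Hypothetical consumer-side use of the new sampling helper (the
 * backlog computation and function name are illustrative, not part of
 * this commit):
 */
static unsigned long ring_buffer_backlog_sketch(struct lttng_ust_lib_ring_buffer *buf,
		struct lttng_ust_shm_handle *handle)
{
	unsigned long consumed, produced;

	if (lib_ring_buffer_snapshot_sample_positions(buf, &consumed,
			&produced, handle))
		return 0;	/* -EPERM: channel no longer mapped. */
	/* Unsigned subtraction handles counter wrap-around. */
	return produced - consumed;
}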
 /**
  * lib_ring_buffer_move_consumer - move consumed counter forward
  * @buf: ring buffer
@@ -1262,9 +1379,12 @@ void lib_ring_buffer_move_consumer(struct lttng_ust_lib_ring_buffer *buf,
                                   struct lttng_ust_shm_handle *handle)
 {
        struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
-       struct channel *chan = shmp(handle, bufb->chan);
+       struct channel *chan;
        unsigned long consumed;
 
+       chan = shmp(handle, bufb->chan);
+       if (!chan)
+               return;
        CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
 
        /*
@@ -1290,11 +1410,16 @@ int lib_ring_buffer_get_subbuf(struct lttng_ust_lib_ring_buffer *buf,
                               unsigned long consumed,
                               struct lttng_ust_shm_handle *handle)
 {
-       struct channel *chan = shmp(handle, buf->backend.chan);
-       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+       struct channel *chan;
+       const struct lttng_ust_lib_ring_buffer_config *config;
        unsigned long consumed_cur, consumed_idx, commit_count, write_offset;
        int ret, finalized, nr_retry = LTTNG_UST_RING_BUFFER_GET_RETRY;
+       struct commit_counters_cold *cc_cold;
 
+       chan = shmp(handle, buf->backend.chan);
+       if (!chan)
+               return -EPERM;
+       config = &chan->backend.config;
 retry:
        finalized = CMM_ACCESS_ONCE(buf->finalized);
        /*
@@ -1303,7 +1428,10 @@ retry:
        cmm_smp_rmb();
        consumed_cur = uatomic_read(&buf->consumed);
        consumed_idx = subbuf_index(consumed, chan);
-       commit_count = v_read(config, &shmp_index(handle, buf->commit_cold, consumed_idx)->cc_sb);
+       cc_cold = shmp_index(handle, buf->commit_cold, consumed_idx);
+       if (!cc_cold)
+               return -EPERM;
+       commit_count = v_read(config, &cc_cold->cc_sb);
        /*
         * Make sure we read the commit count before reading the buffer
         * data and the write offset. Correct consumed offset ordering
@@ -1446,10 +1574,16 @@ void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
                                struct lttng_ust_shm_handle *handle)
 {
        struct lttng_ust_lib_ring_buffer_backend *bufb = &buf->backend;
-       struct channel *chan = shmp(handle, bufb->chan);
-       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
-       unsigned long read_sb_bindex, consumed_idx, consumed;
+       struct channel *chan;
+       const struct lttng_ust_lib_ring_buffer_config *config;
+       unsigned long sb_bindex, consumed_idx, consumed;
+       struct lttng_ust_lib_ring_buffer_backend_pages_shmp *rpages;
+       struct lttng_ust_lib_ring_buffer_backend_pages *backend_pages;
 
+       chan = shmp(handle, bufb->chan);
+       if (!chan)
+               return;
+       config = &chan->backend.config;
        CHAN_WARN_ON(chan, uatomic_read(&buf->active_readers) != 1);
 
        if (!buf->get_subbuf) {
@@ -1469,11 +1603,16 @@ void lib_ring_buffer_put_subbuf(struct lttng_ust_lib_ring_buffer *buf,
         * Can be below zero if an iterator is used on a snapshot more than
         * once.
         */
-       read_sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
-       v_add(config, v_read(config,
-                            &shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread),
-             &bufb->records_read);
-       v_set(config, &shmp(handle, shmp_index(handle, bufb->array, read_sb_bindex)->shmp)->records_unread, 0);
+       sb_bindex = subbuffer_id_get_index(config, bufb->buf_rsb.id);
+       rpages = shmp_index(handle, bufb->array, sb_bindex);
+       if (!rpages)
+               return;
+       backend_pages = shmp(handle, rpages->shmp);
+       if (!backend_pages)
+               return;
+       v_add(config, v_read(config, &backend_pages->records_unread),
+                       &bufb->records_read);
+       v_set(config, &backend_pages->records_unread, 0);
        CHAN_WARN_ON(chan, config->mode == RING_BUFFER_OVERWRITE
                     && subbuffer_id_is_noref(config, bufb->buf_rsb.id));
        subbuffer_id_set_noref(config, &bufb->buf_rsb.id);
@@ -1509,10 +1648,18 @@ void lib_ring_buffer_print_subbuffer_errors(struct lttng_ust_lib_ring_buffer *bu
 {
        const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long cons_idx, commit_count, commit_count_sb;
+       struct commit_counters_hot *cc_hot;
+       struct commit_counters_cold *cc_cold;
 
        cons_idx = subbuf_index(cons_offset, chan);
-       commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, cons_idx)->cc);
-       commit_count_sb = v_read(config, &shmp_index(handle, buf->commit_cold, cons_idx)->cc_sb);
+       cc_hot = shmp_index(handle, buf->commit_hot, cons_idx);
+       if (!cc_hot)
+               return;
+       cc_cold = shmp_index(handle, buf->commit_cold, cons_idx);
+       if (!cc_cold)
+               return;
+       commit_count = v_read(config, &cc_hot->cc);
+       commit_count_sb = v_read(config, &cc_cold->cc_sb);
 
        if (subbuf_offset(commit_count, chan) != 0)
                DBG("ring buffer %s, cpu %d: "
@@ -1609,6 +1756,7 @@ void lib_ring_buffer_switch_old_start(struct lttng_ust_lib_ring_buffer *buf,
        const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long oldidx = subbuf_index(offsets->old, chan);
        unsigned long commit_count;
+       struct commit_counters_hot *cc_hot;
 
        config->cb.buffer_begin(buf, tsc, oldidx, handle);
 
@@ -1617,15 +1765,18 @@ void lib_ring_buffer_switch_old_start(struct lttng_ust_lib_ring_buffer *buf,
         * determine that the subbuffer is full.
         */
        cmm_smp_wmb();
+       cc_hot = shmp_index(handle, buf->commit_hot, oldidx);
+       if (!cc_hot)
+               return;
        v_add(config, config->cb.subbuffer_header_size(),
-             &shmp_index(handle, buf->commit_hot, oldidx)->cc);
-       commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
+             &cc_hot->cc);
+       commit_count = v_read(config, &cc_hot->cc);
        /* Check if the written buffer has to be delivered */
        lib_ring_buffer_check_deliver(config, buf, chan, offsets->old,
                                      commit_count, oldidx, handle, tsc);
-       lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
+       lib_ring_buffer_write_commit_counter(config, buf, chan,
                        offsets->old + config->cb.subbuffer_header_size(),
-                       commit_count, handle);
+                       commit_count, handle, cc_hot);
 }
 
 /*
@@ -1646,23 +1797,42 @@ void lib_ring_buffer_switch_old_end(struct lttng_ust_lib_ring_buffer *buf,
        const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long oldidx = subbuf_index(offsets->old - 1, chan);
        unsigned long commit_count, padding_size, data_size;
+       struct commit_counters_hot *cc_hot;
+       uint64_t *ts_end;
 
        data_size = subbuf_offset(offsets->old - 1, chan) + 1;
        padding_size = chan->backend.subbuf_size - data_size;
        subbuffer_set_data_size(config, &buf->backend, oldidx, data_size,
                                handle);
 
+       ts_end = shmp_index(handle, buf->ts_end, oldidx);
+       if (!ts_end)
+               return;
        /*
-        * Order all writes to buffer before the commit count update that will
-        * determine that the subbuffer is full.
+        * This is the last space reservation in that sub-buffer before
+        * it gets delivered. This provides exclusive access to write to
+        * this sub-buffer's ts_end. There are also no concurrent
+        * readers of that ts_end because delivery of that sub-buffer is
+        * postponed until the commit counter is incremented for the
+        * current space reservation.
+        */
+       *ts_end = tsc;
+
+       /*
+        * Order all writes to buffer and store to ts_end before the commit
+        * count update that will determine that the subbuffer is full.
         */
        cmm_smp_wmb();
-       v_add(config, padding_size, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
-       commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, oldidx)->cc);
+       cc_hot = shmp_index(handle, buf->commit_hot, oldidx);
+       if (!cc_hot)
+               return;
+       v_add(config, padding_size, &cc_hot->cc);
+       commit_count = v_read(config, &cc_hot->cc);
        lib_ring_buffer_check_deliver(config, buf, chan, offsets->old - 1,
                                      commit_count, oldidx, handle, tsc);
-       lib_ring_buffer_write_commit_counter(config, buf, chan, oldidx,
-                       offsets->old + padding_size, commit_count, handle);
+       lib_ring_buffer_write_commit_counter(config, buf, chan,
+                       offsets->old + padding_size, commit_count, handle,
+                       cc_hot);
 }
 
 /*
@@ -1682,6 +1852,7 @@ void lib_ring_buffer_switch_new_start(struct lttng_ust_lib_ring_buffer *buf,
        const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long beginidx = subbuf_index(offsets->begin, chan);
        unsigned long commit_count;
+       struct commit_counters_hot *cc_hot;
 
        config->cb.buffer_begin(buf, tsc, beginidx, handle);
 
@@ -1690,15 +1861,17 @@ void lib_ring_buffer_switch_new_start(struct lttng_ust_lib_ring_buffer *buf,
         * determine that the subbuffer is full.
         */
        cmm_smp_wmb();
-       v_add(config, config->cb.subbuffer_header_size(),
-             &shmp_index(handle, buf->commit_hot, beginidx)->cc);
-       commit_count = v_read(config, &shmp_index(handle, buf->commit_hot, beginidx)->cc);
+       cc_hot = shmp_index(handle, buf->commit_hot, beginidx);
+       if (!cc_hot)
+               return;
+       v_add(config, config->cb.subbuffer_header_size(), &cc_hot->cc);
+       commit_count = v_read(config, &cc_hot->cc);
        /* Check if the written buffer has to be delivered */
        lib_ring_buffer_check_deliver(config, buf, chan, offsets->begin,
                                      commit_count, beginidx, handle, tsc);
-       lib_ring_buffer_write_commit_counter(config, buf, chan, beginidx,
+       lib_ring_buffer_write_commit_counter(config, buf, chan,
                        offsets->begin + config->cb.subbuffer_header_size(),
-                       commit_count, handle);
+                       commit_count, handle, cc_hot);
 }
 
 /*
@@ -1718,11 +1891,24 @@ void lib_ring_buffer_switch_new_end(struct lttng_ust_lib_ring_buffer *buf,
 {
        const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        unsigned long endidx, data_size;
+       uint64_t *ts_end;
 
        endidx = subbuf_index(offsets->end - 1, chan);
        data_size = subbuf_offset(offsets->end - 1, chan) + 1;
        subbuffer_set_data_size(config, &buf->backend, endidx, data_size,
                                handle);
+       ts_end = shmp_index(handle, buf->ts_end, endidx);
+       if (!ts_end)
+               return;
+       /*
+        * This is the last space reservation in that sub-buffer before
+        * it gets delivered. This provides exclusive access to write to
+        * this sub-buffer's ts_end. There are also no concurrent
+        * readers of that ts_end because delivery of that sub-buffer is
+        * postponed until the commit counter is incremented for the
+        * current space reservation.
+        */
+       *ts_end = tsc;
 }
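/*
 * Summary of the ts_end protocol introduced by this commit (editorial
 * sketch of the ordering, not code from the patch):
 *
 *   last reserver of subbuffer N      deliverer of subbuffer N
 *   -----------------------------     -------------------------------------
 *   *ts_end = tsc;                    if (cmpxchg(&cc_cold->cc_sb, ...)
 *   cmm_smp_wmb();                            == old_commit_count) {
 *   v_add(..., &cc_hot->cc);                  cmm_smp_mb();
 *                                             buffer_end(buf, *ts_end, ...);
 *
 * The cmpxchg winner is unique, and the store to *ts_end is ordered
 * before the commit counter update that the deliverer observes, so
 * *ts_end is stable by the time buffer_end() reads it.
 */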
 
 /*
@@ -1769,6 +1955,7 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
 
        if (caa_unlikely(off == 0)) {
                unsigned long sb_index, commit_count;
+               struct commit_counters_cold *cc_cold;
 
                /*
                 * We are performing a SWITCH_FLUSH. There may be concurrent
@@ -1785,9 +1972,10 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
 
                /* Test new buffer integrity */
                sb_index = subbuf_index(offsets->begin, chan);
-               commit_count = v_read(config,
-                               &shmp_index(handle, buf->commit_cold,
-                                       sb_index)->cc_sb);
+               cc_cold = shmp_index(handle, buf->commit_cold, sb_index);
+               if (!cc_cold)
+                       return -1;
+               commit_count = v_read(config, &cc_cold->cc_sb);
                reserve_commit_diff =
                  (buf_trunc(offsets->begin, chan)
                   >> chan->backend.num_subbuf_order)
@@ -1838,19 +2026,26 @@ int lib_ring_buffer_try_switch_slow(enum switch_mode mode,
  * Force a sub-buffer switch. This operation is completely reentrant: can be
  * called while tracing is active with absolutely no lock held.
  *
- * Note, however, that as a v_cmpxchg is used for some atomic
- * operations, this function must be called from the CPU which owns the buffer
- * for a ACTIVE flush.
+ * For RING_BUFFER_SYNC_PER_CPU ring buffers, as a v_cmpxchg is used for
+ * some atomic operations, this function must be called from the CPU
+ * which owns the buffer for an ACTIVE flush. However, for
+ * RING_BUFFER_SYNC_GLOBAL ring buffers, this function can be called
+ * from any CPU.
  */
 void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf, enum switch_mode mode,
                                 struct lttng_ust_shm_handle *handle)
 {
-       struct channel *chan = shmp(handle, buf->backend.chan);
-       const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
+       struct channel *chan;
+       const struct lttng_ust_lib_ring_buffer_config *config;
        struct switch_offsets offsets;
        unsigned long oldidx;
        uint64_t tsc;
 
+       chan = shmp(handle, buf->backend.chan);
+       if (!chan)
+               return;
+       config = &chan->backend.config;
+
        offsets.size = 0;
 
        /*
@@ -1893,6 +2088,23 @@ void lib_ring_buffer_switch_slow(struct lttng_ust_lib_ring_buffer *buf, enum swi
        lib_ring_buffer_switch_old_end(buf, chan, &offsets, tsc, handle);
 }
 
+static
+bool handle_blocking_retry(int *timeout_left_ms)
+{
+       int timeout = *timeout_left_ms, delay;
+
+       if (caa_likely(!timeout))
+               return false;   /* Do not retry, discard event. */
+       if (timeout < 0)        /* Wait forever. */
+               delay = RETRY_DELAY_MS;
+       else
+               delay = min_t(int, timeout, RETRY_DELAY_MS);
+       (void) poll(NULL, 0, delay);
+       if (timeout > 0)
+               *timeout_left_ms -= delay;
+       return true;    /* Retry. */
+}
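/*
 * Caller pattern for handle_blocking_retry(), simplified from
 * lib_ring_buffer_try_reserve_slow() below; the predicate and function
 * names here are assumptions for illustration:
 */
#include <errno.h>
#include <stdbool.h>

extern bool buffer_full_sketch(struct channel *chan);	/* Illustrative predicate. */

static int reserve_with_blocking_sketch(struct channel *chan)
{
	int timeout_left_ms = lttng_ust_ringbuffer_get_timeout(chan);

	while (buffer_full_sketch(chan)) {
		/* Sleep up to RETRY_DELAY_MS, decrement the budget, re-check. */
		if (handle_blocking_retry(&timeout_left_ms))
			continue;
		return -ENOBUFS;	/* Budget exhausted (or non-blocking): discard. */
	}
	return 0;
}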
+
 /*
  * Returns :
  * 0 if ok
@@ -1904,11 +2116,13 @@ static
 int lib_ring_buffer_try_reserve_slow(struct lttng_ust_lib_ring_buffer *buf,
                                     struct channel *chan,
                                     struct switch_offsets *offsets,
-                                    struct lttng_ust_lib_ring_buffer_ctx *ctx)
+                                    struct lttng_ust_lib_ring_buffer_ctx *ctx,
+                                    void *client_ctx)
 {
        const struct lttng_ust_lib_ring_buffer_config *config = &chan->backend.config;
        struct lttng_ust_shm_handle *handle = ctx->handle;
        unsigned long reserve_commit_diff, offset_cmp;
+       int timeout_left_ms = lttng_ust_ringbuffer_get_timeout(chan);
 
 retry:
        offsets->begin = offset_cmp = v_read(config, &buf->offset);
@@ -1931,7 +2145,7 @@ retry:
                offsets->size = config->cb.record_header_size(config, chan,
                                                offsets->begin,
                                                &offsets->pre_header_padding,
-                                               ctx);
+                                               ctx, client_ctx);
                offsets->size +=
                        lib_ring_buffer_align(offsets->begin + offsets->size,
                                              ctx->largest_align)
@@ -1944,6 +2158,7 @@ retry:
        }
        if (caa_unlikely(offsets->switch_new_start)) {
                unsigned long sb_index, commit_count;
+               struct commit_counters_cold *cc_cold;
 
                /*
                 * We are typically not filling the previous buffer completely.
@@ -1962,9 +2177,10 @@ retry:
                 * are not seen reordered when updated by another CPU.
                 */
                cmm_smp_rmb();
-               commit_count = v_read(config,
-                               &shmp_index(handle, buf->commit_cold,
-                                       sb_index)->cc_sb);
+               cc_cold = shmp_index(handle, buf->commit_cold, sb_index);
+               if (!cc_cold)
+                       return -1;
+               commit_count = v_read(config, &cc_cold->cc_sb);
                /* Read buf->commit_cold[sb_index].cc_sb before buf->offset. */
                cmm_smp_rmb();
                if (caa_unlikely(offset_cmp != v_read(config, &buf->offset))) {
@@ -1989,6 +2205,9 @@ retry:
                                >= chan->backend.buf_size)) {
                                unsigned long nr_lost;
 
+                               if (handle_blocking_retry(&timeout_left_ms))
+                                       goto retry;
+
                                /*
                                 * We do not overwrite non consumed buffers
                                 * and we are full : record is lost.
@@ -2032,7 +2251,7 @@ retry:
                        config->cb.record_header_size(config, chan,
                                                offsets->begin,
                                                &offsets->pre_header_padding,
-                                               ctx);
+                                               ctx, client_ctx);
                offsets->size +=
                        lib_ring_buffer_align(offsets->begin + offsets->size,
                                              ctx->largest_align)
@@ -2086,7 +2305,8 @@ retry:
  * -EIO for other errors, else returns 0.
  * It will take care of sub-buffer switching.
  */
-int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
+int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx,
+               void *client_ctx)
 {
        struct channel *chan = ctx->chan;
        struct lttng_ust_shm_handle *handle = ctx->handle;
@@ -2099,13 +2319,15 @@ int lib_ring_buffer_reserve_slow(struct lttng_ust_lib_ring_buffer_ctx *ctx)
                buf = shmp(handle, chan->backend.buf[ctx->cpu].shmp);
        else
                buf = shmp(handle, chan->backend.buf[0].shmp);
+       if (!buf)
+               return -EIO;
        ctx->buf = buf;
 
        offsets.size = 0;
 
        do {
                ret = lib_ring_buffer_try_reserve_slow(buf, chan, &offsets,
-                                                      ctx);
+                                                      ctx, client_ctx);
                if (caa_unlikely(ret))
                        return ret;
        } while (caa_unlikely(v_cmpxchg(config, &buf->offset, offsets.old,
@@ -2164,10 +2386,44 @@ void lib_ring_buffer_vmcore_check_deliver(const struct lttng_ust_lib_ring_buffer
                                          unsigned long idx,
                                          struct lttng_ust_shm_handle *handle)
 {
-       if (config->oops == RING_BUFFER_OOPS_CONSISTENCY)
-               v_set(config, &shmp_index(handle, buf->commit_hot, idx)->seq, commit_count);
+       struct commit_counters_hot *cc_hot;
+
+       if (config->oops != RING_BUFFER_OOPS_CONSISTENCY)
+               return;
+       cc_hot = shmp_index(handle, buf->commit_hot, idx);
+       if (!cc_hot)
+               return;
+       v_set(config, &cc_hot->seq, commit_count);
 }
 
+/*
+ * The ring buffer can count events recorded and overwritten per buffer,
+ * but it is disabled by default due to its performance overhead.
+ */
+#ifdef LTTNG_RING_BUFFER_COUNT_EVENTS
+static
+void deliver_count_events(const struct lttng_ust_lib_ring_buffer_config *config,
+               struct lttng_ust_lib_ring_buffer *buf,
+               unsigned long idx,
+               struct lttng_ust_shm_handle *handle)
+{
+       v_add(config, subbuffer_get_records_count(config,
+                       &buf->backend, idx, handle),
+               &buf->records_count);
+       v_add(config, subbuffer_count_records_overrun(config,
+                       &buf->backend, idx, handle),
+               &buf->records_overrun);
+}
+#else /* LTTNG_RING_BUFFER_COUNT_EVENTS */
+static
+void deliver_count_events(const struct lttng_ust_lib_ring_buffer_config *config,
+               struct lttng_ust_lib_ring_buffer *buf,
+               unsigned long idx,
+               struct lttng_ust_shm_handle *handle)
+{
+}
+#endif /* #else LTTNG_RING_BUFFER_COUNT_EVENTS */
+
 void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_config *config,
                                   struct lttng_ust_lib_ring_buffer *buf,
                                   struct channel *chan,
@@ -2179,6 +2435,7 @@ void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_c
 {
        unsigned long old_commit_count = commit_count
                                         - chan->backend.subbuf_size;
+       struct commit_counters_cold *cc_cold;
 
        /*
         * If we succeeded at updating cc_sb below, we are the subbuffer
@@ -2212,26 +2469,32 @@ void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_c
         * commit_cold cc_sb update.
         */
        cmm_smp_wmb();
-       if (caa_likely(v_cmpxchg(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
+       cc_cold = shmp_index(handle, buf->commit_cold, idx);
+       if (!cc_cold)
+               return;
+       if (caa_likely(v_cmpxchg(config, &cc_cold->cc_sb,
                                 old_commit_count, old_commit_count + 1)
                   == old_commit_count)) {
+               uint64_t *ts_end;
+
                /*
                 * Start of exclusive subbuffer access. We are
                 * guaranteed to be the last writer in this subbuffer
                 * and any other writer trying to access this subbuffer
                 * in this state is required to drop records.
+                *
+                * We can read the ts_end for the current sub-buffer
+                * which has been saved by the very last space
+                * reservation for the current sub-buffer.
+                *
+                * Order increment of commit counter before reading ts_end.
                 */
-               v_add(config,
-                     subbuffer_get_records_count(config,
-                                                 &buf->backend,
-                                                 idx, handle),
-                     &buf->records_count);
-               v_add(config,
-                     subbuffer_count_records_overrun(config,
-                                                     &buf->backend,
-                                                     idx, handle),
-                     &buf->records_overrun);
-               config->cb.buffer_end(buf, tsc, idx,
+               cmm_smp_mb();
+               ts_end = shmp_index(handle, buf->ts_end, idx);
+               if (!ts_end)
+                       return;
+               deliver_count_events(config, buf, idx, handle);
+               config->cb.buffer_end(buf, *ts_end, idx,
                                      lib_ring_buffer_get_data_size(config,
                                                                buf,
                                                                idx,
@@ -2260,8 +2523,7 @@ void lib_ring_buffer_check_deliver_slow(const struct lttng_ust_lib_ring_buffer_c
                 */
                cmm_smp_mb();
                /* End of exclusive subbuffer access */
-               v_set(config, &shmp_index(handle, buf->commit_cold, idx)->cc_sb,
-                     commit_count);
+               v_set(config, &cc_cold->cc_sb, commit_count);
                /*
                 * Order later updates to reserve count after
                 * the commit cold cc_sb update.