Replace explicit rcu_read_lock/unlock with lttng::urcu::read_lock_guard
author: Jérémie Galarneau <jeremie.galarneau@efficios.com>
Sat, 28 Jan 2023 09:54:09 +0000 (04:54 -0500)
committer: Jérémie Galarneau <jeremie.galarneau@efficios.com>
Thu, 2 Feb 2023 22:06:41 +0000 (17:06 -0500)
The explicit use of rcu_read_lock/unlock() has been a frequent source of
bugs in the code base, as the locks can often end up imbalanced when
error paths are taken (through goto's).

Since lttng::urcu::read_lock_guard was implemented in a previous commit,
replace all usages of the rcu_read_lock/unlock() API with an RAII
lock_guard wrapper.

Essentially, lttng::urcu::read_lock_guard acquires the RCU reader lock
on construction, and releases it when it goes out of scope. This
automates what is accomplished in the various error paths by jumping to
error handling labels.

For more info, see: https://en.cppreference.com/w/cpp/thread/lock_guard

No user-visible change of behaviour is intended by this patch. The scope
of some critical sections has been reduced when possible and when it
didn't matter from a correctness standpoint. The RCU reader lock would
often be held longer than necessary to simplify the error paths.

Explicit scopes are used to limit the lifetime of the reader lock guards
when used in long functions or when it is only intended to protect the
iteration over cds_lfht instances. In those cases, the following pattern
is used:

```cpp
void foo()
{
  [...]
  {
    lttng::urcu::read_lock_guard read_guard;

    cds_lfht_for_each(...) {
      [...]
    }
  }
  [...]
}
```

This explicitly bounds the critical section and also serves as a hint as
to why the read guard is being used. It is fairly verbose, but I think
it's a good compromise until we add an idiomatic urcu wrapper that
automates this stuff too.

Signed-off-by: Jérémie Galarneau <jeremie.galarneau@efficios.com>
Change-Id: Ie3ef8ddf0f1ab813971522176115688939696370

45 files changed:
src/bin/lttng-relayd/connection.cpp
src/bin/lttng-relayd/ctf-trace.cpp
src/bin/lttng-relayd/index.cpp
src/bin/lttng-relayd/live.cpp
src/bin/lttng-relayd/main.cpp
src/bin/lttng-relayd/session.cpp
src/bin/lttng-relayd/sessiond-trace-chunks.cpp
src/bin/lttng-relayd/stream.cpp
src/bin/lttng-relayd/viewer-session.cpp
src/bin/lttng-relayd/viewer-stream.cpp
src/bin/lttng-sessiond/action-executor.cpp
src/bin/lttng-sessiond/agent-thread.cpp
src/bin/lttng-sessiond/agent.cpp
src/bin/lttng-sessiond/buffer-registry.cpp
src/bin/lttng-sessiond/channel.cpp
src/bin/lttng-sessiond/cmd.cpp
src/bin/lttng-sessiond/consumer.cpp
src/bin/lttng-sessiond/context.cpp
src/bin/lttng-sessiond/dispatch.cpp
src/bin/lttng-sessiond/event-notifier-error-accounting.cpp
src/bin/lttng-sessiond/event.cpp
src/bin/lttng-sessiond/kernel-consumer.cpp
src/bin/lttng-sessiond/kernel.cpp
src/bin/lttng-sessiond/lttng-syscall.cpp
src/bin/lttng-sessiond/manage-kernel.cpp
src/bin/lttng-sessiond/notification-thread-events.cpp
src/bin/lttng-sessiond/rotation-thread.cpp
src/bin/lttng-sessiond/save.cpp
src/bin/lttng-sessiond/session.cpp
src/bin/lttng-sessiond/snapshot.cpp
src/bin/lttng-sessiond/trace-ust.cpp
src/bin/lttng-sessiond/tracker.cpp
src/bin/lttng-sessiond/ust-app.cpp
src/bin/lttng-sessiond/ust-consumer.cpp
src/common/channel.cpp
src/common/consumer/consumer-stream.cpp
src/common/consumer/consumer-timer.cpp
src/common/consumer/consumer.cpp
src/common/fd-tracker/fd-tracker.cpp
src/common/fd-tracker/inode.cpp
src/common/hashtable/hashtable.cpp
src/common/kernel-consumer/kernel-consumer.cpp
src/common/macros.hpp
src/common/trace-chunk.cpp
src/common/ust-consumer/ust-consumer.cpp

index 139f2e4dc5cbef3f59f0d1425a6095df0b899bf5..668a61e7a1a6b3674191a54964569bc6eba52982 100644 (file)
@@ -13,6 +13,7 @@
 #include "viewer-session.hpp"
 
 #include <common/common.hpp>
+#include <common/urcu.hpp>
 
 #include <urcu/rculist.h>
 
@@ -29,7 +30,7 @@ struct relay_connection *connection_get_by_sock(struct lttng_ht *relay_connectio
 
        LTTNG_ASSERT(sock >= 0);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        lttng_ht_lookup(relay_connections_ht, (void *) ((unsigned long) sock), &iter);
        node = lttng_ht_iter_get_node_ulong(&iter);
        if (!node) {
@@ -41,7 +42,6 @@ struct relay_connection *connection_get_by_sock(struct lttng_ht *relay_connectio
                conn = nullptr;
        }
 end:
-       rcu_read_unlock();
        return conn;
 }
 
@@ -150,9 +150,8 @@ static void connection_release(struct urcu_ref *ref)
 
 void connection_put(struct relay_connection *conn)
 {
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        urcu_ref_put(&conn->ref, connection_release);
-       rcu_read_unlock();
 }
 
 void connection_ht_add(struct lttng_ht *relay_connections_ht, struct relay_connection *conn)
index 826f0e9e48b304ca2a608a37ef7c5542edde8a70..cde9f3927e381297b3f3b8bd53a00532ea2f2e98 100644 (file)
@@ -14,6 +14,7 @@
 #include "stream.hpp"
 
 #include <common/common.hpp>
+#include <common/urcu.hpp>
 #include <common/utils.hpp>
 
 #include <urcu/rculist.h>
@@ -149,7 +150,7 @@ struct ctf_trace *ctf_trace_get_by_path_or_create(struct relay_session *session,
        struct lttng_ht_iter iter;
        struct ctf_trace *trace = nullptr;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        lttng_ht_lookup(session->ctf_traces_ht, subpath, &iter);
        node = lttng_ht_iter_get_node_str(&iter);
        if (!node) {
@@ -161,7 +162,6 @@ struct ctf_trace *ctf_trace_get_by_path_or_create(struct relay_session *session,
                trace = nullptr;
        }
 end:
-       rcu_read_unlock();
        if (!trace) {
                /* Try to create */
                trace = ctf_trace_create(session, subpath);
@@ -171,16 +171,15 @@ end:
 
 void ctf_trace_put(struct ctf_trace *trace)
 {
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        urcu_ref_put(&trace->ref, ctf_trace_release);
-       rcu_read_unlock();
 }
 
 int ctf_trace_close(struct ctf_trace *trace)
 {
        struct relay_stream *stream;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_list_for_each_entry_rcu(stream, &trace->stream_list, stream_node)
        {
                /*
@@ -189,7 +188,6 @@ int ctf_trace_close(struct ctf_trace *trace)
                 */
                try_stream_close(stream);
        }
-       rcu_read_unlock();
        /*
         * Since all references to the trace are held by its streams, we
         * don't need to do any self-ref put.
@@ -201,7 +199,7 @@ struct relay_viewer_stream *ctf_trace_get_viewer_metadata_stream(struct ctf_trac
 {
        struct relay_viewer_stream *vstream;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        vstream = rcu_dereference(trace->viewer_metadata_stream);
        if (!vstream) {
                goto end;
@@ -210,6 +208,5 @@ struct relay_viewer_stream *ctf_trace_get_viewer_metadata_stream(struct ctf_trac
                vstream = nullptr;
        }
 end:
-       rcu_read_unlock();
        return vstream;
 }
index 94e2d557a8d54aa8305e683893e2e7dea4873d89..e3b8bde8230717a4f5e225d124f307aba135a4f1 100644 (file)
@@ -16,6 +16,7 @@
 
 #include <common/common.hpp>
 #include <common/compat/endian.hpp>
+#include <common/urcu.hpp>
 #include <common/utils.hpp>
 
 /*
@@ -119,7 +120,7 @@ struct relay_index *relay_index_get_by_id_or_create(struct relay_stream *stream,
             stream->stream_handle,
             net_seq_num);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        lttng_ht_lookup(stream->indexes_ht, &net_seq_num, &iter);
        node = lttng_ht_iter_get_node_u64(&iter);
        if (node) {
@@ -148,7 +149,6 @@ struct relay_index *relay_index_get_by_id_or_create(struct relay_stream *stream,
                }
        }
 end:
-       rcu_read_unlock();
        DBG2("Index %sfound or created in HT for stream ID %" PRIu64 " and seqnum %" PRIu64,
             (index == NULL) ? "NOT " : "",
             stream->stream_handle,
@@ -249,14 +249,13 @@ void relay_index_put(struct relay_index *index)
        /*
         * Ensure existence of index->lock for index unlock.
         */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        /*
         * Index lock ensures that concurrent test and update of stream
         * ref is atomic.
         */
        LTTNG_ASSERT(index->ref.refcount != 0);
        urcu_ref_put(&index->ref, index_release);
-       rcu_read_unlock();
 }
 
 /*
@@ -306,12 +305,12 @@ void relay_index_close_all(struct relay_stream *stream)
        struct lttng_ht_iter iter;
        struct relay_index *index;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
+
        cds_lfht_for_each_entry (stream->indexes_ht->ht, &iter.iter, index, index_n.node) {
                /* Put self-ref from index. */
                relay_index_put(index);
        }
-       rcu_read_unlock();
 }
 
 void relay_index_close_partial_fd(struct relay_stream *stream)
@@ -319,7 +318,8 @@ void relay_index_close_partial_fd(struct relay_stream *stream)
        struct lttng_ht_iter iter;
        struct relay_index *index;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
+
        cds_lfht_for_each_entry (stream->indexes_ht->ht, &iter.iter, index, index_n.node) {
                if (!index->index_file) {
                        continue;
@@ -331,7 +331,6 @@ void relay_index_close_partial_fd(struct relay_stream *stream)
                 */
                relay_index_put(index);
        }
-       rcu_read_unlock();
 }
 
 uint64_t relay_index_find_last(struct relay_stream *stream)
@@ -340,13 +339,14 @@ uint64_t relay_index_find_last(struct relay_stream *stream)
        struct relay_index *index;
        uint64_t net_seq_num = -1ULL;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
+
        cds_lfht_for_each_entry (stream->indexes_ht->ht, &iter.iter, index, index_n.node) {
                if (net_seq_num == -1ULL || index->index_n.key > net_seq_num) {
                        net_seq_num = index->index_n.key;
                }
        }
-       rcu_read_unlock();
+
        return net_seq_num;
 }
 
@@ -390,16 +390,16 @@ int relay_index_switch_all_files(struct relay_stream *stream)
        struct relay_index *index;
        int ret = 0;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
+
        cds_lfht_for_each_entry (stream->indexes_ht->ht, &iter.iter, index, index_n.node) {
                ret = relay_index_switch_file(
                        index, stream->index_file, stream->pos_after_last_complete_data_index);
                if (ret) {
-                       goto end;
+                       return ret;
                }
        }
-end:
-       rcu_read_unlock();
+
        return ret;
 }
 
index 786feaae83651acf86f3115b0cb0f3530552cf2a..416d812a64f2d237238170faa26788fc125e8aea 100644 (file)
@@ -33,6 +33,7 @@
 #include <common/sessiond-comm/inet.hpp>
 #include <common/sessiond-comm/relayd.hpp>
 #include <common/sessiond-comm/sessiond-comm.hpp>
+#include <common/urcu.hpp>
 #include <common/uri.hpp>
 #include <common/utils.hpp>
 
@@ -243,22 +244,24 @@ static int check_new_streams(struct relay_connection *conn)
        if (!conn->viewer_session) {
                goto end;
        }
-       rcu_read_lock();
-       cds_list_for_each_entry_rcu(
-               session, &conn->viewer_session->session_list, viewer_session_node)
+
        {
-               if (!session_get(session)) {
-                       continue;
-               }
-               current_val = uatomic_cmpxchg(&session->new_streams, 1, 0);
-               ret = current_val;
-               session_put(session);
-               if (ret == 1) {
-                       goto end;
+               lttng::urcu::read_lock_guard read_lock;
+               cds_list_for_each_entry_rcu(
+                       session, &conn->viewer_session->session_list, viewer_session_node)
+               {
+                       if (!session_get(session)) {
+                               continue;
+                       }
+                       current_val = uatomic_cmpxchg(&session->new_streams, 1, 0);
+                       ret = current_val;
+                       session_put(session);
+                       if (ret == 1) {
+                               goto end;
+                       }
                }
        }
 end:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -275,63 +278,65 @@ send_viewer_streams(struct lttcomm_sock *sock, uint64_t session_id, unsigned int
        struct lttng_ht_iter iter;
        struct relay_viewer_stream *vstream;
 
-       rcu_read_lock();
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-       cds_lfht_for_each_entry (viewer_streams_ht->ht, &iter.iter, vstream, stream_n.node) {
-               struct ctf_trace *ctf_trace;
-               struct lttng_viewer_stream send_stream = {};
+               cds_lfht_for_each_entry (
+                       viewer_streams_ht->ht, &iter.iter, vstream, stream_n.node) {
+                       struct ctf_trace *ctf_trace;
+                       struct lttng_viewer_stream send_stream = {};
 
-               health_code_update();
+                       health_code_update();
 
-               if (!viewer_stream_get(vstream)) {
-                       continue;
-               }
+                       if (!viewer_stream_get(vstream)) {
+                               continue;
+                       }
 
-               pthread_mutex_lock(&vstream->stream->lock);
-               /* Ignore if not the same session. */
-               if (vstream->stream->trace->session->id != session_id ||
-                   (!ignore_sent_flag && vstream->sent_flag)) {
-                       pthread_mutex_unlock(&vstream->stream->lock);
-                       viewer_stream_put(vstream);
-                       continue;
-               }
+                       pthread_mutex_lock(&vstream->stream->lock);
+                       /* Ignore if not the same session. */
+                       if (vstream->stream->trace->session->id != session_id ||
+                           (!ignore_sent_flag && vstream->sent_flag)) {
+                               pthread_mutex_unlock(&vstream->stream->lock);
+                               viewer_stream_put(vstream);
+                               continue;
+                       }
 
-               ctf_trace = vstream->stream->trace;
-               send_stream.id = htobe64(vstream->stream->stream_handle);
-               send_stream.ctf_trace_id = htobe64(ctf_trace->id);
-               send_stream.metadata_flag = htobe32(vstream->stream->is_metadata);
-               if (lttng_strncpy(send_stream.path_name,
-                                 vstream->path_name,
-                                 sizeof(send_stream.path_name))) {
-                       pthread_mutex_unlock(&vstream->stream->lock);
-                       viewer_stream_put(vstream);
-                       ret = -1; /* Error. */
-                       goto end_unlock;
-               }
-               if (lttng_strncpy(send_stream.channel_name,
-                                 vstream->channel_name,
-                                 sizeof(send_stream.channel_name))) {
-                       pthread_mutex_unlock(&vstream->stream->lock);
-                       viewer_stream_put(vstream);
-                       ret = -1; /* Error. */
-                       goto end_unlock;
-               }
+                       ctf_trace = vstream->stream->trace;
+                       send_stream.id = htobe64(vstream->stream->stream_handle);
+                       send_stream.ctf_trace_id = htobe64(ctf_trace->id);
+                       send_stream.metadata_flag = htobe32(vstream->stream->is_metadata);
+                       if (lttng_strncpy(send_stream.path_name,
+                                         vstream->path_name,
+                                         sizeof(send_stream.path_name))) {
+                               pthread_mutex_unlock(&vstream->stream->lock);
+                               viewer_stream_put(vstream);
+                               ret = -1; /* Error. */
+                               goto end;
+                       }
+                       if (lttng_strncpy(send_stream.channel_name,
+                                         vstream->channel_name,
+                                         sizeof(send_stream.channel_name))) {
+                               pthread_mutex_unlock(&vstream->stream->lock);
+                               viewer_stream_put(vstream);
+                               ret = -1; /* Error. */
+                               goto end;
+                       }
 
-               DBG("Sending stream %" PRIu64 " to viewer", vstream->stream->stream_handle);
-               vstream->sent_flag = true;
-               pthread_mutex_unlock(&vstream->stream->lock);
+                       DBG("Sending stream %" PRIu64 " to viewer", vstream->stream->stream_handle);
+                       vstream->sent_flag = true;
+                       pthread_mutex_unlock(&vstream->stream->lock);
 
-               ret = send_response(sock, &send_stream, sizeof(send_stream));
-               viewer_stream_put(vstream);
-               if (ret < 0) {
-                       goto end_unlock;
+                       ret = send_response(sock, &send_stream, sizeof(send_stream));
+                       viewer_stream_put(vstream);
+                       if (ret < 0) {
+                               goto end;
+                       }
                }
        }
 
        ret = 0;
 
-end_unlock:
-       rcu_read_unlock();
+end:
        return ret;
 }
 
@@ -370,191 +375,201 @@ static int make_viewer_streams(struct relay_session *relay_session,
         * Create viewer streams for relay streams that are ready to be
         * used for a the given session id only.
         */
-       rcu_read_lock();
-       cds_lfht_for_each_entry (
-               relay_session->ctf_traces_ht->ht, &iter.iter, ctf_trace, node.node) {
-               bool trace_has_metadata_stream = false;
-
-               health_code_update();
-
-               if (!ctf_trace_get(ctf_trace)) {
-                       continue;
-               }
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-               /*
-                * Iterate over all the streams of the trace to see if we have a
-                * metadata stream.
-                */
-               cds_list_for_each_entry_rcu(relay_stream, &ctf_trace->stream_list, stream_node)
-               {
-                       bool is_metadata_stream;
+               cds_lfht_for_each_entry (
+                       relay_session->ctf_traces_ht->ht, &iter.iter, ctf_trace, node.node) {
+                       bool trace_has_metadata_stream = false;
 
-                       pthread_mutex_lock(&relay_stream->lock);
-                       is_metadata_stream = relay_stream->is_metadata;
-                       pthread_mutex_unlock(&relay_stream->lock);
+                       health_code_update();
 
-                       if (is_metadata_stream) {
-                               trace_has_metadata_stream = true;
-                               break;
+                       if (!ctf_trace_get(ctf_trace)) {
+                               continue;
                        }
-               }
 
-               relay_stream = nullptr;
-
-               /*
-                * If there is no metadata stream in this trace at the moment
-                * and we never sent one to the viewer, skip the trace. We
-                * accept that the viewer will not see this trace at all.
-                */
-               if (!trace_has_metadata_stream && !ctf_trace->metadata_stream_sent_to_viewer) {
-                       ctf_trace_put(ctf_trace);
-                       continue;
-               }
-
-               cds_list_for_each_entry_rcu(relay_stream, &ctf_trace->stream_list, stream_node)
-               {
-                       struct relay_viewer_stream *viewer_stream;
-
-                       if (!stream_get(relay_stream)) {
-                               continue;
+                       /*
+                        * Iterate over all the streams of the trace to see if we have a
+                        * metadata stream.
+                        */
+                       cds_list_for_each_entry_rcu(
+                               relay_stream, &ctf_trace->stream_list, stream_node)
+                       {
+                               bool is_metadata_stream;
+
+                               pthread_mutex_lock(&relay_stream->lock);
+                               is_metadata_stream = relay_stream->is_metadata;
+                               pthread_mutex_unlock(&relay_stream->lock);
+
+                               if (is_metadata_stream) {
+                                       trace_has_metadata_stream = true;
+                                       break;
+                               }
                        }
 
-                       pthread_mutex_lock(&relay_stream->lock);
+                       relay_stream = nullptr;
+
                        /*
-                        * stream published is protected by the session lock.
+                        * If there is no metadata stream in this trace at the moment
+                        * and we never sent one to the viewer, skip the trace. We
+                        * accept that the viewer will not see this trace at all.
                         */
-                       if (!relay_stream->published) {
-                               goto next;
+                       if (!trace_has_metadata_stream &&
+                           !ctf_trace->metadata_stream_sent_to_viewer) {
+                               ctf_trace_put(ctf_trace);
+                               continue;
                        }
-                       viewer_stream = viewer_stream_get_by_id(relay_stream->stream_handle);
-                       if (!viewer_stream) {
-                               struct lttng_trace_chunk *viewer_stream_trace_chunk = nullptr;
 
-                               /*
-                                * Save that we sent the metadata stream to the
-                                * viewer. So that we know what trace the viewer
-                                * is aware of.
-                                */
-                               if (relay_stream->is_metadata) {
-                                       ctf_trace->metadata_stream_sent_to_viewer = true;
+                       cds_list_for_each_entry_rcu(
+                               relay_stream, &ctf_trace->stream_list, stream_node)
+                       {
+                               struct relay_viewer_stream *viewer_stream;
+
+                               if (!stream_get(relay_stream)) {
+                                       continue;
                                }
 
+                               pthread_mutex_lock(&relay_stream->lock);
                                /*
-                                * If a rotation is ongoing, use a copy of the
-                                * relay stream's chunk to ensure the stream
-                                * files exist.
-                                *
-                                * Otherwise, the viewer session's current trace
-                                * chunk can be used safely.
+                                * stream published is protected by the session lock.
                                 */
-                               if ((relay_stream->ongoing_rotation.is_set ||
-                                    session_has_ongoing_rotation(relay_session)) &&
-                                   relay_stream->trace_chunk) {
-                                       viewer_stream_trace_chunk =
-                                               lttng_trace_chunk_copy(relay_stream->trace_chunk);
-                                       if (!viewer_stream_trace_chunk) {
-                                               ret = -1;
-                                               ctf_trace_put(ctf_trace);
-                                               goto error_unlock;
+                               if (!relay_stream->published) {
+                                       goto next;
+                               }
+                               viewer_stream =
+                                       viewer_stream_get_by_id(relay_stream->stream_handle);
+                               if (!viewer_stream) {
+                                       struct lttng_trace_chunk *viewer_stream_trace_chunk =
+                                               nullptr;
+
+                                       /*
+                                        * Save that we sent the metadata stream to the
+                                        * viewer. So that we know what trace the viewer
+                                        * is aware of.
+                                        */
+                                       if (relay_stream->is_metadata) {
+                                               ctf_trace->metadata_stream_sent_to_viewer = true;
                                        }
-                               } else {
+
                                        /*
-                                        * Transition the viewer session into the newest trace chunk
-                                        * available.
+                                        * If a rotation is ongoing, use a copy of the
+                                        * relay stream's chunk to ensure the stream
+                                        * files exist.
+                                        *
+                                        * Otherwise, the viewer session's current trace
+                                        * chunk can be used safely.
                                         */
-                                       if (!lttng_trace_chunk_ids_equal(
-                                                   viewer_session->current_trace_chunk,
-                                                   relay_stream->trace_chunk)) {
-                                               ret = viewer_session_set_trace_chunk_copy(
-                                                       viewer_session, relay_stream->trace_chunk);
-                                               if (ret) {
+                                       if ((relay_stream->ongoing_rotation.is_set ||
+                                            session_has_ongoing_rotation(relay_session)) &&
+                                           relay_stream->trace_chunk) {
+                                               viewer_stream_trace_chunk = lttng_trace_chunk_copy(
+                                                       relay_stream->trace_chunk);
+                                               if (!viewer_stream_trace_chunk) {
                                                        ret = -1;
                                                        ctf_trace_put(ctf_trace);
                                                        goto error_unlock;
                                                }
-                                       }
-
-                                       if (relay_stream->trace_chunk) {
+                                       } else {
                                                /*
-                                                * If the corresponding relay
-                                                * stream's trace chunk is set,
-                                                * the viewer stream will be
-                                                * created under it.
-                                                *
-                                                * Note that a relay stream can
-                                                * have a NULL output trace
-                                                * chunk (for instance, after a
-                                                * clear against a stopped
-                                                * session).
+                                                * Transition the viewer session into the newest
+                                                * trace chunk available.
                                                 */
-                                               const bool reference_acquired =
-                                                       lttng_trace_chunk_get(
-                                                               viewer_session->current_trace_chunk);
+                                               if (!lttng_trace_chunk_ids_equal(
+                                                           viewer_session->current_trace_chunk,
+                                                           relay_stream->trace_chunk)) {
+                                                       ret = viewer_session_set_trace_chunk_copy(
+                                                               viewer_session,
+                                                               relay_stream->trace_chunk);
+                                                       if (ret) {
+                                                               ret = -1;
+                                                               ctf_trace_put(ctf_trace);
+                                                               goto error_unlock;
+                                                       }
+                                               }
 
-                                               LTTNG_ASSERT(reference_acquired);
-                                               viewer_stream_trace_chunk =
-                                                       viewer_session->current_trace_chunk;
+                                               if (relay_stream->trace_chunk) {
+                                                       /*
+                                                        * If the corresponding relay
+                                                        * stream's trace chunk is set,
+                                                        * the viewer stream will be
+                                                        * created under it.
+                                                        *
+                                                        * Note that a relay stream can
+                                                        * have a NULL output trace
+                                                        * chunk (for instance, after a
+                                                        * clear against a stopped
+                                                        * session).
+                                                        */
+                                                       const bool reference_acquired =
+                                                               lttng_trace_chunk_get(
+                                                                       viewer_session
+                                                                               ->current_trace_chunk);
+
+                                                       LTTNG_ASSERT(reference_acquired);
+                                                       viewer_stream_trace_chunk =
+                                                               viewer_session->current_trace_chunk;
+                                               }
                                        }
-                               }
 
-                               viewer_stream = viewer_stream_create(
-                                       relay_stream, viewer_stream_trace_chunk, seek_t);
-                               lttng_trace_chunk_put(viewer_stream_trace_chunk);
-                               viewer_stream_trace_chunk = nullptr;
-                               if (!viewer_stream) {
-                                       ret = -1;
-                                       ctf_trace_put(ctf_trace);
-                                       goto error_unlock;
-                               }
+                                       viewer_stream = viewer_stream_create(
+                                               relay_stream, viewer_stream_trace_chunk, seek_t);
+                                       lttng_trace_chunk_put(viewer_stream_trace_chunk);
+                                       viewer_stream_trace_chunk = nullptr;
+                                       if (!viewer_stream) {
+                                               ret = -1;
+                                               ctf_trace_put(ctf_trace);
+                                               goto error_unlock;
+                                       }
 
-                               if (nb_created) {
-                                       /* Update number of created stream counter. */
-                                       (*nb_created)++;
-                               }
-                               /*
-                                * Ensure a self-reference is preserved even
-                                * after we have put our local reference.
-                                */
-                               if (!viewer_stream_get(viewer_stream)) {
-                                       ERR("Unable to get self-reference on viewer stream, logic error.");
-                                       abort();
-                               }
-                       } else {
-                               if (!viewer_stream->sent_flag && nb_unsent) {
-                                       /* Update number of unsent stream counter. */
-                                       (*nb_unsent)++;
-                               }
-                       }
-                       /* Update number of total stream counter. */
-                       if (nb_total) {
-                               if (relay_stream->is_metadata) {
-                                       if (!relay_stream->closed ||
-                                           relay_stream->metadata_received >
-                                                   viewer_stream->metadata_sent) {
-                                               (*nb_total)++;
+                                       if (nb_created) {
+                                               /* Update number of created stream counter. */
+                                               (*nb_created)++;
+                                       }
+                                       /*
+                                        * Ensure a self-reference is preserved even
+                                        * after we have put our local reference.
+                                        */
+                                       if (!viewer_stream_get(viewer_stream)) {
+                                               ERR("Unable to get self-reference on viewer stream, logic error.");
+                                               abort();
                                        }
                                } else {
-                                       if (!relay_stream->closed ||
-                                           !(((int64_t) (relay_stream->prev_data_seq -
-                                                         relay_stream->last_net_seq_num)) >= 0)) {
-                                               (*nb_total)++;
+                                       if (!viewer_stream->sent_flag && nb_unsent) {
+                                               /* Update number of unsent stream counter. */
+                                               (*nb_unsent)++;
                                        }
                                }
+                               /* Update number of total stream counter. */
+                               if (nb_total) {
+                                       if (relay_stream->is_metadata) {
+                                               if (!relay_stream->closed ||
+                                                   relay_stream->metadata_received >
+                                                           viewer_stream->metadata_sent) {
+                                                       (*nb_total)++;
+                                               }
+                                       } else {
+                                               if (!relay_stream->closed ||
+                                                   !(((int64_t) (relay_stream->prev_data_seq -
+                                                                 relay_stream->last_net_seq_num)) >=
+                                                     0)) {
+                                                       (*nb_total)++;
+                                               }
+                                       }
+                               }
+                               /* Put local reference. */
+                               viewer_stream_put(viewer_stream);
+                       next:
+                               pthread_mutex_unlock(&relay_stream->lock);
+                               stream_put(relay_stream);
                        }
-                       /* Put local reference. */
-                       viewer_stream_put(viewer_stream);
-               next:
-                       pthread_mutex_unlock(&relay_stream->lock);
-                       stream_put(relay_stream);
+                       relay_stream = nullptr;
+                       ctf_trace_put(ctf_trace);
                }
-               relay_stream = nullptr;
-               ctf_trace_put(ctf_trace);
        }
 
        ret = 0;
 
 error_unlock:
-       rcu_read_unlock();
 
        if (relay_stream) {
                pthread_mutex_unlock(&relay_stream->lock);
@@ -1042,61 +1057,65 @@ static int viewer_list_sessions(struct relay_connection *conn)
                return -1;
        }
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (sessions_ht->ht, &iter.iter, session, session_n.node) {
-               struct lttng_viewer_session *send_session;
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-               health_code_update();
+               cds_lfht_for_each_entry (sessions_ht->ht, &iter.iter, session, session_n.node) {
+                       struct lttng_viewer_session *send_session;
 
-               pthread_mutex_lock(&session->lock);
-               if (session->connection_closed) {
-                       /* Skip closed session */
-                       goto next_session;
-               }
+                       health_code_update();
+
+                       pthread_mutex_lock(&session->lock);
+                       if (session->connection_closed) {
+                               /* Skip closed session */
+                               goto next_session;
+                       }
 
-               if (count >= buf_count) {
-                       struct lttng_viewer_session *newbuf;
-                       uint32_t new_buf_count = buf_count << 1;
+                       if (count >= buf_count) {
+                               struct lttng_viewer_session *newbuf;
+                               uint32_t new_buf_count = buf_count << 1;
 
-                       newbuf = (lttng_viewer_session *) realloc(
-                               send_session_buf, new_buf_count * sizeof(*send_session_buf));
-                       if (!newbuf) {
+                               newbuf = (lttng_viewer_session *) realloc(
+                                       send_session_buf,
+                                       new_buf_count * sizeof(*send_session_buf));
+                               if (!newbuf) {
+                                       ret = -1;
+                                       goto break_loop;
+                               }
+                               send_session_buf = newbuf;
+                               buf_count = new_buf_count;
+                       }
+                       send_session = &send_session_buf[count];
+                       if (lttng_strncpy(send_session->session_name,
+                                         session->session_name,
+                                         sizeof(send_session->session_name))) {
                                ret = -1;
                                goto break_loop;
                        }
-                       send_session_buf = newbuf;
-                       buf_count = new_buf_count;
-               }
-               send_session = &send_session_buf[count];
-               if (lttng_strncpy(send_session->session_name,
-                                 session->session_name,
-                                 sizeof(send_session->session_name))) {
-                       ret = -1;
-                       goto break_loop;
-               }
-               if (lttng_strncpy(send_session->hostname,
-                                 session->hostname,
-                                 sizeof(send_session->hostname))) {
-                       ret = -1;
-                       goto break_loop;
-               }
-               send_session->id = htobe64(session->id);
-               send_session->live_timer = htobe32(session->live_timer);
-               if (session->viewer_attached) {
-                       send_session->clients = htobe32(1);
-               } else {
-                       send_session->clients = htobe32(0);
+                       if (lttng_strncpy(send_session->hostname,
+                                         session->hostname,
+                                         sizeof(send_session->hostname))) {
+                               ret = -1;
+                               goto break_loop;
+                       }
+                       send_session->id = htobe64(session->id);
+                       send_session->live_timer = htobe32(session->live_timer);
+                       if (session->viewer_attached) {
+                               send_session->clients = htobe32(1);
+                       } else {
+                               send_session->clients = htobe32(0);
+                       }
+                       send_session->streams = htobe32(session->stream_count);
+                       count++;
+               next_session:
+                       pthread_mutex_unlock(&session->lock);
+                       continue;
+               break_loop:
+                       pthread_mutex_unlock(&session->lock);
+                       break;
                }
-               send_session->streams = htobe32(session->stream_count);
-               count++;
-       next_session:
-               pthread_mutex_unlock(&session->lock);
-               continue;
-       break_loop:
-               pthread_mutex_unlock(&session->lock);
-               break;
        }
-       rcu_read_unlock();
+
        if (ret < 0) {
                goto end_free;
        }
@@ -2717,12 +2736,15 @@ error:
        (void) fd_tracker_util_poll_clean(the_fd_tracker, &events);
 
        /* Cleanup remaining connection object. */
-       rcu_read_lock();
-       cds_lfht_for_each_entry (viewer_connections_ht->ht, &iter.iter, destroy_conn, sock_n.node) {
-               health_code_update();
-               connection_put(destroy_conn);
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (
+                       viewer_connections_ht->ht, &iter.iter, destroy_conn, sock_n.node) {
+                       health_code_update();
+                       connection_put(destroy_conn);
+               }
        }
-       rcu_read_unlock();
 error_poll_create:
        lttng_ht_destroy(viewer_connections_ht);
 viewer_connections_ht_error:
index 9793e277808975728d5f6965e12f8648de35a9ff..e3eb9d72ef9c59a68ddc561abe9a01bf41559d90 100644 (file)
@@ -46,6 +46,7 @@
 #include <common/sessiond-comm/relayd.hpp>
 #include <common/sessiond-comm/sessiond-comm.hpp>
 #include <common/string-utils/format.hpp>
+#include <common/urcu.hpp>
 #include <common/uri.hpp>
 #include <common/utils.hpp>
 
@@ -1517,12 +1518,11 @@ static void publish_connection_local_streams(struct relay_connection *conn)
         * session lock.
         */
        pthread_mutex_lock(&session->lock);
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_list_for_each_entry_rcu(stream, &session->recv_list, recv_node)
        {
                stream_publish(stream);
        }
-       rcu_read_unlock();
 
        /*
         * Inform the viewer that there are new streams in the session.
@@ -2224,21 +2224,25 @@ static int relay_begin_data_pending(const struct lttcomm_relayd_hdr *recv_hdr,
         * to iterate over all streams to find the one associated with
         * the right session_id.
         */
-       rcu_read_lock();
-       cds_lfht_for_each_entry (relay_streams_ht->ht, &iter.iter, stream, node.node) {
-               if (!stream_get(stream)) {
-                       continue;
-               }
-               if (stream->trace->session->id == msg.session_id) {
-                       pthread_mutex_lock(&stream->lock);
-                       stream->data_pending_check_done = false;
-                       pthread_mutex_unlock(&stream->lock);
-                       DBG("Set begin data pending flag to stream %" PRIu64,
-                           stream->stream_handle);
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (relay_streams_ht->ht, &iter.iter, stream, node.node) {
+                       if (!stream_get(stream)) {
+                               continue;
+                       }
+
+                       if (stream->trace->session->id == msg.session_id) {
+                               pthread_mutex_lock(&stream->lock);
+                               stream->data_pending_check_done = false;
+                               pthread_mutex_unlock(&stream->lock);
+                               DBG("Set begin data pending flag to stream %" PRIu64,
+                                   stream->stream_handle);
+                       }
+
+                       stream_put(stream);
                }
-               stream_put(stream);
        }
-       rcu_read_unlock();
 
        memset(&reply, 0, sizeof(reply));
        /* All good, send back reply. */
@@ -2299,43 +2303,49 @@ static int relay_end_data_pending(const struct lttcomm_relayd_hdr *recv_hdr __at
         * Iterate over all streams to see if the begin data pending
         * flag is set.
         */
-       rcu_read_lock();
-       cds_lfht_for_each_entry (relay_streams_ht->ht, &iter.iter, stream, node.node) {
-               if (!stream_get(stream)) {
-                       continue;
-               }
-               if (stream->trace->session->id != msg.session_id) {
-                       stream_put(stream);
-                       continue;
-               }
-               pthread_mutex_lock(&stream->lock);
-               if (!stream->data_pending_check_done) {
-                       uint64_t stream_seq;
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-                       if (session_streams_have_index(conn->session)) {
-                               /*
-                                * Ensure that both the index and stream data have been
-                                * flushed up to the requested point.
-                                */
-                               stream_seq =
-                                       std::min(stream->prev_data_seq, stream->prev_index_seq);
-                       } else {
-                               stream_seq = stream->prev_data_seq;
+               cds_lfht_for_each_entry (relay_streams_ht->ht, &iter.iter, stream, node.node) {
+                       if (!stream_get(stream)) {
+                               continue;
                        }
-                       if (!stream->closed ||
-                           !(((int64_t) (stream_seq - stream->last_net_seq_num)) >= 0)) {
-                               is_data_inflight = 1;
-                               DBG("Data is still in flight for stream %" PRIu64,
-                                   stream->stream_handle);
-                               pthread_mutex_unlock(&stream->lock);
+
+                       if (stream->trace->session->id != msg.session_id) {
                                stream_put(stream);
-                               break;
+                               continue;
+                       }
+
+                       pthread_mutex_lock(&stream->lock);
+                       if (!stream->data_pending_check_done) {
+                               uint64_t stream_seq;
+
+                               if (session_streams_have_index(conn->session)) {
+                                       /*
+                                        * Ensure that both the index and stream data have been
+                                        * flushed up to the requested point.
+                                        */
+                                       stream_seq = std::min(stream->prev_data_seq,
+                                                             stream->prev_index_seq);
+                               } else {
+                                       stream_seq = stream->prev_data_seq;
+                               }
+
+                               if (!stream->closed ||
+                                   !(((int64_t) (stream_seq - stream->last_net_seq_num)) >= 0)) {
+                                       is_data_inflight = 1;
+                                       DBG("Data is still in flight for stream %" PRIu64,
+                                           stream->stream_handle);
+                                       pthread_mutex_unlock(&stream->lock);
+                                       stream_put(stream);
+                                       break;
+                               }
                        }
+
+                       pthread_mutex_unlock(&stream->lock);
+                       stream_put(stream);
                }
-               pthread_mutex_unlock(&stream->lock);
-               stream_put(stream);
        }
-       rcu_read_unlock();
 
        memset(&reply, 0, sizeof(reply));
        /* All good, send back reply. */
@@ -4161,19 +4171,23 @@ restart:
 exit:
 error:
        /* Cleanup remaining connection object. */
-       rcu_read_lock();
-       cds_lfht_for_each_entry (relay_connections_ht->ht, &iter.iter, destroy_conn, sock_n.node) {
-               health_code_update();
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-               session_abort(destroy_conn->session);
+               cds_lfht_for_each_entry (
+                       relay_connections_ht->ht, &iter.iter, destroy_conn, sock_n.node) {
+                       health_code_update();
 
-               /*
-                * No need to grab another ref, because we own
-                * destroy_conn.
-                */
-               relay_thread_close_connection(&events, destroy_conn->sock->fd, destroy_conn);
+                       session_abort(destroy_conn->session);
+
+                       /*
+                        * No need to grab another ref, because we own
+                        * destroy_conn.
+                        */
+                       relay_thread_close_connection(
+                               &events, destroy_conn->sock->fd, destroy_conn);
+               }
        }
-       rcu_read_unlock();
 
        (void) fd_tracker_util_poll_clean(the_fd_tracker, &events);
 error_poll_create:
index 1430feef79cb50d9f2e55665e7c5ae5ac405748d..8ac4a510dc263f552e2b584256b328986c0e8aad 100644 (file)
@@ -20,6 +20,7 @@
 #include <common/defaults.hpp>
 #include <common/fd-tracker/utils.hpp>
 #include <common/time.hpp>
+#include <common/urcu.hpp>
 #include <common/utils.hpp>
 #include <common/uuid.hpp>
 
@@ -454,7 +455,7 @@ struct relay_session *session_get_by_id(uint64_t id)
        struct lttng_ht_node_u64 *node;
        struct lttng_ht_iter iter;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        lttng_ht_lookup(sessions_ht, &id, &iter);
        node = lttng_ht_iter_get_node_u64(&iter);
        if (!node) {
@@ -467,7 +468,6 @@ struct relay_session *session_get_by_id(uint64_t id)
                session = nullptr;
        }
 end:
-       rcu_read_unlock();
        return session;
 }
 
@@ -498,57 +498,60 @@ bool session_has_ongoing_rotation(const struct relay_session *session)
                goto end;
        }
 
-       rcu_read_lock();
        /*
         * Sample the 'ongoing_rotation' status of all relay sessions that
         * originate from the same session daemon session.
         */
-       cds_lfht_for_each_entry (sessions_ht->ht, &iter.iter, iterated_session, session_n.node) {
-               if (!session_get(iterated_session)) {
-                       continue;
-               }
-
-               if (session == iterated_session) {
-                       /* Skip this session. */
-                       goto next_session_no_unlock;
-               }
-
-               pthread_mutex_lock(&iterated_session->lock);
-
-               if (!iterated_session->id_sessiond.is_set) {
-                       /*
-                        * Session belongs to a peer that doesn't support
-                        * rotations.
-                        */
-                       goto next_session;
-               }
-
-               if (session->sessiond_uuid != iterated_session->sessiond_uuid) {
-                       /* Sessions do not originate from the same sessiond. */
-                       goto next_session;
-               }
-
-               if (LTTNG_OPTIONAL_GET(session->id_sessiond) !=
-                   LTTNG_OPTIONAL_GET(iterated_session->id_sessiond)) {
-                       /*
-                        * Sessions do not originate from the same sessiond
-                        * session.
-                        */
-                       goto next_session;
-               }
-
-               ongoing_rotation = iterated_session->ongoing_rotation;
-
-       next_session:
-               pthread_mutex_unlock(&iterated_session->lock);
-       next_session_no_unlock:
-               session_put(iterated_session);
-
-               if (ongoing_rotation) {
-                       break;
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (
+                       sessions_ht->ht, &iter.iter, iterated_session, session_n.node) {
+                       if (!session_get(iterated_session)) {
+                               continue;
+                       }
+
+                       if (session == iterated_session) {
+                               /* Skip this session. */
+                               goto next_session_no_unlock;
+                       }
+
+                       pthread_mutex_lock(&iterated_session->lock);
+
+                       if (!iterated_session->id_sessiond.is_set) {
+                               /*
+                                * Session belongs to a peer that doesn't support
+                                * rotations.
+                                */
+                               goto next_session;
+                       }
+
+                       if (session->sessiond_uuid != iterated_session->sessiond_uuid) {
+                               /* Sessions do not originate from the same sessiond. */
+                               goto next_session;
+                       }
+
+                       if (LTTNG_OPTIONAL_GET(session->id_sessiond) !=
+                           LTTNG_OPTIONAL_GET(iterated_session->id_sessiond)) {
+                               /*
+                                * Sessions do not originate from the same sessiond
+                                * session.
+                                */
+                               goto next_session;
+                       }
+
+                       ongoing_rotation = iterated_session->ongoing_rotation;
+
+               next_session:
+                       pthread_mutex_unlock(&iterated_session->lock);
+               next_session_no_unlock:
+                       session_put(iterated_session);
+
+                       if (ongoing_rotation) {
+                               break;
+                       }
                }
        }
-       rcu_read_unlock();
 
 end:
        return ongoing_rotation;
@@ -611,9 +614,8 @@ void session_put(struct relay_session *session)
        if (!session) {
                return;
        }
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        urcu_ref_put(&session->ref, session_release);
-       rcu_read_unlock();
 }
 
 int session_close(struct relay_session *session)
@@ -630,23 +632,28 @@ int session_close(struct relay_session *session)
        session->connection_closed = true;
        pthread_mutex_unlock(&session->lock);
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (session->ctf_traces_ht->ht, &iter.iter, trace, node.node) {
-               ret = ctf_trace_close(trace);
-               if (ret) {
-                       goto rcu_unlock;
-               }
-       }
-       cds_list_for_each_entry_rcu(stream, &session->recv_list, recv_node)
        {
-               /* Close streams which have not been published yet. */
-               try_stream_close(stream);
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (session->ctf_traces_ht->ht, &iter.iter, trace, node.node) {
+                       ret = ctf_trace_close(trace);
+                       if (ret) {
+                               goto end;
+                       }
+               }
+
+               cds_list_for_each_entry_rcu(stream, &session->recv_list, recv_node)
+               {
+                       /* Close streams which have not been published yet. */
+                       try_stream_close(stream);
+               }
        }
-rcu_unlock:
-       rcu_read_unlock();
+
+end:
        if (ret) {
                return ret;
        }
+
        /* Put self-reference from create. */
        session_put(session);
        return ret;
@@ -676,16 +683,18 @@ void print_sessions()
                return;
        }
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (sessions_ht->ht, &iter.iter, session, session_n.node) {
-               if (!session_get(session)) {
-                       continue;
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (sessions_ht->ht, &iter.iter, session, session_n.node) {
+                       if (!session_get(session)) {
+                               continue;
+                       }
+                       DBG("session %p refcount %ld session %" PRIu64,
+                           session,
+                           session->ref.refcount,
+                           session->id);
+                       session_put(session);
                }
-               DBG("session %p refcount %ld session %" PRIu64,
-                   session,
-                   session->ref.refcount,
-                   session->id);
-               session_put(session);
        }
-       rcu_read_unlock();
 }
index 1fb034053eef5bbbadd9b0940c07176a1ccae8bb..9d793c9bcf79315e722447fcb3c205f785a60c54 100644 (file)
@@ -14,6 +14,7 @@
 #include <common/macros.hpp>
 #include <common/string-utils/format.hpp>
 #include <common/trace-chunk-registry.hpp>
+#include <common/urcu.hpp>
 
 #include <inttypes.h>
 #include <stdio.h>
@@ -112,9 +113,8 @@ static void trace_chunk_registry_ht_element_release(struct urcu_ref *ref)
        DBG("Destroying trace chunk registry associated to sessiond {%s}", uuid_str);
        if (element->sessiond_trace_chunk_registry) {
                /* Unpublish. */
-               rcu_read_lock();
+               lttng::urcu::read_lock_guard read_lock;
                cds_lfht_del(element->sessiond_trace_chunk_registry->ht, &element->ht_node);
-               rcu_read_unlock();
                element->sessiond_trace_chunk_registry = nullptr;
        }
 
@@ -146,7 +146,7 @@ trace_chunk_registry_ht_element_find(struct sessiond_trace_chunk_registry *sessi
        struct cds_lfht_node *node;
        struct cds_lfht_iter iter;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_lfht_lookup(sessiond_registry->ht,
                        trace_chunk_registry_ht_key_hash(key),
                        trace_chunk_registry_ht_key_match,
@@ -164,7 +164,6 @@ trace_chunk_registry_ht_element_find(struct sessiond_trace_chunk_registry *sessi
                        element = nullptr;
                }
        }
-       rcu_read_unlock();
        return element;
 }
 
@@ -198,46 +197,51 @@ trace_chunk_registry_ht_element_create(struct sessiond_trace_chunk_registry *ses
        trace_chunk_registry = nullptr;
 
        /* Attempt to publish the new element. */
-       rcu_read_lock();
-       while (true) {
-               struct cds_lfht_node *published_node;
-               struct trace_chunk_registry_ht_element *published_element;
-
-               published_node =
-                       cds_lfht_add_unique(sessiond_registry->ht,
-                                           trace_chunk_registry_ht_key_hash(&new_element->key),
-                                           trace_chunk_registry_ht_key_match,
-                                           &new_element->key,
-                                           &new_element->ht_node);
-               if (published_node == &new_element->ht_node) {
-                       /* New element published successfully. */
-                       DBG("Created trace chunk registry for sessiond {%s}", uuid_str);
-                       new_element->sessiond_trace_chunk_registry = sessiond_registry;
-                       break;
-               }
-
+       {
                /*
-                * An equivalent element was published during the creation of
-                * this element. Attempt to acquire a reference to the one that
-                * was already published and release the reference to the copy
-                * we created if successful.
+                * Keep the RCU read lock held across all attempts
+                * purely for efficiency reasons.
                 */
-               published_element = lttng::utils::container_of(
-                       published_node, &trace_chunk_registry_ht_element::ht_node);
-               if (trace_chunk_registry_ht_element_get(published_element)) {
-                       DBG("Acquired reference to trace chunk registry of sessiond {%s}",
-                           uuid_str);
-                       trace_chunk_registry_ht_element_put(new_element);
-                       new_element = nullptr;
-                       break;
+               lttng::urcu::read_lock_guard read_lock;
+               while (true) {
+                       struct cds_lfht_node *published_node;
+                       struct trace_chunk_registry_ht_element *published_element;
+
+                       published_node = cds_lfht_add_unique(
+                               sessiond_registry->ht,
+                               trace_chunk_registry_ht_key_hash(&new_element->key),
+                               trace_chunk_registry_ht_key_match,
+                               &new_element->key,
+                               &new_element->ht_node);
+                       if (published_node == &new_element->ht_node) {
+                               /* New element published successfully. */
+                               DBG("Created trace chunk registry for sessiond {%s}", uuid_str);
+                               new_element->sessiond_trace_chunk_registry = sessiond_registry;
+                               break;
+                       }
+
+                       /*
+                        * An equivalent element was published during the creation of
+                        * this element. Attempt to acquire a reference to the one that
+                        * was already published and release the reference to the copy
+                        * we created if successful.
+                        */
+                       published_element = lttng::utils::container_of(
+                               published_node, &trace_chunk_registry_ht_element::ht_node);
+                       if (trace_chunk_registry_ht_element_get(published_element)) {
+                               DBG("Acquired reference to trace chunk registry of sessiond {%s}",
+                                   uuid_str);
+                               trace_chunk_registry_ht_element_put(new_element);
+                               new_element = nullptr;
+                               break;
+                       }
+                       /*
+                        * A reference to the previously published element could not
+                        * be acquired. Hence, retry to publish our copy of the
+                        * element.
+                        */
                }
-               /*
-                * A reference to the previously published element could not
-                * be acquired. Hence, retry to publish our copy of the
-                * element.
-                */
        }
-       rcu_read_unlock();
 end:
        if (ret < 0) {
                ERR("Failed to create trace chunk registry for session daemon {%s}", uuid_str);
index 60c90bc111a44975a6b7c255879132d77810fde9..94d5d6d177c6855a059dfba36333a24a57de9f36 100644 (file)
@@ -18,6 +18,7 @@
 #include <common/defaults.hpp>
 #include <common/fs-handle.hpp>
 #include <common/sessiond-comm/relayd.hpp>
+#include <common/urcu.hpp>
 #include <common/utils.hpp>
 
 #include <algorithm>
@@ -47,7 +48,7 @@ struct relay_stream *stream_get_by_id(uint64_t stream_id)
        struct lttng_ht_iter iter;
        struct relay_stream *stream = nullptr;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        lttng_ht_lookup(relay_streams_ht, &stream_id, &iter);
        node = lttng_ht_iter_get_node_u64(&iter);
        if (!node) {
@@ -59,7 +60,6 @@ struct relay_stream *stream_get_by_id(uint64_t stream_id)
                stream = nullptr;
        }
 end:
-       rcu_read_unlock();
        return stream;
 }
 
@@ -807,14 +807,13 @@ static void stream_release(struct urcu_ref *ref)
 
 void stream_put(struct relay_stream *stream)
 {
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        LTTNG_ASSERT(stream->ref.refcount != 0);
        /*
         * Wait until we have processed all the stream packets before
         * actually putting our last stream reference.
         */
        urcu_ref_put(&stream->ref, stream_release);
-       rcu_read_unlock();
 }
 
 int stream_set_pending_rotation(struct relay_stream *stream,
@@ -1315,18 +1314,20 @@ static void print_stream_indexes(struct relay_stream *stream)
        struct lttng_ht_iter iter;
        struct relay_index *index;
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (stream->indexes_ht->ht, &iter.iter, index, index_n.node) {
-               DBG("index %p net_seq_num %" PRIu64 " refcount %ld"
-                   " stream %" PRIu64 " trace %" PRIu64 " session %" PRIu64,
-                   index,
-                   index->index_n.key,
-                   stream->ref.refcount,
-                   index->stream->stream_handle,
-                   index->stream->trace->id,
-                   index->stream->trace->session->id);
-       }
-       rcu_read_unlock();
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (stream->indexes_ht->ht, &iter.iter, index, index_n.node) {
+                       DBG("index %p net_seq_num %" PRIu64 " refcount %ld"
+                           " stream %" PRIu64 " trace %" PRIu64 " session %" PRIu64,
+                           index,
+                           index->index_n.key,
+                           stream->ref.refcount,
+                           index->stream->stream_handle,
+                           index->stream->trace->id,
+                           index->stream->trace->session->id);
+               }
+       }
 }
 
 int stream_reset_file(struct relay_stream *stream)
@@ -1369,19 +1370,23 @@ void print_relay_streams()
                return;
        }
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (relay_streams_ht->ht, &iter.iter, stream, node.node) {
-               if (!stream_get(stream)) {
-                       continue;
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (relay_streams_ht->ht, &iter.iter, stream, node.node) {
+                       if (!stream_get(stream)) {
+                               continue;
+                       }
+
+                       DBG("stream %p refcount %ld stream %" PRIu64 " trace %" PRIu64
+                           " session %" PRIu64,
+                           stream,
+                           stream->ref.refcount,
+                           stream->stream_handle,
+                           stream->trace->id,
+                           stream->trace->session->id);
+                       print_stream_indexes(stream);
+                       stream_put(stream);
                }
-               DBG("stream %p refcount %ld stream %" PRIu64 " trace %" PRIu64 " session %" PRIu64,
-                   stream,
-                   stream->ref.refcount,
-                   stream->stream_handle,
-                   stream->trace->id,
-                   stream->trace->session->id);
-               print_stream_indexes(stream);
-               stream_put(stream);
        }
-       rcu_read_unlock();
 }
index 1181936b24f1bea293102efc8444640cd7d096cb..45fa2dcacafff947a43de2945c948bf994694cd1 100644 (file)
@@ -16,6 +16,7 @@
 #include "viewer-stream.hpp"
 
 #include <common/common.hpp>
+#include <common/urcu.hpp>
 
 #include <urcu/rculist.h>
 
@@ -149,23 +150,29 @@ void viewer_session_close_one_session(struct relay_viewer_session *vsession,
         * TODO: improvement: create more efficient list of
         * vstream per session.
         */
-       cds_lfht_for_each_entry (viewer_streams_ht->ht, &iter.iter, vstream, stream_n.node) {
-               if (!viewer_stream_get(vstream)) {
-                       continue;
-               }
-               if (vstream->stream->trace->session != session) {
+       {
+               lttng::urcu::read_lock_guard read_guard;
+
+               cds_lfht_for_each_entry (
+                       viewer_streams_ht->ht, &iter.iter, vstream, stream_n.node) {
+                       if (!viewer_stream_get(vstream)) {
+                               continue;
+                       }
+                       if (vstream->stream->trace->session != session) {
+                               viewer_stream_put(vstream);
+                               continue;
+                       }
+                       /* Put local reference. */
+                       viewer_stream_put(vstream);
+                       /*
+                        * We have reached one of the viewer stream's lifetime
+                        * end condition. This "put" will cause the proper
+                        * teardown of the viewer stream.
+                        */
                        viewer_stream_put(vstream);
-                       continue;
                }
-               /* Put local reference. */
-               viewer_stream_put(vstream);
-               /*
-                * We have reached one of the viewer stream's lifetime
-                * end condition. This "put" will cause the proper
-                * teardown of the viewer stream.
-                */
-               viewer_stream_put(vstream);
        }
+
        lttng_trace_chunk_put(vsession->current_trace_chunk);
        vsession->current_trace_chunk = nullptr;
        viewer_session_detach(vsession, session);
@@ -175,12 +182,14 @@ void viewer_session_close(struct relay_viewer_session *vsession)
 {
        struct relay_session *session;
 
-       rcu_read_lock();
-       cds_list_for_each_entry_rcu(session, &vsession->session_list, viewer_session_node)
        {
-               viewer_session_close_one_session(vsession, session);
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_list_for_each_entry_rcu(session, &vsession->session_list, viewer_session_node)
+               {
+                       viewer_session_close_one_session(vsession, session);
+               }
        }
-       rcu_read_unlock();
 }
 
 /*
@@ -199,16 +208,18 @@ int viewer_session_is_attached(struct relay_viewer_session *vsession, struct rel
        if (!session->viewer_attached) {
                goto end;
        }
-       rcu_read_lock();
-       cds_list_for_each_entry_rcu(iter, &vsession->session_list, viewer_session_node)
+
        {
-               if (session == iter) {
-                       found = 1;
-                       goto end_rcu_unlock;
+               lttng::urcu::read_lock_guard read_lock;
+               cds_list_for_each_entry_rcu(iter, &vsession->session_list, viewer_session_node)
+               {
+                       if (session == iter) {
+                               found = 1;
+                               break;
+                       }
                }
        }
-end_rcu_unlock:
-       rcu_read_unlock();
+
 end:
        pthread_mutex_unlock(&session->lock);
        return found;
index 8bf0475045c4bb3a48a3b2040b24363994024d5c..9ca54e67ea49b7be778cf054579ca54004289bcb 100644 (file)
@@ -14,6 +14,7 @@
 #include <common/common.hpp>
 #include <common/compat/string.hpp>
 #include <common/index/index.hpp>
+#include <common/urcu.hpp>
 #include <common/utils.hpp>
 
 #include <algorithm>
@@ -262,7 +263,7 @@ struct relay_viewer_stream *viewer_stream_get_by_id(uint64_t id)
        struct lttng_ht_iter iter;
        struct relay_viewer_stream *vstream = nullptr;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        lttng_ht_lookup(viewer_streams_ht, &id, &iter);
        node = lttng_ht_iter_get_node_u64(&iter);
        if (!node) {
@@ -274,15 +275,13 @@ struct relay_viewer_stream *viewer_stream_get_by_id(uint64_t id)
                vstream = nullptr;
        }
 end:
-       rcu_read_unlock();
        return vstream;
 }
 
 void viewer_stream_put(struct relay_viewer_stream *vstream)
 {
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        urcu_ref_put(&vstream->ref, viewer_stream_release);
-       rcu_read_unlock();
 }
 
 void viewer_stream_close_files(struct relay_viewer_stream *vstream)
@@ -378,18 +377,22 @@ void print_viewer_streams()
                return;
        }
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (viewer_streams_ht->ht, &iter.iter, vstream, stream_n.node) {
-               if (!viewer_stream_get(vstream)) {
-                       continue;
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (
+                       viewer_streams_ht->ht, &iter.iter, vstream, stream_n.node) {
+                       if (!viewer_stream_get(vstream)) {
+                               continue;
+                       }
+                       DBG("vstream %p refcount %ld stream %" PRIu64 " trace %" PRIu64
+                           " session %" PRIu64,
+                           vstream,
+                           vstream->ref.refcount,
+                           vstream->stream->stream_handle,
+                           vstream->stream->trace->id,
+                           vstream->stream->trace->session->id);
+                       viewer_stream_put(vstream);
                }
-               DBG("vstream %p refcount %ld stream %" PRIu64 " trace %" PRIu64 " session %" PRIu64,
-                   vstream,
-                   vstream->ref.refcount,
-                   vstream->stream->stream_handle,
-                   vstream->stream->trace->id,
-                   vstream->stream->trace->session->id);
-               viewer_stream_put(vstream);
        }
-       rcu_read_unlock();
 }
index 4c1e748df4d1c2078b626bb704707fba583bc697..31683547b1340260579fa4016c29b6130b2080f5 100644 (file)
@@ -16,6 +16,7 @@
 #include <common/dynamic-array.hpp>
 #include <common/macros.hpp>
 #include <common/optional.hpp>
+#include <common/urcu.hpp>
 
 #include <lttng/action/action-internal.hpp>
 #include <lttng/action/list-internal.hpp>
@@ -290,6 +291,8 @@ static int action_executor_start_session_handler(struct action_executor *executo
        enum lttng_error_code cmd_ret;
        struct lttng_action *action = item->action;
 
+       lttng::urcu::read_lock_guard read_lock;
+
        action_status = lttng_action_start_session_get_session_name(action, &session_name);
        if (action_status != LTTNG_ACTION_STATUS_OK) {
                ERR("Failed to get session name from `%s` action", get_action_name(action));
@@ -311,7 +314,6 @@ static int action_executor_start_session_handler(struct action_executor *executo
        }
 
        session_lock_list();
-       rcu_read_lock();
        session = session_find_by_id(LTTNG_OPTIONAL_GET(item->context.session_id));
        if (!session) {
                DBG("Failed to find session `%s` by name while executing `%s` action of trigger `%s`",
@@ -362,7 +364,6 @@ error_unlock_session:
        session_unlock(session);
        session_put(session);
 error_unlock_list:
-       rcu_read_unlock();
        session_unlock_list();
 end:
        return ret;
@@ -380,6 +381,8 @@ static int action_executor_stop_session_handler(struct action_executor *executor
        enum lttng_error_code cmd_ret;
        struct lttng_action *action = item->action;
 
+       lttng::urcu::read_lock_guard read_lock;
+
        action_status = lttng_action_stop_session_get_session_name(action, &session_name);
        if (action_status != LTTNG_ACTION_STATUS_OK) {
                ERR("Failed to get session name from `%s` action", get_action_name(action));
@@ -401,7 +404,6 @@ static int action_executor_stop_session_handler(struct action_executor *executor
        }
 
        session_lock_list();
-       rcu_read_lock();
        session = session_find_by_id(LTTNG_OPTIONAL_GET(item->context.session_id));
        if (!session) {
                DBG("Failed to find session `%s` by name while executing `%s` action of trigger `%s`",
@@ -452,7 +454,6 @@ error_unlock_session:
        session_unlock(session);
        session_put(session);
 error_unlock_list:
-       rcu_read_unlock();
        session_unlock_list();
 end:
        return ret;
@@ -470,6 +471,8 @@ static int action_executor_rotate_session_handler(struct action_executor *execut
        enum lttng_error_code cmd_ret;
        struct lttng_action *action = item->action;
 
+       lttng::urcu::read_lock_guard read_lock;
+
        action_status = lttng_action_rotate_session_get_session_name(action, &session_name);
        if (action_status != LTTNG_ACTION_STATUS_OK) {
                ERR("Failed to get session name from `%s` action", get_action_name(action));
@@ -491,7 +494,6 @@ static int action_executor_rotate_session_handler(struct action_executor *execut
        }
 
        session_lock_list();
-       rcu_read_lock();
        session = session_find_by_id(LTTNG_OPTIONAL_GET(item->context.session_id));
        if (!session) {
                DBG("Failed to find session `%s` by name while executing `%s` action of trigger `%s`",
@@ -550,7 +552,6 @@ error_unlock_session:
        session_unlock(session);
        session_put(session);
 error_unlock_list:
-       rcu_read_unlock();
        session_unlock_list();
 end:
        return ret;
@@ -572,6 +573,8 @@ static int action_executor_snapshot_session_handler(struct action_executor *exec
 
        default_snapshot_output.max_size = UINT64_MAX;
 
+       lttng::urcu::read_lock_guard read_lock;
+
        /*
         * Validate if, at the moment the action was queued, the target session
         * existed. If not, skip the action altogether.
@@ -599,7 +602,6 @@ static int action_executor_snapshot_session_handler(struct action_executor *exec
        }
 
        session_lock_list();
-       rcu_read_lock();
        session = session_find_by_id(LTTNG_OPTIONAL_GET(item->context.session_id));
        if (!session) {
                DBG("Failed to find session `%s` by name while executing `%s` action of trigger `%s`",
@@ -645,7 +647,6 @@ error_unlock_session:
        session_unlock(session);
        session_put(session);
 error_unlock_list:
-       rcu_read_unlock();
        session_unlock_list();
 end:
        return ret;
index 6abcb8fb34669587d8e091d47fb24b8f05bc5267..f641ed78e12aecf0b16a67680b1ac54f0dc3ded2 100644 (file)
@@ -18,6 +18,7 @@
 #include <common/common.hpp>
 #include <common/compat/endian.hpp>
 #include <common/sessiond-comm/sessiond-comm.hpp>
+#include <common/urcu.hpp>
 #include <common/uri.hpp>
 #include <common/utils.hpp>
 
@@ -73,27 +74,28 @@ static void update_agent_app(const struct agent_app *app)
                if (session->ust_session) {
                        const struct agent *agt;
 
-                       rcu_read_lock();
+                       lttng::urcu::read_lock_guard read_lock;
                        agt = trace_ust_find_agent(session->ust_session, app->domain);
                        if (agt) {
                                agent_update(agt, app);
                        }
-                       rcu_read_unlock();
                }
                session_unlock(session);
                session_put(session);
        }
 
-       rcu_read_lock();
-       /*
-        * We are protected against the addition of new events by the session
-        * list lock being held.
-        */
-       cds_lfht_for_each_entry (
-               the_trigger_agents_ht_by_domain->ht, &iter.iter, trigger_agent, node.node) {
-               agent_update(trigger_agent, app);
+       {
+               /*
+                * We are protected against the addition of new events by the session
+                * list lock being held.
+                */
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (
+                       the_trigger_agents_ht_by_domain->ht, &iter.iter, trigger_agent, node.node) {
+                       agent_update(trigger_agent, app);
+               }
        }
-       rcu_read_unlock();
 }
 
 /*
index 13bb268bc0fd0109a95785b0f5844539b7ccc569..210ec9b0c6f2f997cab78ad26c6165192ea18360 100644 (file)
@@ -15,6 +15,7 @@
 #include <common/common.hpp>
 #include <common/compat/endian.hpp>
 #include <common/sessiond-comm/agent.hpp>
+#include <common/urcu.hpp>
 
 #include <lttng/condition/condition.h>
 #include <lttng/condition/event-rule-matches.h>
@@ -682,17 +683,20 @@ int agent_enable_event(struct agent_event *event, enum lttng_domain_type domain)
 
        LTTNG_ASSERT(event);
 
-       rcu_read_lock();
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-       cds_lfht_for_each_entry (the_agent_apps_ht_by_sock->ht, &iter.iter, app, node.node) {
-               if (app->domain != domain) {
-                       continue;
-               }
+               cds_lfht_for_each_entry (
+                       the_agent_apps_ht_by_sock->ht, &iter.iter, app, node.node) {
+                       if (app->domain != domain) {
+                               continue;
+                       }
 
-               /* Enable event on agent application through TCP socket. */
-               ret = enable_event(app, event);
-               if (ret != LTTNG_OK) {
-                       goto error;
+                       /* Enable event on agent application through TCP socket. */
+                       ret = enable_event(app, event);
+                       if (ret != LTTNG_OK) {
+                               goto error;
+                       }
                }
        }
 
@@ -700,7 +704,6 @@ int agent_enable_event(struct agent_event *event, enum lttng_domain_type domain)
        ret = LTTNG_OK;
 
 error:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -753,33 +756,35 @@ int agent_enable_context(const struct lttng_event_context *ctx, enum lttng_domai
                goto error;
        }
 
-       rcu_read_lock();
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-       cds_lfht_for_each_entry (the_agent_apps_ht_by_sock->ht, &iter.iter, app, node.node) {
-               struct agent_app_ctx *agent_ctx;
+               cds_lfht_for_each_entry (
+                       the_agent_apps_ht_by_sock->ht, &iter.iter, app, node.node) {
+                       struct agent_app_ctx *agent_ctx;
 
-               if (app->domain != domain) {
-                       continue;
-               }
+                       if (app->domain != domain) {
+                               continue;
+                       }
 
-               agent_ctx = create_app_ctx(ctx);
-               if (!agent_ctx) {
-                       ret = LTTNG_ERR_NOMEM;
-                       goto error_unlock;
-               }
+                       agent_ctx = create_app_ctx(ctx);
+                       if (!agent_ctx) {
+                               ret = LTTNG_ERR_NOMEM;
+                               goto error_unlock;
+                       }
 
-               /* Enable event on agent application through TCP socket. */
-               ret = app_context_op(app, agent_ctx, AGENT_CMD_APP_CTX_ENABLE);
-               destroy_app_ctx(agent_ctx);
-               if (ret != LTTNG_OK) {
-                       goto error_unlock;
+                       /* Enable context on agent application through TCP socket. */
+                       ret = app_context_op(app, agent_ctx, AGENT_CMD_APP_CTX_ENABLE);
+                       destroy_app_ctx(agent_ctx);
+                       if (ret != LTTNG_OK) {
+                               goto error_unlock;
+                       }
                }
        }
 
        ret = LTTNG_OK;
 
 error_unlock:
-       rcu_read_unlock();
 error:
        return ret;
 }
@@ -811,17 +816,20 @@ int agent_disable_event(struct agent_event *event, enum lttng_domain_type domain
                goto end;
        }
 
-       rcu_read_lock();
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-       cds_lfht_for_each_entry (the_agent_apps_ht_by_sock->ht, &iter.iter, app, node.node) {
-               if (app->domain != domain) {
-                       continue;
-               }
+               cds_lfht_for_each_entry (
+                       the_agent_apps_ht_by_sock->ht, &iter.iter, app, node.node) {
+                       if (app->domain != domain) {
+                               continue;
+                       }
 
-               /* Enable event on agent application through TCP socket. */
-               ret = disable_event(app, event);
-               if (ret != LTTNG_OK) {
-                       goto error;
+                       /* Disable event on agent application through TCP socket. */
+                       ret = disable_event(app, event);
+                       if (ret != LTTNG_OK) {
+                               goto error;
+                       }
                }
        }
 
@@ -829,7 +837,6 @@ int agent_disable_event(struct agent_event *event, enum lttng_domain_type domain
        LTTNG_ASSERT(!AGENT_EVENT_IS_ENABLED(event));
 
 error:
-       rcu_read_unlock();
 end:
        return ret;
 }
@@ -847,21 +854,24 @@ static int disable_context(struct agent_app_ctx *ctx, enum lttng_domain_type dom
        struct lttng_ht_iter iter;
 
        LTTNG_ASSERT(ctx);
-
-       rcu_read_lock();
        DBG2("Disabling agent application context %s:%s", ctx->provider_name, ctx->ctx_name);
-       cds_lfht_for_each_entry (the_agent_apps_ht_by_sock->ht, &iter.iter, app, node.node) {
-               if (app->domain != domain) {
-                       continue;
-               }
 
-               ret = app_context_op(app, ctx, AGENT_CMD_APP_CTX_DISABLE);
-               if (ret != LTTNG_OK) {
-                       goto end;
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (
+                       the_agent_apps_ht_by_sock->ht, &iter.iter, app, node.node) {
+                       if (app->domain != domain) {
+                               continue;
+                       }
+
+                       ret = app_context_op(app, ctx, AGENT_CMD_APP_CTX_DISABLE);
+                       if (ret != LTTNG_OK) {
+                               goto end;
+                       }
                }
        }
 end:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -891,58 +901,60 @@ int agent_list_events(struct lttng_event **events, enum lttng_domain_type domain
                goto error;
        }
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (the_agent_apps_ht_by_sock->ht, &iter.iter, app, node.node) {
-               ssize_t nb_ev;
-               struct lttng_event *agent_events;
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-               /* Skip domain not asked by the list. */
-               if (app->domain != domain) {
-                       continue;
-               }
+               cds_lfht_for_each_entry (
+                       the_agent_apps_ht_by_sock->ht, &iter.iter, app, node.node) {
+                       ssize_t nb_ev;
+                       struct lttng_event *agent_events;
 
-               nb_ev = list_events(app, &agent_events);
-               if (nb_ev < 0) {
-                       ret = nb_ev;
-                       goto error_unlock;
-               }
+                       /* Skip domain not asked by the list. */
+                       if (app->domain != domain) {
+                               continue;
+                       }
 
-               if (count + nb_ev > nbmem) {
-                       /* In case the realloc fails, we free the memory */
-                       struct lttng_event *new_tmp_events;
-                       size_t new_nbmem;
-
-                       new_nbmem = std::max(count + nb_ev, nbmem << 1);
-                       DBG2("Reallocating agent event list from %zu to %zu entries",
-                            nbmem,
-                            new_nbmem);
-                       new_tmp_events = (lttng_event *) realloc(
-                               tmp_events, new_nbmem * sizeof(*new_tmp_events));
-                       if (!new_tmp_events) {
-                               PERROR("realloc agent events");
-                               ret = -ENOMEM;
-                               free(agent_events);
-                               goto error_unlock;
+                       nb_ev = list_events(app, &agent_events);
+                       if (nb_ev < 0) {
+                               ret = nb_ev;
+                               goto error;
+                       }
+
+                       if (count + nb_ev > nbmem) {
+                               /* In case the realloc fails, we free the memory */
+                               struct lttng_event *new_tmp_events;
+                               size_t new_nbmem;
+
+                               new_nbmem = std::max(count + nb_ev, nbmem << 1);
+                               DBG2("Reallocating agent event list from %zu to %zu entries",
+                                    nbmem,
+                                    new_nbmem);
+                               new_tmp_events = (lttng_event *) realloc(
+                                       tmp_events, new_nbmem * sizeof(*new_tmp_events));
+                               if (!new_tmp_events) {
+                                       PERROR("realloc agent events");
+                                       ret = -ENOMEM;
+                                       free(agent_events);
+                                       goto error;
+                               }
+
+                               /* Zero the new memory */
+                               memset(new_tmp_events + nbmem,
+                                      0,
+                                      (new_nbmem - nbmem) * sizeof(*new_tmp_events));
+                               nbmem = new_nbmem;
+                               tmp_events = new_tmp_events;
                        }
-                       /* Zero the new memory */
-                       memset(new_tmp_events + nbmem,
-                              0,
-                              (new_nbmem - nbmem) * sizeof(*new_tmp_events));
-                       nbmem = new_nbmem;
-                       tmp_events = new_tmp_events;
+                       memcpy(tmp_events + count, agent_events, nb_ev * sizeof(*tmp_events));
+                       free(agent_events);
+                       count += nb_ev;
                }
-               memcpy(tmp_events + count, agent_events, nb_ev * sizeof(*tmp_events));
-               free(agent_events);
-               count += nb_ev;
        }
-       rcu_read_unlock();
 
        ret = count;
        *events = tmp_events;
        return ret;
 
-error_unlock:
-       rcu_read_unlock();
 error:
        free(tmp_events);
        return ret;
@@ -1411,32 +1423,35 @@ void agent_destroy(struct agent *agt)
 
        DBG3("Agent destroy");
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (agt->events->ht, &iter.iter, node, node) {
-               int ret;
-               struct agent_event *event;
-
-               /*
-                * When destroying an event, we have to try to disable it on the
-                * agent side so the event stops generating data. The return
-                * value is not important since we have to continue anyway
-                * destroying the object.
-                */
-               event = lttng::utils::container_of(node, &agent_event::node);
-               (void) agent_disable_event(event, agt->domain);
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (agt->events->ht, &iter.iter, node, node) {
+                       int ret;
+                       struct agent_event *event;
+
+                       /*
+                        * When destroying an event, we have to try to disable it on the
+                        * agent side so the event stops generating data. The return
+                        * value is not important since we have to continue anyway
+                        * destroying the object.
+                        */
+                       event = lttng::utils::container_of(node, &agent_event::node);
+                       (void) agent_disable_event(event, agt->domain);
+
+                       ret = lttng_ht_del(agt->events, &iter);
+                       LTTNG_ASSERT(!ret);
+                       call_rcu(&node->head, destroy_event_agent_rcu);
+               }
 
-               ret = lttng_ht_del(agt->events, &iter);
-               LTTNG_ASSERT(!ret);
-               call_rcu(&node->head, destroy_event_agent_rcu);
+               cds_list_for_each_entry_rcu(ctx, &agt->app_ctx_list, list_node)
+               {
+                       (void) disable_context(ctx, agt->domain);
+                       cds_list_del(&ctx->list_node);
+                       call_rcu(&ctx->rcu_node, destroy_app_ctx_rcu);
+               }
        }
 
-       cds_list_for_each_entry_rcu(ctx, &agt->app_ctx_list, list_node)
-       {
-               (void) disable_context(ctx, agt->domain);
-               cds_list_del(&ctx->list_node);
-               call_rcu(&ctx->rcu_node, destroy_app_ctx_rcu);
-       }
-       rcu_read_unlock();
        lttng_ht_destroy(agt->events);
        free(agt);
 }
@@ -1464,7 +1479,7 @@ void agent_destroy_app_by_sock(int sock)
         * happen. The hash table deletion is ONLY done through this call when the
         * main sessiond thread is torn down.
         */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        app = agent_find_app_by_sock(sock);
        LTTNG_ASSERT(app);
 
@@ -1473,7 +1488,6 @@ void agent_destroy_app_by_sock(int sock)
 
        /* The application is freed in a RCU call but the socket is closed here. */
        agent_destroy_app(app);
-       rcu_read_unlock();
 }
 
 /*
@@ -1487,14 +1501,17 @@ void agent_app_ht_clean()
        if (!the_agent_apps_ht_by_sock) {
                return;
        }
-       rcu_read_lock();
-       cds_lfht_for_each_entry (the_agent_apps_ht_by_sock->ht, &iter.iter, node, node) {
-               struct agent_app *app;
 
-               app = lttng::utils::container_of(node, &agent_app::node);
-               agent_destroy_app_by_sock(app->sock->fd);
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (the_agent_apps_ht_by_sock->ht, &iter.iter, node, node) {
+                       struct agent_app *app;
+
+                       app = lttng::utils::container_of(node, &agent_app::node);
+                       agent_destroy_app_by_sock(app->sock->fd);
+               }
        }
-       rcu_read_unlock();
 
        lttng_ht_destroy(the_agent_apps_ht_by_sock);
 }
@@ -1517,43 +1534,43 @@ void agent_update(const struct agent *agt, const struct agent_app *app)
 
        DBG("Agent updating app: pid = %ld", (long) app->pid);
 
-       rcu_read_lock();
        /*
         * We are in the registration path thus if the application is gone,
         * there is a serious code flow error.
         */
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-       cds_lfht_for_each_entry (agt->events->ht, &iter.iter, event, node.node) {
-               /* Skip event if disabled. */
-               if (!AGENT_EVENT_IS_ENABLED(event)) {
-                       continue;
-               }
+               cds_lfht_for_each_entry (agt->events->ht, &iter.iter, event, node.node) {
+                       /* Skip event if disabled. */
+                       if (!AGENT_EVENT_IS_ENABLED(event)) {
+                               continue;
+                       }
 
-               ret = enable_event(app, event);
-               if (ret != LTTNG_OK) {
-                       DBG2("Agent update unable to enable event %s on app pid: %d sock %d",
-                            event->name,
-                            app->pid,
-                            app->sock->fd);
-                       /* Let's try the others here and don't assume the app is dead. */
-                       continue;
+                       ret = enable_event(app, event);
+                       if (ret != LTTNG_OK) {
+                               DBG2("Agent update unable to enable event %s on app pid: %d sock %d",
+                                    event->name,
+                                    app->pid,
+                                    app->sock->fd);
+                               /* Let's try the others here and don't assume the app is dead. */
+                               continue;
+                       }
                }
-       }
 
-       cds_list_for_each_entry_rcu(ctx, &agt->app_ctx_list, list_node)
-       {
-               ret = app_context_op(app, ctx, AGENT_CMD_APP_CTX_ENABLE);
-               if (ret != LTTNG_OK) {
-                       DBG2("Agent update unable to add application context %s:%s on app pid: %d sock %d",
-                            ctx->provider_name,
-                            ctx->ctx_name,
-                            app->pid,
-                            app->sock->fd);
-                       continue;
+               cds_list_for_each_entry_rcu(ctx, &agt->app_ctx_list, list_node)
+               {
+                       ret = app_context_op(app, ctx, AGENT_CMD_APP_CTX_ENABLE);
+                       if (ret != LTTNG_OK) {
+                               DBG2("Agent update unable to add application context %s:%s on app pid: %d sock %d",
+                                    ctx->provider_name,
+                                    ctx->ctx_name,
+                                    app->pid,
+                                    app->sock->fd);
+                               continue;
+                       }
                }
        }
-
-       rcu_read_unlock();
 }
 
 /*
@@ -1578,16 +1595,19 @@ void agent_by_event_notifier_domain_ht_destroy()
                return;
        }
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (the_trigger_agents_ht_by_domain->ht, &iter.iter, node, node) {
-               struct agent *agent = lttng::utils::container_of(node, &agent::node);
-               const int ret = lttng_ht_del(the_trigger_agents_ht_by_domain, &iter);
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (
+                       the_trigger_agents_ht_by_domain->ht, &iter.iter, node, node) {
+                       struct agent *agent = lttng::utils::container_of(node, &agent::node);
+                       const int ret = lttng_ht_del(the_trigger_agents_ht_by_domain, &iter);
 
-               LTTNG_ASSERT(ret == 0);
-               agent_destroy(agent);
+                       LTTNG_ASSERT(ret == 0);
+                       agent_destroy(agent);
+               }
        }
 
-       rcu_read_unlock();
        lttng_ht_destroy(the_trigger_agents_ht_by_domain);
 }
 
index 39ca20279059c3b71522dec1b40c34423d52f54d..b840a7fa91ef0b69931a0f8c2f59c97389d31073 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <common/common.hpp>
 #include <common/hashtable/utils.hpp>
+#include <common/urcu.hpp>
 
 #include <inttypes.h>
 
@@ -171,11 +172,10 @@ void buffer_reg_uid_add(struct buffer_reg_uid *reg)
        DBG3("Buffer registry per UID adding to global registry with id: %" PRIu64,
             reg->session_id);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        nodep = cds_lfht_add_unique(
                ht->ht, ht->hash_fct(reg, lttng_ht_seed), ht->match_fct, reg, &reg->node.node);
        LTTNG_ASSERT(nodep == &reg->node.node);
-       rcu_read_unlock();
 }
 
 /*
@@ -298,9 +298,8 @@ void buffer_reg_pid_add(struct buffer_reg_pid *reg)
        DBG3("Buffer registry per PID adding to global registry with id: %" PRIu64,
             reg->session_id);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        lttng_ht_add_unique_u64(buffer_registry_pid, &reg->node);
-       rcu_read_unlock();
 }
 
 /*
@@ -344,25 +343,26 @@ int buffer_reg_uid_consumer_channel_key(struct cds_list_head *buffer_reg_uid_lis
        struct buffer_reg_channel *reg_chan;
        int ret = -1;
 
-       rcu_read_lock();
-       /*
-        * For the per-uid registry, we have to iterate since we don't have the
-        * uid and bitness key.
-        */
-       cds_list_for_each_entry (uid_reg, buffer_reg_uid_list, lnode) {
-               session_reg = uid_reg->registry;
-               cds_lfht_for_each_entry (
-                       session_reg->channels->ht, &iter.iter, reg_chan, node.node) {
-                       if (reg_chan->key == chan_key) {
-                               *consumer_chan_key = reg_chan->consumer_key;
-                               ret = 0;
-                               goto end;
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               /*
+                * For the per-uid registry, we have to iterate since we don't have the
+                * uid and bitness key.
+                */
+               cds_list_for_each_entry (uid_reg, buffer_reg_uid_list, lnode) {
+                       session_reg = uid_reg->registry;
+                       cds_lfht_for_each_entry (
+                               session_reg->channels->ht, &iter.iter, reg_chan, node.node) {
+                               if (reg_chan->key == chan_key) {
+                                       *consumer_chan_key = reg_chan->consumer_key;
+                                       ret = 0;
+                                       goto end;
+                               }
                        }
                }
        }
-
 end:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -443,9 +443,8 @@ void buffer_reg_channel_add(struct buffer_reg_session *session, struct buffer_re
        LTTNG_ASSERT(session);
        LTTNG_ASSERT(channel);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        lttng_ht_add_unique_u64(session->channels, &channel->node);
-       rcu_read_unlock();
 }
 
 /*
@@ -591,13 +590,15 @@ static void buffer_reg_session_destroy(struct buffer_reg_session *regp,
        DBG3("Buffer registry session destroy");
 
        /* Destroy all channels. */
-       rcu_read_lock();
-       cds_lfht_for_each_entry (regp->channels->ht, &iter.iter, reg_chan, node.node) {
-               ret = lttng_ht_del(regp->channels, &iter);
-               LTTNG_ASSERT(!ret);
-               buffer_reg_channel_destroy(reg_chan, domain);
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (regp->channels->ht, &iter.iter, reg_chan, node.node) {
+                       ret = lttng_ht_del(regp->channels, &iter);
+                       LTTNG_ASSERT(!ret);
+                       buffer_reg_channel_destroy(reg_chan, domain);
+               }
        }
-       rcu_read_unlock();
 
        lttng_ht_destroy(regp->channels);
 
@@ -623,11 +624,10 @@ void buffer_reg_uid_remove(struct buffer_reg_uid *regp)
 
        LTTNG_ASSERT(regp);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        iter.iter.node = &regp->node.node;
        ret = lttng_ht_del(buffer_registry_uid, &iter);
        LTTNG_ASSERT(!ret);
-       rcu_read_unlock();
 }
 
 static void rcu_free_buffer_reg_uid(struct rcu_head *head)
@@ -670,29 +670,28 @@ void buffer_reg_uid_destroy(struct buffer_reg_uid *regp, struct consumer_output
                goto destroy;
        }
 
-       rcu_read_lock();
-       /* Get the right socket from the consumer object. */
-       socket = consumer_find_socket_by_bitness(regp->bits_per_long, consumer);
-       if (!socket) {
-               goto unlock;
-       }
+       {
+               lttng::urcu::read_lock_guard read_lock;
+               /* Get the right socket from the consumer object. */
+               socket = consumer_find_socket_by_bitness(regp->bits_per_long, consumer);
+               if (!socket) {
+                       goto destroy;
+               }
 
-       switch (regp->domain) {
-       case LTTNG_DOMAIN_UST:
-               if (regp->registry->reg.ust->_metadata_key) {
-                       /* Return value does not matter. This call will print errors. */
-                       (void) consumer_close_metadata(socket,
-                                                      regp->registry->reg.ust->_metadata_key);
+               switch (regp->domain) {
+               case LTTNG_DOMAIN_UST:
+                       if (regp->registry->reg.ust->_metadata_key) {
+                               /* Return value does not matter. This call will print errors. */
+                               (void) consumer_close_metadata(
+                                       socket, regp->registry->reg.ust->_metadata_key);
+                       }
+                       break;
+               default:
+                       abort();
+                       return;
                }
-               break;
-       default:
-               abort();
-               rcu_read_unlock();
-               return;
        }
 
-unlock:
-       rcu_read_unlock();
 destroy:
        call_rcu(&regp->node.head, rcu_free_buffer_reg_uid);
 }
index fc2da6ab8ba5e94939c181c223d239ee6c75a6cc..65bdd361d278b6dc96ac657f953ca2ab9b9da2ee 100644 (file)
@@ -325,6 +325,7 @@ enum lttng_error_code channel_ust_create(struct ltt_ust_session *usess,
        struct lttng_channel *defattr = nullptr;
        enum lttng_domain_type domain = LTTNG_DOMAIN_UST;
        bool chan_published = false;
+       lttng::urcu::read_lock_guard read_lock;
 
        LTTNG_ASSERT(usess);
 
@@ -448,7 +449,6 @@ enum lttng_error_code channel_ust_create(struct ltt_ust_session *usess,
        }
 
        /* Adding the channel to the channel hash table. */
-       rcu_read_lock();
        if (strncmp(uchan->name, DEFAULT_METADATA_NAME, sizeof(uchan->name)) != 0) {
                lttng_ht_add_unique_str(usess->domain_global.channels, &uchan->node);
                chan_published = true;
@@ -460,7 +460,6 @@ enum lttng_error_code channel_ust_create(struct ltt_ust_session *usess,
                 */
                memcpy(&usess->metadata_attr, &uchan->attr, sizeof(usess->metadata_attr));
        }
-       rcu_read_unlock();
 
        DBG2("Channel %s created successfully", uchan->name);
        if (domain != LTTNG_DOMAIN_UST) {
index 9ef4d379f200b0861212f434058fda12f6bd70b4..3f066a002d8d585d43cf3401556cc8a3eabca9ac 100644 (file)
@@ -41,6 +41,7 @@
 #include <common/sessiond-comm/sessiond-comm.hpp>
 #include <common/string-utils/string-utils.hpp>
 #include <common/trace-chunk.hpp>
+#include <common/urcu.hpp>
 #include <common/utils.hpp>
 
 #include <lttng/action/action-internal.hpp>
@@ -324,7 +325,6 @@ static enum lttng_error_code list_lttng_agent_events(struct agent *agt,
 
        DBG3("Listing agent events");
 
-       rcu_read_lock();
        agent_event_count = lttng_ht_get_count(agt->events);
        if (agent_event_count == 0) {
                /* Early exit. */
@@ -338,39 +338,46 @@ static enum lttng_error_code list_lttng_agent_events(struct agent *agt,
 
        local_nb_events = (unsigned int) agent_event_count;
 
-       cds_lfht_for_each_entry (agt->events->ht, &iter.iter, event, node.node) {
-               struct lttng_event *tmp_event = lttng_event_create();
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-               if (!tmp_event) {
-                       ret_code = LTTNG_ERR_NOMEM;
-                       goto error;
-               }
+               cds_lfht_for_each_entry (agt->events->ht, &iter.iter, event, node.node) {
+                       struct lttng_event *tmp_event = lttng_event_create();
 
-               if (lttng_strncpy(tmp_event->name, event->name, sizeof(tmp_event->name))) {
-                       lttng_event_destroy(tmp_event);
-                       ret_code = LTTNG_ERR_FATAL;
-                       goto error;
-               }
+                       if (!tmp_event) {
+                               ret_code = LTTNG_ERR_NOMEM;
+                               goto error;
+                       }
 
-               tmp_event->name[sizeof(tmp_event->name) - 1] = '\0';
-               tmp_event->enabled = !!event->enabled_count;
-               tmp_event->loglevel = event->loglevel_value;
-               tmp_event->loglevel_type = event->loglevel_type;
+                       if (lttng_strncpy(tmp_event->name, event->name, sizeof(tmp_event->name))) {
+                               lttng_event_destroy(tmp_event);
+                               ret_code = LTTNG_ERR_FATAL;
+                               goto error;
+                       }
 
-               ret = lttng_event_serialize(
-                       tmp_event, 0, nullptr, event->filter_expression, 0, nullptr, reply_payload);
-               lttng_event_destroy(tmp_event);
-               if (ret) {
-                       ret_code = LTTNG_ERR_FATAL;
-                       goto error;
+                       tmp_event->name[sizeof(tmp_event->name) - 1] = '\0';
+                       tmp_event->enabled = !!event->enabled_count;
+                       tmp_event->loglevel = event->loglevel_value;
+                       tmp_event->loglevel_type = event->loglevel_type;
+
+                       ret = lttng_event_serialize(tmp_event,
+                                                   0,
+                                                   nullptr,
+                                                   event->filter_expression,
+                                                   0,
+                                                   nullptr,
+                                                   reply_payload);
+                       lttng_event_destroy(tmp_event);
+                       if (ret) {
+                               ret_code = LTTNG_ERR_FATAL;
+                               goto error;
+                       }
                }
        }
-
 end:
        ret_code = LTTNG_OK;
        *nb_events = local_nb_events;
 error:
-       rcu_read_unlock();
        return ret_code;
 }
 
@@ -396,7 +403,7 @@ static enum lttng_error_code list_lttng_ust_global_events(char *channel_name,
 
        DBG("Listing UST global events for channel %s", channel_name);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        lttng_ht_lookup(ust_global->channels, (void *) channel_name, &iter);
        node = lttng_ht_iter_get_node_str(&iter);
@@ -502,7 +509,6 @@ end:
        ret_code = LTTNG_OK;
        *nb_events = local_nb_events;
 error:
-       rcu_read_unlock();
        return ret_code;
 }
 
@@ -738,9 +744,9 @@ static int init_kernel_tracing(struct ltt_kernel_session *session)
 
        LTTNG_ASSERT(session);
 
-       rcu_read_lock();
-
        if (session->consumer_fds_sent == 0 && session->consumer != nullptr) {
+               lttng::urcu::read_lock_guard read_lock;
+
                cds_lfht_for_each_entry (
                        session->consumer->socks->ht, &iter.iter, socket, node.node) {
                        pthread_mutex_lock(socket->lock);
@@ -754,7 +760,6 @@ static int init_kernel_tracing(struct ltt_kernel_session *session)
        }
 
 error:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -1016,7 +1021,6 @@ int cmd_setup_relayd(struct ltt_session *session)
 
        DBG("Setting relayd for session %s", session->name);
 
-       rcu_read_lock();
        if (session->current_trace_chunk) {
                enum lttng_trace_chunk_status status = lttng_trace_chunk_get_id(
                        session->current_trace_chunk, &current_chunk_id.value);
@@ -1033,6 +1037,8 @@ int cmd_setup_relayd(struct ltt_session *session)
        if (usess && usess->consumer && usess->consumer->type == CONSUMER_DST_NET &&
            usess->consumer->enabled) {
                /* For each consumer socket, send relayd sockets */
+               lttng::urcu::read_lock_guard read_lock;
+
                cds_lfht_for_each_entry (
                        usess->consumer->socks->ht, &iter.iter, socket, node.node) {
                        pthread_mutex_lock(socket->lock);
@@ -1054,6 +1060,7 @@ int cmd_setup_relayd(struct ltt_session *session)
                        /* Session is now ready for network streaming. */
                        session->net_handle = 1;
                }
+
                session->consumer->relay_major_version = usess->consumer->relay_major_version;
                session->consumer->relay_minor_version = usess->consumer->relay_minor_version;
                session->consumer->relay_allows_clear = usess->consumer->relay_allows_clear;
@@ -1061,6 +1068,8 @@ int cmd_setup_relayd(struct ltt_session *session)
 
        if (ksess && ksess->consumer && ksess->consumer->type == CONSUMER_DST_NET &&
            ksess->consumer->enabled) {
+               lttng::urcu::read_lock_guard read_lock;
+
                cds_lfht_for_each_entry (
                        ksess->consumer->socks->ht, &iter.iter, socket, node.node) {
                        pthread_mutex_lock(socket->lock);
@@ -1082,13 +1091,13 @@ int cmd_setup_relayd(struct ltt_session *session)
                        /* Session is now ready for network streaming. */
                        session->net_handle = 1;
                }
+
                session->consumer->relay_major_version = ksess->consumer->relay_major_version;
                session->consumer->relay_minor_version = ksess->consumer->relay_minor_version;
                session->consumer->relay_allows_clear = ksess->consumer->relay_allows_clear;
        }
 
 error:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -1216,7 +1225,7 @@ int cmd_disable_channel(struct ltt_session *session,
 
        usess = session->ust_session;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        switch (domain) {
        case LTTNG_DOMAIN_KERNEL:
@@ -1256,7 +1265,6 @@ int cmd_disable_channel(struct ltt_session *session,
        ret = LTTNG_OK;
 
 error:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -1325,6 +1333,8 @@ static enum lttng_error_code cmd_enable_channel_internal(struct ltt_session *ses
        LTTNG_ASSERT(_attr);
        LTTNG_ASSERT(domain);
 
+       lttng::urcu::read_lock_guard read_lock;
+
        attr = lttng_channel_copy(_attr);
        if (!attr) {
                ret_code = LTTNG_ERR_NOMEM;
@@ -1341,8 +1351,6 @@ static enum lttng_error_code cmd_enable_channel_internal(struct ltt_session *ses
 
        DBG("Enabling channel %s for session %s", attr->name, session->name);
 
-       rcu_read_lock();
-
        /*
         * If the session is a live session, remove the switch timer, the
         * live timer does the same thing but sends also synchronisation
@@ -1488,7 +1496,6 @@ static enum lttng_error_code cmd_enable_channel_internal(struct ltt_session *ses
                session->has_non_mmap_channel = true;
        }
 error:
-       rcu_read_unlock();
 end:
        lttng_channel_destroy(attr);
        return ret_code;
@@ -1727,6 +1734,8 @@ int cmd_disable_event(struct command_ctx *cmd_ctx,
 
        event_name = event->name;
 
+       lttng::urcu::read_lock_guard read_lock;
+
        /* Error out on unhandled search criteria */
        if (event->loglevel_type || event->loglevel != -1 || event->enabled || event->pid ||
            event->filter || event->exclusion) {
@@ -1734,8 +1743,6 @@ int cmd_disable_event(struct command_ctx *cmd_ctx,
                goto error;
        }
 
-       rcu_read_lock();
-
        switch (domain) {
        case LTTNG_DOMAIN_KERNEL:
        {
@@ -1880,7 +1887,6 @@ int cmd_disable_event(struct command_ctx *cmd_ctx,
        ret = LTTNG_OK;
 
 error_unlock:
-       rcu_read_unlock();
 error:
        free(exclusion);
        free(bytecode);
@@ -2084,7 +2090,7 @@ static int _cmd_enable_event(struct ltt_session *session,
 
        DBG("Enable event command for event \'%s\'", event->name);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        switch (domain->type) {
        case LTTNG_DOMAIN_KERNEL:
@@ -2426,7 +2432,6 @@ error:
        free(filter);
        free(exclusion);
        channel_attr_destroy(attr);
-       rcu_read_unlock();
        return ret;
 }
 
@@ -3609,9 +3614,8 @@ int cmd_register_consumer(struct ltt_session *session,
                pthread_mutex_init(socket->lock, nullptr);
                socket->registered = 1;
 
-               rcu_read_lock();
+               lttng::urcu::read_lock_guard read_lock;
                consumer_add_socket(socket, ksess->consumer);
-               rcu_read_unlock();
 
                pthread_mutex_lock(&cdata->pid_mutex);
                cdata->pid = -1;
@@ -3653,14 +3657,14 @@ ssize_t cmd_list_domains(struct ltt_session *session, struct lttng_domain **doma
                DBG3("Listing domains found UST global domain");
                nb_dom++;
 
-               rcu_read_lock();
+               lttng::urcu::read_lock_guard read_lock;
+
                cds_lfht_for_each_entry (
                        session->ust_session->agents->ht, &iter.iter, agt, node.node) {
                        if (agt->being_used) {
                                nb_dom++;
                        }
                }
-               rcu_read_unlock();
        }
 
        if (!nb_dom) {
@@ -3687,16 +3691,19 @@ ssize_t cmd_list_domains(struct ltt_session *session, struct lttng_domain **doma
                (*domains)[index].buf_type = session->ust_session->buffer_type;
                index++;
 
-               rcu_read_lock();
-               cds_lfht_for_each_entry (
-                       session->ust_session->agents->ht, &iter.iter, agt, node.node) {
-                       if (agt->being_used) {
-                               (*domains)[index].type = agt->domain;
-                               (*domains)[index].buf_type = session->ust_session->buffer_type;
-                               index++;
+               {
+                       lttng::urcu::read_lock_guard read_lock;
+
+                       cds_lfht_for_each_entry (
+                               session->ust_session->agents->ht, &iter.iter, agt, node.node) {
+                               if (agt->being_used) {
+                                       (*domains)[index].type = agt->domain;
+                                       (*domains)[index].buf_type =
+                                               session->ust_session->buffer_type;
+                                       index++;
+                               }
                        }
                }
-               rcu_read_unlock();
        }
 end:
        return nb_dom;
@@ -3780,47 +3787,51 @@ enum lttng_error_code cmd_list_channels(enum lttng_domain_type domain,
                struct lttng_ht_iter iter;
                struct ltt_ust_channel *uchan;
 
-               rcu_read_lock();
-               cds_lfht_for_each_entry (session->ust_session->domain_global.channels->ht,
-                                        &iter.iter,
-                                        uchan,
-                                        node.node) {
-                       uint64_t discarded_events = 0, lost_packets = 0;
-                       struct lttng_channel *channel = nullptr;
-                       struct lttng_channel_extended *extended;
-
-                       channel = trace_ust_channel_to_lttng_channel(uchan);
-                       if (!channel) {
-                               ret_code = LTTNG_ERR_NOMEM;
-                               goto end;
-                       }
+               {
+                       lttng::urcu::read_lock_guard read_lock;
+
+                       cds_lfht_for_each_entry (session->ust_session->domain_global.channels->ht,
+                                                &iter.iter,
+                                                uchan,
+                                                node.node) {
+                               uint64_t discarded_events = 0, lost_packets = 0;
+                               struct lttng_channel *channel = nullptr;
+                               struct lttng_channel_extended *extended;
 
-                       extended = (struct lttng_channel_extended *) channel->attr.extended.ptr;
+                               channel = trace_ust_channel_to_lttng_channel(uchan);
+                               if (!channel) {
+                                       ret_code = LTTNG_ERR_NOMEM;
+                                       goto end;
+                               }
 
-                       ret = get_ust_runtime_stats(
-                               session, uchan, &discarded_events, &lost_packets);
-                       if (ret < 0) {
-                               lttng_channel_destroy(channel);
-                               ret_code = LTTNG_ERR_UNK;
-                               goto end;
-                       }
+                               extended = (struct lttng_channel_extended *)
+                                                  channel->attr.extended.ptr;
 
-                       extended->discarded_events = discarded_events;
-                       extended->lost_packets = lost_packets;
+                               ret = get_ust_runtime_stats(
+                                       session, uchan, &discarded_events, &lost_packets);
+                               if (ret < 0) {
+                                       lttng_channel_destroy(channel);
+                                       ret_code = LTTNG_ERR_UNK;
+                                       goto end;
+                               }
+
+                               extended->discarded_events = discarded_events;
+                               extended->lost_packets = lost_packets;
+
+                               ret = lttng_channel_serialize(channel, &payload->buffer);
+                               if (ret) {
+                                       ERR("Failed to serialize lttng_channel: channel name = '%s'",
+                                           channel->name);
+                                       lttng_channel_destroy(channel);
+                                       ret_code = LTTNG_ERR_UNK;
+                                       goto end;
+                               }
 
-                       ret = lttng_channel_serialize(channel, &payload->buffer);
-                       if (ret) {
-                               ERR("Failed to serialize lttng_channel: channel name = '%s'",
-                                   channel->name);
                                lttng_channel_destroy(channel);
-                               ret_code = LTTNG_ERR_UNK;
-                               goto end;
+                               i++;
                        }
-
-                       lttng_channel_destroy(channel);
-                       i++;
                }
-               rcu_read_unlock();
+
                break;
        }
        default:
@@ -3895,7 +3906,8 @@ enum lttng_error_code cmd_list_events(enum lttng_domain_type domain,
                        struct lttng_ht_iter iter;
                        struct agent *agt;
 
-                       rcu_read_lock();
+                       lttng::urcu::read_lock_guard read_lock;
+
                        cds_lfht_for_each_entry (
                                session->ust_session->agents->ht, &iter.iter, agt, node.node) {
                                if (agt->domain == domain) {
@@ -3904,8 +3916,6 @@ enum lttng_error_code cmd_list_events(enum lttng_domain_type domain,
                                        break;
                                }
                        }
-
-                       rcu_read_unlock();
                }
                break;
        default:
@@ -4119,12 +4129,10 @@ int cmd_snapshot_add_output(struct ltt_session *session,
                goto free_error;
        }
 
-       rcu_read_lock();
        snapshot_add_output(&session->snapshot, new_output);
        if (id) {
                *id = new_output->id;
        }
-       rcu_read_unlock();
 
        return LTTNG_OK;
 
@@ -4147,7 +4155,7 @@ int cmd_snapshot_del_output(struct ltt_session *session, const struct lttng_snap
        LTTNG_ASSERT(session);
        LTTNG_ASSERT(output);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        /*
         * Permission denied to create an output if the session is not
@@ -4177,7 +4185,6 @@ int cmd_snapshot_del_output(struct ltt_session *session, const struct lttng_snap
        ret = LTTNG_OK;
 
 error:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -4222,49 +4229,54 @@ ssize_t cmd_snapshot_list_outputs(struct ltt_session *session,
        }
 
        /* Copy list from session to the new list object. */
-       rcu_read_lock();
-       cds_lfht_for_each_entry (session->snapshot.output_ht->ht, &iter.iter, output, node.node) {
-               LTTNG_ASSERT(output->consumer);
-               list[idx].id = output->id;
-               list[idx].max_size = output->max_size;
-               if (lttng_strncpy(list[idx].name, output->name, sizeof(list[idx].name))) {
-                       ret = -LTTNG_ERR_INVALID;
-                       goto error;
-               }
-               if (output->consumer->type == CONSUMER_DST_LOCAL) {
-                       if (lttng_strncpy(list[idx].ctrl_url,
-                                         output->consumer->dst.session_root_path,
-                                         sizeof(list[idx].ctrl_url))) {
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (
+                       session->snapshot.output_ht->ht, &iter.iter, output, node.node) {
+                       LTTNG_ASSERT(output->consumer);
+                       list[idx].id = output->id;
+                       list[idx].max_size = output->max_size;
+                       if (lttng_strncpy(list[idx].name, output->name, sizeof(list[idx].name))) {
                                ret = -LTTNG_ERR_INVALID;
                                goto error;
                        }
-               } else {
-                       /* Control URI. */
-                       ret = uri_to_str_url(&output->consumer->dst.net.control,
-                                            list[idx].ctrl_url,
-                                            sizeof(list[idx].ctrl_url));
-                       if (ret < 0) {
-                               ret = -LTTNG_ERR_NOMEM;
-                               goto error;
-                       }
 
-                       /* Data URI. */
-                       ret = uri_to_str_url(&output->consumer->dst.net.data,
-                                            list[idx].data_url,
-                                            sizeof(list[idx].data_url));
-                       if (ret < 0) {
-                               ret = -LTTNG_ERR_NOMEM;
-                               goto error;
+                       if (output->consumer->type == CONSUMER_DST_LOCAL) {
+                               if (lttng_strncpy(list[idx].ctrl_url,
+                                                 output->consumer->dst.session_root_path,
+                                                 sizeof(list[idx].ctrl_url))) {
+                                       ret = -LTTNG_ERR_INVALID;
+                                       goto error;
+                               }
+                       } else {
+                               /* Control URI. */
+                               ret = uri_to_str_url(&output->consumer->dst.net.control,
+                                                    list[idx].ctrl_url,
+                                                    sizeof(list[idx].ctrl_url));
+                               if (ret < 0) {
+                                       ret = -LTTNG_ERR_NOMEM;
+                                       goto error;
+                               }
+
+                               /* Data URI. */
+                               ret = uri_to_str_url(&output->consumer->dst.net.data,
+                                                    list[idx].data_url,
+                                                    sizeof(list[idx].data_url));
+                               if (ret < 0) {
+                                       ret = -LTTNG_ERR_NOMEM;
+                                       goto error;
+                               }
                        }
+
+                       idx++;
                }
-               idx++;
        }
 
        *outputs = list;
        list = nullptr;
        ret = session->snapshot.nb_output;
 error:
-       rcu_read_unlock();
        free(list);
 end:
        return ret;
@@ -4955,27 +4967,28 @@ static enum lttng_error_code set_relayd_for_snapshot(struct consumer_output *out
         * For each consumer socket, create and send the relayd object of the
         * snapshot output.
         */
-       rcu_read_lock();
-       cds_lfht_for_each_entry (output->socks->ht, &iter.iter, socket, node.node) {
-               pthread_mutex_lock(socket->lock);
-               status = send_consumer_relayd_sockets(
-                       session->id,
-                       output,
-                       socket,
-                       session->name,
-                       session->hostname,
-                       base_path,
-                       session->live_timer,
-                       current_chunk_id.is_set ? &current_chunk_id.value : nullptr,
-                       session->creation_time,
-                       session->name_contains_creation_time);
-               pthread_mutex_unlock(socket->lock);
-               if (status != LTTNG_OK) {
-                       rcu_read_unlock();
-                       goto error;
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (output->socks->ht, &iter.iter, socket, node.node) {
+                       pthread_mutex_lock(socket->lock);
+                       status = send_consumer_relayd_sockets(
+                               session->id,
+                               output,
+                               socket,
+                               session->name,
+                               session->hostname,
+                               base_path,
+                               session->live_timer,
+                               current_chunk_id.is_set ? &current_chunk_id.value : nullptr,
+                               session->creation_time,
+                               session->name_contains_creation_time);
+                       pthread_mutex_unlock(socket->lock);
+                       if (status != LTTNG_OK) {
+                               goto error;
+                       }
                }
        }
-       rcu_read_unlock();
 
 error:
        return status;
@@ -5349,7 +5362,8 @@ int cmd_snapshot_record(struct ltt_session *session,
                struct snapshot_output *sout;
                struct lttng_ht_iter iter;
 
-               rcu_read_lock();
+               lttng::urcu::read_lock_guard read_lock;
+
                cds_lfht_for_each_entry (
                        session->snapshot.output_ht->ht, &iter.iter, sout, node.node) {
                        struct snapshot_output output_copy;
@@ -5374,19 +5388,17 @@ int cmd_snapshot_record(struct ltt_session *session,
                                                  output->name,
                                                  sizeof(output_copy.name))) {
                                        cmd_ret = LTTNG_ERR_INVALID;
-                                       rcu_read_unlock();
                                        goto error;
                                }
                        }
 
                        cmd_ret = snapshot_record(session, &output_copy);
                        if (cmd_ret != LTTNG_OK) {
-                               rcu_read_unlock();
                                goto error;
                        }
+
                        snapshot_success = 1;
                }
-               rcu_read_unlock();
        }
 
        if (snapshot_success) {
@@ -5399,6 +5411,7 @@ error:
        if (tmp_output) {
                snapshot_output_destroy(tmp_output);
        }
+
        return cmd_ret;
 }
 
index e8434702532273e5fd5e06093d15d7417eed8ccb..13c18325d75d363e5da76674734b4e3aedcd0c07 100644 (file)
@@ -17,6 +17,7 @@
 #include <common/defaults.hpp>
 #include <common/relayd/relayd.hpp>
 #include <common/string-utils/format.hpp>
+#include <common/urcu.hpp>
 #include <common/uri.hpp>
 
 #include <inttypes.h>
@@ -291,18 +292,17 @@ void consumer_output_send_destroy_relayd(struct consumer_output *consumer)
 
        /* Destroy any relayd connection */
        if (consumer->type == CONSUMER_DST_NET) {
-               rcu_read_lock();
+               lttng::urcu::read_lock_guard read_lock;
+
                cds_lfht_for_each_entry (consumer->socks->ht, &iter.iter, socket, node.node) {
-                       int ret;
+                       /* Send destroy relayd command. */
+                       const int ret = consumer_send_destroy_relayd(socket, consumer);
 
-                       /* Send destroy relayd command */
-                       ret = consumer_send_destroy_relayd(socket, consumer);
                        if (ret < 0) {
                                DBG("Unable to send destroy relayd command to consumer");
                                /* Continue since we MUST delete everything at this point. */
                        }
                }
-               rcu_read_unlock();
        }
 }
 
@@ -319,6 +319,8 @@ int consumer_create_socket(struct consumer_data *data, struct consumer_output *o
 
        LTTNG_ASSERT(data);
 
+       lttng::urcu::read_lock_guard read_lock;
+
        if (output == nullptr || data->cmd_sock < 0) {
                /*
                 * Not an error. Possible there is simply not spawned consumer or it's
@@ -327,9 +329,7 @@ int consumer_create_socket(struct consumer_data *data, struct consumer_output *o
                goto error;
        }
 
-       rcu_read_lock();
        socket = consumer_find_socket(data->cmd_sock, output);
-       rcu_read_unlock();
        if (socket == nullptr) {
                socket = consumer_allocate_socket(&data->cmd_sock);
                if (socket == nullptr) {
@@ -339,9 +339,7 @@ int consumer_create_socket(struct consumer_data *data, struct consumer_output *o
 
                socket->registered = 0;
                socket->lock = &data->lock;
-               rcu_read_lock();
                consumer_add_socket(socket, output);
-               rcu_read_unlock();
        }
 
        socket->type = data->type;
@@ -544,12 +542,14 @@ void consumer_destroy_output_sockets(struct consumer_output *obj)
                return;
        }
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (obj->socks->ht, &iter.iter, socket, node.node) {
-               consumer_del_socket(socket, obj);
-               consumer_destroy_socket(socket);
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (obj->socks->ht, &iter.iter, socket, node.node) {
+                       consumer_del_socket(socket, obj);
+                       consumer_destroy_socket(socket);
+               }
        }
-       rcu_read_unlock();
 }
 
 /*
@@ -636,32 +636,33 @@ int consumer_copy_sockets(struct consumer_output *dst, struct consumer_output *s
        LTTNG_ASSERT(dst);
        LTTNG_ASSERT(src);
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (src->socks->ht, &iter.iter, socket, node.node) {
-               /* Ignore socket that are already there. */
-               copy_sock = consumer_find_socket(*socket->fd_ptr, dst);
-               if (copy_sock) {
-                       continue;
-               }
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-               /* Create new socket object. */
-               copy_sock = consumer_allocate_socket(socket->fd_ptr);
-               if (copy_sock == nullptr) {
-                       rcu_read_unlock();
-                       ret = -ENOMEM;
-                       goto error;
-               }
+               cds_lfht_for_each_entry (src->socks->ht, &iter.iter, socket, node.node) {
+                       /* Ignore socket that are already there. */
+                       copy_sock = consumer_find_socket(*socket->fd_ptr, dst);
+                       if (copy_sock) {
+                               continue;
+                       }
 
-               copy_sock->registered = socket->registered;
-               /*
-                * This is valid because this lock is shared accross all consumer
-                * object being the global lock of the consumer data structure of the
-                * session daemon.
-                */
-               copy_sock->lock = socket->lock;
-               consumer_add_socket(copy_sock, dst);
+                       /* Create new socket object. */
+                       copy_sock = consumer_allocate_socket(socket->fd_ptr);
+                       if (copy_sock == nullptr) {
+                               ret = -ENOMEM;
+                               goto error;
+                       }
+
+                       copy_sock->registered = socket->registered;
+                       /*
+                        * This is valid because this lock is shared accross all consumer
+                        * object being the global lock of the consumer data structure of the
+                        * session daemon.
+                        */
+                       copy_sock->lock = socket->lock;
+                       consumer_add_socket(copy_sock, dst);
+               }
        }
-       rcu_read_unlock();
 
 error:
        return ret;
@@ -1268,33 +1269,35 @@ int consumer_is_data_pending(uint64_t session_id, struct consumer_output *consum
        msg.cmd_type = LTTNG_CONSUMER_DATA_PENDING;
        msg.u.data_pending.session_id = session_id;
 
-       /* Send command for each consumer */
-       rcu_read_lock();
-       cds_lfht_for_each_entry (consumer->socks->ht, &iter.iter, socket, node.node) {
-               pthread_mutex_lock(socket->lock);
-               ret = consumer_socket_send(socket, &msg, sizeof(msg));
-               if (ret < 0) {
-                       pthread_mutex_unlock(socket->lock);
-                       goto error_unlock;
-               }
+       {
+               /* Send command for each consumer. */
+               lttng::urcu::read_lock_guard read_lock;
 
-               /*
-                * No need for a recv reply status because the answer to the command is
-                * the reply status message.
-                */
+               cds_lfht_for_each_entry (consumer->socks->ht, &iter.iter, socket, node.node) {
+                       pthread_mutex_lock(socket->lock);
+                       ret = consumer_socket_send(socket, &msg, sizeof(msg));
+                       if (ret < 0) {
+                               pthread_mutex_unlock(socket->lock);
+                               goto error_unlock;
+                       }
+
+                       /*
+                        * No need for a recv reply status because the answer to the command is
+                        * the reply status message.
+                        */
+                       ret = consumer_socket_recv(socket, &ret_code, sizeof(ret_code));
+                       if (ret < 0) {
+                               pthread_mutex_unlock(socket->lock);
+                               goto error_unlock;
+                       }
 
-               ret = consumer_socket_recv(socket, &ret_code, sizeof(ret_code));
-               if (ret < 0) {
                        pthread_mutex_unlock(socket->lock);
-                       goto error_unlock;
-               }
-               pthread_mutex_unlock(socket->lock);
 
-               if (ret_code == 1) {
-                       break;
+                       if (ret_code == 1) {
+                               break;
+                       }
                }
        }
-       rcu_read_unlock();
 
        DBG("Consumer data is %s pending for session id %" PRIu64,
            ret_code == 1 ? "" : "NOT",
@@ -1302,7 +1305,6 @@ int consumer_is_data_pending(uint64_t session_id, struct consumer_output *consum
        return ret_code;
 
 error_unlock:
-       rcu_read_unlock();
        return -1;
 }
 
@@ -1580,35 +1582,41 @@ int consumer_get_discarded_events(uint64_t session_id,
 
        *discarded = 0;
 
-       /* Send command for each consumer */
-       rcu_read_lock();
-       cds_lfht_for_each_entry (consumer->socks->ht, &iter.iter, socket, node.node) {
-               uint64_t consumer_discarded = 0;
-               pthread_mutex_lock(socket->lock);
-               ret = consumer_socket_send(socket, &msg, sizeof(msg));
-               if (ret < 0) {
-                       pthread_mutex_unlock(socket->lock);
-                       goto end;
-               }
+       /* Send command for each consumer. */
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (consumer->socks->ht, &iter.iter, socket, node.node) {
+                       uint64_t consumer_discarded = 0;
+
+                       pthread_mutex_lock(socket->lock);
+                       ret = consumer_socket_send(socket, &msg, sizeof(msg));
+                       if (ret < 0) {
+                               pthread_mutex_unlock(socket->lock);
+                               goto end;
+                       }
+
+                       /*
+                        * No need for a recv reply status because the answer to the
+                        * command is the reply status message.
+                        */
+                       ret = consumer_socket_recv(
+                               socket, &consumer_discarded, sizeof(consumer_discarded));
+                       if (ret < 0) {
+                               ERR("get discarded events");
+                               pthread_mutex_unlock(socket->lock);
+                               goto end;
+                       }
 
-               /*
-                * No need for a recv reply status because the answer to the
-                * command is the reply status message.
-                */
-               ret = consumer_socket_recv(socket, &consumer_discarded, sizeof(consumer_discarded));
-               if (ret < 0) {
-                       ERR("get discarded events");
                        pthread_mutex_unlock(socket->lock);
-                       goto end;
+                       *discarded += consumer_discarded;
                }
-               pthread_mutex_unlock(socket->lock);
-               *discarded += consumer_discarded;
        }
+
        ret = 0;
        DBG("Consumer discarded %" PRIu64 " events in session id %" PRIu64, *discarded, session_id);
 
 end:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -1636,35 +1644,38 @@ int consumer_get_lost_packets(uint64_t session_id,
 
        *lost = 0;
 
-       /* Send command for each consumer */
-       rcu_read_lock();
-       cds_lfht_for_each_entry (consumer->socks->ht, &iter.iter, socket, node.node) {
-               uint64_t consumer_lost = 0;
-               pthread_mutex_lock(socket->lock);
-               ret = consumer_socket_send(socket, &msg, sizeof(msg));
-               if (ret < 0) {
-                       pthread_mutex_unlock(socket->lock);
-                       goto end;
-               }
+       /* Send command for each consumer. */
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-               /*
-                * No need for a recv reply status because the answer to the
-                * command is the reply status message.
-                */
-               ret = consumer_socket_recv(socket, &consumer_lost, sizeof(consumer_lost));
-               if (ret < 0) {
-                       ERR("get lost packets");
+               cds_lfht_for_each_entry (consumer->socks->ht, &iter.iter, socket, node.node) {
+                       uint64_t consumer_lost = 0;
+                       pthread_mutex_lock(socket->lock);
+                       ret = consumer_socket_send(socket, &msg, sizeof(msg));
+                       if (ret < 0) {
+                               pthread_mutex_unlock(socket->lock);
+                               goto end;
+                       }
+
+                       /*
+                        * No need for a recv reply status because the answer to the
+                        * command is the reply status message.
+                        */
+                       ret = consumer_socket_recv(socket, &consumer_lost, sizeof(consumer_lost));
+                       if (ret < 0) {
+                               ERR("get lost packets");
+                               pthread_mutex_unlock(socket->lock);
+                               goto end;
+                       }
                        pthread_mutex_unlock(socket->lock);
-                       goto end;
+                       *lost += consumer_lost;
                }
-               pthread_mutex_unlock(socket->lock);
-               *lost += consumer_lost;
        }
+
        ret = 0;
        DBG("Consumer lost %" PRIu64 " packets in session id %" PRIu64, *lost, session_id);
 
 end:
-       rcu_read_unlock();
        return ret;
 }
 
index b3b549187ecd57fcf5f7d989de230bc00ae6abbf..79c454809cdb3f65f97da0526130c635e16207a2 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <common/error.hpp>
 #include <common/sessiond-comm/sessiond-comm.hpp>
+#include <common/urcu.hpp>
 
 #include <stdio.h>
 #include <unistd.h>
@@ -374,7 +375,7 @@ int context_ust_add(struct ltt_ust_session *usess,
        LTTNG_ASSERT(ctx);
        LTTNG_ASSERT(channel_name);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        chan_ht = usess->domain_global.channels;
 
@@ -391,7 +392,6 @@ int context_ust_add(struct ltt_ust_session *usess,
                /* Add ctx to channel */
                ret = add_uctx_to_channel(usess, domain, uchan, ctx);
        } else {
-               rcu_read_lock();
                /* Add ctx all events, all channels */
                cds_lfht_for_each_entry (chan_ht->ht, &iter.iter, uchan, node.node) {
                        ret = add_uctx_to_channel(usess, domain, uchan, ctx);
@@ -400,7 +400,6 @@ int context_ust_add(struct ltt_ust_session *usess,
                                continue;
                        }
                }
-               rcu_read_unlock();
        }
 
        switch (ret) {
@@ -426,6 +425,5 @@ int context_ust_add(struct ltt_ust_session *usess,
        }
 
 error:
-       rcu_read_unlock();
        return ret;
 }
index 7fa9c8f16c5fee0a00ec2b6d222d1907188bd056..811cec0d9511923f58e652f9134fe3ddd61ba61c 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <common/futex.hpp>
 #include <common/macros.hpp>
+#include <common/urcu.hpp>
 
 #include <stddef.h>
 #include <stdlib.h>
@@ -47,7 +48,7 @@ static void update_ust_app(int app_sock)
                return;
        }
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        LTTNG_ASSERT(app_sock >= 0);
        app = ust_app_find_by_sock(app_sock);
        if (app == nullptr) {
@@ -57,7 +58,7 @@ static void update_ust_app(int app_sock)
                 * update.
                 */
                DBG3("UST app update failed to find app sock %d", app_sock);
-               goto unlock_rcu;
+               return;
        }
 
        /* Update all event notifiers for the app. */
@@ -78,9 +79,6 @@ static void update_ust_app(int app_sock)
                session_unlock(sess);
                session_put(sess);
        }
-
-unlock_rcu:
-       rcu_read_unlock();
 }
 
 /*
@@ -380,7 +378,7 @@ static void *thread_dispatch_ust_registration(void *data)
                                 * and change its state.
                                 */
                                session_lock_list();
-                               rcu_read_lock();
+                               lttng::urcu::read_lock_guard read_lock;
 
                                /*
                                 * Add application to the global hash table. This needs to be
@@ -398,7 +396,6 @@ static void *thread_dispatch_ust_registration(void *data)
                                ret = send_socket_to_thread(
                                        notifiers->apps_cmd_notify_pipe_write_fd, app->notify_sock);
                                if (ret < 0) {
-                                       rcu_read_unlock();
                                        session_unlock_list();
                                        /*
                                         * No notify thread, stop the UST tracing. However, this is
@@ -429,7 +426,6 @@ static void *thread_dispatch_ust_registration(void *data)
                                ret = send_socket_to_thread(notifiers->apps_cmd_pipe_write_fd,
                                                            app->sock);
                                if (ret < 0) {
-                                       rcu_read_unlock();
                                        session_unlock_list();
                                        /*
                                         * No apps. thread, stop the UST tracing. However, this is
@@ -440,7 +436,6 @@ static void *thread_dispatch_ust_registration(void *data)
                                        goto error;
                                }
 
-                               rcu_read_unlock();
                                session_unlock_list();
                        }
                } while (node != nullptr);
index 4ee5f2088409c7a22f52040b06d2fff919cab2e4..7fa54bc93080b54c361306ca3405b580f8b58f07 100644 (file)
@@ -14,6 +14,7 @@
 #include <common/index-allocator.hpp>
 #include <common/kernel-ctl/kernel-ctl.hpp>
 #include <common/shm.hpp>
+#include <common/urcu.hpp>
 
 #include <lttng/trigger/trigger-internal.hpp>
 
@@ -154,10 +155,9 @@ static void ust_error_accounting_entry_release(struct urcu_ref *entry_ref)
        struct ust_error_accounting_entry *entry =
                lttng::utils::container_of(entry_ref, &ust_error_accounting_entry::ref);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_lfht_del(error_counter_uid_ht->ht, &entry->node.node);
        call_rcu(&entry->rcu_head, free_ust_error_accounting_entry);
-       rcu_read_unlock();
 }
 
 static void ust_error_accounting_entry_put(struct ust_error_accounting_entry *entry)
@@ -179,12 +179,14 @@ static void put_ref_all_ust_error_accounting_entry()
 
        ASSERT_LOCKED(the_event_notifier_counter.lock);
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (error_counter_uid_ht->ht, &iter.iter, uid_entry, node.node) {
-               ust_error_accounting_entry_put(uid_entry);
-       }
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-       rcu_read_unlock();
+               cds_lfht_for_each_entry (
+                       error_counter_uid_ht->ht, &iter.iter, uid_entry, node.node) {
+                       ust_error_accounting_entry_put(uid_entry);
+               }
+       }
 }
 
 /*
@@ -197,12 +199,14 @@ static void get_ref_all_ust_error_accounting_entry()
 
        ASSERT_LOCKED(the_event_notifier_counter.lock);
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (error_counter_uid_ht->ht, &iter.iter, uid_entry, node.node) {
-               ust_error_accounting_entry_get(uid_entry);
-       }
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-       rcu_read_unlock();
+               cds_lfht_for_each_entry (
+                       error_counter_uid_ht->ht, &iter.iter, uid_entry, node.node) {
+                       ust_error_accounting_entry_get(uid_entry);
+               }
+       }
 }
 
 #endif /* HAVE_LIBLTTNG_UST_CTL */
@@ -300,8 +304,8 @@ static enum event_notifier_error_accounting_status get_error_counter_index_for_t
        struct lttng_ht_iter iter;
        const struct index_ht_entry *index_entry;
        enum event_notifier_error_accounting_status status;
+       lttng::urcu::read_lock_guard read_guard;
 
-       rcu_read_lock();
        lttng_ht_lookup(state->indices_ht, &tracer_token, &iter);
        node = lttng_ht_iter_get_node_u64(&iter);
        if (node) {
@@ -312,7 +316,6 @@ static enum event_notifier_error_accounting_status get_error_counter_index_for_t
                status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_NOT_FOUND;
        }
 
-       rcu_read_unlock();
        return status;
 }
 
@@ -591,6 +594,7 @@ event_notifier_error_accounting_register_app(struct ust_app *app)
        struct ust_error_accounting_entry *entry;
        enum event_notifier_error_accounting_status status;
        struct lttng_ust_abi_object_data **cpu_counters;
+       lttng::urcu::read_lock_guard read_lock;
 
        if (!ust_app_supports_counters(app)) {
                status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_UNSUPPORTED;
@@ -601,7 +605,6 @@ event_notifier_error_accounting_register_app(struct ust_app *app)
         * Check if we already have a error counter for the user id of this
         * app. If not, create one.
         */
-       rcu_read_lock();
        entry = ust_error_accounting_entry_find(error_counter_uid_ht, app);
        if (entry == nullptr) {
                /*
@@ -704,7 +707,7 @@ event_notifier_error_accounting_register_app(struct ust_app *app)
        app->event_notifier_group.nr_counter_cpu = entry->nr_counter_cpu_fds;
        app->event_notifier_group.counter_cpu = cpu_counters;
        cpu_counters = nullptr;
-       goto end_unlock;
+       goto end;
 
 error_send_cpu_counter_data:
 error_duplicate_cpu_counter:
@@ -732,8 +735,6 @@ error_duplicate_counter:
        ust_error_accounting_entry_put(entry);
 error_creating_entry:
        app->event_notifier_group.counter = nullptr;
-end_unlock:
-       rcu_read_unlock();
 end:
        return status;
 }
@@ -745,7 +746,7 @@ event_notifier_error_accounting_unregister_app(struct ust_app *app)
        struct ust_error_accounting_entry *entry;
        int i;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        /* If an error occurred during app registration no entry was created. */
        if (!app->event_notifier_group.counter) {
@@ -781,7 +782,6 @@ event_notifier_error_accounting_unregister_app(struct ust_app *app)
 
        status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
 end:
-       rcu_read_unlock();
        return status;
 }
 
@@ -797,7 +797,7 @@ event_notifier_error_accounting_ust_get_count(const struct lttng_trigger *trigge
        uid_t trigger_owner_uid;
        const char *trigger_name;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        get_trigger_info_for_log(trigger, &trigger_name, &trigger_owner_uid);
 
@@ -857,7 +857,6 @@ event_notifier_error_accounting_ust_get_count(const struct lttng_trigger *trigge
        status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
 
 end:
-       rcu_read_unlock();
        return status;
 }
 
@@ -871,7 +870,8 @@ event_notifier_error_accounting_ust_clear(const struct lttng_trigger *trigger)
        size_t dimension_index;
        const uint64_t tracer_token = lttng_trigger_get_tracer_token(trigger);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
+
        status = get_error_counter_index_for_token(&ust_state, tracer_token, &error_counter_index);
        if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
                uid_t trigger_owner_uid;
@@ -915,7 +915,6 @@ event_notifier_error_accounting_ust_clear(const struct lttng_trigger *trigger)
 
        status = EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK;
 end:
-       rcu_read_unlock();
        return status;
 }
 #endif /* HAVE_LIBLTTNG_UST_CTL */
@@ -1262,6 +1261,8 @@ void event_notifier_error_accounting_unregister_event_notifier(const struct lttn
        enum event_notifier_error_accounting_status status;
        struct error_accounting_state *state;
 
+       lttng::urcu::read_lock_guard read_lock;
+
        status = event_notifier_error_accounting_clear(trigger);
        if (status != EVENT_NOTIFIER_ERROR_ACCOUNTING_STATUS_OK) {
                /* Trigger details already logged by callee on error. */
@@ -1270,8 +1271,6 @@ void event_notifier_error_accounting_unregister_event_notifier(const struct lttn
                goto end;
        }
 
-       rcu_read_lock();
-
        switch (lttng_trigger_get_underlying_domain_type_restriction(trigger)) {
        case LTTNG_DOMAIN_KERNEL:
                state = &kernel_state;
@@ -1330,9 +1329,8 @@ void event_notifier_error_accounting_unregister_event_notifier(const struct lttn
                LTTNG_ASSERT(!del_ret);
                call_rcu(&index_entry->rcu_head, free_index_ht_entry);
        }
-
 end:
-       rcu_read_unlock();
+       return;
 }
 
 void event_notifier_error_accounting_fini()
index 3976f0fcee2eb5f230cbe02e61069276b96dedd8..367c4944553c56ad15f53b26830b1deee16c9535 100644 (file)
@@ -25,6 +25,7 @@
 #include <common/error.hpp>
 #include <common/filter.hpp>
 #include <common/sessiond-comm/sessiond-comm.hpp>
+#include <common/urcu.hpp>
 
 #include <lttng/condition/condition.h>
 #include <lttng/condition/event-rule-matches.h>
@@ -172,7 +173,7 @@ int event_ust_enable_tracepoint(struct ltt_ust_session *usess,
        LTTNG_ASSERT(uchan);
        LTTNG_ASSERT(event);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        uevent = trace_ust_find_event(uchan->events,
                                      event->name,
@@ -237,7 +238,6 @@ int event_ust_enable_tracepoint(struct ltt_ust_session *usess,
        ret = LTTNG_OK;
 
 end:
-       rcu_read_unlock();
        free(filter_expression);
        free(filter);
        free(exclusion);
@@ -263,7 +263,7 @@ int event_ust_disable_tracepoint(struct ltt_ust_session *usess,
 
        ht = uchan->events;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        /*
         * We use a custom lookup since we need the iterator for the next_duplicate
@@ -310,7 +310,6 @@ int event_ust_disable_tracepoint(struct ltt_ust_session *usess,
        ret = LTTNG_OK;
 
 error:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -327,15 +326,18 @@ int event_ust_disable_all_tracepoints(struct ltt_ust_session *usess, struct ltt_
        LTTNG_ASSERT(usess);
        LTTNG_ASSERT(uchan);
 
-       rcu_read_lock();
 
        /* Disabling existing events */
-       cds_lfht_for_each_entry (uchan->events->ht, &iter.iter, uevent, node.node) {
-               if (uevent->enabled) {
-                       ret = event_ust_disable_tracepoint(usess, uchan, uevent->attr.name);
-                       if (ret < 0) {
-                               error = LTTNG_ERR_UST_DISABLE_FAIL;
-                               continue;
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (uchan->events->ht, &iter.iter, uevent, node.node) {
+                       if (uevent->enabled) {
+                               ret = event_ust_disable_tracepoint(usess, uchan, uevent->attr.name);
+                               if (ret < 0) {
+                                       error = LTTNG_ERR_UST_DISABLE_FAIL;
+                                       continue;
+                               }
                        }
                }
        }
@@ -358,7 +360,6 @@ int event_ust_disable_all_tracepoints(struct ltt_ust_session *usess, struct ltt_
 
        ret = error ? error : LTTNG_OK;
 error:
-       rcu_read_unlock();
        free(events);
        return ret;
 }
@@ -368,12 +369,14 @@ static void agent_enable_all(struct agent *agt)
        struct agent_event *aevent;
        struct lttng_ht_iter iter;
 
-       /* Flag every event as enabled. */
-       rcu_read_lock();
-       cds_lfht_for_each_entry (agt->events->ht, &iter.iter, aevent, node.node) {
-               aevent->enabled_count++;
+       {
+               /* Flag every event as enabled. */
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (agt->events->ht, &iter.iter, aevent, node.node) {
+                       aevent->enabled_count++;
+               }
        }
-       rcu_read_unlock();
 }
 
 /*
@@ -480,7 +483,7 @@ static int agent_enable(struct agent *agt,
        LTTNG_ASSERT(event);
        LTTNG_ASSERT(agt);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        aevent = agent_find_event(
                event->name, event->loglevel_type, event->loglevel, filter_expression, agt);
        if (!aevent) {
@@ -530,7 +533,6 @@ error:
                agent_destroy_event(aevent);
        }
 end:
-       rcu_read_unlock();
        free(filter);
        free(filter_expression);
        return ret;
@@ -836,7 +838,7 @@ int trigger_agent_disable(const struct lttng_trigger *trigger, struct agent *agt
 
        DBG("Event agent disabling for trigger %" PRIu64, lttng_trigger_get_tracer_token(trigger));
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        aevent = agent_find_event_by_trigger(trigger, agt);
 
        if (aevent == nullptr) {
@@ -853,7 +855,6 @@ int trigger_agent_disable(const struct lttng_trigger *trigger, struct agent *agt
        }
 
 end:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -875,7 +876,7 @@ int event_agent_disable(struct ltt_ust_session *usess, struct agent *agt, const
 
        DBG("Event agent disabling %s (all loglevels) for session %" PRIu64, event_name, usess->id);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        agent_find_events_by_name(event_name, agt, &iter);
        node = lttng_ht_iter_get_node_str(&iter);
 
@@ -898,7 +899,6 @@ int event_agent_disable(struct ltt_ust_session *usess, struct agent *agt, const
                node = lttng_ht_iter_get_node_str(&iter);
        } while (node);
 end:
-       rcu_read_unlock();
        return ret;
 }
 /*
@@ -925,21 +925,24 @@ int event_agent_disable_all(struct ltt_ust_session *usess, struct agent *agt)
        }
 
        /* Disable every event. */
-       rcu_read_lock();
-       cds_lfht_for_each_entry (agt->events->ht, &iter.iter, aevent, node.node) {
-               if (!AGENT_EVENT_IS_ENABLED(aevent)) {
-                       continue;
-               }
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-               ret = event_agent_disable(usess, agt, aevent->name);
-               if (ret != LTTNG_OK) {
-                       goto error_unlock;
+               cds_lfht_for_each_entry (agt->events->ht, &iter.iter, aevent, node.node) {
+                       if (!AGENT_EVENT_IS_ENABLED(aevent)) {
+                               continue;
+                       }
+
+                       ret = event_agent_disable(usess, agt, aevent->name);
+                       if (ret != LTTNG_OK) {
+                               goto error_unlock;
+                       }
                }
        }
+
        ret = LTTNG_OK;
 
 error_unlock:
-       rcu_read_unlock();
 error:
        return ret;
 }
index 5efe2c2ae146bcfb7e701c300dd8ce555f1db8b2..b5ccd47dd27ac2947e40b944c3f1ea778fe8f61f 100644 (file)
@@ -16,6 +16,7 @@
 #include <common/common.hpp>
 #include <common/compat/string.hpp>
 #include <common/defaults.hpp>
+#include <common/urcu.hpp>
 
 #include <inttypes.h>
 #include <stdio.h>
@@ -95,6 +96,7 @@ static int kernel_consumer_add_channel(struct consumer_socket *sock,
        struct lttng_channel_extended *channel_attr_extended;
        bool is_local_trace;
        size_t consumer_path_offset = 0;
+       lttng::urcu::read_lock_guard read_lock;
 
        /* Safety net */
        LTTNG_ASSERT(channel);
@@ -164,7 +166,6 @@ static int kernel_consumer_add_channel(struct consumer_socket *sock,
        }
 
        health_code_update();
-       rcu_read_lock();
        session = session_find_by_id(ksession->id);
        LTTNG_ASSERT(session);
        ASSERT_LOCKED(session->lock);
@@ -177,7 +178,6 @@ static int kernel_consumer_add_channel(struct consumer_socket *sock,
                                                         LTTNG_DOMAIN_KERNEL,
                                                         channel->channel->attr.subbuf_size *
                                                                 channel->channel->attr.num_subbuf);
-       rcu_read_unlock();
        if (status != LTTNG_OK) {
                ret = -1;
                goto error;
@@ -206,7 +206,7 @@ int kernel_consumer_add_metadata(struct consumer_socket *sock,
        struct lttcomm_consumer_msg lkm;
        struct consumer_output *consumer;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        /* Safety net */
        LTTNG_ASSERT(ksession);
@@ -262,7 +262,6 @@ int kernel_consumer_add_metadata(struct consumer_socket *sock,
        health_code_update();
 
 error:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -361,7 +360,7 @@ int kernel_consumer_send_channel_streams(struct consumer_socket *sock,
        LTTNG_ASSERT(ksession->consumer);
        LTTNG_ASSERT(sock);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        /* Bail out if consumer is disabled */
        if (!ksession->consumer->enabled) {
@@ -394,7 +393,6 @@ int kernel_consumer_send_channel_streams(struct consumer_socket *sock,
        }
 
 error:
-       rcu_read_unlock();
        return ret;
 }
 
index e27de2436c1cd994aed3bfbf6c1cd39a48cf3370..5b4fc086bde52ce333f8cf72cf9f3d627b0cc000 100644 (file)
@@ -28,6 +28,7 @@
 #include <common/sessiond-comm/sessiond-comm.hpp>
 #include <common/trace-chunk.hpp>
 #include <common/tracker.hpp>
+#include <common/urcu.hpp>
 #include <common/utils.hpp>
 
 #include <lttng/condition/event-rule-matches-internal.hpp>
@@ -796,9 +797,8 @@ static int kernel_disable_event_notifier_rule(struct ltt_kernel_event_notifier_r
 
        LTTNG_ASSERT(event);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_lfht_del(kernel_token_to_event_notifier_rule_ht, &event->ht_node);
-       rcu_read_unlock();
 
        ret = kernctl_disable(event->fd);
        if (ret < 0) {
@@ -1590,7 +1590,8 @@ void kernel_destroy_session(struct ltt_kernel_session *ksess)
                struct lttng_ht_iter iter;
 
                /* For each consumer socket. */
-               rcu_read_lock();
+               lttng::urcu::read_lock_guard read_lock;
+
                cds_lfht_for_each_entry (
                        ksess->consumer->socks->ht, &iter.iter, socket, node.node) {
                        struct ltt_kernel_channel *chan;
@@ -1604,7 +1605,6 @@ void kernel_destroy_session(struct ltt_kernel_session *ksess)
                                }
                        }
                }
-               rcu_read_unlock();
        }
 
        /* Close any relayd session */
@@ -1680,8 +1680,6 @@ enum lttng_error_code kernel_snapshot_record(struct ltt_kernel_session *ksess,
        saved_metadata = ksess->metadata;
        saved_metadata_fd = ksess->metadata_stream_fd;
 
-       rcu_read_lock();
-
        ret = kernel_open_metadata(ksess);
        if (ret < 0) {
                status = LTTNG_ERR_KERN_META_FAIL;
@@ -1699,49 +1697,56 @@ enum lttng_error_code kernel_snapshot_record(struct ltt_kernel_session *ksess,
                status = LTTNG_ERR_INVALID;
                goto error;
        }
-       /* Send metadata to consumer and snapshot everything. */
-       cds_lfht_for_each_entry (output->socks->ht, &iter.iter, socket, node.node) {
-               struct ltt_kernel_channel *chan;
 
-               pthread_mutex_lock(socket->lock);
-               /* This stream must not be monitored by the consumer. */
-               ret = kernel_consumer_add_metadata(socket, ksess, 0);
-               pthread_mutex_unlock(socket->lock);
-               if (ret < 0) {
-                       status = LTTNG_ERR_KERN_META_FAIL;
-                       goto error_consumer;
-               }
+       {
+               /* Send metadata to consumer and snapshot everything. */
+               lttng::urcu::read_lock_guard read_lock;
 
-               /* For each channel, ask the consumer to snapshot it. */
-               cds_list_for_each_entry (chan, &ksess->channel_list.head, list) {
+               cds_lfht_for_each_entry (output->socks->ht, &iter.iter, socket, node.node) {
+                       struct ltt_kernel_channel *chan;
+
+                       pthread_mutex_lock(socket->lock);
+                       /* This stream must not be monitored by the consumer. */
+                       ret = kernel_consumer_add_metadata(socket, ksess, 0);
+                       pthread_mutex_unlock(socket->lock);
+                       if (ret < 0) {
+                               status = LTTNG_ERR_KERN_META_FAIL;
+                               goto error_consumer;
+                       }
+
+                       /* For each channel, ask the consumer to snapshot it. */
+                       cds_list_for_each_entry (chan, &ksess->channel_list.head, list) {
+                               status =
+                                       consumer_snapshot_channel(socket,
+                                                                 chan->key,
+                                                                 output,
+                                                                 0,
+                                                                 &trace_path[consumer_path_offset],
+                                                                 nb_packets_per_stream);
+                               if (status != LTTNG_OK) {
+                                       (void) kernel_consumer_destroy_metadata(socket,
+                                                                               ksess->metadata);
+                                       goto error_consumer;
+                               }
+                       }
+
+                       /* Snapshot metadata, */
                        status = consumer_snapshot_channel(socket,
-                                                          chan->key,
+                                                          ksess->metadata->key,
                                                           output,
-                                                          0,
+                                                          1,
                                                           &trace_path[consumer_path_offset],
-                                                          nb_packets_per_stream);
+                                                          0);
                        if (status != LTTNG_OK) {
-                               (void) kernel_consumer_destroy_metadata(socket, ksess->metadata);
                                goto error_consumer;
                        }
-               }
 
-               /* Snapshot metadata, */
-               status = consumer_snapshot_channel(socket,
-                                                  ksess->metadata->key,
-                                                  output,
-                                                  1,
-                                                  &trace_path[consumer_path_offset],
-                                                  0);
-               if (status != LTTNG_OK) {
-                       goto error_consumer;
+                       /*
+                        * The metadata snapshot is done, ask the consumer to destroy it since
+                        * it's not monitored on the consumer side.
+                        */
+                       (void) kernel_consumer_destroy_metadata(socket, ksess->metadata);
                }
-
-               /*
-                * The metadata snapshot is done, ask the consumer to destroy it since
-                * it's not monitored on the consumer side.
-                */
-               (void) kernel_consumer_destroy_metadata(socket, ksess->metadata);
        }
 
 error_consumer:
@@ -1757,7 +1762,6 @@ error:
        /* Restore metadata state.*/
        ksess->metadata = saved_metadata;
        ksess->metadata_stream_fd = saved_metadata_fd;
-       rcu_read_unlock();
        free(trace_path);
        return status;
 }
@@ -1854,45 +1858,47 @@ enum lttng_error_code kernel_rotate_session(struct ltt_session *session)
 
        DBG("Rotate kernel session %s started (session %" PRIu64 ")", session->name, session->id);
 
-       rcu_read_lock();
+       {
+               /*
+                * Note that this loop will end after one iteration given that there is
+                * only one kernel consumer.
+                */
+               lttng::urcu::read_lock_guard read_lock;
 
-       /*
-        * Note that this loop will end after one iteration given that there is
-        * only one kernel consumer.
-        */
-       cds_lfht_for_each_entry (ksess->consumer->socks->ht, &iter.iter, socket, node.node) {
-               struct ltt_kernel_channel *chan;
-
-               /* For each channel, ask the consumer to rotate it. */
-               cds_list_for_each_entry (chan, &ksess->channel_list.head, list) {
-                       DBG("Rotate kernel channel %" PRIu64 ", session %s",
-                           chan->key,
-                           session->name);
+               cds_lfht_for_each_entry (
+                       ksess->consumer->socks->ht, &iter.iter, socket, node.node) {
+                       struct ltt_kernel_channel *chan;
+
+                       /* For each channel, ask the consumer to rotate it. */
+                       cds_list_for_each_entry (chan, &ksess->channel_list.head, list) {
+                               DBG("Rotate kernel channel %" PRIu64 ", session %s",
+                                   chan->key,
+                                   session->name);
+                               ret = consumer_rotate_channel(socket,
+                                                             chan->key,
+                                                             ksess->consumer,
+                                                             /* is_metadata_channel */ false);
+                               if (ret < 0) {
+                                       status = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
+                                       goto error;
+                               }
+                       }
+
+                       /*
+                        * Rotate the metadata channel.
+                        */
                        ret = consumer_rotate_channel(socket,
-                                                     chan->key,
+                                                     ksess->metadata->key,
                                                      ksess->consumer,
-                                                     /* is_metadata_channel */ false);
+                                                     /* is_metadata_channel */ true);
                        if (ret < 0) {
                                status = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
                                goto error;
                        }
                }
-
-               /*
-                * Rotate the metadata channel.
-                */
-               ret = consumer_rotate_channel(socket,
-                                             ksess->metadata->key,
-                                             ksess->consumer,
-                                             /* is_metadata_channel */ true);
-               if (ret < 0) {
-                       status = LTTNG_ERR_ROTATION_FAIL_CONSUMER;
-                       goto error;
-               }
        }
 
 error:
-       rcu_read_unlock();
        return status;
 }
 
@@ -1901,7 +1907,7 @@ enum lttng_error_code kernel_create_channel_subdirectories(const struct ltt_kern
        enum lttng_error_code ret = LTTNG_OK;
        enum lttng_trace_chunk_status chunk_status;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        LTTNG_ASSERT(ksess->current_trace_chunk);
 
        /*
@@ -1915,7 +1921,6 @@ enum lttng_error_code kernel_create_channel_subdirectories(const struct ltt_kern
                goto error;
        }
 error:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -2132,49 +2137,52 @@ enum lttng_error_code kernel_clear_session(struct ltt_session *session)
 
        DBG("Clear kernel session %s (session %" PRIu64 ")", session->name, session->id);
 
-       rcu_read_lock();
-
        if (ksess->active) {
                ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
                status = LTTNG_ERR_FATAL;
                goto end;
        }
 
-       /*
-        * Note that this loop will end after one iteration given that there is
-        * only one kernel consumer.
-        */
-       cds_lfht_for_each_entry (ksess->consumer->socks->ht, &iter.iter, socket, node.node) {
-               struct ltt_kernel_channel *chan;
-
-               /* For each channel, ask the consumer to clear it. */
-               cds_list_for_each_entry (chan, &ksess->channel_list.head, list) {
-                       DBG("Clear kernel channel %" PRIu64 ", session %s",
-                           chan->key,
-                           session->name);
-                       ret = consumer_clear_channel(socket, chan->key);
-                       if (ret < 0) {
-                               goto error;
+       {
+               /*
+                * Note that this loop will end after one iteration given that there is
+                * only one kernel consumer.
+                */
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (
+                       ksess->consumer->socks->ht, &iter.iter, socket, node.node) {
+                       struct ltt_kernel_channel *chan;
+
+                       /* For each channel, ask the consumer to clear it. */
+                       cds_list_for_each_entry (chan, &ksess->channel_list.head, list) {
+                               DBG("Clear kernel channel %" PRIu64 ", session %s",
+                                   chan->key,
+                                   session->name);
+                               ret = consumer_clear_channel(socket, chan->key);
+                               if (ret < 0) {
+                                       goto error;
+                               }
+                       }
+
+                       if (!ksess->metadata) {
+                               /*
+                                * Nothing to do for the metadata.
+                                * This is a snapshot session.
+                                * The metadata is generated on the fly.
+                                */
+                               continue;
                        }
-               }
 
-               if (!ksess->metadata) {
                        /*
-                        * Nothing to do for the metadata.
-                        * This is a snapshot session.
-                        * The metadata is genererated on the fly.
+                        * Clear the metadata channel.
+                        * Metadata channel is not cleared per se but we still need to
+                        * perform a rotation operation on it behind the scene.
                         */
-                       continue;
-               }
-
-               /*
-                * Clear the metadata channel.
-                * Metadata channel is not cleared per se but we still need to
-                * perform a rotation operation on it behind the scene.
-                */
-               ret = consumer_clear_channel(socket, ksess->metadata->key);
-               if (ret < 0) {
-                       goto error;
+                       ret = consumer_clear_channel(socket, ksess->metadata->key);
+                       if (ret < 0) {
+                               goto error;
+                       }
                }
        }
 
@@ -2189,7 +2197,6 @@ error:
                break;
        }
 end:
-       rcu_read_unlock();
        return status;
 }
 
@@ -2422,11 +2429,12 @@ static enum lttng_error_code kernel_create_event_notifier_rule(
        }
 
        /* Add trigger to kernel token mapping in the hash table. */
-       rcu_read_lock();
-       cds_lfht_add(kernel_token_to_event_notifier_rule_ht,
-                    hash_trigger(trigger),
-                    &event_notifier_rule->ht_node);
-       rcu_read_unlock();
+       {
+               lttng::urcu::read_lock_guard read_lock;
+               cds_lfht_add(kernel_token_to_event_notifier_rule_ht,
+                            hash_trigger(trigger),
+                            &event_notifier_rule->ht_node);
+       }
 
        DBG("Created kernel event notifier: name = '%s', fd = %d",
            kernel_event_notifier.event.name,
@@ -2488,7 +2496,7 @@ enum lttng_error_code kernel_unregister_event_notifier(const struct lttng_trigge
        enum lttng_error_code error_code_ret;
        int ret;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        cds_lfht_lookup(kernel_token_to_event_notifier_rule_ht,
                        hash_trigger(trigger),
@@ -2515,7 +2523,6 @@ enum lttng_error_code kernel_unregister_event_notifier(const struct lttng_trigge
        error_code_ret = LTTNG_OK;
 
 error:
-       rcu_read_unlock();
 
        return error_code_ret;
 }
index dd042fd361aa752862e9dba5bb0c75de569ee86d..821bbaa34f80b6e1c6d572192909c0ec0257aab2 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <common/common.hpp>
 #include <common/kernel-ctl/kernel-ctl.hpp>
+#include <common/urcu.hpp>
 
 #include <stdbool.h>
 
@@ -150,28 +151,31 @@ error_ioctl:
  * syscall hashtable used to track duplicate between 32 and 64 bit arch.
  *
  * This empty the hash table and destroys it after. After this, the pointer is
- * unsuable. RCU read side lock MUST be acquired before calling this.
+ * unusable. RCU read side lock MUST NOT be acquired before calling this.
  */
 static void destroy_syscall_ht(struct lttng_ht *ht)
 {
        struct lttng_ht_iter iter;
        struct syscall *ksyscall;
 
-       ASSERT_RCU_READ_LOCKED();
-
        DBG3("Destroying syscall hash table.");
 
        if (!ht) {
                return;
        }
 
-       cds_lfht_for_each_entry (ht->ht, &iter.iter, ksyscall, node.node) {
-               int ret;
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (ht->ht, &iter.iter, ksyscall, node.node) {
+                       int ret;
 
-               ret = lttng_ht_del(ht, &iter);
-               LTTNG_ASSERT(!ret);
-               free(ksyscall);
+                       ret = lttng_ht_del(ht, &iter);
+                       LTTNG_ASSERT(!ret);
+                       free(ksyscall);
+               }
        }
+
        lttng_ht_destroy(ht);
 }
 
@@ -197,6 +201,8 @@ static int init_syscall_ht(struct lttng_ht **ht)
 /*
  * Lookup a syscall in the given hash table by name.
  *
+ * RCU read lock MUST be acquired by the callers of this function.
+ *
  * Return syscall object if found or else NULL.
  */
 static struct syscall *lookup_syscall(struct lttng_ht *ht, const char *name)
@@ -283,8 +289,6 @@ ssize_t syscall_table_list(struct lttng_event **_events)
 
        DBG("Syscall table listing.");
 
-       rcu_read_lock();
-
        /*
         * Allocate at least the number of total syscall we have even if some of
         * them might not be valid. The count below will make sure to return the
@@ -303,17 +307,20 @@ ssize_t syscall_table_list(struct lttng_event **_events)
        }
 
        for (i = 0; i < syscall_table_nb_entry; i++) {
-               struct syscall *ksyscall;
-
                /* Skip empty syscalls. */
                if (*syscall_table[i].name == '\0') {
                        continue;
                }
 
-               ksyscall = lookup_syscall(syscalls_ht, syscall_table[i].name);
-               if (ksyscall) {
-                       update_event_syscall_bitness(events, i, ksyscall->index);
-                       continue;
+               {
+                       lttng::urcu::read_lock_guard read_lock;
+                       struct syscall *ksyscall;
+
+                       ksyscall = lookup_syscall(syscalls_ht, syscall_table[i].name);
+                       if (ksyscall) {
+                               update_event_syscall_bitness(events, i, ksyscall->index);
+                               continue;
+                       }
                }
 
                ret = add_syscall_to_ht(syscalls_ht, i, index);
@@ -332,12 +339,10 @@ ssize_t syscall_table_list(struct lttng_event **_events)
 
        destroy_syscall_ht(syscalls_ht);
        *_events = events;
-       rcu_read_unlock();
        return index;
 
 error:
        destroy_syscall_ht(syscalls_ht);
        free(events);
-       rcu_read_unlock();
        return ret;
 }
index 49730f497654fe1af8d09be8f6920fa41bce1620..74bcb1322e868284b788e2c4d279f0481b03a6d8 100644 (file)
@@ -16,6 +16,7 @@
 #include "utils.hpp"
 
 #include <common/pipe.hpp>
+#include <common/urcu.hpp>
 #include <common/utils.hpp>
 
 namespace {
@@ -94,12 +95,14 @@ static int update_kernel_stream(int fd)
                if (!session_get(session)) {
                        continue;
                }
+
                session_lock(session);
                if (session->kernel_session == nullptr) {
                        session_unlock(session);
                        session_put(session);
                        continue;
                }
+
                ksess = session->kernel_session;
 
                cds_list_for_each_entry (channel, &ksess->channel_list.head, list) {
@@ -127,20 +130,25 @@ static int update_kernel_stream(int fd)
                                goto error;
                        }
 
-                       rcu_read_lock();
-                       cds_lfht_for_each_entry (
-                               ksess->consumer->socks->ht, &iter.iter, socket, node.node) {
-                               pthread_mutex_lock(socket->lock);
-                               ret = kernel_consumer_send_channel_streams(
-                                       socket, channel, ksess, session->output_traces ? 1 : 0);
-                               pthread_mutex_unlock(socket->lock);
-                               if (ret < 0) {
-                                       rcu_read_unlock();
-                                       goto error;
+                       {
+                               lttng::urcu::read_lock_guard read_lock;
+
+                               cds_lfht_for_each_entry (
+                                       ksess->consumer->socks->ht, &iter.iter, socket, node.node) {
+                                       pthread_mutex_lock(socket->lock);
+                                       ret = kernel_consumer_send_channel_streams(
+                                               socket,
+                                               channel,
+                                               ksess,
+                                               session->output_traces ? 1 : 0);
+                                       pthread_mutex_unlock(socket->lock);
+                                       if (ret < 0) {
+                                               goto error;
+                                       }
                                }
                        }
-                       rcu_read_unlock();
                }
+
                session_unlock(session);
                session_put(session);
        }
index a560ad1c01967222d305845947f6e078c610d268..58a38cce191d7b720861d14e8aa96161cfc7a008 100644 (file)
@@ -24,6 +24,7 @@
 #include <common/macros.hpp>
 #include <common/sessiond-comm/sessiond-comm.hpp>
 #include <common/unix.hpp>
+#include <common/urcu.hpp>
 
 #include <lttng/action/action-internal.hpp>
 #include <lttng/action/list-internal.hpp>
@@ -525,9 +526,8 @@ static void session_info_destroy(void *_data)
        }
        lttng_session_trigger_list_destroy(session_info->trigger_list);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_lfht_del(session_info->sessions_ht, &session_info->sessions_ht_node);
-       rcu_read_unlock();
        free(session_info->name);
        lttng_trace_archive_location_put(session_info->last_state_sample.rotation.location);
        call_rcu(&session_info->rcu_node, free_session_info_rcu);
@@ -594,19 +594,17 @@ error:
 static void session_info_add_channel(struct session_info *session_info,
                                     struct channel_info *channel_info)
 {
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_lfht_add(session_info->channel_infos_ht,
                     hash_channel_key(&channel_info->key),
                     &channel_info->session_info_channels_ht_node);
-       rcu_read_unlock();
 }
 
 static void session_info_remove_channel(struct session_info *session_info,
                                        struct channel_info *channel_info)
 {
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_lfht_del(session_info->channel_infos_ht, &channel_info->session_info_channels_ht_node);
-       rcu_read_unlock();
 }
 
 static struct channel_info *channel_info_create(const char *channel_name,
@@ -664,11 +662,10 @@ static void notification_client_list_release(struct urcu_ref *list_ref)
        lttng_condition_put(list->condition);
 
        if (list->notification_trigger_clients_ht) {
-               rcu_read_lock();
+               lttng::urcu::read_lock_guard read_lock;
 
                cds_lfht_del(list->notification_trigger_clients_ht,
                             &list->notification_trigger_clients_ht_node);
-               rcu_read_unlock();
                list->notification_trigger_clients_ht = nullptr;
        }
        cds_list_for_each_entry_safe (client_list_element, tmp, &list->clients_list, node) {
@@ -728,36 +725,43 @@ notification_client_list_create(struct notification_thread_state *state,
         */
        client_list->condition = lttng_condition_copy(condition);
 
-       /* Build a list of clients to which this new condition applies. */
-       cds_lfht_for_each_entry (state->client_socket_ht, &iter, client, client_socket_ht_node) {
-               struct notification_client_list_element *client_list_element;
+       {
+               /* Build a list of clients to which this new condition applies. */
+               lttng::urcu::read_lock_guard read_lock;
 
-               if (!condition_applies_to_client(condition, client)) {
-                       continue;
-               }
+               cds_lfht_for_each_entry (
+                       state->client_socket_ht, &iter, client, client_socket_ht_node) {
+                       struct notification_client_list_element *client_list_element;
 
-               client_list_element = zmalloc<notification_client_list_element>();
-               if (!client_list_element) {
-                       goto error_put_client_list;
-               }
+                       if (!condition_applies_to_client(condition, client)) {
+                               continue;
+                       }
+
+                       client_list_element = zmalloc<notification_client_list_element>();
+                       if (!client_list_element) {
+                               goto error_put_client_list;
+                       }
 
-               CDS_INIT_LIST_HEAD(&client_list_element->node);
-               client_list_element->client = client;
-               cds_list_add(&client_list_element->node, &client_list->clients_list);
+                       CDS_INIT_LIST_HEAD(&client_list_element->node);
+                       client_list_element->client = client;
+                       cds_list_add(&client_list_element->node, &client_list->clients_list);
+               }
        }
 
        client_list->notification_trigger_clients_ht = state->notification_trigger_clients_ht;
 
-       rcu_read_lock();
        /*
         * Add the client list to the global list of client list.
         */
-       cds_lfht_add_unique(state->notification_trigger_clients_ht,
-                           lttng_condition_hash(client_list->condition),
-                           match_client_list_condition,
-                           client_list->condition,
-                           &client_list->notification_trigger_clients_ht_node);
-       rcu_read_unlock();
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_add_unique(state->notification_trigger_clients_ht,
+                                   lttng_condition_hash(client_list->condition),
+                                   match_client_list_condition,
+                                   client_list->condition,
+                                   &client_list->notification_trigger_clients_ht_node);
+       }
        goto end;
 
 error_put_client_list:
@@ -785,7 +789,7 @@ get_client_list_from_condition(struct notification_thread_state *state,
        struct cds_lfht_iter iter;
        struct notification_client_list *list = nullptr;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_lfht_lookup(state->notification_trigger_clients_ht,
                        lttng_condition_hash(condition),
                        match_client_list_condition,
@@ -798,7 +802,6 @@ get_client_list_from_condition(struct notification_thread_state *state,
                list = notification_client_list_get(list) ? list : nullptr;
        }
 
-       rcu_read_unlock();
        return list;
 }
 
@@ -816,7 +819,7 @@ static int evaluate_channel_condition_for_client(const struct lttng_condition *c
        struct channel_state_sample *last_sample = nullptr;
        struct lttng_channel_trigger_list *channel_trigger_list = nullptr;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        /* Find the channel associated with the condition. */
        cds_lfht_for_each_entry (
@@ -886,7 +889,6 @@ static int evaluate_channel_condition_for_client(const struct lttng_condition *c
        *session_uid = channel_info->session_info->uid;
        *session_gid = channel_info->session_info->gid;
 end:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -1511,12 +1513,13 @@ lttng_session_trigger_list_create(const char *session_name, struct cds_lfht *ses
        cds_lfht_node_init(&list->session_triggers_ht_node);
        list->session_triggers_ht = session_triggers_ht;
 
-       rcu_read_lock();
        /* Publish the list through the session_triggers_ht. */
-       cds_lfht_add(session_triggers_ht,
-                    hash_key_str(session_name, lttng_ht_seed),
-                    &list->session_triggers_ht_node);
-       rcu_read_unlock();
+       {
+               lttng::urcu::read_lock_guard read_lock;
+               cds_lfht_add(session_triggers_ht,
+                            hash_key_str(session_name, lttng_ht_seed),
+                            &list->session_triggers_ht_node);
+       }
 end:
        return list;
 }
@@ -1539,10 +1542,9 @@ static void lttng_session_trigger_list_destroy(struct lttng_session_trigger_list
                cds_list_del(&trigger_list_element->node);
                free(trigger_list_element);
        }
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        /* Unpublish the list from the session_triggers_ht. */
        cds_lfht_del(list->session_triggers_ht, &list->session_triggers_ht_node);
-       rcu_read_unlock();
        call_rcu(&list->rcu_node, free_session_trigger_list_rcu);
 }
 
@@ -1605,21 +1607,26 @@ lttng_session_trigger_list_build(const struct notification_thread_state *state,
        session_trigger_list =
                lttng_session_trigger_list_create(session_name, state->session_triggers_ht);
 
-       /* Add all triggers applying to the session named 'session_name'. */
-       cds_lfht_for_each_entry (state->triggers_ht, &iter, trigger_ht_element, node) {
-               int ret;
+       {
+               /* Add all triggers applying to the session named 'session_name'. */
+               lttng::urcu::read_lock_guard read_lock;
 
-               if (!trigger_applies_to_session(trigger_ht_element->trigger, session_name)) {
-                       continue;
-               }
+               cds_lfht_for_each_entry (state->triggers_ht, &iter, trigger_ht_element, node) {
+                       int ret;
 
-               ret = lttng_session_trigger_list_add(session_trigger_list,
-                                                    trigger_ht_element->trigger);
-               if (ret) {
-                       goto error;
-               }
+                       if (!trigger_applies_to_session(trigger_ht_element->trigger,
+                                                       session_name)) {
+                               continue;
+                       }
 
-               trigger_count++;
+                       ret = lttng_session_trigger_list_add(session_trigger_list,
+                                                            trigger_ht_element->trigger);
+                       if (ret) {
+                               goto error;
+                       }
+
+                       trigger_count++;
+               }
        }
 
        DBG("Found %i triggers that apply to newly created session", trigger_count);
@@ -1638,7 +1645,7 @@ static struct session_info *create_and_publish_session_info(struct notification_
        struct session_info *session = nullptr;
        struct lttng_session_trigger_list *trigger_list;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        trigger_list = lttng_session_trigger_list_build(state, name);
        if (!trigger_list) {
                goto error;
@@ -1666,10 +1673,8 @@ static struct session_info *create_and_publish_session_info(struct notification_
                goto error;
        }
 
-       rcu_read_unlock();
        return session;
 error:
-       rcu_read_unlock();
        session_info_put(session);
        return nullptr;
 }
@@ -1693,6 +1698,7 @@ static int handle_notification_thread_command_add_channel(struct notification_th
        int trigger_count = 0;
        struct cds_lfht_iter iter;
        struct session_info *session_info = nullptr;
+       lttng::urcu::read_lock_guard read_lock;
 
        DBG("Adding channel: channel name = `%s`, session id = %" PRIu64 ", channel key = %" PRIu64
            ", domain = %s",
@@ -1717,7 +1723,6 @@ static int handle_notification_thread_command_add_channel(struct notification_th
                goto error;
        }
 
-       rcu_read_lock();
        /* Build a list of all triggers applying to the new channel. */
        cds_lfht_for_each_entry (state->triggers_ht, &iter, trigger_ht_element, node) {
                struct lttng_trigger_list_element *new_element;
@@ -1728,7 +1733,6 @@ static int handle_notification_thread_command_add_channel(struct notification_th
 
                new_element = zmalloc<lttng_trigger_list_element>();
                if (!new_element) {
-                       rcu_read_unlock();
                        goto error;
                }
                CDS_INIT_LIST_HEAD(&new_element->node);
@@ -1736,7 +1740,6 @@ static int handle_notification_thread_command_add_channel(struct notification_th
                cds_list_add(&new_element->node, &trigger_list);
                trigger_count++;
        }
-       rcu_read_unlock();
 
        DBG("Found %i triggers that apply to newly added channel", trigger_count);
        channel_trigger_list = zmalloc<lttng_channel_trigger_list>();
@@ -1748,7 +1751,6 @@ static int handle_notification_thread_command_add_channel(struct notification_th
        cds_lfht_node_init(&channel_trigger_list->channel_triggers_ht_node);
        cds_list_splice(&trigger_list, &channel_trigger_list->list);
 
-       rcu_read_lock();
        /* Add channel to the channel_ht which owns the channel_infos. */
        cds_lfht_add(state->channels_ht,
                     hash_channel_key(&new_channel_info->key),
@@ -1760,7 +1762,6 @@ static int handle_notification_thread_command_add_channel(struct notification_th
        cds_lfht_add(state->channel_triggers_ht,
                     hash_channel_key(&new_channel_info->key),
                     &channel_trigger_list->channel_triggers_ht_node);
-       rcu_read_unlock();
        session_info_put(session_info);
        *cmd_result = LTTNG_OK;
        return 0;
@@ -1865,7 +1866,7 @@ handle_notification_thread_command_remove_channel(struct notification_thread_sta
            channel_key,
            lttng_domain_type_str(domain));
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        cds_lfht_lookup(state->channel_triggers_ht,
                        hash_channel_key(&key),
@@ -1920,7 +1921,6 @@ handle_notification_thread_command_remove_channel(struct notification_thread_sta
        cds_lfht_del(state->channels_ht, node);
        channel_info_destroy(channel_info);
 end:
-       rcu_read_unlock();
        *cmd_result = LTTNG_OK;
        return 0;
 }
@@ -1941,7 +1941,7 @@ handle_notification_thread_command_session_rotation(struct notification_thread_s
        struct lttng_credentials session_creds;
        struct session_state_sample new_session_state;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        session_info = get_session_info_by_id(state, session_id);
        if (!session_info) {
@@ -2042,7 +2042,6 @@ end:
 
        session_info_put(session_info);
        *_cmd_result = cmd_result;
-       rcu_read_unlock();
        return ret;
 }
 
@@ -2269,7 +2268,7 @@ handle_notification_thread_command_list_triggers(struct notification_thread_hand
        struct lttng_triggers *local_triggers = nullptr;
        const struct lttng_credentials *creds;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        local_triggers = lttng_triggers_create();
        if (!local_triggers) {
@@ -2302,7 +2301,6 @@ handle_notification_thread_command_list_triggers(struct notification_thread_hand
        local_triggers = nullptr;
 
 end:
-       rcu_read_unlock();
        lttng_triggers_destroy(local_triggers);
        *_cmd_result = cmd_result;
        return ret;
@@ -2341,16 +2339,18 @@ static int handle_notification_thread_command_get_trigger(struct notification_th
        const char *trigger_name;
        uid_t trigger_owner_uid;
 
-       rcu_read_lock();
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-       cds_lfht_for_each_entry (state->triggers_ht, &iter, trigger_ht_element, node) {
-               if (lttng_trigger_is_equal(trigger, trigger_ht_element->trigger)) {
-                       /* Take one reference on the return trigger. */
-                       *registered_trigger = trigger_ht_element->trigger;
-                       lttng_trigger_get(*registered_trigger);
-                       ret = 0;
-                       cmd_result = LTTNG_OK;
-                       goto end;
+               cds_lfht_for_each_entry (state->triggers_ht, &iter, trigger_ht_element, node) {
+                       if (lttng_trigger_is_equal(trigger, trigger_ht_element->trigger)) {
+                               /* Take one reference on the return trigger. */
+                               *registered_trigger = trigger_ht_element->trigger;
+                               lttng_trigger_get(*registered_trigger);
+                               ret = 0;
+                               cmd_result = LTTNG_OK;
+                               goto end;
+                       }
                }
        }
 
@@ -2363,7 +2363,6 @@ static int handle_notification_thread_command_get_trigger(struct notification_th
        ret = 0;
 
 end:
-       rcu_read_unlock();
        *_cmd_result = cmd_result;
        return ret;
 }
@@ -2682,7 +2681,7 @@ handle_notification_thread_command_register_trigger(struct notification_thread_s
        enum action_executor_status executor_status;
        const uint64_t trigger_tracer_token = state->trigger_id.next_tracer_token++;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        /* Set the trigger's tracer token. */
        lttng_trigger_set_tracer_token(trigger, trigger_tracer_token);
@@ -2973,7 +2972,6 @@ error:
                lttng_trigger_destroy(trigger);
        }
 end:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -2993,22 +2991,27 @@ static void teardown_tracer_notifier(struct notification_thread_state *state,
        struct cds_lfht_iter iter;
        struct notification_trigger_tokens_ht_element *trigger_tokens_ht_element;
 
-       cds_lfht_for_each_entry (state->trigger_tokens_ht, &iter, trigger_tokens_ht_element, node) {
-               if (!lttng_trigger_is_equal(trigger, trigger_tokens_ht_element->trigger)) {
-                       continue;
-               }
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (
+                       state->trigger_tokens_ht, &iter, trigger_tokens_ht_element, node) {
+                       if (!lttng_trigger_is_equal(trigger, trigger_tokens_ht_element->trigger)) {
+                               continue;
+                       }
 
-               event_notifier_error_accounting_unregister_event_notifier(
-                       trigger_tokens_ht_element->trigger);
+                       event_notifier_error_accounting_unregister_event_notifier(
+                               trigger_tokens_ht_element->trigger);
 
-               /* TODO talk to all app and remove it */
-               DBG("Removed trigger from tokens_ht");
-               cds_lfht_del(state->trigger_tokens_ht, &trigger_tokens_ht_element->node);
+                       /* TODO talk to all app and remove it */
+                       DBG("Removed trigger from tokens_ht");
+                       cds_lfht_del(state->trigger_tokens_ht, &trigger_tokens_ht_element->node);
 
-               call_rcu(&trigger_tokens_ht_element->rcu_node,
-                        free_notification_trigger_tokens_ht_element_rcu);
+                       call_rcu(&trigger_tokens_ht_element->rcu_node,
+                                free_notification_trigger_tokens_ht_element_rcu);
 
-               break;
+                       break;
+               }
        }
 }
 
@@ -3052,7 +3055,7 @@ handle_notification_thread_command_unregister_trigger(struct notification_thread
        const struct lttng_condition *condition = lttng_trigger_get_const_condition(trigger);
        enum lttng_error_code cmd_reply;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        cds_lfht_lookup(
                state->triggers_ht, lttng_condition_hash(condition), match_trigger, trigger, &iter);
@@ -3149,7 +3152,6 @@ handle_notification_thread_command_unregister_trigger(struct notification_thread
        lttng_trigger_destroy(trigger_ht_element->trigger);
        call_rcu(&trigger_ht_element->rcu_node, free_lttng_trigger_ht_element_rcu);
 end:
-       rcu_read_unlock();
        if (_cmd_reply) {
                *_cmd_reply = cmd_reply;
        }
@@ -3292,7 +3294,7 @@ int handle_notification_thread_command(struct notification_thread_handle *handle
                        cmd->parameters.client_communication_update.id;
                struct notification_client *client;
 
-               rcu_read_lock();
+               lttng::urcu::read_lock_guard read_lock;
                client = get_client_from_id(client_id, state);
 
                if (!client) {
@@ -3307,7 +3309,6 @@ int handle_notification_thread_command(struct notification_thread_handle *handle
                } else {
                        ret = client_handle_transmission_status(client, client_status, state);
                }
-               rcu_read_unlock();
                break;
        }
        default:
@@ -3434,12 +3435,16 @@ int handle_notification_thread_client_connect(struct notification_thread_state *
        }
        DBG("Added new notification channel client socket (%i) to poll set", client->socket);
 
-       rcu_read_lock();
-       cds_lfht_add(state->client_socket_ht,
-                    hash_client_socket(client->socket),
-                    &client->client_socket_ht_node);
-       cds_lfht_add(state->client_id_ht, hash_client_id(client->id), &client->client_id_ht_node);
-       rcu_read_unlock();
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_add(state->client_socket_ht,
+                            hash_client_socket(client->socket),
+                            &client->client_socket_ht_node);
+               cds_lfht_add(state->client_id_ht,
+                            hash_client_id(client->id),
+                            &client->client_id_ht_node);
+       }
 
        return ret;
 
@@ -3492,7 +3497,8 @@ int handle_notification_thread_client_disconnect(int client_socket,
        int ret = 0;
        struct notification_client *client;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
+
        DBG("Closing client connection (socket fd = %i)", client_socket);
        client = get_client_from_socket(client_socket, state);
        if (!client) {
@@ -3504,7 +3510,6 @@ int handle_notification_thread_client_disconnect(int client_socket,
 
        ret = notification_thread_client_disconnect(client, state);
 end:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -3514,17 +3519,22 @@ int handle_notification_thread_client_disconnect_all(struct notification_thread_
        struct notification_client *client;
        bool error_encoutered = false;
 
-       rcu_read_lock();
        DBG("Closing all client connections");
-       cds_lfht_for_each_entry (state->client_socket_ht, &iter, client, client_socket_ht_node) {
-               int ret;
 
-               ret = notification_thread_client_disconnect(client, state);
-               if (ret) {
-                       error_encoutered = true;
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (
+                       state->client_socket_ht, &iter, client, client_socket_ht_node) {
+                       int ret;
+
+                       ret = notification_thread_client_disconnect(client, state);
+                       if (ret) {
+                               error_encoutered = true;
+                       }
                }
        }
-       rcu_read_unlock();
+
        return error_encoutered ? 1 : 0;
 }
 
@@ -3534,7 +3544,7 @@ int handle_notification_thread_trigger_unregister_all(struct notification_thread
        struct cds_lfht_iter iter;
        struct lttng_trigger_ht_element *trigger_ht_element;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_lfht_for_each_entry (state->triggers_ht, &iter, trigger_ht_element, node) {
                int ret = handle_notification_thread_command_unregister_trigger(
                        state, trigger_ht_element->trigger, nullptr);
@@ -3542,7 +3552,6 @@ int handle_notification_thread_trigger_unregister_all(struct notification_thread
                        error_occurred = true;
                }
        }
-       rcu_read_unlock();
        return error_occurred ? -1 : 0;
 }
 
@@ -4016,7 +4025,7 @@ int handle_notification_thread_client_in(struct notification_thread_state *state
        ssize_t recv_ret;
        size_t offset;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        client = get_client_from_socket(socket, state);
        if (!client) {
                /* Internal error, abort. */
@@ -4101,7 +4110,6 @@ receive_fds:
        }
 
 end:
-       rcu_read_unlock();
        return ret;
 
 error_disconnect_client:
@@ -4116,7 +4124,7 @@ int handle_notification_thread_client_out(struct notification_thread_state *stat
        struct notification_client *client;
        enum client_transmission_status transmission_status;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        client = get_client_from_socket(socket, state);
        if (!client) {
                /* Internal error, abort. */
@@ -4173,7 +4181,6 @@ int handle_notification_thread_client_out(struct notification_thread_state *stat
                goto end;
        }
 end:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -4647,7 +4654,7 @@ dispatch_one_event_notifier_notification(struct notification_thread_state *state
        unsigned int capture_count = 0;
 
        /* Find triggers associated with this token. */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_lfht_lookup(state->trigger_tokens_ht,
                        hash_key_u64(&notification->tracer_token, lttng_ht_seed),
                        match_trigger_token,
@@ -4760,7 +4767,6 @@ dispatch_one_event_notifier_notification(struct notification_thread_state *state
 
 end_unlock:
        notification_client_list_put(client_list);
-       rcu_read_unlock();
 end:
        return ret;
 }
@@ -4819,6 +4825,7 @@ int handle_notification_thread_channel_sample(struct notification_thread_state *
        struct lttng_credentials channel_creds = {};
        struct lttng_credentials session_creds = {};
        struct session_info *session;
+       lttng::urcu::read_lock_guard read_lock;
 
        /*
         * The monitoring pipe only holds messages smaller than PIPE_BUF,
@@ -4837,8 +4844,6 @@ int handle_notification_thread_channel_sample(struct notification_thread_state *
        channel_new_sample.highest_usage = sample_msg.highest;
        channel_new_sample.lowest_usage = sample_msg.lowest;
 
-       rcu_read_lock();
-
        session = get_session_info_by_id(state, sample_msg.session_id);
        if (!session) {
                DBG("Received a sample for an unknown session from consumerd: session id = %" PRIu64,
@@ -5068,7 +5073,6 @@ end_unlock:
                session->last_state_sample = session_new_sample;
        }
        session_info_put(session);
-       rcu_read_unlock();
 end:
        return ret;
 }
index aa5dc4a8c40432a7c7a2f03dfb36e6982f0c150f..4a0865eb8940a4faec5bb288ac61e50e1bca79ad 100644 (file)
@@ -26,6 +26,7 @@
 #include <common/hashtable/utils.hpp>
 #include <common/kernel-ctl/kernel-ctl.hpp>
 #include <common/time.hpp>
+#include <common/urcu.hpp>
 #include <common/utils.hpp>
 
 #include <lttng/condition/condition-internal.hpp>
@@ -318,6 +319,7 @@ static void check_session_rotation_pending_on_consumers(struct ltt_session *sess
        uint64_t relayd_id;
        bool chunk_exists_on_peer = false;
        enum lttng_trace_chunk_status chunk_status;
+       lttng::urcu::read_lock_guard read_lock;
 
        LTTNG_ASSERT(session->chunk_being_archived);
 
@@ -325,10 +327,10 @@ static void check_session_rotation_pending_on_consumers(struct ltt_session *sess
         * Check for a local pending rotation on all consumers (32-bit
         * user space, 64-bit user space, and kernel).
         */
-       rcu_read_lock();
        if (!session->ust_session) {
                goto skip_ust;
        }
+
        cds_lfht_for_each_entry (
                session->ust_session->consumer->socks->ht, &iter, socket, node.node) {
                relayd_id = session->ust_session->consumer->type == CONSUMER_DST_LOCAL ?
@@ -386,7 +388,6 @@ skip_ust:
        }
 skip_kernel:
 end:
-       rcu_read_unlock();
 
        if (!chunk_exists_on_peer) {
                uint64_t chunk_being_archived_id;
index c3d4c06ed4ac8e6f9c85d41e68e33f35d9babba1..e8205bf7f1ff76e7aff37f84c25649948539d19e 100644 (file)
@@ -17,6 +17,7 @@
 #include <common/defaults.hpp>
 #include <common/error.hpp>
 #include <common/runas.hpp>
+#include <common/urcu.hpp>
 #include <common/utils.hpp>
 
 #include <lttng/save-internal.hpp>
@@ -1105,21 +1106,22 @@ static int save_ust_events(struct config_writer *writer, struct lttng_ht *events
                goto end;
        }
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (events->ht, &iter.iter, node, node) {
-               event = lttng::utils::container_of(node, &ltt_ust_event::node);
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-               if (event->internal) {
-                       /* Internal events must not be exposed to clients */
-                       continue;
-               }
-               ret = save_ust_event(writer, event);
-               if (ret != LTTNG_OK) {
-                       rcu_read_unlock();
-                       goto end;
+               cds_lfht_for_each_entry (events->ht, &iter.iter, node, node) {
+                       event = lttng::utils::container_of(node, &ltt_ust_event::node);
+
+                       if (event->internal) {
+                               /* Internal events must not be exposed to clients */
+                               continue;
+                       }
+                       ret = save_ust_event(writer, event);
+                       if (ret != LTTNG_OK) {
+                               goto end;
+                       }
                }
        }
-       rcu_read_unlock();
 
        /* /events */
        ret = config_writer_close_element(writer);
@@ -1185,32 +1187,32 @@ static int save_agent_events(struct config_writer *writer, struct agent *agent)
                goto end;
        }
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (agent->events->ht, &iter.iter, node, node) {
-               struct agent_event *agent_event;
-               struct ltt_ust_event fake_event;
-
-               memset(&fake_event, 0, sizeof(fake_event));
-               agent_event = lttng::utils::container_of(node, &agent_event::node);
-
-               /*
-                * Initialize a fake ust event to reuse the same serialization
-                * function since UST and agent events contain the same info
-                * (and one could wonder why they don't reuse the same
-                * structures...).
-                */
-               ret = init_ust_event_from_agent_event(&fake_event, agent_event);
-               if (ret != LTTNG_OK) {
-                       rcu_read_unlock();
-                       goto end;
-               }
-               ret = save_ust_event(writer, &fake_event);
-               if (ret != LTTNG_OK) {
-                       rcu_read_unlock();
-                       goto end;
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (agent->events->ht, &iter.iter, node, node) {
+                       struct agent_event *agent_event;
+                       struct ltt_ust_event fake_event;
+
+                       memset(&fake_event, 0, sizeof(fake_event));
+                       agent_event = lttng::utils::container_of(node, &agent_event::node);
+
+                       /*
+                        * Initialize a fake ust event to reuse the same serialization
+                        * function since UST and agent events contain the same info
+                        * (and one could wonder why they don't reuse the same
+                        * structures...).
+                        */
+                       ret = init_ust_event_from_agent_event(&fake_event, agent_event);
+                       if (ret != LTTNG_OK) {
+                               goto end;
+                       }
+                       ret = save_ust_event(writer, &fake_event);
+                       if (ret != LTTNG_OK) {
+                               goto end;
+                       }
                }
        }
-       rcu_read_unlock();
 
        /* /events */
        ret = config_writer_close_element(writer);
@@ -2034,19 +2036,20 @@ static int save_ust_domain(struct config_writer *writer,
                goto end;
        }
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (
-               session->ust_session->domain_global.channels->ht, &iter.iter, node, node) {
-               ust_chan = lttng::utils::container_of(node, &ltt_ust_channel::node);
-               if (domain == ust_chan->domain) {
-                       ret = save_ust_channel(writer, ust_chan, session->ust_session);
-                       if (ret != LTTNG_OK) {
-                               rcu_read_unlock();
-                               goto end;
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (
+                       session->ust_session->domain_global.channels->ht, &iter.iter, node, node) {
+                       ust_chan = lttng::utils::container_of(node, &ltt_ust_channel::node);
+                       if (domain == ust_chan->domain) {
+                               ret = save_ust_channel(writer, ust_chan, session->ust_session);
+                               if (ret != LTTNG_OK) {
+                                       goto end;
+                               }
                        }
                }
        }
-       rcu_read_unlock();
 
        /* /channels */
        ret = config_writer_close_element(writer);
@@ -2314,40 +2317,43 @@ static int save_snapshot_outputs(struct config_writer *writer, struct snapshot *
                goto end;
        }
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (snapshot->output_ht->ht, &iter.iter, output, node.node) {
-               ret = config_writer_open_element(writer, config_element_output);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end_unlock;
-               }
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-               ret = config_writer_write_element_string(writer, config_element_name, output->name);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end_unlock;
-               }
+               cds_lfht_for_each_entry (snapshot->output_ht->ht, &iter.iter, output, node.node) {
+                       ret = config_writer_open_element(writer, config_element_output);
+                       if (ret) {
+                               ret = LTTNG_ERR_SAVE_IO_FAIL;
+                               goto end_unlock;
+                       }
 
-               ret = config_writer_write_element_unsigned_int(
-                       writer, config_element_max_size, output->max_size);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end_unlock;
-               }
+                       ret = config_writer_write_element_string(
+                               writer, config_element_name, output->name);
+                       if (ret) {
+                               ret = LTTNG_ERR_SAVE_IO_FAIL;
+                               goto end_unlock;
+                       }
 
-               ret = save_consumer_output(writer, output->consumer);
-               if (ret != LTTNG_OK) {
-                       goto end_unlock;
-               }
+                       ret = config_writer_write_element_unsigned_int(
+                               writer, config_element_max_size, output->max_size);
+                       if (ret) {
+                               ret = LTTNG_ERR_SAVE_IO_FAIL;
+                               goto end_unlock;
+                       }
 
-               /* /output */
-               ret = config_writer_close_element(writer);
-               if (ret) {
-                       ret = LTTNG_ERR_SAVE_IO_FAIL;
-                       goto end_unlock;
+                       ret = save_consumer_output(writer, output->consumer);
+                       if (ret != LTTNG_OK) {
+                               goto end_unlock;
+                       }
+
+                       /* /output */
+                       ret = config_writer_close_element(writer);
+                       if (ret) {
+                               ret = LTTNG_ERR_SAVE_IO_FAIL;
+                               goto end_unlock;
+                       }
                }
        }
-       rcu_read_unlock();
 
        /* /snapshot_outputs */
        ret = config_writer_close_element(writer);
@@ -2360,7 +2366,6 @@ static int save_snapshot_outputs(struct config_writer *writer, struct snapshot *
 end:
        return ret;
 end_unlock:
-       rcu_read_unlock();
        return ret;
 }
 
index 0549d36de6b2ba9f9d6bf1770cb493c253c2da1c..75d767c25644088b364c8a133a21e7ca214b77a9 100644 (file)
@@ -475,7 +475,7 @@ static int _session_set_trace_chunk_no_lock_check(struct ltt_session *session,
        uint64_t chunk_id;
        enum lttng_trace_chunk_status chunk_status;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        /*
         * Ownership of current trace chunk is transferred to
         * `current_trace_chunk`.
@@ -580,7 +580,6 @@ end:
                current_trace_chunk = nullptr;
        }
 end_no_move:
-       rcu_read_unlock();
        lttng_trace_chunk_put(current_trace_chunk);
        return ret;
 error:
@@ -845,7 +844,7 @@ static enum lttng_error_code session_kernel_open_packets(struct ltt_session *ses
        struct cds_lfht_node *node;
        struct ltt_kernel_channel *chan;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        cds_lfht_first(session->kernel_session->consumer->socks->ht, &iter.iter);
        node = cds_lfht_iter_get_node(&iter.iter);
@@ -869,7 +868,6 @@ static enum lttng_error_code session_kernel_open_packets(struct ltt_session *ses
        }
 
 end:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -1406,7 +1404,7 @@ bool sample_session_id_by_name(const char *name, uint64_t *id)
        struct lttng_ht_iter iter;
        struct ltt_session *ls;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        if (!ltt_sessions_ht_by_name) {
                found = false;
@@ -1426,7 +1424,6 @@ bool sample_session_id_by_name(const char *name, uint64_t *id)
 
        DBG3("Session id `%" PRIu64 "` sampled for session `%s", *id, name);
 end:
-       rcu_read_unlock();
        return found;
 }
 
index 1b48034c2e9c3a84d11895b6ff7e8dd221bc671b..e7a0033541a5d07d5cdafd77d5f4306e462e2662 100644 (file)
@@ -10,6 +10,7 @@
 #include "utils.hpp"
 
 #include <common/defaults.hpp>
+#include <common/urcu.hpp>
 
 #include <inttypes.h>
 #include <string.h>
@@ -191,9 +192,8 @@ void snapshot_delete_output(struct snapshot *snapshot, struct snapshot_output *o
        LTTNG_ASSERT(output);
 
        iter.iter.node = &output->node.node;
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        ret = lttng_ht_del(snapshot->output_ht, &iter);
-       rcu_read_unlock();
        LTTNG_ASSERT(!ret);
        /*
         * This is safe because the ownership of a snapshot object is in a session
@@ -211,9 +211,8 @@ void snapshot_add_output(struct snapshot *snapshot, struct snapshot_output *outp
        LTTNG_ASSERT(snapshot->output_ht);
        LTTNG_ASSERT(output);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        lttng_ht_add_unique_ulong(snapshot->output_ht, &output->node);
-       rcu_read_unlock();
        /*
         * This is safe because the ownership of a snapshot object is in a session
         * for which the session lock need to be acquired to read and modify it.
@@ -325,11 +324,14 @@ void snapshot_destroy(struct snapshot *obj)
                return;
        }
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (obj->output_ht->ht, &iter.iter, output, node.node) {
-               snapshot_delete_output(obj, output);
-               snapshot_output_destroy(output);
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (obj->output_ht->ht, &iter.iter, output, node.node) {
+                       snapshot_delete_output(obj, output);
+                       snapshot_output_destroy(output);
+               }
        }
-       rcu_read_unlock();
+
        lttng_ht_destroy(obj->output_ht);
 }
index 1505c3159ffb6e10b28404367772df3776c54d27..e4fdef9ad159d686b1f3bd4f84fa453e7438c8ce 100644 (file)
@@ -16,6 +16,7 @@
 #include <common/common.hpp>
 #include <common/defaults.hpp>
 #include <common/trace-chunk.hpp>
+#include <common/urcu.hpp>
 #include <common/utils.hpp>
 
 #include <inttypes.h>
@@ -761,14 +762,18 @@ static void fini_id_tracker(struct ust_id_tracker *id_tracker)
        if (!id_tracker->ht) {
                return;
        }
-       rcu_read_lock();
-       cds_lfht_for_each_entry (id_tracker->ht->ht, &iter.iter, tracker_node, node.node) {
-               int ret = lttng_ht_del(id_tracker->ht, &iter);
 
-               LTTNG_ASSERT(!ret);
-               destroy_id_tracker_node(tracker_node);
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (id_tracker->ht->ht, &iter.iter, tracker_node, node.node) {
+                       int ret = lttng_ht_del(id_tracker->ht, &iter);
+
+                       LTTNG_ASSERT(!ret);
+                       destroy_id_tracker_node(tracker_node);
+               }
        }
-       rcu_read_unlock();
+
        lttng_ht_destroy(id_tracker->ht);
        id_tracker->ht = nullptr;
 }
@@ -1202,18 +1207,20 @@ static void destroy_contexts(struct lttng_ht *ht)
 
        LTTNG_ASSERT(ht);
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (ht->ht, &iter.iter, node, node) {
-               /* Remove from ordered list. */
-               ctx = lttng::utils::container_of(node, &ltt_ust_context::node);
-               cds_list_del(&ctx->list);
-               /* Remove from channel's hash table. */
-               ret = lttng_ht_del(ht, &iter);
-               if (!ret) {
-                       call_rcu(&node->head, destroy_context_rcu);
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (ht->ht, &iter.iter, node, node) {
+                       /* Remove from ordered list. */
+                       ctx = lttng::utils::container_of(node, &ltt_ust_context::node);
+                       cds_list_del(&ctx->list);
+                       /* Remove from channel's hash table. */
+                       ret = lttng_ht_del(ht, &iter);
+                       if (!ret) {
+                               call_rcu(&node->head, destroy_context_rcu);
+                       }
                }
        }
-       rcu_read_unlock();
 
        lttng_ht_destroy(ht);
 }
@@ -1268,13 +1275,15 @@ static void destroy_events(struct lttng_ht *events)
 
        LTTNG_ASSERT(events);
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (events->ht, &iter.iter, node, node) {
-               ret = lttng_ht_del(events, &iter);
-               LTTNG_ASSERT(!ret);
-               call_rcu(&node->head, destroy_event_rcu);
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (events->ht, &iter.iter, node, node) {
+                       ret = lttng_ht_del(events, &iter);
+                       LTTNG_ASSERT(!ret);
+                       call_rcu(&node->head, destroy_event_rcu);
+               }
        }
-       rcu_read_unlock();
 
        lttng_ht_destroy(events);
 }
@@ -1336,7 +1345,7 @@ int trace_ust_regenerate_metadata(struct ltt_ust_session *usess)
        struct buffer_reg_uid *uid_reg = nullptr;
        struct buffer_reg_session *session_reg = nullptr;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_list_for_each_entry (uid_reg, &usess->buffer_reg_uid_list, lnode) {
                lsu::registry_session *registry;
 
@@ -1353,7 +1362,6 @@ int trace_ust_regenerate_metadata(struct ltt_ust_session *usess)
        }
 
 end:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -1367,15 +1375,17 @@ static void destroy_channels(struct lttng_ht *channels)
 
        LTTNG_ASSERT(channels);
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (channels->ht, &iter.iter, node, node) {
-               struct ltt_ust_channel *chan =
-                       lttng::utils::container_of(node, &ltt_ust_channel::node);
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (channels->ht, &iter.iter, node, node) {
+                       struct ltt_ust_channel *chan =
+                               lttng::utils::container_of(node, &ltt_ust_channel::node);
 
-               trace_ust_delete_channel(channels, chan);
-               trace_ust_destroy_channel(chan);
+                       trace_ust_delete_channel(channels, chan);
+                       trace_ust_destroy_channel(chan);
+               }
        }
-       rcu_read_unlock();
 
        lttng_ht_destroy(channels);
 }
@@ -1407,14 +1417,16 @@ void trace_ust_destroy_session(struct ltt_ust_session *session)
        /* Cleaning up UST domain */
        destroy_domain_global(&session->domain_global);
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (session->agents->ht, &iter.iter, agt, node.node) {
-               int ret = lttng_ht_del(session->agents, &iter);
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (session->agents->ht, &iter.iter, agt, node.node) {
+                       int ret = lttng_ht_del(session->agents, &iter);
 
-               LTTNG_ASSERT(!ret);
-               agent_destroy(agt);
+                       LTTNG_ASSERT(!ret);
+                       agent_destroy(agt);
+               }
        }
-       rcu_read_unlock();
 
        lttng_ht_destroy(session->agents);
 
index 9e1a155adcdefb2b71fc7f478677f30521265afa..526e51b6bc847ad47f7b4ceec872f95c975cbba7 100644 (file)
@@ -17,6 +17,7 @@
 #include <common/hashtable/hashtable.hpp>
 #include <common/hashtable/utils.hpp>
 #include <common/tracker.hpp>
+#include <common/urcu.hpp>
 
 #include <lttng/lttng-error.h>
 
@@ -91,12 +92,15 @@ static void process_attr_tracker_clear_inclusion_set(struct process_attr_tracker
                return;
        }
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (
-               tracker->inclusion_set_ht, &iter.iter, value_node, inclusion_set_ht_node) {
-               process_attr_tracker_remove_value_node(tracker, value_node);
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (
+                       tracker->inclusion_set_ht, &iter.iter, value_node, inclusion_set_ht_node) {
+                       process_attr_tracker_remove_value_node(tracker, value_node);
+               }
        }
-       rcu_read_unlock();
+
        ret = cds_lfht_destroy(tracker->inclusion_set_ht, nullptr);
        LTTNG_ASSERT(ret == 0);
        tracker->inclusion_set_ht = nullptr;
@@ -163,14 +167,13 @@ process_attr_tracker_lookup(const struct process_attr_tracker *tracker,
 
        LTTNG_ASSERT(tracker->policy == LTTNG_TRACKING_POLICY_INCLUDE_SET);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_lfht_lookup(tracker->inclusion_set_ht,
                        process_attr_value_hash(value),
                        match_inclusion_set_value,
                        value,
                        &iter);
        node = cds_lfht_iter_get_node(&iter);
-       rcu_read_unlock();
 
        return node ? lttng::utils::container_of(
                              node, &process_attr_tracker_value_node::inclusion_set_ht_node) :
@@ -186,7 +189,7 @@ process_attr_tracker_inclusion_set_add_value(struct process_attr_tracker *tracke
        struct process_attr_value *value_copy = nullptr;
        struct process_attr_tracker_value_node *value_node = nullptr;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        if (tracker->policy != LTTNG_TRACKING_POLICY_INCLUDE_SET) {
                status = PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY;
                goto end;
@@ -222,7 +225,6 @@ end:
        if (value_node) {
                free(value_node);
        }
-       rcu_read_unlock();
        return status;
 }
 
@@ -234,7 +236,7 @@ process_attr_tracker_inclusion_set_remove_value(struct process_attr_tracker *tra
        struct process_attr_tracker_value_node *value_node;
        enum process_attr_tracker_status status = PROCESS_ATTR_TRACKER_STATUS_OK;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        if (tracker->policy != LTTNG_TRACKING_POLICY_INCLUDE_SET) {
                status = PROCESS_ATTR_TRACKER_STATUS_INVALID_TRACKING_POLICY;
                goto end;
@@ -248,7 +250,6 @@ process_attr_tracker_inclusion_set_remove_value(struct process_attr_tracker *tra
 
        process_attr_tracker_remove_value_node(tracker, value_node);
 end:
-       rcu_read_unlock();
        return status;
 }
 
@@ -273,30 +274,32 @@ process_attr_tracker_get_inclusion_set(const struct process_attr_tracker *tracke
                goto error;
        }
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (
-               tracker->inclusion_set_ht, &iter.iter, value_node, inclusion_set_ht_node) {
-               int ret;
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-               new_value = process_attr_value_copy(value_node->value);
-               if (!new_value) {
-                       status = PROCESS_ATTR_TRACKER_STATUS_ERROR;
-                       goto error_unlock;
-               }
+               cds_lfht_for_each_entry (
+                       tracker->inclusion_set_ht, &iter.iter, value_node, inclusion_set_ht_node) {
+                       int ret;
 
-               ret = lttng_dynamic_pointer_array_add_pointer(&values->array, new_value);
-               if (ret) {
-                       status = PROCESS_ATTR_TRACKER_STATUS_ERROR;
-                       goto error_unlock;
-               }
+                       new_value = process_attr_value_copy(value_node->value);
+                       if (!new_value) {
+                               status = PROCESS_ATTR_TRACKER_STATUS_ERROR;
+                               goto error_unlock;
+                       }
+
+                       ret = lttng_dynamic_pointer_array_add_pointer(&values->array, new_value);
+                       if (ret) {
+                               status = PROCESS_ATTR_TRACKER_STATUS_ERROR;
+                               goto error_unlock;
+                       }
 
-               new_value = nullptr;
+                       new_value = nullptr;
+               }
        }
-       rcu_read_unlock();
+
        *_values = values;
        return status;
 error_unlock:
-       rcu_read_unlock();
 error:
        lttng_process_attr_values_destroy(values);
        process_attr_value_destroy(new_value);
index 90b0b65b729d5614f89665b839886a939ebaaa90..aaab0a3b2512d4a517d584127a5bafc8f513696b 100644 (file)
@@ -505,7 +505,7 @@ static void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan
                return;
        }
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        session = session_find_by_id(ua_chan->session->tracing_id);
        if (!session || !session->ust_session) {
                /*
@@ -548,7 +548,6 @@ static void save_per_pid_lost_discarded_counters(struct ust_app_channel *ua_chan
        uchan->per_pid_closed_app_lost += lost;
 
 end:
-       rcu_read_unlock();
        if (session) {
                session_put(session);
        }
@@ -1034,24 +1033,24 @@ static void delete_ust_app(struct ust_app *app)
        /* Wipe sessions */
        cds_list_for_each_entry_safe (ua_sess, tmp_ua_sess, &app->teardown_head, teardown_node) {
                /* Free every object in the session and the session. */
-               rcu_read_lock();
+               lttng::urcu::read_lock_guard read_lock;
                delete_ust_app_session(sock, ua_sess, app);
-               rcu_read_unlock();
        }
 
        /* Remove the event notifier rules associated with this app. */
-       rcu_read_lock();
-       cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
-                                &iter.iter,
-                                event_notifier_rule,
-                                node.node) {
-               ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
-               LTTNG_ASSERT(!ret);
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-               delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app);
-       }
+               cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
+                                        &iter.iter,
+                                        event_notifier_rule,
+                                        node.node) {
+                       ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &iter);
+                       LTTNG_ASSERT(!ret);
 
-       rcu_read_unlock();
+                       delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app);
+               }
+       }
 
        lttng_ht_destroy(app->sessions);
        lttng_ht_destroy(app->ust_sessions_objd);
@@ -2566,7 +2565,7 @@ static int setup_buffer_reg_pid(struct ust_app_session *ua_sess,
        LTTNG_ASSERT(ua_sess);
        LTTNG_ASSERT(app);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        reg_pid = buffer_reg_pid_find(ua_sess->id);
        if (!reg_pid) {
@@ -2614,7 +2613,6 @@ end:
                *regp = reg_pid;
        }
 error:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -2636,7 +2634,7 @@ static int setup_buffer_reg_uid(struct ltt_ust_session *usess,
        LTTNG_ASSERT(usess);
        LTTNG_ASSERT(app);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        reg_uid = buffer_reg_uid_find(usess->id, app->abi.bits_per_long, app->uid);
        if (!reg_uid) {
@@ -2690,7 +2688,6 @@ end:
                *regp = reg_uid;
        }
 error:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -3053,7 +3050,7 @@ static int do_consumer_create_channel(struct ltt_ust_session *usess,
        LTTNG_ASSERT(ua_chan);
        LTTNG_ASSERT(registry);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        health_code_update();
 
        /* Get the right consumer socket for the application. */
@@ -3108,7 +3105,6 @@ static int do_consumer_create_channel(struct ltt_ust_session *usess,
                }
        }
 
-       rcu_read_unlock();
        return 0;
 
 error_destroy:
@@ -3125,7 +3121,6 @@ error_ask:
        lttng_fd_put(LTTNG_FD_APPS, 1);
 error:
        health_code_update();
-       rcu_read_unlock();
        return ret;
 }
 
@@ -3581,7 +3576,7 @@ static int create_channel_per_pid(struct ust_app *app,
 
        DBG("UST app creating channel %s with per PID buffers", ua_chan->name);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        registry = get_session_registry(ua_sess);
        /* The UST app session lock is held, registry shall not be null. */
@@ -3649,7 +3644,6 @@ error_remove_from_registry:
                }
        }
 error:
-       rcu_read_unlock();
        if (session) {
                session_put(session);
        }
@@ -4139,7 +4133,7 @@ void ust_app_add(struct ust_app *app)
 
        app->registration_time = time(nullptr);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        /*
         * On a re-registration, we want to kick out the previous registration of
@@ -4169,8 +4163,6 @@ void ust_app_add(struct ust_app *app)
            app->notify_sock,
            app->v_major,
            app->v_minor);
-
-       rcu_read_unlock();
 }
 
 /*
@@ -4350,7 +4342,7 @@ void ust_app_unregister(int sock)
        struct ust_app_session *ua_sess;
        int ret;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        /* Get the node reference for a call_rcu */
        lttng_ht_lookup(ust_app_ht_by_sock, (void *) ((unsigned long) sock), &ust_app_sock_iter);
@@ -4459,7 +4451,6 @@ void ust_app_unregister(int sock)
        /* Free memory */
        call_rcu(&lta->pid_n.head, delete_ust_app_rcu);
 
-       rcu_read_unlock();
        return;
 }
 
@@ -4482,75 +4473,49 @@ int ust_app_list_events(struct lttng_event **events)
                goto error;
        }
 
-       rcu_read_lock();
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-       cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               struct lttng_ust_abi_tracepoint_iter uiter;
+               cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       struct lttng_ust_abi_tracepoint_iter uiter;
 
-               health_code_update();
+                       health_code_update();
 
-               if (!app->compatible) {
-                       /*
-                        * TODO: In time, we should notice the caller of this error by
-                        * telling him that this is a version error.
-                        */
-                       continue;
-               }
-               pthread_mutex_lock(&app->sock_lock);
-               handle = lttng_ust_ctl_tracepoint_list(app->sock);
-               if (handle < 0) {
-                       if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
-                               ERR("UST app list events getting handle failed for app pid %d",
-                                   app->pid);
+                       if (!app->compatible) {
+                               /*
+                                * TODO: In time, we should notice the caller of this error by
+                                * telling him that this is a version error.
+                                */
+                               continue;
                        }
-                       pthread_mutex_unlock(&app->sock_lock);
-                       continue;
-               }
 
-               while ((ret = lttng_ust_ctl_tracepoint_list_get(app->sock, handle, &uiter)) !=
-                      -LTTNG_UST_ERR_NOENT) {
-                       /* Handle ustctl error. */
-                       if (ret < 0) {
-                               int release_ret;
-
-                               if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
-                                       ERR("UST app tp list get failed for app %d with ret %d",
-                                           app->sock,
-                                           ret);
-                               } else {
-                                       DBG3("UST app tp list get failed. Application is dead");
-                                       break;
-                               }
-                               free(tmp_event);
-                               release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
-                               if (release_ret < 0 && release_ret != -LTTNG_UST_ERR_EXITING &&
-                                   release_ret != -EPIPE) {
-                                       ERR("Error releasing app handle for app %d with ret %d",
-                                           app->sock,
-                                           release_ret);
+                       pthread_mutex_lock(&app->sock_lock);
+                       handle = lttng_ust_ctl_tracepoint_list(app->sock);
+                       if (handle < 0) {
+                               if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
+                                       ERR("UST app list events getting handle failed for app pid %d",
+                                           app->pid);
                                }
                                pthread_mutex_unlock(&app->sock_lock);
-                               goto rcu_error;
+                               continue;
                        }
 
-                       health_code_update();
-                       if (count >= nbmem) {
-                               /* In case the realloc fails, we free the memory */
-                               struct lttng_event *new_tmp_event;
-                               size_t new_nbmem;
-
-                               new_nbmem = nbmem << 1;
-                               DBG2("Reallocating event list from %zu to %zu entries",
-                                    nbmem,
-                                    new_nbmem);
-                               new_tmp_event = (lttng_event *) realloc(
-                                       tmp_event, new_nbmem * sizeof(struct lttng_event));
-                               if (new_tmp_event == nullptr) {
+                       while ((ret = lttng_ust_ctl_tracepoint_list_get(
+                                       app->sock, handle, &uiter)) != -LTTNG_UST_ERR_NOENT) {
+                               /* Handle ustctl error. */
+                               if (ret < 0) {
                                        int release_ret;
 
-                                       PERROR("realloc ust app events");
+                                       if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+                                               ERR("UST app tp list get failed for app %d with ret %d",
+                                                   app->sock,
+                                                   ret);
+                                       } else {
+                                               DBG3("UST app tp list get failed. Application is dead");
+                                               break;
+                                       }
+
                                        free(tmp_event);
-                                       ret = -ENOMEM;
                                        release_ret =
                                                lttng_ust_ctl_release_handle(app->sock, handle);
                                        if (release_ret < 0 &&
@@ -4560,39 +4525,78 @@ int ust_app_list_events(struct lttng_event **events)
                                                    app->sock,
                                                    release_ret);
                                        }
+
                                        pthread_mutex_unlock(&app->sock_lock);
                                        goto rcu_error;
                                }
-                               /* Zero the new memory */
-                               memset(new_tmp_event + nbmem,
-                                      0,
-                                      (new_nbmem - nbmem) * sizeof(struct lttng_event));
-                               nbmem = new_nbmem;
-                               tmp_event = new_tmp_event;
+
+                               health_code_update();
+                               if (count >= nbmem) {
+                                       /* In case the realloc fails, we free the memory */
+                                       struct lttng_event *new_tmp_event;
+                                       size_t new_nbmem;
+
+                                       new_nbmem = nbmem << 1;
+                                       DBG2("Reallocating event list from %zu to %zu entries",
+                                            nbmem,
+                                            new_nbmem);
+                                       new_tmp_event = (lttng_event *) realloc(
+                                               tmp_event, new_nbmem * sizeof(struct lttng_event));
+                                       if (new_tmp_event == nullptr) {
+                                               int release_ret;
+
+                                               PERROR("realloc ust app events");
+                                               free(tmp_event);
+                                               ret = -ENOMEM;
+                                               release_ret = lttng_ust_ctl_release_handle(
+                                                       app->sock, handle);
+                                               if (release_ret < 0 &&
+                                                   release_ret != -LTTNG_UST_ERR_EXITING &&
+                                                   release_ret != -EPIPE) {
+                                                       ERR("Error releasing app handle for app %d with ret %d",
+                                                           app->sock,
+                                                           release_ret);
+                                               }
+
+                                               pthread_mutex_unlock(&app->sock_lock);
+                                               goto rcu_error;
+                                       }
+                                       /* Zero the new memory */
+                                       memset(new_tmp_event + nbmem,
+                                              0,
+                                              (new_nbmem - nbmem) * sizeof(struct lttng_event));
+                                       nbmem = new_nbmem;
+                                       tmp_event = new_tmp_event;
+                               }
+
+                               memcpy(tmp_event[count].name,
+                                      uiter.name,
+                                      LTTNG_UST_ABI_SYM_NAME_LEN);
+                               tmp_event[count].loglevel = uiter.loglevel;
+                               tmp_event[count].type =
+                                       (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
+                               tmp_event[count].pid = app->pid;
+                               tmp_event[count].enabled = -1;
+                               count++;
                        }
-                       memcpy(tmp_event[count].name, uiter.name, LTTNG_UST_ABI_SYM_NAME_LEN);
-                       tmp_event[count].loglevel = uiter.loglevel;
-                       tmp_event[count].type = (enum lttng_event_type) LTTNG_UST_ABI_TRACEPOINT;
-                       tmp_event[count].pid = app->pid;
-                       tmp_event[count].enabled = -1;
-                       count++;
-               }
-               ret = lttng_ust_ctl_release_handle(app->sock, handle);
-               pthread_mutex_unlock(&app->sock_lock);
-               if (ret < 0) {
-                       if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
-                               DBG3("Error releasing app handle. Application died: pid = %d, sock = %d",
-                                    app->pid,
-                                    app->sock);
-                       } else if (ret == -EAGAIN) {
-                               WARN("Error releasing app handle. Communication time out: pid = %d, sock = %d",
-                                    app->pid,
-                                    app->sock);
-                       } else {
-                               ERR("Error releasing app handle with ret %d: pid = %d, sock = %d",
-                                   ret,
-                                   app->pid,
-                                   app->sock);
+
+                       ret = lttng_ust_ctl_release_handle(app->sock, handle);
+                       pthread_mutex_unlock(&app->sock_lock);
+                       if (ret < 0) {
+                               if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
+                                       DBG3("Error releasing app handle. Application died: pid = %d, sock = %d",
+                                            app->pid,
+                                            app->sock);
+                               } else if (ret == -EAGAIN) {
+                                       WARN("Error releasing app handle. Communication time out: pid = %d, sock = %d",
+                                            app->pid,
+                                            app->sock);
+                               } else {
+                                       ERR("Error releasing app handle with ret %d: pid = %d, sock = %d",
+                                           ret,
+                                           app->pid,
+                                           app->sock);
+                               }
                        }
                }
        }
@@ -4603,7 +4607,6 @@ int ust_app_list_events(struct lttng_event **events)
        DBG2("UST app list events done (%zu events)", count);
 
 rcu_error:
-       rcu_read_unlock();
 error:
        health_code_update();
        return ret;
@@ -4628,114 +4631,129 @@ int ust_app_list_event_fields(struct lttng_event_field **fields)
                goto error;
        }
 
-       rcu_read_lock();
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-       cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               struct lttng_ust_abi_field_iter uiter;
+               cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       struct lttng_ust_abi_field_iter uiter;
 
-               health_code_update();
+                       health_code_update();
 
-               if (!app->compatible) {
-                       /*
-                        * TODO: In time, we should notice the caller of this error by
-                        * telling him that this is a version error.
-                        */
-                       continue;
-               }
-               pthread_mutex_lock(&app->sock_lock);
-               handle = lttng_ust_ctl_tracepoint_field_list(app->sock);
-               if (handle < 0) {
-                       if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
-                               ERR("UST app list field getting handle failed for app pid %d",
-                                   app->pid);
+                       if (!app->compatible) {
+                               /*
+                                * TODO: In time, we should notice the caller of this error by
+                                * telling him that this is a version error.
+                                */
+                               continue;
                        }
-                       pthread_mutex_unlock(&app->sock_lock);
-                       continue;
-               }
-
-               while ((ret = lttng_ust_ctl_tracepoint_field_list_get(app->sock, handle, &uiter)) !=
-                      -LTTNG_UST_ERR_NOENT) {
-                       /* Handle ustctl error. */
-                       if (ret < 0) {
-                               int release_ret;
 
-                               if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
-                                       ERR("UST app tp list field failed for app %d with ret %d",
-                                           app->sock,
-                                           ret);
-                               } else {
-                                       DBG3("UST app tp list field failed. Application is dead");
-                                       break;
+                       pthread_mutex_lock(&app->sock_lock);
+                       handle = lttng_ust_ctl_tracepoint_field_list(app->sock);
+                       if (handle < 0) {
+                               if (handle != -EPIPE && handle != -LTTNG_UST_ERR_EXITING) {
+                                       ERR("UST app list field getting handle failed for app pid %d",
+                                           app->pid);
                                }
-                               free(tmp_event);
-                               release_ret = lttng_ust_ctl_release_handle(app->sock, handle);
                                pthread_mutex_unlock(&app->sock_lock);
-                               if (release_ret < 0 && release_ret != -LTTNG_UST_ERR_EXITING &&
-                                   release_ret != -EPIPE) {
-                                       ERR("Error releasing app handle for app %d with ret %d",
-                                           app->sock,
-                                           release_ret);
-                               }
-                               goto rcu_error;
+                               continue;
                        }
 
-                       health_code_update();
-                       if (count >= nbmem) {
-                               /* In case the realloc fails, we free the memory */
-                               struct lttng_event_field *new_tmp_event;
-                               size_t new_nbmem;
-
-                               new_nbmem = nbmem << 1;
-                               DBG2("Reallocating event field list from %zu to %zu entries",
-                                    nbmem,
-                                    new_nbmem);
-                               new_tmp_event = (lttng_event_field *) realloc(
-                                       tmp_event, new_nbmem * sizeof(struct lttng_event_field));
-                               if (new_tmp_event == nullptr) {
+                       while ((ret = lttng_ust_ctl_tracepoint_field_list_get(
+                                       app->sock, handle, &uiter)) != -LTTNG_UST_ERR_NOENT) {
+                               /* Handle ustctl error. */
+                               if (ret < 0) {
                                        int release_ret;
 
-                                       PERROR("realloc ust app event fields");
+                                       if (ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+                                               ERR("UST app tp list field failed for app %d with ret %d",
+                                                   app->sock,
+                                                   ret);
+                                       } else {
+                                               DBG3("UST app tp list field failed. Application is dead");
+                                               break;
+                                       }
+
                                        free(tmp_event);
-                                       ret = -ENOMEM;
                                        release_ret =
                                                lttng_ust_ctl_release_handle(app->sock, handle);
                                        pthread_mutex_unlock(&app->sock_lock);
-                                       if (release_ret && release_ret != -LTTNG_UST_ERR_EXITING &&
+                                       if (release_ret < 0 &&
+                                           release_ret != -LTTNG_UST_ERR_EXITING &&
                                            release_ret != -EPIPE) {
                                                ERR("Error releasing app handle for app %d with ret %d",
                                                    app->sock,
                                                    release_ret);
                                        }
+
                                        goto rcu_error;
                                }
-                               /* Zero the new memory */
-                               memset(new_tmp_event + nbmem,
-                                      0,
-                                      (new_nbmem - nbmem) * sizeof(struct lttng_event_field));
-                               nbmem = new_nbmem;
-                               tmp_event = new_tmp_event;
+
+                               health_code_update();
+                               if (count >= nbmem) {
+                                       /* In case the realloc fails, we free the memory */
+                                       struct lttng_event_field *new_tmp_event;
+                                       size_t new_nbmem;
+
+                                       new_nbmem = nbmem << 1;
+                                       DBG2("Reallocating event field list from %zu to %zu entries",
+                                            nbmem,
+                                            new_nbmem);
+                                       new_tmp_event = (lttng_event_field *) realloc(
+                                               tmp_event,
+                                               new_nbmem * sizeof(struct lttng_event_field));
+                                       if (new_tmp_event == nullptr) {
+                                               int release_ret;
+
+                                               PERROR("realloc ust app event fields");
+                                               free(tmp_event);
+                                               ret = -ENOMEM;
+                                               release_ret = lttng_ust_ctl_release_handle(
+                                                       app->sock, handle);
+                                               pthread_mutex_unlock(&app->sock_lock);
+                                               if (release_ret &&
+                                                   release_ret != -LTTNG_UST_ERR_EXITING &&
+                                                   release_ret != -EPIPE) {
+                                                       ERR("Error releasing app handle for app %d with ret %d",
+                                                           app->sock,
+                                                           release_ret);
+                                               }
+
+                                               goto rcu_error;
+                                       }
+
+                                       /* Zero the new memory */
+                                       memset(new_tmp_event + nbmem,
+                                              0,
+                                              (new_nbmem - nbmem) *
+                                                      sizeof(struct lttng_event_field));
+                                       nbmem = new_nbmem;
+                                       tmp_event = new_tmp_event;
+                               }
+
+                               memcpy(tmp_event[count].field_name,
+                                      uiter.field_name,
+                                      LTTNG_UST_ABI_SYM_NAME_LEN);
+                               /* Mapping between these enums matches 1 to 1. */
+                               tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
+                               tmp_event[count].nowrite = uiter.nowrite;
+
+                               memcpy(tmp_event[count].event.name,
+                                      uiter.event_name,
+                                      LTTNG_UST_ABI_SYM_NAME_LEN);
+                               tmp_event[count].event.loglevel = uiter.loglevel;
+                               tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
+                               tmp_event[count].event.pid = app->pid;
+                               tmp_event[count].event.enabled = -1;
+                               count++;
                        }
 
-                       memcpy(tmp_event[count].field_name,
-                              uiter.field_name,
-                              LTTNG_UST_ABI_SYM_NAME_LEN);
-                       /* Mapping between these enums matches 1 to 1. */
-                       tmp_event[count].type = (enum lttng_event_field_type) uiter.type;
-                       tmp_event[count].nowrite = uiter.nowrite;
-
-                       memcpy(tmp_event[count].event.name,
-                              uiter.event_name,
-                              LTTNG_UST_ABI_SYM_NAME_LEN);
-                       tmp_event[count].event.loglevel = uiter.loglevel;
-                       tmp_event[count].event.type = LTTNG_EVENT_TRACEPOINT;
-                       tmp_event[count].event.pid = app->pid;
-                       tmp_event[count].event.enabled = -1;
-                       count++;
-               }
-               ret = lttng_ust_ctl_release_handle(app->sock, handle);
-               pthread_mutex_unlock(&app->sock_lock);
-               if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
-                       ERR("Error releasing app handle for app %d with ret %d", app->sock, ret);
+                       ret = lttng_ust_ctl_release_handle(app->sock, handle);
+                       pthread_mutex_unlock(&app->sock_lock);
+                       if (ret < 0 && ret != -LTTNG_UST_ERR_EXITING && ret != -EPIPE) {
+                               ERR("Error releasing app handle for app %d with ret %d",
+                                   app->sock,
+                                   ret);
+                       }
                }
        }
 
@@ -4745,7 +4763,6 @@ int ust_app_list_event_fields(struct lttng_event_field **fields)
        DBG2("UST app list event fields done (%zu events)", count);
 
 rcu_error:
-       rcu_read_unlock();
 error:
        health_code_update();
        return ret;
@@ -4762,10 +4779,10 @@ void ust_app_clean_list()
 
        DBG2("UST app cleaning registered apps hash table");
 
-       rcu_read_lock();
-
        /* Cleanup notify socket hash table */
        if (ust_app_ht_by_notify_sock) {
+               lttng::urcu::read_lock_guard read_lock;
+
                cds_lfht_for_each_entry (
                        ust_app_ht_by_notify_sock->ht, &iter.iter, app, notify_sock_n.node) {
                        /*
@@ -4779,6 +4796,8 @@ void ust_app_clean_list()
        }
 
        if (ust_app_ht) {
+               lttng::urcu::read_lock_guard read_lock;
+
                cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
                        ret = lttng_ht_del(ust_app_ht, &iter);
                        LTTNG_ASSERT(!ret);
@@ -4788,14 +4807,14 @@ void ust_app_clean_list()
 
        /* Cleanup socket hash table */
        if (ust_app_ht_by_sock) {
+               lttng::urcu::read_lock_guard read_lock;
+
                cds_lfht_for_each_entry (ust_app_ht_by_sock->ht, &iter.iter, app, sock_n.node) {
                        ret = lttng_ht_del(ust_app_ht_by_sock, &iter);
                        LTTNG_ASSERT(!ret);
                }
        }
 
-       rcu_read_unlock();
-
        /* Destroy is done only when the ht is empty */
        if (ust_app_ht) {
                lttng_ht_destroy(ust_app_ht);
@@ -4845,42 +4864,43 @@ int ust_app_disable_channel_glb(struct ltt_ust_session *usess, struct ltt_ust_ch
             uchan->name,
             usess->id);
 
-       rcu_read_lock();
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-       /* For every registered applications */
-       cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               struct lttng_ht_iter uiter;
-               if (!app->compatible) {
-                       /*
-                        * TODO: In time, we should notice the caller of this error by
-                        * telling him that this is a version error.
-                        */
-                       continue;
-               }
-               ua_sess = lookup_session_by_app(usess, app);
-               if (ua_sess == nullptr) {
-                       continue;
-               }
+               /* For every registered applications */
+               cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       struct lttng_ht_iter uiter;
+                       if (!app->compatible) {
+                               /*
+                                * TODO: In time, we should notice the caller of this error by
+                                * telling him that this is a version error.
+                                */
+                               continue;
+                       }
+                       ua_sess = lookup_session_by_app(usess, app);
+                       if (ua_sess == nullptr) {
+                               continue;
+                       }
 
-               /* Get channel */
-               lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
-               ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
-               /* If the session if found for the app, the channel must be there */
-               LTTNG_ASSERT(ua_chan_node);
+                       /* Get channel */
+                       lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+                       ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+                       /* If the session if found for the app, the channel must be there */
+                       LTTNG_ASSERT(ua_chan_node);
 
-               ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
-               /* The channel must not be already disabled */
-               LTTNG_ASSERT(ua_chan->enabled);
+                       ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+                       /* The channel must not be already disabled */
+                       LTTNG_ASSERT(ua_chan->enabled);
 
-               /* Disable channel onto application */
-               ret = disable_ust_app_channel(ua_sess, ua_chan, app);
-               if (ret < 0) {
-                       /* XXX: We might want to report this error at some point... */
-                       continue;
+                       /* Disable channel onto application */
+                       ret = disable_ust_app_channel(ua_sess, ua_chan, app);
+                       if (ret < 0) {
+                               /* XXX: We might want to report this error at some point... */
+                               continue;
+                       }
                }
        }
 
-       rcu_read_unlock();
        return ret;
 }
 
@@ -4899,31 +4919,32 @@ int ust_app_enable_channel_glb(struct ltt_ust_session *usess, struct ltt_ust_cha
             uchan->name,
             usess->id);
 
-       rcu_read_lock();
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-       /* For every registered applications */
-       cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               if (!app->compatible) {
-                       /*
-                        * TODO: In time, we should notice the caller of this error by
-                        * telling him that this is a version error.
-                        */
-                       continue;
-               }
-               ua_sess = lookup_session_by_app(usess, app);
-               if (ua_sess == nullptr) {
-                       continue;
-               }
+               /* For every registered applications */
+               cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       if (!app->compatible) {
+                               /*
+                                * TODO: In time, we should notice the caller of this error by
+                                * telling him that this is a version error.
+                                */
+                               continue;
+                       }
+                       ua_sess = lookup_session_by_app(usess, app);
+                       if (ua_sess == nullptr) {
+                               continue;
+                       }
 
-               /* Enable channel onto application */
-               ret = enable_ust_app_channel(ua_sess, uchan, app);
-               if (ret < 0) {
-                       /* XXX: We might want to report this error at some point... */
-                       continue;
+                       /* Enable channel onto application */
+                       ret = enable_ust_app_channel(ua_sess, uchan, app);
+                       if (ret < 0) {
+                               /* XXX: We might want to report this error at some point... */
+                               continue;
+                       }
                }
        }
 
-       rcu_read_unlock();
        return ret;
 }
 
@@ -4949,58 +4970,60 @@ int ust_app_disable_event_glb(struct ltt_ust_session *usess,
            uchan->name,
            usess->id);
 
-       rcu_read_lock();
-
-       /* For all registered applications */
-       cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               if (!app->compatible) {
-                       /*
-                        * TODO: In time, we should notice the caller of this error by
-                        * telling him that this is a version error.
-                        */
-                       continue;
-               }
-               ua_sess = lookup_session_by_app(usess, app);
-               if (ua_sess == nullptr) {
-                       /* Next app */
-                       continue;
-               }
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-               /* Lookup channel in the ust app session */
-               lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
-               ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
-               if (ua_chan_node == nullptr) {
-                       DBG2("Channel %s not found in session id %" PRIu64 " for app pid %d."
-                            "Skipping",
-                            uchan->name,
-                            usess->id,
-                            app->pid);
-                       continue;
-               }
-               ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+               /* For all registered applications */
+               cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       if (!app->compatible) {
+                               /*
+                                * TODO: In time, we should notice the caller of this error by
+                                * telling him that this is a version error.
+                                */
+                               continue;
+                       }
+                       ua_sess = lookup_session_by_app(usess, app);
+                       if (ua_sess == nullptr) {
+                               /* Next app */
+                               continue;
+                       }
 
-               ua_event = find_ust_app_event(ua_chan->events,
-                                             uevent->attr.name,
-                                             uevent->filter,
-                                             uevent->attr.loglevel,
-                                             uevent->exclusion);
-               if (ua_event == nullptr) {
-                       DBG2("Event %s not found in channel %s for app pid %d."
-                            "Skipping",
-                            uevent->attr.name,
-                            uchan->name,
-                            app->pid);
-                       continue;
-               }
+                       /* Lookup channel in the ust app session */
+                       lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+                       ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+                       if (ua_chan_node == nullptr) {
+                               DBG2("Channel %s not found in session id %" PRIu64
+                                    " for app pid %d."
+                                    "Skipping",
+                                    uchan->name,
+                                    usess->id,
+                                    app->pid);
+                               continue;
+                       }
+                       ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+
+                       ua_event = find_ust_app_event(ua_chan->events,
+                                                     uevent->attr.name,
+                                                     uevent->filter,
+                                                     uevent->attr.loglevel,
+                                                     uevent->exclusion);
+                       if (ua_event == nullptr) {
+                               DBG2("Event %s not found in channel %s for app pid %d."
+                                    "Skipping",
+                                    uevent->attr.name,
+                                    uchan->name,
+                                    app->pid);
+                               continue;
+                       }
 
-               ret = disable_ust_app_event(ua_event, app);
-               if (ret < 0) {
-                       /* XXX: Report error someday... */
-                       continue;
+                       ret = disable_ust_app_event(ua_event, app);
+                       if (ret < 0) {
+                               /* XXX: Report error someday... */
+                               continue;
+                       }
                }
        }
 
-       rcu_read_unlock();
        return ret;
 }
 
@@ -5101,70 +5124,70 @@ int ust_app_enable_event_glb(struct ltt_ust_session *usess,
         * tracer also.
         */
 
-       rcu_read_lock();
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-       /* For all registered applications */
-       cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               if (!app->compatible) {
-                       /*
-                        * TODO: In time, we should notice the caller of this error by
-                        * telling him that this is a version error.
-                        */
-                       continue;
-               }
-               ua_sess = lookup_session_by_app(usess, app);
-               if (!ua_sess) {
-                       /* The application has problem or is probably dead. */
-                       continue;
-               }
+               /* For all registered applications */
+               cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       if (!app->compatible) {
+                               /*
+                                * TODO: In time, we should notice the caller of this error by
+                                * telling him that this is a version error.
+                                */
+                               continue;
+                       }
+                       ua_sess = lookup_session_by_app(usess, app);
+                       if (!ua_sess) {
+                               /* The application has problem or is probably dead. */
+                               continue;
+                       }
 
-               pthread_mutex_lock(&ua_sess->lock);
+                       pthread_mutex_lock(&ua_sess->lock);
 
-               if (ua_sess->deleted) {
-                       pthread_mutex_unlock(&ua_sess->lock);
-                       continue;
-               }
+                       if (ua_sess->deleted) {
+                               pthread_mutex_unlock(&ua_sess->lock);
+                               continue;
+                       }
 
-               /* Lookup channel in the ust app session */
-               lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
-               ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
-               /*
-                * It is possible that the channel cannot be found is
-                * the channel/event creation occurs concurrently with
-                * an application exit.
-                */
-               if (!ua_chan_node) {
-                       pthread_mutex_unlock(&ua_sess->lock);
-                       continue;
-               }
+                       /* Lookup channel in the ust app session */
+                       lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+                       ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+                       /*
+                        * It is possible that the channel cannot be found if
+                        * the channel/event creation occurs concurrently with
+                        * an application exit.
+                        */
+                       if (!ua_chan_node) {
+                               pthread_mutex_unlock(&ua_sess->lock);
+                               continue;
+                       }
 
-               ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+                       ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+
+                       /* Get event node */
+                       ua_event = find_ust_app_event(ua_chan->events,
+                                                     uevent->attr.name,
+                                                     uevent->filter,
+                                                     uevent->attr.loglevel,
+                                                     uevent->exclusion);
+                       if (ua_event == nullptr) {
+                               DBG3("UST app enable event %s not found for app PID %d."
+                                    "Skipping app",
+                                    uevent->attr.name,
+                                    app->pid);
+                               goto next_app;
+                       }
 
-               /* Get event node */
-               ua_event = find_ust_app_event(ua_chan->events,
-                                             uevent->attr.name,
-                                             uevent->filter,
-                                             uevent->attr.loglevel,
-                                             uevent->exclusion);
-               if (ua_event == nullptr) {
-                       DBG3("UST app enable event %s not found for app PID %d."
-                            "Skipping app",
-                            uevent->attr.name,
-                            app->pid);
-                       goto next_app;
-               }
-
-               ret = enable_ust_app_event(ua_event, app);
-               if (ret < 0) {
+                       ret = enable_ust_app_event(ua_event, app);
+                       if (ret < 0) {
+                               pthread_mutex_unlock(&ua_sess->lock);
+                               goto error;
+                       }
+               next_app:
                        pthread_mutex_unlock(&ua_sess->lock);
-                       goto error;
                }
-       next_app:
-               pthread_mutex_unlock(&ua_sess->lock);
        }
-
 error:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -5188,53 +5211,56 @@ int ust_app_create_event_glb(struct ltt_ust_session *usess,
            uevent->attr.name,
            usess->id);
 
-       rcu_read_lock();
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-       /* For all registered applications */
-       cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               if (!app->compatible) {
-                       /*
-                        * TODO: In time, we should notice the caller of this error by
-                        * telling him that this is a version error.
-                        */
-                       continue;
-               }
-               ua_sess = lookup_session_by_app(usess, app);
-               if (!ua_sess) {
-                       /* The application has problem or is probably dead. */
-                       continue;
-               }
+               /* For all registered applications */
+               cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       if (!app->compatible) {
+                               /*
+                                * TODO: In time, we should notice the caller of this error by
+                                * telling him that this is a version error.
+                                */
+                               continue;
+                       }
 
-               pthread_mutex_lock(&ua_sess->lock);
+                       ua_sess = lookup_session_by_app(usess, app);
+                       if (!ua_sess) {
+                               /* The application has problem or is probably dead. */
+                               continue;
+                       }
 
-               if (ua_sess->deleted) {
-                       pthread_mutex_unlock(&ua_sess->lock);
-                       continue;
-               }
+                       pthread_mutex_lock(&ua_sess->lock);
 
-               /* Lookup channel in the ust app session */
-               lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
-               ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
-               /* If the channel is not found, there is a code flow error */
-               LTTNG_ASSERT(ua_chan_node);
+                       if (ua_sess->deleted) {
+                               pthread_mutex_unlock(&ua_sess->lock);
+                               continue;
+                       }
 
-               ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+                       /* Lookup channel in the ust app session */
+                       lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+                       ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+                       /* If the channel is not found, there is a code flow error */
+                       LTTNG_ASSERT(ua_chan_node);
 
-               ret = create_ust_app_event(ua_chan, uevent, app);
-               pthread_mutex_unlock(&ua_sess->lock);
-               if (ret < 0) {
-                       if (ret != -LTTNG_UST_ERR_EXIST) {
-                               /* Possible value at this point: -ENOMEM. If so, we stop! */
-                               break;
+                       ua_chan = lttng::utils::container_of(ua_chan_node, &ust_app_channel::node);
+
+                       ret = create_ust_app_event(ua_chan, uevent, app);
+                       pthread_mutex_unlock(&ua_sess->lock);
+                       if (ret < 0) {
+                               if (ret != -LTTNG_UST_ERR_EXIST) {
+                                       /* Possible value at this point: -ENOMEM. If so, we stop! */
+                                       break;
+                               }
+
+                               DBG2("UST app event %s already exist on app PID %d",
+                                    uevent->attr.name,
+                                    app->pid);
+                               continue;
                        }
-                       DBG2("UST app event %s already exist on app PID %d",
-                            uevent->attr.name,
-                            app->pid);
-                       continue;
                }
        }
 
-       rcu_read_unlock();
        return ret;
 }
 
@@ -5251,7 +5277,7 @@ static int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *ap
 
        DBG("Starting tracing for ust app pid %d", app->pid);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        if (!app->compatible) {
                goto end;
@@ -5340,13 +5366,11 @@ skip_setup:
        }
 
 end:
-       rcu_read_unlock();
        health_code_update();
        return 0;
 
 error_unlock:
        pthread_mutex_unlock(&ua_sess->lock);
-       rcu_read_unlock();
        health_code_update();
        return -1;
 }
@@ -5361,7 +5385,7 @@ static int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app
 
        DBG("Stopping tracing for ust app pid %d", app->pid);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        if (!app->compatible) {
                goto end_no_session;
@@ -5455,13 +5479,11 @@ static int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app
 end_unlock:
        pthread_mutex_unlock(&ua_sess->lock);
 end_no_session:
-       rcu_read_unlock();
        health_code_update();
        return 0;
 
 error_rcu_unlock:
        pthread_mutex_unlock(&ua_sess->lock);
-       rcu_read_unlock();
        health_code_update();
        return -1;
 }
@@ -5475,8 +5497,6 @@ static int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session
 
        DBG("Flushing app session buffers for ust app pid %d", app->pid);
 
-       rcu_read_lock();
-
        if (!app->compatible) {
                goto end_not_compatible;
        }
@@ -5495,6 +5515,9 @@ static int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session
        /* Flush buffers and push metadata. */
        switch (ua_sess->buffer_type) {
        case LTTNG_BUFFER_PER_PID:
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
                cds_lfht_for_each_entry (ua_sess->channels->ht, &iter.iter, ua_chan, node.node) {
                        health_code_update();
                        ret = consumer_flush_channel(socket, ua_chan->key);
@@ -5504,7 +5527,9 @@ static int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session
                                continue;
                        }
                }
+
                break;
+       }
        case LTTNG_BUFFER_PER_UID:
        default:
                abort();
@@ -5517,7 +5542,6 @@ end_deleted:
        pthread_mutex_unlock(&ua_sess->lock);
 
 end_not_compatible:
-       rcu_read_unlock();
        health_code_update();
        return retval;
 }
@@ -5533,8 +5557,6 @@ static int ust_app_flush_session(struct ltt_ust_session *usess)
 
        DBG("Flushing session buffers for all ust apps");
 
-       rcu_read_lock();
-
        /* Flush buffers and push metadata. */
        switch (usess->buffer_type) {
        case LTTNG_BUFFER_PER_UID:
@@ -5544,6 +5566,7 @@ static int ust_app_flush_session(struct ltt_ust_session *usess)
 
                /* Flush all per UID buffers associated to that session. */
                cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
+                       lttng::urcu::read_lock_guard read_lock;
                        lsu::registry_session *ust_session_reg;
                        struct buffer_reg_channel *buf_reg_chan;
                        struct consumer_socket *socket;
@@ -5571,6 +5594,7 @@ static int ust_app_flush_session(struct ltt_ust_session *usess)
                        auto locked_registry = ust_session_reg->lock();
                        (void) push_metadata(locked_registry, usess->consumer);
                }
+
                break;
        }
        case LTTNG_BUFFER_PER_PID:
@@ -5578,14 +5602,17 @@ static int ust_app_flush_session(struct ltt_ust_session *usess)
                struct ust_app_session *ua_sess;
                struct lttng_ht_iter iter;
                struct ust_app *app;
+               lttng::urcu::read_lock_guard read_lock;
 
                cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
                        ua_sess = lookup_session_by_app(usess, app);
                        if (ua_sess == nullptr) {
                                continue;
                        }
+
                        (void) ust_app_flush_app_session(app, ua_sess);
                }
+
                break;
        }
        default:
@@ -5594,7 +5621,6 @@ static int ust_app_flush_session(struct ltt_ust_session *usess)
                break;
        }
 
-       rcu_read_unlock();
        health_code_update();
        return ret;
 }
@@ -5608,7 +5634,7 @@ static int ust_app_clear_quiescent_app_session(struct ust_app *app, struct ust_a
 
        DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        if (!app->compatible) {
                goto end_not_compatible;
@@ -5655,7 +5681,6 @@ end_unlock:
        pthread_mutex_unlock(&ua_sess->lock);
 
 end_not_compatible:
-       rcu_read_unlock();
        health_code_update();
        return ret;
 }
@@ -5672,8 +5697,6 @@ static int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
 
        DBG("Clearing stream quiescent state for all ust apps");
 
-       rcu_read_lock();
-
        switch (usess->buffer_type) {
        case LTTNG_BUFFER_PER_UID:
        {
@@ -5687,6 +5710,7 @@ static int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
                cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
                        struct consumer_socket *socket;
                        struct buffer_reg_channel *buf_reg_chan;
+                       lttng::urcu::read_lock_guard read_lock;
 
                        /* Get associated consumer socket.*/
                        socket = consumer_find_socket_by_bitness(reg->bits_per_long,
@@ -5711,6 +5735,7 @@ static int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
                                                                        buf_reg_chan->consumer_key);
                        }
                }
+
                break;
        }
        case LTTNG_BUFFER_PER_PID:
@@ -5718,6 +5743,7 @@ static int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
                struct ust_app_session *ua_sess;
                struct lttng_ht_iter iter;
                struct ust_app *app;
+               lttng::urcu::read_lock_guard read_lock;
 
                cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
                        ua_sess = lookup_session_by_app(usess, app);
@@ -5726,6 +5752,7 @@ static int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
                        }
                        (void) ust_app_clear_quiescent_app_session(app, ua_sess);
                }
+
                break;
        }
        default:
@@ -5734,7 +5761,6 @@ static int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
                break;
        }
 
-       rcu_read_unlock();
        health_code_update();
        return ret;
 }
@@ -5751,7 +5777,7 @@ static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
 
        DBG("Destroy tracing for ust app pid %d", app->pid);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        if (!app->compatible) {
                goto end;
@@ -5791,7 +5817,6 @@ static int destroy_trace(struct ltt_ust_session *usess, struct ust_app *app)
                }
        }
 end:
-       rcu_read_unlock();
        health_code_update();
        return 0;
 }
@@ -5812,8 +5837,6 @@ int ust_app_start_trace_all(struct ltt_ust_session *usess)
         */
        usess->active = true;
 
-       rcu_read_lock();
-
        /*
         * In a start-stop-start use-case, we need to clear the quiescent state
         * of each channel set by the prior stop command, thus ensuring that a
@@ -5822,11 +5845,13 @@ int ust_app_start_trace_all(struct ltt_ust_session *usess)
         */
        (void) ust_app_clear_quiescent_session(usess);
 
-       cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               ust_app_global_update(usess, app);
-       }
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-       rcu_read_unlock();
+               cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       ust_app_global_update(usess, app);
+               }
+       }
 
        return 0;
 }
@@ -5849,20 +5874,20 @@ int ust_app_stop_trace_all(struct ltt_ust_session *usess)
         */
        usess->active = false;
 
-       rcu_read_lock();
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-       cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               ret = ust_app_stop_trace(usess, app);
-               if (ret < 0) {
-                       /* Continue to next apps even on error */
-                       continue;
+               cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       ret = ust_app_stop_trace(usess, app);
+                       if (ret < 0) {
+                               /* Continue to next apps even on error */
+                               continue;
+                       }
                }
        }
 
        (void) ust_app_flush_session(usess);
 
-       rcu_read_unlock();
-
        return 0;
 }
 
@@ -5877,18 +5902,18 @@ int ust_app_destroy_trace_all(struct ltt_ust_session *usess)
 
        DBG("Destroy all UST traces");
 
-       rcu_read_lock();
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-       cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               ret = destroy_trace(usess, app);
-               if (ret < 0) {
-                       /* Continue to next apps even on error */
-                       continue;
+               cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       ret = destroy_trace(usess, app);
+                       if (ret < 0) {
+                               /* Continue to next apps even on error */
+                               continue;
+                       }
                }
        }
 
-       rcu_read_unlock();
-
        return 0;
 }
 
@@ -6038,54 +6063,55 @@ static void ust_app_synchronize_event_notifier_rules(struct ust_app *app)
                }
        }
 
-       rcu_read_lock();
-       /* Remove all unknown event sources from the app. */
-       cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
-                                &app_trigger_iter.iter,
-                                event_notifier_rule,
-                                node.node) {
-               const uint64_t app_token = event_notifier_rule->token;
-               bool found = false;
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               /* Remove all unknown event sources from the app. */
+               cds_lfht_for_each_entry (app->token_to_event_notifier_rule_ht->ht,
+                                        &app_trigger_iter.iter,
+                                        event_notifier_rule,
+                                        node.node) {
+                       const uint64_t app_token = event_notifier_rule->token;
+                       bool found = false;
 
-               /*
-                * Check if the app event trigger still exists on the
-                * notification side.
-                */
-               for (i = 0; i < count; i++) {
-                       uint64_t notification_thread_token;
-                       const struct lttng_trigger *trigger =
-                               lttng_triggers_get_at_index(triggers, i);
+                       /*
+                        * Check if the app event trigger still exists on the
+                        * notification side.
+                        */
+                       for (i = 0; i < count; i++) {
+                               uint64_t notification_thread_token;
+                               const struct lttng_trigger *trigger =
+                                       lttng_triggers_get_at_index(triggers, i);
 
-                       LTTNG_ASSERT(trigger);
+                               LTTNG_ASSERT(trigger);
 
-                       notification_thread_token = lttng_trigger_get_tracer_token(trigger);
+                               notification_thread_token = lttng_trigger_get_tracer_token(trigger);
 
-                       if (notification_thread_token == app_token) {
-                               found = true;
-                               break;
+                               if (notification_thread_token == app_token) {
+                                       found = true;
+                                       break;
+                               }
                        }
-               }
 
-               if (found) {
-                       /* Still valid. */
-                       continue;
-               }
+                       if (found) {
+                               /* Still valid. */
+                               continue;
+                       }
 
-               /*
-                * This trigger was unregistered, disable it on the tracer's
-                * side.
-                */
-               ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &app_trigger_iter);
-               LTTNG_ASSERT(ret == 0);
+                       /*
+                        * This trigger was unregistered, disable it on the tracer's
+                        * side.
+                        */
+                       ret = lttng_ht_del(app->token_to_event_notifier_rule_ht, &app_trigger_iter);
+                       LTTNG_ASSERT(ret == 0);
 
-               /* Callee logs errors. */
-               (void) disable_ust_object(app, event_notifier_rule->obj);
+                       /* Callee logs errors. */
+                       (void) disable_ust_object(app, event_notifier_rule->obj);
 
-               delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app);
+                       delete_ust_app_event_notifier_rule(app->sock, event_notifier_rule, app);
+               }
        }
 
-       rcu_read_unlock();
-
 end:
        lttng_triggers_destroy(triggers);
        return;
@@ -6179,28 +6205,28 @@ static void ust_app_synchronize(struct ltt_ust_session *usess, struct ust_app *a
                goto deleted_session;
        }
 
-       rcu_read_lock();
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-       ust_app_synchronize_all_channels(usess, ua_sess, app);
+               ust_app_synchronize_all_channels(usess, ua_sess, app);
 
-       /*
-        * Create the metadata for the application. This returns gracefully if a
-        * metadata was already set for the session.
-        *
-        * The metadata channel must be created after the data channels as the
-        * consumer daemon assumes this ordering. When interacting with a relay
-        * daemon, the consumer will use this assumption to send the
-        * "STREAMS_SENT" message to the relay daemon.
-        */
-       ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
-       if (ret < 0) {
-               ERR("Metadata creation failed for app sock %d for session id %" PRIu64,
-                   app->sock,
-                   usess->id);
+               /*
+                * Create the metadata for the application. This returns gracefully if a
+                * metadata was already set for the session.
+                *
+                * The metadata channel must be created after the data channels as the
+                * consumer daemon assumes this ordering. When interacting with a relay
+                * daemon, the consumer will use this assumption to send the
+                * "STREAMS_SENT" message to the relay daemon.
+                */
+               ret = create_ust_app_metadata(ua_sess, app, usess->consumer);
+               if (ret < 0) {
+                       ERR("Metadata creation failed for app sock %d for session id %" PRIu64,
+                           app->sock,
+                           usess->id);
+               }
        }
 
-       rcu_read_unlock();
-
 deleted_session:
        pthread_mutex_unlock(&ua_sess->lock);
 end:
@@ -6285,11 +6311,13 @@ void ust_app_global_update_all(struct ltt_ust_session *usess)
        struct lttng_ht_iter iter;
        struct ust_app *app;
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               ust_app_global_update(usess, app);
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       ust_app_global_update(usess, app);
+               }
        }
-       rcu_read_unlock();
 }
 
 void ust_app_global_update_all_event_notifier_rules()
@@ -6297,12 +6325,10 @@ void ust_app_global_update_all_event_notifier_rules()
        struct lttng_ht_iter iter;
        struct ust_app *app;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
                ust_app_global_update_event_notifier_rules(app);
        }
-
-       rcu_read_unlock();
 }
 
 /*
@@ -6321,43 +6347,44 @@ int ust_app_add_ctx_channel_glb(struct ltt_ust_session *usess,
 
        LTTNG_ASSERT(usess->active);
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               if (!app->compatible) {
-                       /*
-                        * TODO: In time, we should notice the caller of this error by
-                        * telling him that this is a version error.
-                        */
-                       continue;
-               }
-               ua_sess = lookup_session_by_app(usess, app);
-               if (ua_sess == nullptr) {
-                       continue;
-               }
+       {
+               lttng::urcu::read_lock_guard read_lock;
+               cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       if (!app->compatible) {
+                               /*
+                                * TODO: In time, we should notice the caller of this error by
+                                * telling him that this is a version error.
+                                */
+                               continue;
+                       }
+                       ua_sess = lookup_session_by_app(usess, app);
+                       if (ua_sess == nullptr) {
+                               continue;
+                       }
 
-               pthread_mutex_lock(&ua_sess->lock);
+                       pthread_mutex_lock(&ua_sess->lock);
 
-               if (ua_sess->deleted) {
-                       pthread_mutex_unlock(&ua_sess->lock);
-                       continue;
-               }
+                       if (ua_sess->deleted) {
+                               pthread_mutex_unlock(&ua_sess->lock);
+                               continue;
+                       }
 
-               /* Lookup channel in the ust app session */
-               lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
-               ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
-               if (ua_chan_node == nullptr) {
-                       goto next_app;
-               }
-               ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
-               ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
-               if (ret < 0) {
-                       goto next_app;
+                       /* Lookup channel in the ust app session */
+                       lttng_ht_lookup(ua_sess->channels, (void *) uchan->name, &uiter);
+                       ua_chan_node = lttng_ht_iter_get_node_str(&uiter);
+                       if (ua_chan_node == nullptr) {
+                               goto next_app;
+                       }
+                       ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
+                       ret = create_ust_app_channel_context(ua_chan, &uctx->ctx, app);
+                       if (ret < 0) {
+                               goto next_app;
+                       }
+               next_app:
+                       pthread_mutex_unlock(&ua_sess->lock);
                }
-       next_app:
-               pthread_mutex_unlock(&ua_sess->lock);
        }
 
-       rcu_read_unlock();
        return ret;
 }
 
@@ -7049,7 +7076,7 @@ void ust_app_notify_sock_unregister(int sock)
 
        LTTNG_ASSERT(sock >= 0);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        obj = zmalloc<ust_app_notify_sock_obj>();
        if (!obj) {
@@ -7096,7 +7123,6 @@ void ust_app_notify_sock_unregister(int sock)
        (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
 
 close_socket:
-       rcu_read_unlock();
 
        /*
         * Close socket after a grace period to avoid for the socket to be reused
@@ -7139,13 +7165,13 @@ enum lttng_error_code ust_app_snapshot_record(const struct ltt_ust_session *uses
        LTTNG_ASSERT(usess);
        LTTNG_ASSERT(output);
 
-       rcu_read_lock();
-
        switch (usess->buffer_type) {
        case LTTNG_BUFFER_PER_UID:
        {
                struct buffer_reg_uid *reg;
 
+               lttng::urcu::read_lock_guard read_lock;
+
                cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
                        struct buffer_reg_channel *buf_reg_chan;
                        struct consumer_socket *socket;
@@ -7208,10 +7234,13 @@ enum lttng_error_code ust_app_snapshot_record(const struct ltt_ust_session *uses
                                goto error;
                        }
                }
+
                break;
        }
        case LTTNG_BUFFER_PER_PID:
        {
+               lttng::urcu::read_lock_guard read_lock;
+
                cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
                        struct consumer_socket *socket;
                        struct lttng_ht_iter chan_iter;
@@ -7298,7 +7327,6 @@ enum lttng_error_code ust_app_snapshot_record(const struct ltt_ust_session *uses
 
 error:
        free(trace_path);
-       rcu_read_unlock();
        return status;
 }
 
@@ -7322,7 +7350,8 @@ uint64_t ust_app_get_size_one_more_packet_per_stream(const struct ltt_ust_sessio
                cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
                        struct buffer_reg_channel *buf_reg_chan;
 
-                       rcu_read_lock();
+                       lttng::urcu::read_lock_guard read_lock;
+
                        cds_lfht_for_each_entry (
                                reg->registry->channels->ht, &iter.iter, buf_reg_chan, node.node) {
                                if (cur_nr_packets >= buf_reg_chan->num_subbuf) {
@@ -7334,13 +7363,13 @@ uint64_t ust_app_get_size_one_more_packet_per_stream(const struct ltt_ust_sessio
                                }
                                tot_size += buf_reg_chan->subbuf_size * buf_reg_chan->stream_count;
                        }
-                       rcu_read_unlock();
                }
                break;
        }
        case LTTNG_BUFFER_PER_PID:
        {
-               rcu_read_lock();
+               lttng::urcu::read_lock_guard read_lock;
+
                cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
                        struct ust_app_channel *ua_chan;
                        struct ust_app_session *ua_sess;
@@ -7364,7 +7393,6 @@ uint64_t ust_app_get_size_one_more_packet_per_stream(const struct ltt_ust_sessio
                                tot_size += ua_chan->attr.subbuf_size * ua_chan->streams.count;
                        }
                }
-               rcu_read_unlock();
                break;
        }
        default:
@@ -7425,11 +7453,12 @@ int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
        *discarded = 0;
        *lost = 0;
 
-       rcu_read_lock();
        /*
         * Iterate over every registered applications. Sum counters for
         * all applications containing requested session and channel.
         */
+       lttng::urcu::read_lock_guard read_lock;
+
        cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
                struct lttng_ht_iter uiter;
 
@@ -7466,7 +7495,6 @@ int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
                }
        }
 
-       rcu_read_unlock();
        return ret;
 }
 
@@ -7477,7 +7505,7 @@ static int ust_app_regenerate_statedump(struct ltt_ust_session *usess, struct us
 
        DBG("Regenerating the metadata for ust app pid %d", app->pid);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        ua_sess = lookup_session_by_app(usess, app);
        if (ua_sess == nullptr) {
@@ -7499,7 +7527,6 @@ end_unlock:
        pthread_mutex_unlock(&ua_sess->lock);
 
 end:
-       rcu_read_unlock();
        health_code_update();
        return ret;
 }
@@ -7515,7 +7542,7 @@ int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
 
        DBG("Regenerating the metadata for all UST apps");
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
                if (!app->compatible) {
@@ -7529,8 +7556,6 @@ int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
                }
        }
 
-       rcu_read_unlock();
-
        return 0;
 }
 
@@ -7549,8 +7574,6 @@ enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
 
        LTTNG_ASSERT(usess);
 
-       rcu_read_lock();
-
        switch (usess->buffer_type) {
        case LTTNG_BUFFER_PER_UID:
        {
@@ -7559,6 +7582,7 @@ enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
                cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
                        struct buffer_reg_channel *buf_reg_chan;
                        struct consumer_socket *socket;
+                       lttng::urcu::read_lock_guard read_lock;
 
                        /* Get consumer socket to use to push the metadata.*/
                        socket = consumer_find_socket_by_bitness(reg->bits_per_long,
@@ -7612,6 +7636,8 @@ enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
        }
        case LTTNG_BUFFER_PER_PID:
        {
+               lttng::urcu::read_lock_guard read_lock;
+
                cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
                        struct consumer_socket *socket;
                        struct lttng_ht_iter chan_iter;
@@ -7683,7 +7709,6 @@ enum lttng_error_code ust_app_rotate_session(struct ltt_session *session)
        cmd_ret = LTTNG_OK;
 
 error:
-       rcu_read_unlock();
        return cmd_ret;
 }
 
@@ -7696,12 +7721,12 @@ enum lttng_error_code ust_app_create_channel_subdirectories(const struct ltt_ust
        int fmt_ret;
 
        LTTNG_ASSERT(usess->current_trace_chunk);
-       rcu_read_lock();
 
        switch (usess->buffer_type) {
        case LTTNG_BUFFER_PER_UID:
        {
                struct buffer_reg_uid *reg;
+               lttng::urcu::read_lock_guard read_lock;
 
                cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
                        fmt_ret = asprintf(&pathname_index,
@@ -7732,6 +7757,7 @@ enum lttng_error_code ust_app_create_channel_subdirectories(const struct ltt_ust
        case LTTNG_BUFFER_PER_PID:
        {
                struct ust_app *app;
+               lttng::urcu::read_lock_guard read_lock;
 
                /*
                 * Create the toplevel ust/ directory in case no apps are running.
@@ -7787,7 +7813,6 @@ enum lttng_error_code ust_app_create_channel_subdirectories(const struct ltt_ust
 
        ret = LTTNG_OK;
 error:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -7806,8 +7831,6 @@ enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
 
        LTTNG_ASSERT(usess);
 
-       rcu_read_lock();
-
        if (usess->active) {
                ERR("Expecting inactive session %s (%" PRIu64 ")", session->name, session->id);
                cmd_ret = LTTNG_ERR_FATAL;
@@ -7818,6 +7841,7 @@ enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
        case LTTNG_BUFFER_PER_UID:
        {
                struct buffer_reg_uid *reg;
+               lttng::urcu::read_lock_guard read_lock;
 
                cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
                        struct buffer_reg_channel *buf_reg_chan;
@@ -7859,6 +7883,8 @@ enum lttng_error_code ust_app_clear_session(struct ltt_session *session)
        }
        case LTTNG_BUFFER_PER_PID:
        {
+               lttng::urcu::read_lock_guard read_lock;
+
                cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
                        struct consumer_socket *socket;
                        struct lttng_ht_iter chan_iter;
@@ -7939,7 +7965,6 @@ error:
 
 error_socket:
 end:
-       rcu_read_unlock();
        return cmd_ret;
 }
 
@@ -7967,8 +7992,6 @@ enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
 
        LTTNG_ASSERT(usess);
 
-       rcu_read_lock();
-
        switch (usess->buffer_type) {
        case LTTNG_BUFFER_PER_UID:
        {
@@ -7977,6 +8000,7 @@ enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
                cds_list_for_each_entry (reg, &usess->buffer_reg_uid_list, lnode) {
                        struct buffer_reg_channel *buf_reg_chan;
                        struct consumer_socket *socket;
+                       lttng::urcu::read_lock_guard read_lock;
 
                        socket = consumer_find_socket_by_bitness(reg->bits_per_long,
                                                                 usess->consumer);
@@ -8001,6 +8025,7 @@ enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
        case LTTNG_BUFFER_PER_PID:
        {
                struct ust_app *app;
+               lttng::urcu::read_lock_guard read_lock;
 
                cds_lfht_for_each_entry (ust_app_ht->ht, &iter.iter, app, pid_n.node) {
                        struct consumer_socket *socket;
@@ -8056,7 +8081,6 @@ enum lttng_error_code ust_app_open_packets(struct ltt_session *session)
        }
 
 error:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -8085,4 +8109,4 @@ lsu::ctl_field_quirks ust_app::ctl_field_quirks() const
         */
        return v_major <= 9 ? lsu::ctl_field_quirks::UNDERSCORE_PREFIXED_VARIANT_TAG_MAPPINGS :
                              lsu::ctl_field_quirks::NONE;
-}
\ No newline at end of file
+}
index 1d8e8ac054669736dd464f78fde48d783358a3bd..8bcc246121346b239c651ac66c8a23b8aab926a3 100644 (file)
@@ -462,7 +462,7 @@ int ust_consumer_metadata_request(struct consumer_socket *socket)
 
        LTTNG_ASSERT(socket);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        health_code_update();
 
        /* Wait for a metadata request */
@@ -520,6 +520,5 @@ int ust_consumer_metadata_request(struct consumer_socket *socket)
        ret = 0;
 
 end:
-       rcu_read_unlock();
        return ret;
 }
index ef2af356d8f2857f7ce2a78b5df5a3f4499c4e5a..efdc38dcc35ced97148d12857d767c013f242666 100644 (file)
@@ -11,6 +11,7 @@
 #include <common/error.hpp>
 #include <common/macros.hpp>
 #include <common/sessiond-comm/sessiond-comm.hpp>
+#include <common/urcu.hpp>
 
 #include <lttng/channel-internal.hpp>
 #include <lttng/channel.h>
index 161289d2104adbb4e99736f07995d1a5dea083ba..ab9b39111a02580bd176baac55b7c3b95379d123 100644 (file)
@@ -19,6 +19,7 @@
 #include <common/kernel-ctl/kernel-ctl.hpp>
 #include <common/macros.hpp>
 #include <common/relayd/relayd.hpp>
+#include <common/urcu.hpp>
 #include <common/ust-consumer/ust-consumer.hpp>
 #include <common/utils.hpp>
 
@@ -371,7 +372,7 @@ int consumer_stream_sync_metadata(struct lttng_consumer_local_data *ctx, uint64_
        /* Ease our life a bit. */
        ht = the_consumer_data.stream_list_ht;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        /* Search the metadata associated with the session id of the given stream. */
 
@@ -400,7 +401,6 @@ int consumer_stream_sync_metadata(struct lttng_consumer_local_data *ctx, uint64_
        ret = 0;
 
 end:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -642,6 +642,7 @@ struct lttng_consumer_stream *consumer_stream_create(struct lttng_consumer_chann
 {
        int ret;
        struct lttng_consumer_stream *stream;
+       lttng::urcu::read_lock_guard read_lock;
 
        stream = zmalloc<lttng_consumer_stream>();
        if (stream == nullptr) {
@@ -650,8 +651,6 @@ struct lttng_consumer_stream *consumer_stream_create(struct lttng_consumer_chann
                goto end;
        }
 
-       rcu_read_lock();
-
        if (trace_chunk && !lttng_trace_chunk_get(trace_chunk)) {
                ERR("Failed to acquire trace chunk reference during the creation of a stream");
                ret = -1;
@@ -726,8 +725,6 @@ struct lttng_consumer_stream *consumer_stream_create(struct lttng_consumer_chann
             stream->net_seq_idx,
             stream->session_id);
 
-       rcu_read_unlock();
-
        lttng_dynamic_array_init(
                &stream->read_subbuffer_ops.post_consume_cbs, sizeof(post_consume_cb), nullptr);
 
@@ -772,7 +769,6 @@ struct lttng_consumer_stream *consumer_stream_create(struct lttng_consumer_chann
        return stream;
 
 error:
-       rcu_read_unlock();
        lttng_trace_chunk_put(stream->trace_chunk);
        lttng_dynamic_array_reset(&stream->read_subbuffer_ops.post_consume_cbs);
        free(stream);
@@ -854,14 +850,12 @@ void consumer_stream_close_output(struct lttng_consumer_stream *stream)
        stream->trace_chunk = nullptr;
 
        /* Check and cleanup relayd if needed. */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        relayd = consumer_find_relayd(stream->net_seq_idx);
        if (relayd != nullptr) {
                consumer_stream_relayd_close(stream, relayd);
                stream->net_seq_idx = -1ULL;
        }
-
-       rcu_read_unlock();
 }
 
 /*
@@ -879,7 +873,7 @@ void consumer_stream_delete(struct lttng_consumer_stream *stream, struct lttng_h
        /* Should NEVER be called not in monitor mode. */
        LTTNG_ASSERT(stream->chan->monitor);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        if (ht) {
                iter.iter.node = &stream->node.node;
@@ -902,8 +896,6 @@ void consumer_stream_delete(struct lttng_consumer_stream *stream, struct lttng_h
        /* See the previous ht del on why we ignore the returned value. */
        (void) lttng_ht_del(the_consumer_data.stream_list_ht, &iter);
 
-       rcu_read_unlock();
-
        if (!stream->metadata_flag) {
                /* Decrement the stream count of the global consumer data. */
                LTTNG_ASSERT(the_consumer_data.stream_count > 0);
@@ -1100,7 +1092,7 @@ int consumer_stream_write_index(struct lttng_consumer_stream *stream,
        LTTNG_ASSERT(stream);
        LTTNG_ASSERT(element);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        if (stream->net_seq_idx != (uint64_t) -1ULL) {
                struct consumer_relayd_sock_pair *relayd;
                relayd = consumer_find_relayd(stream->net_seq_idx);
@@ -1139,7 +1131,6 @@ int consumer_stream_write_index(struct lttng_consumer_stream *stream,
        }
 
 error:
-       rcu_read_unlock();
        return ret;
 }
 
index 7f88a9712825de9151c4d082f6951ad11e81db1a..133ec6c0eaaca52d6348241b5ca61e08556eb41c 100644 (file)
@@ -14,6 +14,7 @@
 #include <common/consumer/consumer-timer.hpp>
 #include <common/kernel-consumer/kernel-consumer.hpp>
 #include <common/kernel-ctl/kernel-ctl.hpp>
+#include <common/urcu.hpp>
 #include <common/ust-consumer/ust-consumer.hpp>
 
 #include <bin/lttng-consumerd/health-consumerd.hpp>
@@ -282,23 +283,23 @@ static void live_timer(struct lttng_consumer_local_data *ctx, siginfo_t *si)
 
        DBG("Live timer for channel %" PRIu64, channel->key);
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry_duplicate(ht->ht,
-                                         ht->hash_fct(&channel->key, lttng_ht_seed),
-                                         ht->match_fct,
-                                         &channel->key,
-                                         &iter.iter,
-                                         stream,
-                                         node_channel_id.node)
        {
-               ret = check_stream(stream, flush_index);
-               if (ret < 0) {
-                       goto error_unlock;
+               lttng::urcu::read_lock_guard read_lock;
+               cds_lfht_for_each_entry_duplicate(ht->ht,
+                                                 ht->hash_fct(&channel->key, lttng_ht_seed),
+                                                 ht->match_fct,
+                                                 &channel->key,
+                                                 &iter.iter,
+                                                 stream,
+                                                 node_channel_id.node)
+               {
+                       ret = check_stream(stream, flush_index);
+                       if (ret < 0) {
+                               goto error_unlock;
+                       }
                }
        }
-
 error_unlock:
-       rcu_read_unlock();
 
 error:
        return;
@@ -575,7 +576,7 @@ static int sample_channel_positions(struct lttng_consumer_channel *channel,
 
        *_total_consumed = 0;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        cds_lfht_for_each_entry_duplicate(ht->ht,
                                          ht->hash_fct(&channel->key, lttng_ht_seed),
@@ -633,7 +634,6 @@ static int sample_channel_positions(struct lttng_consumer_channel *channel,
        *_highest_use = high;
        *_lowest_use = low;
 end:
-       rcu_read_unlock();
        if (empty_channel) {
                ret = -1;
        }
index 4d64b0ea2be2b7f531f5fc03c1eef4573d2eefc8..27b34a39f249071867e49e9df7fb438517bb6ee8 100644 (file)
@@ -29,6 +29,7 @@
 #include <common/time.hpp>
 #include <common/trace-chunk-registry.hpp>
 #include <common/trace-chunk.hpp>
+#include <common/urcu.hpp>
 #include <common/ust-consumer/ust-consumer.hpp>
 #include <common/utils.hpp>
 
@@ -202,7 +203,7 @@ static struct lttng_consumer_stream *find_stream(uint64_t key, struct lttng_ht *
                return nullptr;
        }
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        lttng_ht_lookup(ht, &key, &iter);
        node = lttng_ht_iter_get_node_u64(&iter);
@@ -210,8 +211,6 @@ static struct lttng_consumer_stream *find_stream(uint64_t key, struct lttng_ht *
                stream = lttng::utils::container_of(node, &lttng_consumer_stream::node);
        }
 
-       rcu_read_unlock();
-
        return stream;
 }
 
@@ -219,7 +218,7 @@ static void steal_stream_key(uint64_t key, struct lttng_ht *ht)
 {
        struct lttng_consumer_stream *stream;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        stream = find_stream(key, ht);
        if (stream) {
                stream->key = (uint64_t) -1ULL;
@@ -230,7 +229,6 @@ static void steal_stream_key(uint64_t key, struct lttng_ht *ht)
                 */
                stream->node.key = (uint64_t) -1ULL;
        }
-       rcu_read_unlock();
 }
 
 /*
@@ -273,7 +271,7 @@ static void steal_channel_key(uint64_t key)
 {
        struct lttng_consumer_channel *channel;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        channel = consumer_find_channel(key);
        if (channel) {
                channel->key = (uint64_t) -1ULL;
@@ -284,7 +282,6 @@ static void steal_channel_key(uint64_t key)
                 */
                channel->node.key = (uint64_t) -1ULL;
        }
-       rcu_read_unlock();
 }
 
 static void free_channel_rcu(struct rcu_head *head)
@@ -404,7 +401,7 @@ void consumer_del_channel(struct lttng_consumer_channel *channel)
        if (channel->is_published) {
                int ret;
 
-               rcu_read_lock();
+               lttng::urcu::read_lock_guard read_lock;
                iter.iter.node = &channel->node.node;
                ret = lttng_ht_del(the_consumer_data.channel_ht, &iter);
                LTTNG_ASSERT(!ret);
@@ -412,7 +409,6 @@ void consumer_del_channel(struct lttng_consumer_channel *channel)
                iter.iter.node = &channel->channels_by_session_id_ht_node.node;
                ret = lttng_ht_del(the_consumer_data.channels_by_session_id_ht, &iter);
                LTTNG_ASSERT(!ret);
-               rcu_read_unlock();
        }
 
        channel->is_deleted = true;
@@ -431,14 +427,15 @@ static void cleanup_relayd_ht()
        struct lttng_ht_iter iter;
        struct consumer_relayd_sock_pair *relayd;
 
-       rcu_read_lock();
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-       cds_lfht_for_each_entry (the_consumer_data.relayd_ht->ht, &iter.iter, relayd, node.node) {
-               consumer_destroy_relayd(relayd);
+               cds_lfht_for_each_entry (
+                       the_consumer_data.relayd_ht->ht, &iter.iter, relayd, node.node) {
+                       consumer_destroy_relayd(relayd);
+               }
        }
 
-       rcu_read_unlock();
-
        lttng_ht_destroy(the_consumer_data.relayd_ht);
 }
 
@@ -457,7 +454,7 @@ static void update_endpoint_status_by_netidx(uint64_t net_seq_idx,
 
        DBG("Consumer set delete flag on stream by idx %" PRIu64, net_seq_idx);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        /* Let's begin with metadata */
        cds_lfht_for_each_entry (metadata_ht->ht, &iter.iter, stream, node.node) {
@@ -474,7 +471,6 @@ static void update_endpoint_status_by_netidx(uint64_t net_seq_idx,
                        DBG("Delete flag set to data stream %d", stream->wait_fd);
                }
        }
-       rcu_read_unlock();
 }
 
 /*
@@ -581,7 +577,7 @@ void consumer_add_data_stream(struct lttng_consumer_stream *stream)
        pthread_mutex_lock(&stream->chan->lock);
        pthread_mutex_lock(&stream->chan->timer_lock);
        pthread_mutex_lock(&stream->lock);
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        /* Steal stream identifier to avoid having streams with the same key */
        steal_stream_key(stream->key, ht);
@@ -614,7 +610,6 @@ void consumer_add_data_stream(struct lttng_consumer_stream *stream)
        the_consumer_data.stream_count++;
        the_consumer_data.need_update = 1;
 
-       rcu_read_unlock();
        pthread_mutex_unlock(&stream->lock);
        pthread_mutex_unlock(&stream->chan->timer_lock);
        pthread_mutex_unlock(&stream->chan->lock);
@@ -720,7 +715,7 @@ int consumer_send_relayd_stream(struct lttng_consumer_stream *stream, char *path
        LTTNG_ASSERT(path);
 
        /* The stream is not metadata. Get relayd reference if exists. */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        relayd = consumer_find_relayd(stream->net_seq_idx);
        if (relayd != nullptr) {
                /* Add stream on the relayd */
@@ -757,7 +752,6 @@ int consumer_send_relayd_stream(struct lttng_consumer_stream *stream, char *path
            stream->net_seq_idx);
 
 end:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -774,7 +768,7 @@ int consumer_send_relayd_streams_sent(uint64_t net_seq_idx)
        LTTNG_ASSERT(net_seq_idx != -1ULL);
 
        /* The stream is not metadata. Get relayd reference if exists. */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        relayd = consumer_find_relayd(net_seq_idx);
        if (relayd != nullptr) {
                /* Add stream on the relayd */
@@ -797,7 +791,6 @@ int consumer_send_relayd_streams_sent(uint64_t net_seq_idx)
        DBG("All streams sent relayd id %" PRIu64, net_seq_idx);
 
 end:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -809,12 +802,11 @@ void close_relayd_stream(struct lttng_consumer_stream *stream)
        struct consumer_relayd_sock_pair *relayd;
 
        /* The stream is not metadata. Get relayd reference if exists. */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        relayd = consumer_find_relayd(stream->net_seq_idx);
        if (relayd) {
                consumer_stream_relayd_close(stream, relayd);
        }
-       rcu_read_unlock();
 }
 
 /*
@@ -1127,11 +1119,10 @@ int consumer_add_channel(struct lttng_consumer_channel *channel,
         */
        steal_channel_key(channel->key);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        lttng_ht_add_unique_u64(the_consumer_data.channel_ht, &channel->node);
        lttng_ht_add_u64(the_consumer_data.channels_by_session_id_ht,
                         &channel->channels_by_session_id_ht_node);
-       rcu_read_unlock();
        channel->is_published = true;
 
        pthread_mutex_unlock(&channel->timer_lock);
@@ -1169,35 +1160,34 @@ static int update_poll_array(struct lttng_consumer_local_data *ctx,
 
        DBG("Updating poll fd array");
        *nb_inactive_fd = 0;
-       rcu_read_lock();
-       cds_lfht_for_each_entry (ht->ht, &iter.iter, stream, node.node) {
-               /*
-                * Only active streams with an active end point can be added to the
-                * poll set and local stream storage of the thread.
-                *
-                * There is a potential race here for endpoint_status to be updated
-                * just after the check. However, this is OK since the stream(s) will
-                * be deleted once the thread is notified that the end point state has
-                * changed where this function will be called back again.
-                *
-                * We track the number of inactive FDs because they still need to be
-                * closed by the polling thread after a wakeup on the data_pipe or
-                * metadata_pipe.
-                */
-               if (stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
-                       (*nb_inactive_fd)++;
-                       continue;
+
+       {
+               lttng::urcu::read_lock_guard read_lock;
+               cds_lfht_for_each_entry (ht->ht, &iter.iter, stream, node.node) {
+                       /*
+                        * Only active streams with an active end point can be added to the
+                        * poll set and local stream storage of the thread.
+                        *
+                        * There is a potential race here for endpoint_status to be updated
+                        * just after the check. However, this is OK since the stream(s) will
+                        * be deleted once the thread is notified that the end point state has
+                        * changed where this function will be called back again.
+                        *
+                        * We track the number of inactive FDs because they still need to be
+                        * closed by the polling thread after a wakeup on the data_pipe or
+                        * metadata_pipe.
+                        */
+                       if (stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
+                               (*nb_inactive_fd)++;
+                               continue;
+                       }
+
+                       (*pollfd)[i].fd = stream->wait_fd;
+                       (*pollfd)[i].events = POLLIN | POLLPRI;
+                       local_stream[i] = stream;
+                       i++;
                }
-               /*
-                * This clobbers way too much the debug output. Uncomment that if you
-                * need it for debugging purposes.
-                */
-               (*pollfd)[i].fd = stream->wait_fd;
-               (*pollfd)[i].events = POLLIN | POLLPRI;
-               local_stream[i] = stream;
-               i++;
        }
-       rcu_read_unlock();
 
        /*
         * Insert the consumer_data_pipe at the end of the array and don't
@@ -1278,14 +1268,15 @@ void lttng_consumer_cleanup()
        struct lttng_consumer_channel *channel;
        unsigned int trace_chunks_left;
 
-       rcu_read_lock();
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-       cds_lfht_for_each_entry (the_consumer_data.channel_ht->ht, &iter.iter, channel, node.node) {
-               consumer_del_channel(channel);
+               cds_lfht_for_each_entry (
+                       the_consumer_data.channel_ht->ht, &iter.iter, channel, node.node) {
+                       consumer_del_channel(channel);
+               }
        }
 
-       rcu_read_unlock();
-
        lttng_ht_destroy(the_consumer_data.channel_ht);
        lttng_ht_destroy(the_consumer_data.channels_by_session_id_ht);
 
@@ -1486,15 +1477,16 @@ static void destroy_data_stream_ht(struct lttng_ht *ht)
                return;
        }
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (ht->ht, &iter.iter, stream, node.node) {
-               /*
-                * Ignore return value since we are currently cleaning up so any error
-                * can't be handled.
-                */
-               (void) consumer_del_stream(stream, ht);
+       {
+               lttng::urcu::read_lock_guard read_lock;
+               cds_lfht_for_each_entry (ht->ht, &iter.iter, stream, node.node) {
+                       /*
+                        * Ignore return value since we are currently cleaning up so any error
+                        * can't be handled.
+                        */
+                       (void) consumer_del_stream(stream, ht);
+               }
        }
-       rcu_read_unlock();
 
        lttng_ht_destroy(ht);
 }
@@ -1512,15 +1504,16 @@ static void destroy_metadata_stream_ht(struct lttng_ht *ht)
                return;
        }
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (ht->ht, &iter.iter, stream, node.node) {
-               /*
-                * Ignore return value since we are currently cleaning up so any error
-                * can't be handled.
-                */
-               (void) consumer_del_metadata_stream(stream, ht);
+       {
+               lttng::urcu::read_lock_guard read_lock;
+               cds_lfht_for_each_entry (ht->ht, &iter.iter, stream, node.node) {
+                       /*
+                        * Ignore return value since we are currently cleaning up so any error
+                        * can't be handled.
+                        */
+                       (void) consumer_del_metadata_stream(stream, ht);
+               }
        }
-       rcu_read_unlock();
 
        lttng_ht_destroy(ht);
 }
@@ -1622,7 +1615,7 @@ ssize_t lttng_consumer_on_read_subbuffer_mmap(struct lttng_consumer_stream *stre
        size_t write_len;
 
        /* RCU lock for the relayd pointer */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        LTTNG_ASSERT(stream->net_seq_idx != (uint64_t) -1ULL || stream->trace_chunk);
 
        /* Flag that the current stream if set for network streaming. */
@@ -1762,7 +1755,6 @@ end:
                pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
        }
 
-       rcu_read_unlock();
        return ret;
 }
 
@@ -1801,7 +1793,7 @@ ssize_t lttng_consumer_on_read_subbuffer_splice(struct lttng_consumer_local_data
        }
 
        /* RCU lock for the relayd pointer */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        /* Flag that the current stream if set for network streaming. */
        if (stream->net_seq_idx != (uint64_t) -1ULL) {
@@ -1984,7 +1976,6 @@ end:
                pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
        }
 
-       rcu_read_unlock();
        return written;
 }
 
@@ -2205,7 +2196,7 @@ void consumer_add_metadata_stream(struct lttng_consumer_stream *stream)
         * after this point.
         */
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        /*
         * Lookup the stream just to make sure it does not exist in our internal
@@ -2239,8 +2230,6 @@ void consumer_add_metadata_stream(struct lttng_consumer_stream *stream)
         */
        lttng_ht_add_u64(the_consumer_data.stream_list_ht, &stream->node_session_id);
 
-       rcu_read_unlock();
-
        pthread_mutex_unlock(&stream->lock);
        pthread_mutex_unlock(&stream->chan->lock);
        pthread_mutex_unlock(&stream->chan->timer_lock);
@@ -2257,16 +2246,18 @@ static void validate_endpoint_status_data_stream()
 
        DBG("Consumer delete flagged data stream");
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (data_ht->ht, &iter.iter, stream, node.node) {
-               /* Validate delete flag of the stream */
-               if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
-                       continue;
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (data_ht->ht, &iter.iter, stream, node.node) {
+                       /* Validate delete flag of the stream */
+                       if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
+                               continue;
+                       }
+                       /* Delete it right now */
+                       consumer_del_stream(stream, data_ht);
                }
-               /* Delete it right now */
-               consumer_del_stream(stream, data_ht);
        }
-       rcu_read_unlock();
 }
 
 /*
@@ -2281,22 +2272,23 @@ static void validate_endpoint_status_metadata_stream(struct lttng_poll_event *po
 
        LTTNG_ASSERT(pollset);
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (metadata_ht->ht, &iter.iter, stream, node.node) {
-               /* Validate delete flag of the stream */
-               if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
-                       continue;
-               }
-               /*
-                * Remove from pollset so the metadata thread can continue without
-                * blocking on a deleted stream.
-                */
-               lttng_poll_del(pollset, stream->wait_fd);
+       {
+               lttng::urcu::read_lock_guard read_lock;
+               cds_lfht_for_each_entry (metadata_ht->ht, &iter.iter, stream, node.node) {
+                       /* Validate delete flag of the stream */
+                       if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
+                               continue;
+                       }
+                       /*
+                        * Remove from pollset so the metadata thread can continue without
+                        * blocking on a deleted stream.
+                        */
+                       lttng_poll_del(pollset, stream->wait_fd);
 
-               /* Delete it right now */
-               consumer_del_metadata_stream(stream, metadata_ht);
+                       /* Delete it right now */
+                       consumer_del_metadata_stream(stream, metadata_ht);
+               }
        }
-       rcu_read_unlock();
 }
 
 /*
@@ -2431,7 +2423,7 @@ void *consumer_thread_metadata_poll(void *data)
                                continue;
                        }
 
-                       rcu_read_lock();
+                       lttng::urcu::read_lock_guard read_lock;
                        {
                                uint64_t tmp_id = (uint64_t) pollfd;
 
@@ -2495,11 +2487,9 @@ void *consumer_thread_metadata_poll(void *data)
                                consumer_del_metadata_stream(stream, metadata_ht);
                        } else {
                                ERR("Unexpected poll events %u for sock %d", revents, pollfd);
-                               rcu_read_unlock();
                                goto end;
                        }
                        /* Release RCU lock for the stream looked up */
-                       rcu_read_unlock();
                }
        }
 
@@ -2838,7 +2828,7 @@ static void consumer_close_channel_streams(struct lttng_consumer_channel *channe
 
        ht = the_consumer_data.stream_per_chan_id_ht;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_lfht_for_each_entry_duplicate(ht->ht,
                                          ht->hash_fct(&channel->key, lttng_ht_seed),
                                          ht->match_fct,
@@ -2878,7 +2868,6 @@ static void consumer_close_channel_streams(struct lttng_consumer_channel *channe
        next:
                pthread_mutex_unlock(&stream->lock);
        }
-       rcu_read_unlock();
 }
 
 static void destroy_channel_ht(struct lttng_ht *ht)
@@ -2891,12 +2880,14 @@ static void destroy_channel_ht(struct lttng_ht *ht)
                return;
        }
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (ht->ht, &iter.iter, channel, wait_fd_node.node) {
-               ret = lttng_ht_del(ht, &iter);
-               LTTNG_ASSERT(ret != 0);
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (ht->ht, &iter.iter, channel, wait_fd_node.node) {
+                       ret = lttng_ht_del(ht, &iter);
+                       LTTNG_ASSERT(ret != 0);
+               }
        }
-       rcu_read_unlock();
 
        lttng_ht_destroy(ht);
 }
@@ -2998,19 +2989,20 @@ void *consumer_thread_channel_poll(void *data)
 
                                        switch (action) {
                                        case CONSUMER_CHANNEL_ADD:
+                                       {
                                                DBG("Adding channel %d to poll set", chan->wait_fd);
 
                                                lttng_ht_node_init_u64(&chan->wait_fd_node,
                                                                       chan->wait_fd);
-                                               rcu_read_lock();
+                                               lttng::urcu::read_lock_guard read_lock;
                                                lttng_ht_add_unique_u64(channel_ht,
                                                                        &chan->wait_fd_node);
-                                               rcu_read_unlock();
                                                /* Add channel to the global poll events list */
                                                // FIXME: Empty flag on a pipe pollset, this might
                                                // hang on FreeBSD.
                                                lttng_poll_add(&events, chan->wait_fd, 0);
                                                break;
+                                       }
                                        case CONSUMER_CHANNEL_DEL:
                                        {
                                                /*
@@ -3023,10 +3015,9 @@ void *consumer_thread_channel_poll(void *data)
                                                 * GET_CHANNEL failed.
                                                 */
 
-                                               rcu_read_lock();
+                                               lttng::urcu::read_lock_guard read_lock;
                                                chan = consumer_find_channel(key);
                                                if (!chan) {
-                                                       rcu_read_unlock();
                                                        ERR("UST consumer get channel key %" PRIu64
                                                            " not found for del channel",
                                                            key);
@@ -3059,7 +3050,6 @@ void *consumer_thread_channel_poll(void *data)
                                                if (!uatomic_sub_return(&chan->refcount, 1)) {
                                                        consumer_del_channel(chan);
                                                }
-                                               rcu_read_unlock();
                                                goto restart;
                                        }
                                        case CONSUMER_CHANNEL_QUIT:
@@ -3093,7 +3083,7 @@ void *consumer_thread_channel_poll(void *data)
                                continue;
                        }
 
-                       rcu_read_lock();
+                       lttng::urcu::read_lock_guard read_lock;
                        {
                                uint64_t tmp_id = (uint64_t) pollfd;
 
@@ -3126,12 +3116,10 @@ void *consumer_thread_channel_poll(void *data)
                                }
                        } else {
                                ERR("Unexpected poll events %u for sock %d", revents, pollfd);
-                               rcu_read_unlock();
                                goto end;
                        }
 
                        /* Release RCU lock for the channel looked up */
-                       rcu_read_unlock();
                }
        }
 
@@ -3773,7 +3761,7 @@ int consumer_data_pending(uint64_t id)
 
        DBG("Consumer data pending command on session id %" PRIu64, id);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        pthread_mutex_lock(&the_consumer_data.lock);
 
        switch (the_consumer_data.type) {
@@ -3887,13 +3875,11 @@ int consumer_data_pending(uint64_t id)
 data_not_pending:
        /* Data is available to be read by a viewer. */
        pthread_mutex_unlock(&the_consumer_data.lock);
-       rcu_read_unlock();
        return 0;
 
 data_pending:
        /* Data is still being extracted from buffers. */
        pthread_mutex_unlock(&the_consumer_data.lock);
-       rcu_read_unlock();
        return 1;
 }
 
@@ -4020,7 +4006,7 @@ int lttng_consumer_rotate_channel(struct lttng_consumer_channel *channel,
                                 nullptr);
        lttng_dynamic_pointer_array_init(&streams_packet_to_open, nullptr);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        pthread_mutex_lock(&channel->lock);
        LTTNG_ASSERT(channel->trace_chunk);
@@ -4372,7 +4358,6 @@ end_unlock_stream:
 end_unlock_channel:
        pthread_mutex_unlock(&channel->lock);
 end:
-       rcu_read_unlock();
        lttng_dynamic_array_reset(&stream_rotation_positions);
        lttng_dynamic_pointer_array_reset(&streams_packet_to_open);
        return ret;
@@ -4459,7 +4444,7 @@ static int consumer_clear_unmonitored_channel(struct lttng_consumer_channel *cha
        int ret;
        struct lttng_consumer_stream *stream;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        pthread_mutex_lock(&channel->lock);
        cds_list_for_each_entry (stream, &channel->streams.head, send_node) {
                health_code_update();
@@ -4471,13 +4456,11 @@ static int consumer_clear_unmonitored_channel(struct lttng_consumer_channel *cha
                pthread_mutex_unlock(&stream->lock);
        }
        pthread_mutex_unlock(&channel->lock);
-       rcu_read_unlock();
        return 0;
 
 error_unlock:
        pthread_mutex_unlock(&stream->lock);
        pthread_mutex_unlock(&channel->lock);
-       rcu_read_unlock();
        return ret;
 }
 
@@ -4673,7 +4656,7 @@ int lttng_consumer_rotate_ready_streams(struct lttng_consumer_channel *channel,
 
        ASSERT_RCU_READ_LOCKED();
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        DBG("Consumer rotate ready streams in channel %" PRIu64, key);
 
@@ -4708,7 +4691,6 @@ int lttng_consumer_rotate_ready_streams(struct lttng_consumer_channel *channel,
        ret = 0;
 
 end:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -4837,46 +4819,50 @@ lttng_consumer_create_trace_chunk(const uint64_t *relayd_id,
                goto error;
        }
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry_duplicate(
-               the_consumer_data.channels_by_session_id_ht->ht,
-               the_consumer_data.channels_by_session_id_ht->hash_fct(&session_id, lttng_ht_seed),
-               the_consumer_data.channels_by_session_id_ht->match_fct,
-               &session_id,
-               &iter.iter,
-               channel,
-               channels_by_session_id_ht_node.node)
        {
-               ret = lttng_consumer_channel_set_trace_chunk(channel, published_chunk);
-               if (ret) {
-                       /*
-                        * Roll-back the creation of this chunk.
-                        *
-                        * This is important since the session daemon will
-                        * assume that the creation of this chunk failed and
-                        * will never ask for it to be closed, resulting
-                        * in a leak and an inconsistent state for some
-                        * channels.
-                        */
-                       enum lttcomm_return_code close_ret;
-                       char path[LTTNG_PATH_MAX];
+               lttng::urcu::read_lock_guard read_lock;
+               cds_lfht_for_each_entry_duplicate(
+                       the_consumer_data.channels_by_session_id_ht->ht,
+                       the_consumer_data.channels_by_session_id_ht->hash_fct(&session_id,
+                                                                             lttng_ht_seed),
+                       the_consumer_data.channels_by_session_id_ht->match_fct,
+                       &session_id,
+                       &iter.iter,
+                       channel,
+                       channels_by_session_id_ht_node.node)
+               {
+                       ret = lttng_consumer_channel_set_trace_chunk(channel, published_chunk);
+                       if (ret) {
+                               /*
+                                * Roll-back the creation of this chunk.
+                                *
+                                * This is important since the session daemon will
+                                * assume that the creation of this chunk failed and
+                                * will never ask for it to be closed, resulting
+                                * in a leak and an inconsistent state for some
+                                * channels.
+                                */
+                               enum lttcomm_return_code close_ret;
+                               char path[LTTNG_PATH_MAX];
+
+                               DBG("Failed to set new trace chunk on existing channels, rolling back");
+                               close_ret =
+                                       lttng_consumer_close_trace_chunk(relayd_id,
+                                                                        session_id,
+                                                                        chunk_id,
+                                                                        chunk_creation_timestamp,
+                                                                        nullptr,
+                                                                        path);
+                               if (close_ret != LTTCOMM_CONSUMERD_SUCCESS) {
+                                       ERR("Failed to roll-back the creation of new chunk: session_id = %" PRIu64
+                                           ", chunk_id = %" PRIu64,
+                                           session_id,
+                                           chunk_id);
+                               }
 
-                       DBG("Failed to set new trace chunk on existing channels, rolling back");
-                       close_ret = lttng_consumer_close_trace_chunk(relayd_id,
-                                                                    session_id,
-                                                                    chunk_id,
-                                                                    chunk_creation_timestamp,
-                                                                    nullptr,
-                                                                    path);
-                       if (close_ret != LTTCOMM_CONSUMERD_SUCCESS) {
-                               ERR("Failed to roll-back the creation of new chunk: session_id = %" PRIu64
-                                   ", chunk_id = %" PRIu64,
-                                   session_id,
-                                   chunk_id);
+                               ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
+                               break;
                        }
-
-                       ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED;
-                       break;
                }
        }
 
@@ -4914,7 +4900,6 @@ lttng_consumer_create_trace_chunk(const uint64_t *relayd_id,
                }
        }
 error_unlock:
-       rcu_read_unlock();
 error:
        /* Release the reference returned by the "publish" operation. */
        lttng_trace_chunk_put(published_chunk);
@@ -4990,30 +4975,32 @@ lttng_consumer_close_trace_chunk(const uint64_t *relayd_id,
         * it; it is only kept around to compare it (by address) to the
         * current chunk found in the session's channels.
         */
-       rcu_read_lock();
-       cds_lfht_for_each_entry (the_consumer_data.channel_ht->ht, &iter.iter, channel, node.node) {
-               int ret;
+       {
+               lttng::urcu::read_lock_guard read_lock;
+               cds_lfht_for_each_entry (
+                       the_consumer_data.channel_ht->ht, &iter.iter, channel, node.node) {
+                       int ret;
 
-               /*
-                * Only change the channel's chunk to NULL if it still
-                * references the chunk being closed. The channel may
-                * reference a newer channel in the case of a session
-                * rotation. When a session rotation occurs, the "next"
-                * chunk is created before the "current" chunk is closed.
-                */
-               if (channel->trace_chunk != chunk) {
-                       continue;
-               }
-               ret = lttng_consumer_channel_set_trace_chunk(channel, nullptr);
-               if (ret) {
                        /*
-                        * Attempt to close the chunk on as many channels as
-                        * possible.
+                        * Only change the channel's chunk to NULL if it still
+                        * references the chunk being closed. The channel may
+                        * reference a newer channel in the case of a session
+                        * rotation. When a session rotation occurs, the "next"
+                        * chunk is created before the "current" chunk is closed.
                         */
-                       ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED;
+                       if (channel->trace_chunk != chunk) {
+                               continue;
+                       }
+                       ret = lttng_consumer_channel_set_trace_chunk(channel, nullptr);
+                       if (ret) {
+                               /*
+                                * Attempt to close the chunk on as many channels as
+                                * possible.
+                                */
+                               ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED;
+                       }
                }
        }
-
        if (relayd_id) {
                int ret;
                struct consumer_relayd_sock_pair *relayd;
@@ -5033,7 +5020,6 @@ lttng_consumer_close_trace_chunk(const uint64_t *relayd_id,
                }
        }
 error_unlock:
-       rcu_read_unlock();
 end:
        /*
         * Release the reference returned by the "find" operation and
@@ -5055,6 +5041,7 @@ lttng_consumer_trace_chunk_exists(const uint64_t *relayd_id, uint64_t session_id
        const bool is_local_trace = !relayd_id;
        struct consumer_relayd_sock_pair *relayd = nullptr;
        bool chunk_exists_local, chunk_exists_remote;
+       lttng::urcu::read_lock_guard read_lock;
 
        if (relayd_id) {
                /* Only used for logging purposes. */
@@ -5087,7 +5074,6 @@ lttng_consumer_trace_chunk_exists(const uint64_t *relayd_id, uint64_t session_id
                goto end;
        }
 
-       rcu_read_lock();
        relayd = consumer_find_relayd(*relayd_id);
        if (!relayd) {
                ERR("Failed to find relayd %" PRIu64, *relayd_id);
@@ -5109,7 +5095,6 @@ lttng_consumer_trace_chunk_exists(const uint64_t *relayd_id, uint64_t session_id
        DBG("Trace chunk %s on relay daemon", chunk_exists_remote ? "exists" : "does not exist");
 
 end_rcu_unlock:
-       rcu_read_unlock();
 end:
        return ret_code;
 }
@@ -5123,7 +5108,7 @@ static int consumer_clear_monitored_channel(struct lttng_consumer_channel *chann
 
        ht = the_consumer_data.stream_per_chan_id_ht;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_lfht_for_each_entry_duplicate(ht->ht,
                                          ht->hash_fct(&channel->key, lttng_ht_seed),
                                          ht->match_fct,
@@ -5146,12 +5131,10 @@ static int consumer_clear_monitored_channel(struct lttng_consumer_channel *chann
        next:
                pthread_mutex_unlock(&stream->lock);
        }
-       rcu_read_unlock();
        return LTTCOMM_CONSUMERD_SUCCESS;
 
 error_unlock:
        pthread_mutex_unlock(&stream->lock);
-       rcu_read_unlock();
        return ret;
 }
 
@@ -5192,56 +5175,56 @@ enum lttcomm_return_code lttng_consumer_open_channel_packets(struct lttng_consum
                goto end;
        }
 
-       rcu_read_lock();
-       cds_list_for_each_entry (stream, &channel->streams.head, send_node) {
-               enum consumer_stream_open_packet_status status;
+       {
+               lttng::urcu::read_lock_guard read_lock;
+               cds_list_for_each_entry (stream, &channel->streams.head, send_node) {
+                       enum consumer_stream_open_packet_status status;
 
-               pthread_mutex_lock(&stream->lock);
-               if (cds_lfht_is_node_deleted(&stream->node.node)) {
-                       goto next;
-               }
+                       pthread_mutex_lock(&stream->lock);
+                       if (cds_lfht_is_node_deleted(&stream->node.node)) {
+                               goto next;
+                       }
 
-               status = consumer_stream_open_packet(stream);
-               switch (status) {
-               case CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED:
-                       DBG("Opened a packet in \"open channel packets\" command: stream id = %" PRIu64
-                           ", channel name = %s, session id = %" PRIu64,
-                           stream->key,
-                           stream->chan->name,
-                           stream->chan->session_id);
-                       stream->opened_packet_in_current_trace_chunk = true;
-                       break;
-               case CONSUMER_STREAM_OPEN_PACKET_STATUS_NO_SPACE:
-                       DBG("No space left to open a packet in \"open channel packets\" command: stream id = %" PRIu64
-                           ", channel name = %s, session id = %" PRIu64,
-                           stream->key,
-                           stream->chan->name,
-                           stream->chan->session_id);
-                       break;
-               case CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR:
-                       /*
-                        * Only unexpected internal errors can lead to this
-                        * failing. Report an unknown error.
-                        */
-                       ERR("Failed to flush empty buffer in \"open channel packets\" command: stream id = %" PRIu64
-                           ", channel id = %" PRIu64 ", channel name = %s"
-                           ", session id = %" PRIu64,
-                           stream->key,
-                           channel->key,
-                           channel->name,
-                           channel->session_id);
-                       ret = LTTCOMM_CONSUMERD_UNKNOWN_ERROR;
-                       goto error_unlock;
-               default:
-                       abort();
-               }
+                       status = consumer_stream_open_packet(stream);
+                       switch (status) {
+                       case CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED:
+                               DBG("Opened a packet in \"open channel packets\" command: stream id = %" PRIu64
+                                   ", channel name = %s, session id = %" PRIu64,
+                                   stream->key,
+                                   stream->chan->name,
+                                   stream->chan->session_id);
+                               stream->opened_packet_in_current_trace_chunk = true;
+                               break;
+                       case CONSUMER_STREAM_OPEN_PACKET_STATUS_NO_SPACE:
+                               DBG("No space left to open a packet in \"open channel packets\" command: stream id = %" PRIu64
+                                   ", channel name = %s, session id = %" PRIu64,
+                                   stream->key,
+                                   stream->chan->name,
+                                   stream->chan->session_id);
+                               break;
+                       case CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR:
+                               /*
+                                * Only unexpected internal errors can lead to this
+                                * failing. Report an unknown error.
+                                */
+                               ERR("Failed to flush empty buffer in \"open channel packets\" command: stream id = %" PRIu64
+                                   ", channel id = %" PRIu64 ", channel name = %s"
+                                   ", session id = %" PRIu64,
+                                   stream->key,
+                                   channel->key,
+                                   channel->name,
+                                   channel->session_id);
+                               ret = LTTCOMM_CONSUMERD_UNKNOWN_ERROR;
+                               goto error_unlock;
+                       default:
+                               abort();
+                       }
 
-       next:
-               pthread_mutex_unlock(&stream->lock);
+               next:
+                       pthread_mutex_unlock(&stream->lock);
+               }
        }
-
 end_rcu_unlock:
-       rcu_read_unlock();
 end:
        return ret;
 
index e7ea6e9efb2973f23e6c56a9a9f19fb08732d1bb..7f4c2b5b2b698f81116570d94380720c7ceda7c6 100644 (file)
@@ -15,6 +15,7 @@
 #include <common/hashtable/utils.hpp>
 #include <common/macros.hpp>
 #include <common/optional.hpp>
+#include <common/urcu.hpp>
 
 #include <fcntl.h>
 #include <inttypes.h>
@@ -424,14 +425,18 @@ void fd_tracker_log(struct fd_tracker *tracker)
        }
 
        DBG_NO_LOC("  Tracked unsuspendable file descriptors");
-       rcu_read_lock();
-       cds_lfht_for_each_entry (
-               tracker->unsuspendable_fds, &iter, unsuspendable_fd, tracker_node) {
-               DBG_NO_LOC("    %s [active, fd %d]",
-                          unsuspendable_fd->name ?: "Unnamed",
-                          unsuspendable_fd->fd);
-       }
-       rcu_read_unlock();
+
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (
+                       tracker->unsuspendable_fds, &iter, unsuspendable_fd, tracker_node) {
+                       DBG_NO_LOC("    %s [active, fd %d]",
+                                  unsuspendable_fd->name ?: "Unnamed",
+                                  unsuspendable_fd->fd);
+               }
+       }
+
        if (!UNSUSPENDABLE_COUNT(tracker)) {
                DBG_NO_LOC("    None");
        }
@@ -599,6 +604,7 @@ int fd_tracker_open_unsuspendable_fd(struct fd_tracker *tracker,
        int ret, user_ret, i, fds_to_suspend;
        unsigned int active_fds;
        struct unsuspendable_fd **entries;
+       lttng::urcu::read_lock_guard read_lock;
 
        entries = calloc<unsuspendable_fd *>(fd_count);
        if (!entries) {
@@ -650,7 +656,6 @@ int fd_tracker_open_unsuspendable_fd(struct fd_tracker *tracker,
                entries[i] = entry;
        }
 
-       rcu_read_lock();
        for (i = 0; i < fd_count; i++) {
                struct cds_lfht_node *node;
                struct unsuspendable_fd *entry = entries[i];
@@ -664,13 +669,11 @@ int fd_tracker_open_unsuspendable_fd(struct fd_tracker *tracker,
 
                if (node != &entry->tracker_node) {
                        ret = -EEXIST;
-                       rcu_read_unlock();
                        goto end_free_entries;
                }
                entries[i] = nullptr;
        }
        tracker->count.unsuspendable += fd_count;
-       rcu_read_unlock();
        ret = user_ret;
 end_unlock:
        pthread_mutex_unlock(&tracker->lock);
@@ -692,6 +695,7 @@ int fd_tracker_close_unsuspendable_fd(struct fd_tracker *tracker,
 {
        int i, ret, user_ret;
        int *fds = nullptr;
+       lttng::urcu::read_lock_guard read_lock;
 
        /*
         * Maintain a local copy of fds_in as the user's callback may modify its
@@ -705,7 +709,6 @@ int fd_tracker_close_unsuspendable_fd(struct fd_tracker *tracker,
        memcpy(fds, fds_in, sizeof(*fds) * fd_count);
 
        pthread_mutex_lock(&tracker->lock);
-       rcu_read_lock();
 
        /* Let the user close the file descriptors. */
        user_ret = close(user_data, fds_in);
@@ -743,7 +746,6 @@ int fd_tracker_close_unsuspendable_fd(struct fd_tracker *tracker,
        tracker->count.unsuspendable -= fd_count;
        ret = 0;
 end_unlock:
-       rcu_read_unlock();
        pthread_mutex_unlock(&tracker->lock);
        free(fds);
 end:
index 6555e97a311b10067bed7a495d49f17f58628dd5..c98f6ad455c39598bcafb8eb9918852b822aba07 100644 (file)
@@ -13,6 +13,7 @@
 #include <common/macros.hpp>
 #include <common/optional.hpp>
 #include <common/string-utils/format.hpp>
+#include <common/urcu.hpp>
 #include <common/utils.hpp>
 
 #include <lttng/constant.h>
@@ -211,9 +212,8 @@ static void lttng_inode_destroy(struct lttng_inode *inode)
                return;
        }
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_lfht_del(inode->registry_ht, &inode->registry_node);
-       rcu_read_unlock();
 
        if (inode->unlink_pending) {
                int ret;
@@ -496,6 +496,7 @@ lttng_inode_registry_get_inode(struct lttng_inode_registry *registry,
        struct cds_lfht_iter iter;
        struct cds_lfht_node *node;
        struct lttng_inode *inode = nullptr;
+       lttng::urcu::read_lock_guard read_lock;
 
        ret = fstat(fd, &statbuf);
        if (ret < 0) {
@@ -506,18 +507,17 @@ lttng_inode_registry_get_inode(struct lttng_inode_registry *registry,
        id.device = statbuf.st_dev;
        id.inode = statbuf.st_ino;
 
-       rcu_read_lock();
        cds_lfht_lookup(registry->inodes, lttng_inode_id_hash(&id), lttng_inode_match, &id, &iter);
        node = cds_lfht_iter_get_node(&iter);
        if (node) {
                inode = lttng::utils::container_of(node, &lttng_inode::registry_node);
                lttng_inode_get(inode);
-               goto end_unlock;
+               goto end;
        }
 
        inode = lttng_inode_create(&id, registry->inodes, unlinked_file_pool, handle, path);
        if (!inode) {
-               goto end_unlock;
+               goto end;
        }
 
        node = cds_lfht_add_unique(registry->inodes,
@@ -526,8 +526,6 @@ lttng_inode_registry_get_inode(struct lttng_inode_registry *registry,
                                   &inode->id,
                                   &inode->registry_node);
        LTTNG_ASSERT(node == &inode->registry_node);
-end_unlock:
-       rcu_read_unlock();
 end:
        return inode;
 }
index 6497da5f56af9701b43938e06fec839cf76e6b4c..29d3ac499715f1e9da4e8eb8d7b4c79b2c786157 100644 (file)
@@ -11,6 +11,7 @@
 
 #include <common/common.hpp>
 #include <common/defaults.hpp>
+#include <common/urcu.hpp>
 
 #include <string.h>
 #include <urcu.h>
@@ -272,13 +273,12 @@ void lttng_ht_add_unique_str(struct lttng_ht *ht, struct lttng_ht_node_str *node
        LTTNG_ASSERT(node);
 
        /* RCU read lock protects from ABA. */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        node_ptr = cds_lfht_add_unique(ht->ht,
                                       ht->hash_fct(node->key, lttng_ht_seed),
                                       ht->match_fct,
                                       node->key,
                                       &node->node);
-       rcu_read_unlock();
        LTTNG_ASSERT(node_ptr == &node->node);
 }
 
@@ -292,9 +292,8 @@ void lttng_ht_add_str(struct lttng_ht *ht, struct lttng_ht_node_str *node)
        LTTNG_ASSERT(node);
 
        /* RCU read lock protects from ABA. */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_lfht_add(ht->ht, ht->hash_fct(node->key, lttng_ht_seed), &node->node);
-       rcu_read_unlock();
 }
 
 /*
@@ -307,9 +306,8 @@ void lttng_ht_add_ulong(struct lttng_ht *ht, struct lttng_ht_node_ulong *node)
        LTTNG_ASSERT(node);
 
        /* RCU read lock protects from ABA. */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_lfht_add(ht->ht, ht->hash_fct((void *) node->key, lttng_ht_seed), &node->node);
-       rcu_read_unlock();
 }
 
 /*
@@ -322,9 +320,8 @@ void lttng_ht_add_u64(struct lttng_ht *ht, struct lttng_ht_node_u64 *node)
        LTTNG_ASSERT(node);
 
        /* RCU read lock protects from ABA. */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_lfht_add(ht->ht, ht->hash_fct(&node->key, lttng_ht_seed), &node->node);
-       rcu_read_unlock();
 }
 
 /*
@@ -338,13 +335,12 @@ void lttng_ht_add_unique_ulong(struct lttng_ht *ht, struct lttng_ht_node_ulong *
        LTTNG_ASSERT(node);
 
        /* RCU read lock protects from ABA. */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        node_ptr = cds_lfht_add_unique(ht->ht,
                                       ht->hash_fct((void *) node->key, lttng_ht_seed),
                                       ht->match_fct,
                                       (void *) node->key,
                                       &node->node);
-       rcu_read_unlock();
        LTTNG_ASSERT(node_ptr == &node->node);
 }
 
@@ -359,13 +355,12 @@ void lttng_ht_add_unique_u64(struct lttng_ht *ht, struct lttng_ht_node_u64 *node
        LTTNG_ASSERT(node);
 
        /* RCU read lock protects from ABA. */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        node_ptr = cds_lfht_add_unique(ht->ht,
                                       ht->hash_fct(&node->key, lttng_ht_seed),
                                       ht->match_fct,
                                       &node->key,
                                       &node->node);
-       rcu_read_unlock();
        LTTNG_ASSERT(node_ptr == &node->node);
 }
 
@@ -380,13 +375,12 @@ void lttng_ht_add_unique_two_u64(struct lttng_ht *ht, struct lttng_ht_node_two_u
        LTTNG_ASSERT(node);
 
        /* RCU read lock protects from ABA. */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        node_ptr = cds_lfht_add_unique(ht->ht,
                                       ht->hash_fct((void *) &node->key, lttng_ht_seed),
                                       ht->match_fct,
                                       (void *) &node->key,
                                       &node->node);
-       rcu_read_unlock();
        LTTNG_ASSERT(node_ptr == &node->node);
 }
 
@@ -402,19 +396,17 @@ struct lttng_ht_node_ulong *lttng_ht_add_replace_ulong(struct lttng_ht *ht,
        LTTNG_ASSERT(node);
 
        /* RCU read lock protects from ABA. */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        node_ptr = cds_lfht_add_replace(ht->ht,
                                        ht->hash_fct((void *) node->key, lttng_ht_seed),
                                        ht->match_fct,
                                        (void *) node->key,
                                        &node->node);
-       rcu_read_unlock();
        if (!node_ptr) {
                return nullptr;
        } else {
                return lttng::utils::container_of(node_ptr, &lttng_ht_node_ulong::node);
        }
-       LTTNG_ASSERT(node_ptr == &node->node);
 }
 
 /*
@@ -429,19 +421,18 @@ struct lttng_ht_node_u64 *lttng_ht_add_replace_u64(struct lttng_ht *ht,
        LTTNG_ASSERT(node);
 
        /* RCU read lock protects from ABA. */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        node_ptr = cds_lfht_add_replace(ht->ht,
                                        ht->hash_fct(&node->key, lttng_ht_seed),
                                        ht->match_fct,
                                        &node->key,
                                        &node->node);
-       rcu_read_unlock();
        if (!node_ptr) {
                return nullptr;
        } else {
+               LTTNG_ASSERT(node_ptr == &node->node);
                return lttng::utils::container_of(node_ptr, &lttng_ht_node_u64::node);
        }
-       LTTNG_ASSERT(node_ptr == &node->node);
 }
 
 /*
@@ -456,9 +447,8 @@ int lttng_ht_del(struct lttng_ht *ht, struct lttng_ht_iter *iter)
        LTTNG_ASSERT(iter);
 
        /* RCU read lock protects from ABA. */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        ret = cds_lfht_del(ht->ht, iter->iter.node);
-       rcu_read_unlock();
        return ret;
 }
 
@@ -498,9 +488,8 @@ unsigned long lttng_ht_get_count(struct lttng_ht *ht)
        LTTNG_ASSERT(ht->ht);
 
        /* RCU read lock protects from ABA and allows RCU traversal. */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_lfht_count_nodes(ht->ht, &scb, &count, &sca);
-       rcu_read_unlock();
 
        return count;
 }
index f520736b42b10c580f28a46b09238bdf028da283..b0aa99ec1ce25ffc45242b4ffcc0cd6acf0375da 100644 (file)
@@ -25,6 +25,7 @@
 #include <common/relayd/relayd.hpp>
 #include <common/sessiond-comm/relayd.hpp>
 #include <common/sessiond-comm/sessiond-comm.hpp>
+#include <common/urcu.hpp>
 #include <common/utils.hpp>
 
 #include <bin/lttng-consumerd/health-consumerd.hpp>
@@ -151,7 +152,7 @@ static int lttng_kconsumer_snapshot_channel(struct lttng_consumer_channel *chann
        /* Prevent channel modifications while we perform the snapshot.*/
        pthread_mutex_lock(&channel->lock);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        /* Splice is not supported yet for channel snapshot. */
        if (channel->output != CONSUMER_CHANNEL_MMAP) {
@@ -328,7 +329,6 @@ error_close_stream_output:
 end_unlock:
        pthread_mutex_unlock(&stream->lock);
 end:
-       rcu_read_unlock();
        pthread_mutex_unlock(&channel->lock);
        return ret;
 }
@@ -354,7 +354,7 @@ static int lttng_kconsumer_snapshot_metadata(struct lttng_consumer_channel *meta
 
        DBG("Kernel consumer snapshot metadata with key %" PRIu64 " at path %s", key, path);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        metadata_stream = metadata_channel->metadata_stream;
        LTTNG_ASSERT(metadata_stream);
@@ -415,7 +415,6 @@ error_snapshot:
        metadata_stream->read_subbuffer_ops.unlock(metadata_stream);
        consumer_stream_destroy(metadata_stream, nullptr);
        metadata_channel->metadata_stream = nullptr;
-       rcu_read_unlock();
        return ret;
 }
 
@@ -455,7 +454,7 @@ int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
        health_code_update();
 
        /* relayd needs RCU read-side protection */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        switch (msg.cmd_type) {
        case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
@@ -872,7 +871,6 @@ int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
        }
        case LTTNG_CONSUMER_UPDATE_STREAM:
        {
-               rcu_read_unlock();
                return -ENOSYS;
        }
        case LTTNG_CONSUMER_DESTROY_RELAYD:
@@ -1388,7 +1386,6 @@ end_msg_sessiond:
 
 end:
        health_code_update();
-       rcu_read_unlock();
        return ret_func;
 }
 
index dab6e6df014b7ee1c6976e171423c187f7ff1c91..ff0d5bb9365fb11cf6e1f9506aeebd80b2e20d80 100644 (file)
@@ -251,7 +251,8 @@ void *memmove(DestinationType *d, const SourceType *s, size_t n) = delete;
 #define member_sizeof(type, field)     sizeof(((type *) 0)->field)
 
 #define ASSERT_LOCKED(lock)         LTTNG_ASSERT(pthread_mutex_trylock(&(lock)))
-#define ASSERT_RCU_READ_LOCKED(lock) LTTNG_ASSERT(rcu_read_ongoing())
+#define ASSERT_RCU_READ_LOCKED() LTTNG_ASSERT(rcu_read_ongoing())
+#define ASSERT_RCU_READ_UNLOCKED() LTTNG_ASSERT(!rcu_read_ongoing())
 
 /* Attribute suitable to tag functions as having printf()-like arguments. */
 #define ATTR_FORMAT_PRINTF(_string_index, _first_to_check) \
index d48ebc6be3bc22bca29711a28e3afabb9f5ecb1b..5da16a6134ac7027365a022abb27410bec2edf1f 100644 (file)
@@ -21,6 +21,7 @@
 #include <common/time.hpp>
 #include <common/trace-chunk-registry.hpp>
 #include <common/trace-chunk.hpp>
+#include <common/urcu.hpp>
 #include <common/utils.hpp>
 
 #include <lttng/constant.h>
@@ -1827,9 +1828,8 @@ static void lttng_trace_chunk_release(struct urcu_ref *ref)
                element = lttng::utils::container_of(chunk,
                                                     &lttng_trace_chunk_registry_element::chunk);
                if (element->registry) {
-                       rcu_read_lock();
+                       lttng::urcu::read_lock_guard read_lock;
                        cds_lfht_del(element->registry->ht, &element->trace_chunk_registry_ht_node);
-                       rcu_read_unlock();
                        call_rcu(&element->rcu_node, free_lttng_trace_chunk_registry_element);
                } else {
                        /* Never published, can be free'd immediately. */
@@ -1943,6 +1943,8 @@ lttng_trace_chunk_registry_publish_chunk(struct lttng_trace_chunk_registry *regi
        pthread_mutex_lock(&chunk->lock);
        element = lttng_trace_chunk_registry_element_create_from_chunk(chunk, session_id);
        pthread_mutex_unlock(&chunk->lock);
+
+       lttng::urcu::read_lock_guard read_lock;
        if (!element) {
                goto end;
        }
@@ -1953,7 +1955,6 @@ lttng_trace_chunk_registry_publish_chunk(struct lttng_trace_chunk_registry *regi
        chunk = nullptr;
        element_hash = lttng_trace_chunk_registry_element_hash(element);
 
-       rcu_read_lock();
        while (true) {
                struct cds_lfht_node *published_node;
                struct lttng_trace_chunk *published_chunk;
@@ -2005,7 +2006,6 @@ lttng_trace_chunk_registry_publish_chunk(struct lttng_trace_chunk_registry *regi
                 * chunk.
                 */
        }
-       rcu_read_unlock();
 end:
        return element ? &element->chunk : nullptr;
 }
@@ -2034,7 +2034,7 @@ static struct lttng_trace_chunk *_lttng_trace_chunk_registry_find_chunk(
        struct lttng_trace_chunk *published_chunk = nullptr;
        struct cds_lfht_iter iter;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_lfht_lookup(registry->ht,
                        element_hash,
                        lttng_trace_chunk_registry_element_match,
@@ -2051,7 +2051,6 @@ static struct lttng_trace_chunk *_lttng_trace_chunk_registry_find_chunk(
                published_chunk = &published_element->chunk;
        }
 end:
-       rcu_read_unlock();
        return published_chunk;
 }
 
@@ -2077,7 +2076,7 @@ int lttng_trace_chunk_registry_chunk_exists(const struct lttng_trace_chunk_regis
        struct cds_lfht_node *published_node;
        struct cds_lfht_iter iter;
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        cds_lfht_lookup(registry->ht,
                        element_hash,
                        lttng_trace_chunk_registry_element_match,
@@ -2091,7 +2090,6 @@ int lttng_trace_chunk_registry_chunk_exists(const struct lttng_trace_chunk_regis
 
        *chunk_exists = !cds_lfht_is_node_deleted(published_node);
 end:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -2110,37 +2108,42 @@ lttng_trace_chunk_registry_put_each_chunk(const struct lttng_trace_chunk_registr
        unsigned int trace_chunks_left = 0;
 
        DBG("Releasing trace chunk registry to all trace chunks");
-       rcu_read_lock();
-       cds_lfht_for_each_entry (registry->ht, &iter, chunk_element, trace_chunk_registry_ht_node) {
-               const char *chunk_id_str = "none";
-               char chunk_id_buf[MAX_INT_DEC_LEN(uint64_t)];
-
-               pthread_mutex_lock(&chunk_element->chunk.lock);
-               if (chunk_element->chunk.id.is_set) {
-                       int fmt_ret;
-
-                       fmt_ret = snprintf(chunk_id_buf,
-                                          sizeof(chunk_id_buf),
-                                          "%" PRIu64,
-                                          chunk_element->chunk.id.value);
-                       if (fmt_ret < 0 || fmt_ret >= sizeof(chunk_id_buf)) {
-                               chunk_id_str = "formatting error";
-                       } else {
-                               chunk_id_str = chunk_id_buf;
+
+       {
+               lttng::urcu::read_lock_guard read_lock;
+
+               cds_lfht_for_each_entry (
+                       registry->ht, &iter, chunk_element, trace_chunk_registry_ht_node) {
+                       const char *chunk_id_str = "none";
+                       char chunk_id_buf[MAX_INT_DEC_LEN(uint64_t)];
+
+                       pthread_mutex_lock(&chunk_element->chunk.lock);
+                       if (chunk_element->chunk.id.is_set) {
+                               int fmt_ret;
+
+                               fmt_ret = snprintf(chunk_id_buf,
+                                                  sizeof(chunk_id_buf),
+                                                  "%" PRIu64,
+                                                  chunk_element->chunk.id.value);
+                               if (fmt_ret < 0 || fmt_ret >= sizeof(chunk_id_buf)) {
+                                       chunk_id_str = "formatting error";
+                               } else {
+                                       chunk_id_str = chunk_id_buf;
+                               }
                        }
+
+                       DBG("Releasing reference to trace chunk: session_id = %" PRIu64
+                           "chunk_id = %s, name = \"%s\", status = %s",
+                           chunk_element->session_id,
+                           chunk_id_str,
+                           chunk_element->chunk.name ?: "none",
+                           chunk_element->chunk.close_command.is_set ? "open" : "closed");
+                       pthread_mutex_unlock(&chunk_element->chunk.lock);
+                       lttng_trace_chunk_put(&chunk_element->chunk);
+                       trace_chunks_left++;
                }
+       }
 
-               DBG("Releasing reference to trace chunk: session_id = %" PRIu64
-                   "chunk_id = %s, name = \"%s\", status = %s",
-                   chunk_element->session_id,
-                   chunk_id_str,
-                   chunk_element->chunk.name ?: "none",
-                   chunk_element->chunk.close_command.is_set ? "open" : "closed");
-               pthread_mutex_unlock(&chunk_element->chunk.lock);
-               lttng_trace_chunk_put(&chunk_element->chunk);
-               trace_chunks_left++;
-       }
-       rcu_read_unlock();
        DBG("Released reference to %u trace chunks in %s()", trace_chunks_left, __FUNCTION__);
 
        return trace_chunks_left;
index d0b39a887b467f1098397ba53213a69bd670067e..48a01a352e7f5ccd268ac13f4120e2c9ba473f62 100644 (file)
@@ -22,6 +22,7 @@
 #include <common/relayd/relayd.hpp>
 #include <common/sessiond-comm/sessiond-comm.hpp>
 #include <common/shm.hpp>
+#include <common/urcu.hpp>
 #include <common/utils.hpp>
 
 #include <lttng/ust-ctl.h>
@@ -666,7 +667,7 @@ static int flush_channel(uint64_t chan_key)
 
        DBG("UST consumer flush channel key %" PRIu64, chan_key);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        channel = consumer_find_channel(chan_key);
        if (!channel) {
                ERR("UST consumer flush channel %" PRIu64 " not found", chan_key);
@@ -721,7 +722,6 @@ static int flush_channel(uint64_t chan_key)
         */
        sample_and_send_channel_buffer_stats(channel);
 error:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -741,7 +741,7 @@ static int clear_quiescent_channel(uint64_t chan_key)
 
        DBG("UST consumer clear quiescent channel key %" PRIu64, chan_key);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
        channel = consumer_find_channel(chan_key);
        if (!channel) {
                ERR("UST consumer clear quiescent channel %" PRIu64 " not found", chan_key);
@@ -767,7 +767,6 @@ static int clear_quiescent_channel(uint64_t chan_key)
                pthread_mutex_unlock(&stream->lock);
        }
 error:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -958,7 +957,7 @@ static int snapshot_metadata(struct lttng_consumer_channel *metadata_channel,
 
        DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s", key, path);
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        LTTNG_ASSERT(!metadata_channel->monitor);
 
@@ -1016,7 +1015,6 @@ error_stream:
        metadata_channel->metadata_stream = nullptr;
 
 error:
-       rcu_read_unlock();
        return ret;
 }
 
@@ -1067,7 +1065,7 @@ static int snapshot_channel(struct lttng_consumer_channel *channel,
        LTTNG_ASSERT(ctx);
        ASSERT_RCU_READ_LOCKED();
 
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        if (relayd_id != (uint64_t) -1ULL) {
                use_relayd = 1;
@@ -1218,7 +1216,6 @@ static int snapshot_channel(struct lttng_consumer_channel *channel,
                pthread_mutex_unlock(&stream->lock);
        }
 
-       rcu_read_unlock();
        return 0;
 
 error_put_subbuf:
@@ -1229,7 +1226,6 @@ error_close_stream:
        consumer_stream_close_output(stream);
 error_unlock:
        pthread_mutex_unlock(&stream->lock);
-       rcu_read_unlock();
        return ret;
 }
 
@@ -1401,7 +1397,7 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
        health_code_update();
 
        /* relayd needs RCU read-side lock */
-       rcu_read_lock();
+       lttng::urcu::read_lock_guard read_lock;
 
        switch (msg.cmd_type) {
        case LTTNG_CONSUMER_ADD_RELAYD_SOCKET:
@@ -1456,7 +1452,6 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
        }
        case LTTNG_CONSUMER_UPDATE_STREAM:
        {
-               rcu_read_unlock();
                return -ENOSYS;
        }
        case LTTNG_CONSUMER_DATA_PENDING:
@@ -1877,7 +1872,6 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                uint64_t key = msg.u.discarded_events.channel_key;
 
                DBG("UST consumer discarded events command for session id %" PRIu64, id);
-               rcu_read_lock();
                pthread_mutex_lock(&the_consumer_data.lock);
 
                ht = the_consumer_data.stream_list_ht;
@@ -1904,7 +1898,6 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                        }
                }
                pthread_mutex_unlock(&the_consumer_data.lock);
-               rcu_read_unlock();
 
                DBG("UST consumer discarded events command for session id %" PRIu64
                    ", channel key %" PRIu64,
@@ -1933,7 +1926,6 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                uint64_t key = msg.u.lost_packets.channel_key;
 
                DBG("UST consumer lost packets command for session id %" PRIu64, id);
-               rcu_read_lock();
                pthread_mutex_lock(&the_consumer_data.lock);
 
                ht = the_consumer_data.stream_list_ht;
@@ -1959,7 +1951,6 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                        }
                }
                pthread_mutex_unlock(&the_consumer_data.lock);
-               rcu_read_unlock();
 
                DBG("UST consumer lost packets command for session id %" PRIu64
                    ", channel key %" PRIu64,
@@ -2296,7 +2287,6 @@ error_fatal:
        goto end;
 
 end:
-       rcu_read_unlock();
        health_code_update();
        return ret_func;
 }
@@ -3213,15 +3203,17 @@ void lttng_ustconsumer_close_all_metadata(struct lttng_ht *metadata_ht)
 
        DBG("UST consumer closing all metadata streams");
 
-       rcu_read_lock();
-       cds_lfht_for_each_entry (metadata_ht->ht, &iter.iter, stream, node.node) {
-               health_code_update();
+       {
+               lttng::urcu::read_lock_guard read_lock;
 
-               pthread_mutex_lock(&stream->chan->lock);
-               lttng_ustconsumer_close_metadata(stream->chan);
-               pthread_mutex_unlock(&stream->chan->lock);
+               cds_lfht_for_each_entry (metadata_ht->ht, &iter.iter, stream, node.node) {
+                       health_code_update();
+
+                       pthread_mutex_lock(&stream->chan->lock);
+                       lttng_ustconsumer_close_metadata(stream->chan);
+                       pthread_mutex_unlock(&stream->chan->lock);
+               }
        }
-       rcu_read_unlock();
 }
 
 void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream)
This page took 0.212371 seconds and 4 git commands to generate.