sessiond: notification-thread: Missing action executor status handling
[lttng-tools.git] / src / bin / lttng-sessiond / notification-thread-events.c
index 6ccd1fe04abfdc54e3c84aa6e80ceb4edf58c096..ccd285a8103d6a344bcb3344abbd982f079a0c13 100644 (file)
@@ -5,6 +5,8 @@
  *
  */
 
+#include "lttng/action/action.h"
+#include "lttng/trigger/trigger-internal.h"
 #define _LGPL_SOURCE
 #include <urcu.h>
 #include <urcu/rculfhash.h>
 #include <common/macros.h>
 #include <lttng/condition/condition.h>
 #include <lttng/action/action-internal.h>
+#include <lttng/action/group-internal.h>
+#include <lttng/domain-internal.h>
 #include <lttng/notification/notification-internal.h>
 #include <lttng/condition/condition-internal.h>
 #include <lttng/condition/buffer-usage-internal.h>
 #include <lttng/condition/session-consumed-size-internal.h>
 #include <lttng/condition/session-rotation-internal.h>
+#include <lttng/condition/event-rule-internal.h>
+#include <lttng/domain-internal.h>
 #include <lttng/notification/channel-internal.h>
+#include <lttng/trigger/trigger-internal.h>
+#include <lttng/event-rule/event-rule-internal.h>
 
 #include <time.h>
 #include <unistd.h>
@@ -32,6 +40,7 @@
 #include <inttypes.h>
 #include <fcntl.h>
 
+#include "condition-internal.h"
 #include "notification-thread.h"
 #include "notification-thread-events.h"
 #include "notification-thread-commands.h"
@@ -41,6 +50,9 @@
 #define CLIENT_POLL_MASK_IN (LPOLLIN | LPOLLERR | LPOLLHUP | LPOLLRDHUP)
 #define CLIENT_POLL_MASK_IN_OUT (CLIENT_POLL_MASK_IN | LPOLLOUT)
 
+/* The tracers currently limit the capture size to PIPE_BUF (4 KiB on Linux). */
+#define MAX_CAPTURE_SIZE (PIPE_BUF)
+
 enum lttng_object_type {
        LTTNG_OBJECT_TYPE_UNKNOWN,
        LTTNG_OBJECT_TYPE_NONE,
@@ -50,7 +62,7 @@ enum lttng_object_type {
 
 struct lttng_trigger_list_element {
        /* No ownership of the trigger object is assumed. */
-       const struct lttng_trigger *trigger;
+       struct lttng_trigger *trigger;
        struct cds_list_head node;
 };
 
@@ -108,6 +120,7 @@ struct lttng_session_trigger_list {
 struct lttng_trigger_ht_element {
        struct lttng_trigger *trigger;
        struct cds_lfht_node node;
+       struct cds_lfht_node node_by_name_uid;
        /* call_rcu delayed reclaim. */
        struct rcu_head rcu_node;
 };
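
The new node_by_name_uid member lets a single lttng_trigger_ht_element be indexed by two hash tables at once: triggers_ht (keyed on the whole trigger) and the new triggers_by_name_uid_ht (keyed on name and owner UID). A minimal standalone sketch of this "one element, two cds_lfht indexes" pattern, using only liburcu; the item type, key names and rollback policy below are hypothetical:

#include <urcu.h>
#include <urcu/rculfhash.h>
#include <string.h>

struct item {
	int id;                       /* Key of the first index. */
	char name[32];                /* Key of the second index. */
	struct cds_lfht_node node_by_id;
	struct cds_lfht_node node_by_name;
};

static int match_by_id(struct cds_lfht_node *node, const void *key)
{
	const struct item *item =
			caa_container_of(node, struct item, node_by_id);

	return item->id == *(const int *) key;
}

static int match_by_name(struct cds_lfht_node *node, const void *key)
{
	const struct item *item =
			caa_container_of(node, struct item, node_by_name);

	return strcmp(item->name, (const char *) key) == 0;
}

/* Returns 0 on success, -1 if either key is already present. */
static int insert_item(struct cds_lfht *by_id, struct cds_lfht *by_name,
		unsigned long id_hash, unsigned long name_hash,
		struct item *item)
{
	int ret = 0;
	struct cds_lfht_node *node;

	/* cds_lfht_add_unique() must be called under the RCU read-side lock. */
	rcu_read_lock();
	node = cds_lfht_add_unique(by_id, id_hash, match_by_id,
			&item->id, &item->node_by_id);
	if (node != &item->node_by_id) {
		ret = -1;
		goto end;
	}

	node = cds_lfht_add_unique(by_name, name_hash, match_by_name,
			item->name, &item->node_by_name);
	if (node != &item->node_by_name) {
		/* Roll back the first insertion to keep both indexes in sync. */
		cds_lfht_del(by_id, &item->node_by_id);
		ret = -1;
	}
end:
	rcu_read_unlock();
	return ret;
}
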
@@ -117,87 +130,6 @@ struct lttng_condition_list_element {
        struct cds_list_head node;
 };
 
-struct notification_client_list_element {
-       struct notification_client *client;
-       struct cds_list_head node;
-};
-
-struct notification_client_list {
-       const struct lttng_trigger *trigger;
-       struct cds_list_head list;
-       struct cds_lfht_node notification_trigger_ht_node;
-       /* call_rcu delayed reclaim. */
-       struct rcu_head rcu_node;
-};
-
-struct notification_client {
-       int socket;
-       /* Client protocol version. */
-       uint8_t major, minor;
-       uid_t uid;
-       gid_t gid;
-       /*
-        * Indicates if the credentials and versions of the client have been
-        * checked.
-        */
-       bool validated;
-       /*
-        * Conditions to which the client's notification channel is subscribed.
-        * List of struct lttng_condition_list_node. The condition member is
-        * owned by the client.
-        */
-       struct cds_list_head condition_list;
-       struct cds_lfht_node client_socket_ht_node;
-       struct {
-               struct {
-                       /*
-                        * During the reception of a message, the reception
-                        * buffers' "size" is set to contain the current
-                        * message's complete payload.
-                        */
-                       struct lttng_dynamic_buffer buffer;
-                       /* Bytes left to receive for the current message. */
-                       size_t bytes_to_receive;
-                       /* Type of the message being received. */
-                       enum lttng_notification_channel_message_type msg_type;
-                       /*
-                        * Indicates whether or not credentials are expected
-                        * from the client.
-                        */
-                       bool expect_creds;
-                       /*
-                        * Indicates whether or not credentials were received
-                        * from the client.
-                        */
-                       bool creds_received;
-                       /* Only used during credentials reception. */
-                       lttng_sock_cred creds;
-               } inbound;
-               struct {
-                       /*
-                        * Indicates whether or not a notification addressed to
-                        * this client was dropped because a command reply was
-                        * already buffered.
-                        *
-                        * A notification is dropped whenever the buffer is not
-                        * empty.
-                        */
-                       bool dropped_notification;
-                       /*
-                        * Indicates whether or not a command reply is already
-                        * buffered. In this case, it means that the client is
-                        * not consuming command replies before emitting a new
-                        * one. This could be caused by a protocol error or a
-                        * misbehaving/malicious client.
-                        */
-                       bool queued_command_reply;
-                       struct lttng_dynamic_buffer buffer;
-               } outbound;
-       } communication;
-       /* call_rcu delayed reclaim. */
-       struct rcu_head rcu_node;
-};
-
 struct channel_state_sample {
        struct channel_key key;
        struct cds_lfht_node channel_state_ht_node;
@@ -258,20 +190,42 @@ void lttng_session_trigger_list_destroy(
                struct lttng_session_trigger_list *list);
 static
 int lttng_session_trigger_list_add(struct lttng_session_trigger_list *list,
-               const struct lttng_trigger *trigger);
+               struct lttng_trigger *trigger);
+
+static
+int client_handle_transmission_status(
+               struct notification_client *client,
+               enum client_transmission_status transmission_status,
+               struct notification_thread_state *state);
+
+static
+int handle_one_event_notifier_notification(
+               struct notification_thread_state *state,
+               int pipe, enum lttng_domain_type domain);
 
+static
+void free_lttng_trigger_ht_element_rcu(struct rcu_head *node);
 
 static
-int match_client(struct cds_lfht_node *node, const void *key)
+int match_client_socket(struct cds_lfht_node *node, const void *key)
 {
        /* This double-cast is intended to supress pointer-to-cast warning. */
-       int socket = (int) (intptr_t) key;
-       struct notification_client *client;
+       const int socket = (int) (intptr_t) key;
+       const struct notification_client *client = caa_container_of(node,
+                       struct notification_client, client_socket_ht_node);
 
-       client = caa_container_of(node, struct notification_client,
-                       client_socket_ht_node);
+       return client->socket == socket;
+}
+
+static
+int match_client_id(struct cds_lfht_node *node, const void *key)
+{
+       /* This double-cast is intended to suppress a pointer-to-integer cast warning. */
+       const notification_client_id id = *((notification_client_id *) key);
+       const struct notification_client *client = caa_container_of(
+                       node, struct notification_client, client_id_ht_node);
 
-       return !!(client->socket == socket);
+       return client->id == id;
 }
 
 static
@@ -326,18 +280,26 @@ int match_channel_info(struct cds_lfht_node *node, const void *key)
 }
 
 static
-int match_condition(struct cds_lfht_node *node, const void *key)
+int match_trigger(struct cds_lfht_node *node, const void *key)
 {
-       struct lttng_condition *condition_key = (struct lttng_condition *) key;
-       struct lttng_trigger_ht_element *trigger;
-       struct lttng_condition *condition;
+       struct lttng_trigger *trigger_key = (struct lttng_trigger *) key;
+       struct lttng_trigger_ht_element *trigger_ht_element;
 
-       trigger = caa_container_of(node, struct lttng_trigger_ht_element,
+       trigger_ht_element = caa_container_of(node, struct lttng_trigger_ht_element,
                        node);
-       condition = lttng_trigger_get_condition(trigger->trigger);
-       assert(condition);
 
-       return !!lttng_condition_is_equal(condition_key, condition);
+       return !!lttng_trigger_is_equal(trigger_key, trigger_ht_element->trigger);
+}
+
+static
+int match_trigger_token(struct cds_lfht_node *node, const void *key)
+{
+       const uint64_t *_key = key;
+       struct notification_trigger_tokens_ht_element *element;
+
+       element = caa_container_of(node,
+                       struct notification_trigger_tokens_ht_element, node);
+       return *_key == element->token;
 }
 
 static
@@ -350,7 +312,7 @@ int match_client_list_condition(struct cds_lfht_node *node, const void *key)
        assert(condition_key);
 
        client_list = caa_container_of(node, struct notification_client_list,
-                       notification_trigger_ht_node);
+                       notification_trigger_clients_ht_node);
        condition = lttng_trigger_get_const_condition(client_list->trigger);
 
        return !!lttng_condition_is_equal(condition_key, condition);
@@ -367,102 +329,100 @@ int match_session(struct cds_lfht_node *node, const void *key)
 }
 
 static
-unsigned long lttng_condition_buffer_usage_hash(
-       const struct lttng_condition *_condition)
+const char *notification_command_type_str(
+               enum notification_thread_command_type type)
 {
-       unsigned long hash;
-       unsigned long condition_type;
-       struct lttng_condition_buffer_usage *condition;
-
-       condition = container_of(_condition,
-                       struct lttng_condition_buffer_usage, parent);
-
-       condition_type = (unsigned long) condition->parent.type;
-       hash = hash_key_ulong((void *) condition_type, lttng_ht_seed);
-       if (condition->session_name) {
-               hash ^= hash_key_str(condition->session_name, lttng_ht_seed);
-       }
-       if (condition->channel_name) {
-               hash ^= hash_key_str(condition->channel_name, lttng_ht_seed);
-       }
-       if (condition->domain.set) {
-               hash ^= hash_key_ulong(
-                               (void *) condition->domain.type,
-                               lttng_ht_seed);
-       }
-       if (condition->threshold_ratio.set) {
-               uint64_t val;
-
-               val = condition->threshold_ratio.value * (double) UINT32_MAX;
-               hash ^= hash_key_u64(&val, lttng_ht_seed);
-       } else if (condition->threshold_bytes.set) {
-               uint64_t val;
-
-               val = condition->threshold_bytes.value;
-               hash ^= hash_key_u64(&val, lttng_ht_seed);
+       switch (type) {
+       case NOTIFICATION_COMMAND_TYPE_REGISTER_TRIGGER:
+               return "REGISTER_TRIGGER";
+       case NOTIFICATION_COMMAND_TYPE_UNREGISTER_TRIGGER:
+               return "UNREGISTER_TRIGGER";
+       case NOTIFICATION_COMMAND_TYPE_ADD_CHANNEL:
+               return "ADD_CHANNEL";
+       case NOTIFICATION_COMMAND_TYPE_REMOVE_CHANNEL:
+               return "REMOVE_CHANNEL";
+       case NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING:
+               return "SESSION_ROTATION_ONGOING";
+       case NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_COMPLETED:
+               return "SESSION_ROTATION_COMPLETED";
+       case NOTIFICATION_COMMAND_TYPE_ADD_TRACER_EVENT_SOURCE:
+               return "ADD_TRACER_EVENT_SOURCE";
+       case NOTIFICATION_COMMAND_TYPE_REMOVE_TRACER_EVENT_SOURCE:
+               return "REMOVE_TRACER_EVENT_SOURCE";
+       case NOTIFICATION_COMMAND_TYPE_LIST_TRIGGERS:
+               return "LIST_TRIGGERS";
+       case NOTIFICATION_COMMAND_TYPE_QUIT:
+               return "QUIT";
+       case NOTIFICATION_COMMAND_TYPE_CLIENT_COMMUNICATION_UPDATE:
+               return "CLIENT_COMMUNICATION_UPDATE";
+       default:
+               abort();
        }
-       return hash;
 }
 
+/*
+ * Match trigger based on name and credentials only.
+ * Name duplication is NOT allowed for the same uid.
+ */
 static
-unsigned long lttng_condition_session_consumed_size_hash(
-       const struct lttng_condition *_condition)
+int match_trigger_by_name_uid(struct cds_lfht_node *node,
+               const void *key)
 {
-       unsigned long hash;
-       unsigned long condition_type =
-                       (unsigned long) LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE;
-       struct lttng_condition_session_consumed_size *condition;
-       uint64_t val;
-
-       condition = container_of(_condition,
-                       struct lttng_condition_session_consumed_size, parent);
-
-       hash = hash_key_ulong((void *) condition_type, lttng_ht_seed);
-       if (condition->session_name) {
-               hash ^= hash_key_str(condition->session_name, lttng_ht_seed);
+       bool match = false;
+       const char *name;
+       const char *key_name;
+       enum lttng_trigger_status status;
+       const struct lttng_credentials *key_creds;
+       const struct lttng_credentials *node_creds;
+       const struct lttng_trigger *trigger_key =
+                       (const struct lttng_trigger *) key;
+       const struct lttng_trigger_ht_element *trigger_ht_element =
+                       caa_container_of(node,
+                               struct lttng_trigger_ht_element,
+                               node_by_name_uid);
+
+       status = lttng_trigger_get_name(trigger_ht_element->trigger, &name);
+       assert(status == LTTNG_TRIGGER_STATUS_OK);
+
+       status = lttng_trigger_get_name(trigger_key, &key_name);
+       assert(status == LTTNG_TRIGGER_STATUS_OK);
+
+       /* Compare the names. */
+       if (strcmp(name, key_name) != 0) {
+               goto end;
        }
-       val = condition->consumed_threshold_bytes.value;
-       hash ^= hash_key_u64(&val, lttng_ht_seed);
-       return hash;
-}
 
-static
-unsigned long lttng_condition_session_rotation_hash(
-       const struct lttng_condition *_condition)
-{
-       unsigned long hash, condition_type;
-       struct lttng_condition_session_rotation *condition;
+       /* Compare the owners' UIDs. */
+       key_creds = lttng_trigger_get_credentials(trigger_key);
+       node_creds = lttng_trigger_get_credentials(trigger_ht_element->trigger);
 
-       condition = container_of(_condition,
-                       struct lttng_condition_session_rotation, parent);
-       condition_type = (unsigned long) condition->parent.type;
-       hash = hash_key_ulong((void *) condition_type, lttng_ht_seed);
-       assert(condition->session_name);
-       hash ^= hash_key_str(condition->session_name, lttng_ht_seed);
-       return hash;
+       match = lttng_credentials_is_equal_uid(key_creds, node_creds);
+
+end:
+       return match;
 }
 
 /*
- * The lttng_condition hashing code is kept in this file (rather than
- * condition.c) since it makes use of GPLv2 code (hashtable utils), which we
- * don't want to link in liblttng-ctl.
+ * Hash trigger based on name and credentials only.
  */
 static
-unsigned long lttng_condition_hash(const struct lttng_condition *condition)
+unsigned long hash_trigger_by_name_uid(const struct lttng_trigger *trigger)
 {
-       switch (condition->type) {
-       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
-       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
-               return lttng_condition_buffer_usage_hash(condition);
-       case LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE:
-               return lttng_condition_session_consumed_size_hash(condition);
-       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
-       case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
-               return lttng_condition_session_rotation_hash(condition);
-       default:
-               ERR("[notification-thread] Unexpected condition type caught");
-               abort();
+       unsigned long hash = 0;
+       const struct lttng_credentials *trigger_creds;
+       const char *trigger_name;
+       enum lttng_trigger_status status;
+
+       status = lttng_trigger_get_name(trigger, &trigger_name);
+       if (status == LTTNG_TRIGGER_STATUS_OK) {
+               hash = hash_key_str(trigger_name, lttng_ht_seed);
        }
+
+       trigger_creds = lttng_trigger_get_credentials(trigger);
+       hash ^= hash_key_ulong((void *) (unsigned long) LTTNG_OPTIONAL_GET(trigger_creds->uid),
+                       lttng_ht_seed);
+
+       return hash;
 }
 
 static
@@ -475,6 +435,18 @@ unsigned long hash_channel_key(struct channel_key *key)
        return key_hash ^ domain_hash;
 }
 
+static
+unsigned long hash_client_socket(int socket)
+{
+       return hash_key_ulong((void *) (unsigned long) socket, lttng_ht_seed);
+}
+
+static
+unsigned long hash_client_id(notification_client_id id)
+{
+       return hash_key_u64(&id, lttng_ht_seed);
+}
+
 /*
  * Get the type of object to which a given condition applies. Bindings let
  * the notification system evaluate a trigger's condition when a given
@@ -495,6 +467,8 @@ enum lttng_object_type get_condition_binding_object(
        case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
        case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
                return LTTNG_OBJECT_TYPE_SESSION;
+       case LTTNG_CONDITION_TYPE_EVENT_RULE_HIT:
+               return LTTNG_OBJECT_TYPE_NONE;
        default:
                return LTTNG_OBJECT_TYPE_UNKNOWN;
        }
@@ -666,7 +640,91 @@ error:
        return NULL;
 }
 
-/* RCU read lock must be held by the caller. */
+LTTNG_HIDDEN
+bool notification_client_list_get(struct notification_client_list *list)
+{
+       return urcu_ref_get_unless_zero(&list->ref);
+}
+
+static
+void free_notification_client_list_rcu(struct rcu_head *node)
+{
+       free(caa_container_of(node, struct notification_client_list,
+                       rcu_node));
+}
+
+static
+void notification_client_list_release(struct urcu_ref *list_ref)
+{
+       struct notification_client_list *list =
+                       container_of(list_ref, typeof(*list), ref);
+       struct notification_client_list_element *client_list_element, *tmp;
+
+       if (list->notification_trigger_clients_ht) {
+               rcu_read_lock();
+               cds_lfht_del(list->notification_trigger_clients_ht,
+                               &list->notification_trigger_clients_ht_node);
+               rcu_read_unlock();
+               list->notification_trigger_clients_ht = NULL;
+       }
+       cds_list_for_each_entry_safe(client_list_element, tmp,
+                                    &list->list, node) {
+               free(client_list_element);
+       }
+       pthread_mutex_destroy(&list->lock);
+       call_rcu(&list->rcu_node, free_notification_client_list_rcu);
+}
+
+static
+struct notification_client_list *notification_client_list_create(
+               const struct lttng_trigger *trigger)
+{
+       struct notification_client_list *client_list =
+                       zmalloc(sizeof(*client_list));
+
+       if (!client_list) {
+               goto error;
+       }
+       pthread_mutex_init(&client_list->lock, NULL);
+       urcu_ref_init(&client_list->ref);
+       cds_lfht_node_init(&client_list->notification_trigger_clients_ht_node);
+       CDS_INIT_LIST_HEAD(&client_list->list);
+       client_list->trigger = trigger;
+error:
+       return client_list;
+}
+
+static
+void publish_notification_client_list(
+               struct notification_thread_state *state,
+               struct notification_client_list *list)
+{
+       const struct lttng_condition *condition =
+                       lttng_trigger_get_const_condition(list->trigger);
+
+       assert(!list->notification_trigger_clients_ht);
+       notification_client_list_get(list);
+
+       list->notification_trigger_clients_ht =
+                       state->notification_trigger_clients_ht;
+
+       rcu_read_lock();
+       cds_lfht_add(state->notification_trigger_clients_ht,
+                       lttng_condition_hash(condition),
+                       &list->notification_trigger_clients_ht_node);
+       rcu_read_unlock();
+}
+
+LTTNG_HIDDEN
+void notification_client_list_put(struct notification_client_list *list)
+{
+       if (!list) {
+               return;
+       }
+       return urcu_ref_put(&list->ref, notification_client_list_release);
+}
+
+/* Provides a reference to the returned list. */
 static
 struct notification_client_list *get_client_list_from_condition(
        struct notification_thread_state *state,
@@ -674,20 +732,25 @@ struct notification_client_list *get_client_list_from_condition(
 {
        struct cds_lfht_node *node;
        struct cds_lfht_iter iter;
+       struct notification_client_list *list = NULL;
 
+       rcu_read_lock();
        cds_lfht_lookup(state->notification_trigger_clients_ht,
                        lttng_condition_hash(condition),
                        match_client_list_condition,
                        condition,
                        &iter);
        node = cds_lfht_iter_get_node(&iter);
+       if (node) {
+               list = container_of(node, struct notification_client_list,
+                               notification_trigger_clients_ht_node);
+               list = notification_client_list_get(list) ? list : NULL;
+       }
 
-       return node ? caa_container_of(node,
-                       struct notification_client_list,
-                       notification_trigger_ht_node) : NULL;
+       rcu_read_unlock();
+       return list;
 }
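
The client list is now reference counted: creation and the lookup above each hand out a reference that the caller must eventually put, and the last put() defers the actual free through call_rcu() so concurrent RCU readers stay safe. A standalone sketch of that urcu_ref + call_rcu lifecycle; the object type and function names are hypothetical:

#include <urcu.h>
#include <urcu/ref.h>
#include <stdbool.h>
#include <stdlib.h>

struct object {
	struct urcu_ref ref;
	struct rcu_head rcu_node;
};

static void free_object_rcu(struct rcu_head *node)
{
	free(caa_container_of(node, struct object, rcu_node));
}

static void object_release(struct urcu_ref *ref)
{
	struct object *obj = caa_container_of(ref, struct object, ref);

	/* Defer the free until all pre-existing RCU readers are done. */
	call_rcu(&obj->rcu_node, free_object_rcu);
}

static struct object *object_create(void)
{
	struct object *obj = calloc(1, sizeof(*obj));

	if (obj) {
		urcu_ref_init(&obj->ref);	/* Refcount starts at 1. */
	}
	return obj;
}

/* Returns true if a reference was acquired (object not already dying). */
static bool object_get(struct object *obj)
{
	return urcu_ref_get_unless_zero(&obj->ref);
}

static void object_put(struct object *obj)
{
	if (obj) {
		urcu_ref_put(&obj->ref, object_release);
	}
}
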
 
-/* This function must be called with the RCU read lock held. */
 static
 int evaluate_channel_condition_for_client(
                const struct lttng_condition *condition,
@@ -703,6 +766,8 @@ int evaluate_channel_condition_for_client(
        struct channel_state_sample *last_sample = NULL;
        struct lttng_channel_trigger_list *channel_trigger_list = NULL;
 
+       rcu_read_lock();
+
        /* Find the channel associated with the condition. */
        cds_lfht_for_each_entry(state->channel_triggers_ht, &iter,
                        channel_trigger_list, channel_triggers_ht_node) {
@@ -777,6 +842,7 @@ int evaluate_channel_condition_for_client(
        *session_uid = channel_info->session_info->uid;
        *session_gid = channel_info->session_info->gid;
 end:
+       rcu_read_unlock();
        return ret;
 }
 
@@ -812,7 +878,6 @@ end:
        return session_name;
 }
 
-/* This function must be called with the RCU read lock held. */
 static
 int evaluate_session_condition_for_client(
                const struct lttng_condition *condition,
@@ -826,6 +891,7 @@ int evaluate_session_condition_for_client(
        const char *session_name;
        struct session_info *session_info = NULL;
 
+       rcu_read_lock();
        session_name = get_condition_session_name(condition);
 
        /* Find the session associated with the trigger. */
@@ -879,10 +945,10 @@ int evaluate_session_condition_for_client(
 end_session_put:
        session_info_put(session_info);
 end:
+       rcu_read_unlock();
        return ret;
 }
 
-/* This function must be called with the RCU read lock held. */
 static
 int evaluate_condition_for_client(const struct lttng_trigger *trigger,
                const struct lttng_condition *condition,
@@ -891,7 +957,9 @@ int evaluate_condition_for_client(const struct lttng_trigger *trigger,
 {
        int ret;
        struct lttng_evaluation *evaluation = NULL;
-       struct notification_client_list client_list = { 0 };
+       struct notification_client_list client_list = {
+               .lock = PTHREAD_MUTEX_INITIALIZER,
+       };
        struct notification_client_list_element client_list_element = { 0 };
        uid_t object_uid = 0;
        gid_t object_gid = 0;
@@ -911,6 +979,7 @@ int evaluate_condition_for_client(const struct lttng_trigger *trigger,
                                &evaluation, &object_uid, &object_gid);
                break;
        case LTTNG_OBJECT_TYPE_NONE:
+               DBG("[notification-thread] Newly subscribed-to condition not bound to object, nothing to evaluate");
                ret = 0;
                goto end;
        case LTTNG_OBJECT_TYPE_UNKNOWN:
@@ -933,7 +1002,7 @@ int evaluate_condition_for_client(const struct lttng_trigger *trigger,
         * Create a temporary client list with the client currently
         * subscribing.
         */
-       cds_lfht_node_init(&client_list.notification_trigger_ht_node);
+       cds_lfht_node_init(&client_list.notification_trigger_clients_ht_node);
        CDS_INIT_LIST_HEAD(&client_list.list);
        client_list.trigger = trigger;
 
@@ -957,7 +1026,7 @@ int notification_thread_client_subscribe(struct notification_client *client,
                enum lttng_notification_channel_status *_status)
 {
        int ret = 0;
-       struct notification_client_list *client_list;
+       struct notification_client_list *client_list = NULL;
        struct lttng_condition_list_element *condition_list_element = NULL;
        struct notification_client_list_element *client_list_element = NULL;
        enum lttng_notification_channel_status status =
@@ -986,8 +1055,6 @@ int notification_thread_client_subscribe(struct notification_client *client,
                goto error;
        }
 
-       rcu_read_lock();
-
        /*
         * Add the newly-subscribed condition to the client's subscription list.
         */
@@ -1003,20 +1070,24 @@ int notification_thread_client_subscribe(struct notification_client *client,
                 * since this trigger is not registered yet.
                 */
                free(client_list_element);
-               goto end_unlock;
+               goto end;
        }
 
        /*
         * The condition to which the client just subscribed is evaluated
         * at this point so that conditions that are already TRUE result
         * in a notification being sent out.
+        *
+        * The client_list's trigger is used without locking the list itself.
+        * This is correct since the list doesn't own the trigger and the
+        * object is immutable.
         */
        if (evaluate_condition_for_client(client_list->trigger, condition,
                        client, state)) {
                WARN("[notification-thread] Evaluation of a condition on client subscription failed, aborting.");
                ret = -1;
                free(client_list_element);
-               goto end_unlock;
+               goto end;
        }
 
        /*
@@ -1026,13 +1097,17 @@ int notification_thread_client_subscribe(struct notification_client *client,
         */
        client_list_element->client = client;
        CDS_INIT_LIST_HEAD(&client_list_element->node);
+
+       pthread_mutex_lock(&client_list->lock);
        cds_list_add(&client_list_element->node, &client_list->list);
-end_unlock:
-       rcu_read_unlock();
+       pthread_mutex_unlock(&client_list->lock);
 end:
        if (_status) {
                *_status = status;
        }
+       if (client_list) {
+               notification_client_list_put(client_list);
+       }
        return ret;
 error:
        free(condition_list_element);
@@ -1088,23 +1163,24 @@ int notification_thread_client_unsubscribe(
         * Remove the client from the list of clients interested in the trigger
         * matching the condition.
         */
-       rcu_read_lock();
        client_list = get_client_list_from_condition(state, condition);
        if (!client_list) {
-               goto end_unlock;
+               goto end;
        }
 
+       pthread_mutex_lock(&client_list->lock);
        cds_list_for_each_entry_safe(client_list_element, client_tmp,
                        &client_list->list, node) {
-               if (client_list_element->client->socket != client->socket) {
+               if (client_list_element->client->id != client->id) {
                        continue;
                }
                cds_list_del(&client_list_element->node);
                free(client_list_element);
                break;
        }
-end_unlock:
-       rcu_read_unlock();
+       pthread_mutex_unlock(&client_list->lock);
+       notification_client_list_put(client_list);
+       client_list = NULL;
 end:
        lttng_condition_destroy(condition);
        if (_status) {
@@ -1123,24 +1199,22 @@ static
 void notification_client_destroy(struct notification_client *client,
                struct notification_thread_state *state)
 {
-       struct lttng_condition_list_element *condition_list_element, *tmp;
-
        if (!client) {
                return;
        }
 
-       /* Release all conditions to which the client was subscribed. */
-       cds_list_for_each_entry_safe(condition_list_element, tmp,
-                       &client->condition_list, node) {
-               (void) notification_thread_client_unsubscribe(client,
-                               condition_list_element->condition, state, NULL);
-       }
-
+       /*
+        * The client object is not reachable by other threads, no need to lock
+        * the client here.
+        */
        if (client->socket >= 0) {
                (void) lttcomm_close_unix_sock(client->socket);
+               client->socket = -1;
        }
-       lttng_dynamic_buffer_reset(&client->communication.inbound.buffer);
-       lttng_dynamic_buffer_reset(&client->communication.outbound.buffer);
+       client->communication.active = false;
+       lttng_payload_reset(&client->communication.inbound.payload);
+       lttng_payload_reset(&client->communication.outbound.payload);
+       pthread_mutex_destroy(&client->lock);
        call_rcu(&client->rcu_node, free_notification_client_rcu);
 }
 
@@ -1157,8 +1231,8 @@ struct notification_client *get_client_from_socket(int socket,
        struct notification_client *client = NULL;
 
        cds_lfht_lookup(state->client_socket_ht,
-                       hash_key_ulong((void *) (unsigned long) socket, lttng_ht_seed),
-                       match_client,
+                       hash_client_socket(socket),
+                       match_client_socket,
                        (void *) (unsigned long) socket,
                        &iter);
        node = cds_lfht_iter_get_node(&iter);
@@ -1172,6 +1246,34 @@ end:
        return client;
 }
 
+/*
+ * Call with rcu_read_lock held (and hold for the lifetime of the returned
+ * client pointer).
+ */
+static
+struct notification_client *get_client_from_id(notification_client_id id,
+               struct notification_thread_state *state)
+{
+       struct cds_lfht_iter iter;
+       struct cds_lfht_node *node;
+       struct notification_client *client = NULL;
+
+       cds_lfht_lookup(state->client_id_ht,
+                       hash_client_id(id),
+                       match_client_id,
+                       &id,
+                       &iter);
+       node = cds_lfht_iter_get_node(&iter);
+       if (!node) {
+               goto end;
+       }
+
+       client = caa_container_of(node, struct notification_client,
+                       client_id_ht_node);
+end:
+       return client;
+}
+
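
As the comment above states, the returned pointer is only protected by RCU; the caller must keep the read-side lock held for as long as it dereferences the client. A hypothetical caller sketch using only symbols defined in this file:

static void ping_client(notification_client_id id,
		struct notification_thread_state *state)
{
	struct notification_client *client;

	rcu_read_lock();
	client = get_client_from_id(id, state);
	if (client) {
		DBG("Found client: socket = %d", client->socket);
	}
	rcu_read_unlock();
	/* `client` must not be dereferenced past this point. */
}
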
 static
 bool buffer_usage_condition_applies_to_channel(
                const struct lttng_condition *condition,
@@ -1375,7 +1477,7 @@ void lttng_session_trigger_list_destroy(struct lttng_session_trigger_list *list)
 
 static
 int lttng_session_trigger_list_add(struct lttng_session_trigger_list *list,
-               const struct lttng_trigger *trigger)
+               struct lttng_trigger *trigger)
 {
        int ret = 0;
        struct lttng_trigger_list_element *new_element =
@@ -1549,7 +1651,7 @@ int handle_notification_thread_command_add_channel(
 
        DBG("[notification-thread] Adding channel %s from session %s, channel key = %" PRIu64 " in %s domain",
                        channel_name, session_name, channel_key_int,
-                       channel_domain == LTTNG_DOMAIN_KERNEL ? "kernel" : "user space");
+                       lttng_domain_type_str(channel_domain));
 
        CDS_INIT_LIST_HEAD(&trigger_list);
 
@@ -1650,7 +1752,7 @@ int handle_notification_thread_command_remove_channel(
        struct channel_info *channel_info;
 
        DBG("[notification-thread] Removing channel key = %" PRIu64 " in %s domain",
-                       channel_key, domain == LTTNG_DOMAIN_KERNEL ? "kernel" : "user space");
+                       channel_key, lttng_domain_type_str(domain));
 
        rcu_read_lock();
 
@@ -1732,6 +1834,10 @@ int handle_notification_thread_command_session_rotation(
        struct lttng_session_trigger_list *trigger_list;
        struct lttng_trigger_list_element *trigger_list_element;
        struct session_info *session_info;
+       const struct lttng_credentials session_creds = {
+               .uid = LTTNG_OPTIONAL_INIT_VALUE(session_uid),
+               .gid = LTTNG_OPTIONAL_INIT_VALUE(session_gid),
+       };
 
        rcu_read_lock();
 
@@ -1757,11 +1863,11 @@ int handle_notification_thread_command_session_rotation(
        cds_list_for_each_entry(trigger_list_element, &trigger_list->list,
                        node) {
                const struct lttng_condition *condition;
-               const struct lttng_action *action;
-               const struct lttng_trigger *trigger;
+               struct lttng_trigger *trigger;
                struct notification_client_list *client_list;
                struct lttng_evaluation *evaluation = NULL;
                enum lttng_condition_type condition_type;
+               enum action_executor_status executor_status;
 
                trigger = trigger_list_element->trigger;
                condition = lttng_trigger_get_const_condition(trigger);
@@ -1776,23 +1882,7 @@ int handle_notification_thread_command_session_rotation(
                        continue;
                }
 
-               action = lttng_trigger_get_const_action(trigger);
-
-               /* Notify actions are the only type currently supported. */
-               assert(lttng_action_get_type_const(action) ==
-                               LTTNG_ACTION_TYPE_NOTIFY);
-
                client_list = get_client_list_from_condition(state, condition);
-               assert(client_list);
-
-               if (cds_list_empty(&client_list->list)) {
-                       /*
-                        * No clients interested in the evaluation's result,
-                        * skip it.
-                        */
-                       continue;
-               }
-
                if (cmd_type == NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING) {
                        evaluation = lttng_evaluation_session_rotation_ongoing_create(
                                        trace_archive_chunk_id);
@@ -1805,17 +1895,47 @@ int handle_notification_thread_command_session_rotation(
                        /* Internal error */
                        ret = -1;
                        cmd_result = LTTNG_ERR_UNK;
-                       goto end;
+                       goto put_list;
+               }
+
+               /*
+                * Ownership of `evaluation` transferred to the action executor
+                * no matter the result.
+                */
+               executor_status = action_executor_enqueue(state->executor,
+                               trigger, evaluation, &session_creds,
+                               client_list);
+               evaluation = NULL;
+               switch (executor_status) {
+               case ACTION_EXECUTOR_STATUS_OK:
+                       break;
+               case ACTION_EXECUTOR_STATUS_ERROR:
+               case ACTION_EXECUTOR_STATUS_INVALID:
+                       /*
+                        * TODO Add trigger identification (name/id) when
+                        * it is added to the API.
+                        */
+                       ERR("Fatal error occurred while enqueuing action associated with session rotation trigger");
+                       ret = -1;
+                       goto put_list;
+               case ACTION_EXECUTOR_STATUS_OVERFLOW:
+                       /*
+                        * TODO Add trigger identification (name/id) when
+                        * it is added to the API.
+                        *
+                        * Not a fatal error.
+                        */
+                       WARN("No space left when enqueuing action associated with session rotation trigger");
+                       ret = 0;
+                       goto put_list;
+               default:
+                       abort();
                }
 
-               /* Dispatch evaluation result to all clients. */
-               ret = send_evaluation_to_clients(trigger_list_element->trigger,
-                               evaluation, client_list, state,
-                               session_info->uid,
-                               session_info->gid);
-               lttng_evaluation_destroy(evaluation);
+put_list:
+               notification_client_list_put(client_list);
                if (caa_unlikely(ret)) {
-                       goto end;
+                       break;
                }
        }
 end:
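
This switch is the status handling the commit title refers to: enqueuing transfers ownership of the evaluation no matter the outcome, a full executor queue (OVERFLOW) is tolerated, and ERROR/INVALID abort the command. A condensed sketch of the same pattern; the enqueue_or_report() helper is hypothetical and only illustrates the contract:

static int enqueue_or_report(struct notification_thread_state *state,
		struct lttng_trigger *trigger,
		struct lttng_evaluation *evaluation,
		const struct lttng_credentials *creds,
		struct notification_client_list *client_list)
{
	int ret;
	const enum action_executor_status status = action_executor_enqueue(
			state->executor, trigger, evaluation, creds,
			client_list);

	/* `evaluation` must not be used past this point: ownership was transferred. */
	switch (status) {
	case ACTION_EXECUTOR_STATUS_OK:
		ret = 0;
		break;
	case ACTION_EXECUTOR_STATUS_OVERFLOW:
		/* The executor's queue is full; the work item is dropped, not fatal. */
		WARN("Action executor queue overflow; work item dropped");
		ret = 0;
		break;
	case ACTION_EXECUTOR_STATUS_ERROR:
	case ACTION_EXECUTOR_STATUS_INVALID:
		ERR("Failed to enqueue trigger actions");
		ret = -1;
		break;
	default:
		abort();
	}

	return ret;
}
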
@@ -1826,25 +1946,269 @@ end:
 }
 
 static
-int condition_is_supported(struct lttng_condition *condition)
+int handle_notification_thread_command_add_tracer_event_source(
+               struct notification_thread_state *state,
+               int tracer_event_source_fd,
+               enum lttng_domain_type domain_type,
+               enum lttng_error_code *_cmd_result)
 {
-       int ret;
-
-       switch (lttng_condition_get_type(condition)) {
-       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
-       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
-       {
-               enum lttng_domain_type domain;
+       int ret = 0;
+       enum lttng_error_code cmd_result = LTTNG_OK;
+       struct notification_event_tracer_event_source_element *element = NULL;
 
-               ret = lttng_condition_buffer_usage_get_domain_type(condition,
-                               &domain);
-               if (ret) {
+       element = zmalloc(sizeof(*element));
+       if (!element) {
+               cmd_result = LTTNG_ERR_NOMEM;
+               ret = -1;
+               goto end;
+       }
+
+       element->fd = tracer_event_source_fd;
+       element->domain = domain_type;
+
+       cds_list_add(&element->node, &state->tracer_event_sources_list);
+
+       DBG3("[notification-thread] Adding tracer event source fd to poll set: tracer_event_source_fd = %d, domain = '%s'",
+                       tracer_event_source_fd,
+                       lttng_domain_type_str(domain_type));
+
+       /* Adding the read side pipe to the event poll. */
+       ret = lttng_poll_add(&state->events, tracer_event_source_fd, LPOLLIN | LPOLLERR);
+       if (ret < 0) {
+               ERR("[notification-thread] Failed to add tracer event source to poll set: tracer_event_source_fd = %d, domain = '%s'",
+                               tracer_event_source_fd,
+                               lttng_domain_type_str(element->domain));
+               cds_list_del(&element->node);
+               free(element);
+               goto end;
+       }
+
+       element->is_fd_in_poll_set = true;
+
+end:
+       *_cmd_result = cmd_result;
+       return ret;
+}
+
+static
+int drain_event_notifier_notification_pipe(
+               struct notification_thread_state *state,
+               int pipe, enum lttng_domain_type domain)
+{
+       struct lttng_poll_event events = {0};
+       int ret;
+
+       ret = lttng_poll_create(&events, 1, LTTNG_CLOEXEC);
+       if (ret < 0) {
+               ERR("[notification-thread] Error creating lttng_poll_event");
+               goto end;
+       }
+
+       ret = lttng_poll_add(&events, pipe, LPOLLIN);
+       if (ret < 0) {
+               ERR("[notification-thread] Error adding fd event notifier notification pipe to lttng_poll_event: fd = %d",
+                               pipe);
+               goto end;
+       }
+
+       while (true) {
+               /*
+                * Continue to consume notifications as long as there are new
+                * ones coming in. The tracer has been asked to stop producing
+                * them.
+                *
+                * LPOLLIN is explicitly checked since LPOLLHUP is implicitly
+                * monitored (on Linux, at least) and will be returned when
+                * the pipe is closed but empty.
+                */
+               ret = lttng_poll_wait_interruptible(&events, 0);
+               if (ret == 0 || (LTTNG_POLL_GETEV(&events, 0) & LPOLLIN) == 0) {
+                       /* No more notification to be read on this pipe. */
+                       ret = 0;
+                       goto end;
+               } else if (ret < 0) {
+                       PERROR("Failed on lttng_poll_wait_interruptible() call");
                        ret = -1;
                        goto end;
                }
 
+               ret = handle_one_event_notifier_notification(state, pipe, domain);
+               if (ret) {
+                       ERR("[notification-thread] Error consuming an event notifier notification from pipe: fd = %d",
+                                       pipe);
+               }
+       }
+end:
+       lttng_poll_clean(&events);
+       return ret;
+}
+
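
The drain loop above relies on a zero-timeout poll and an explicit LPOLLIN check so that a closed-but-empty pipe (which only reports HUP) terminates the loop. The same idea expressed with plain poll(2) as a self-contained sketch; the consume_one() callback is hypothetical:

#include <poll.h>
#include <stdbool.h>

/* Read remaining records from `fd` until a zero-timeout poll reports no input. */
static int drain_pipe(int fd, int (*consume_one)(int fd))
{
	int ret = 0;

	while (true) {
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		const int nb_ready = poll(&pfd, 1, 0);

		if (nb_ready < 0) {
			/* poll() error. */
			ret = -1;
			break;
		}
		if (nb_ready == 0 || !(pfd.revents & POLLIN)) {
			/*
			 * Nothing left to read; POLLHUP alone means the writer
			 * closed the (now empty) pipe.
			 */
			break;
		}

		ret = consume_one(fd);
		if (ret) {
			break;
		}
	}

	return ret;
}
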
+static
+int handle_notification_thread_command_remove_tracer_event_source(
+               struct notification_thread_state *state,
+               int tracer_event_source_fd,
+               enum lttng_error_code *_cmd_result)
+{
+       int ret = 0;
+       bool found = false;
+       enum lttng_error_code cmd_result = LTTNG_OK;
+       struct notification_event_tracer_event_source_element *source_element = NULL, *tmp;
+
+       cds_list_for_each_entry_safe(source_element, tmp,
+                       &state->tracer_event_sources_list, node) {
+               if (source_element->fd != tracer_event_source_fd) {
+                       continue;
+               }
+
+               DBG("[notification-thread] Removed tracer event source from poll set: tracer_event_source_fd = %d, domain = '%s'",
+                               tracer_event_source_fd,
+                               lttng_domain_type_str(source_element->domain));
+               cds_list_del(&source_element->node);
+               found = true;
+               break;
+       }
+
+       if (!found) {
+               /*
+                * This is temporarily allowed since the poll activity set is
+                * not properly cleaned-up for the moment. This is adressed in
+                * an upcoming fix.
+                */
+               source_element = NULL;
+               goto end;
+       }
+
+       if (!source_element->is_fd_in_poll_set) {
+               /* Skip the poll set removal. */
+               goto end;
+       }
+
+       DBG3("[notification-thread] Removing tracer event source from poll set: tracer_event_source_fd = %d, domain = '%s'",
+                       tracer_event_source_fd,
+                       lttng_domain_type_str(source_element->domain));
+
+       /* Removing the fd from the event poll set. */
+       ret = lttng_poll_del(&state->events, tracer_event_source_fd);
+       if (ret < 0) {
+               ERR("[notification-thread] Failed to remove tracer event source from poll set: tracer_event_source_fd = %d, domain = '%s'",
+                               tracer_event_source_fd,
+                               lttng_domain_type_str(source_element->domain));
+               cmd_result = LTTNG_ERR_FATAL;
+               goto end;
+       }
+
+       source_element->is_fd_in_poll_set = false;
+
+       ret = drain_event_notifier_notification_pipe(state, tracer_event_source_fd,
+                       source_element->domain);
+       if (ret) {
+               ERR("[notification-thread] Error draining event notifier notification: tracer_event_source_fd = %d, domain = %s",
+                               tracer_event_source_fd,
+                               lttng_domain_type_str(source_element->domain));
+               cmd_result = LTTNG_ERR_FATAL;
+               goto end;
+       }
+
+       /*
+        * The drain_event_notifier_notification_pipe() call might have read
+        * data from an fd for which we received an event in the latest
+        * _poll_wait() call. Make sure the thread calls poll_wait() again to
+        * ensure we have a clean state.
+        */
+       state->restart_poll = true;
+
+end:
+       free(source_element);
+       *_cmd_result = cmd_result;
+       return ret;
+}
+
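
Draining the pipe here may consume data for which the main loop still holds a stale poll event, hence the restart_poll flag set above. A minimal sketch of an event loop honouring such a flag; the loop_state layout and handle_event() handler are hypothetical:

#include <poll.h>
#include <stdbool.h>

struct loop_state {
	/* Set by handlers that invalidate the remaining ready events. */
	bool restart_poll;
};

/* Hypothetical handler: may drain fds out-of-band and set restart_poll. */
extern void handle_event(struct pollfd *pfd, struct loop_state *state);

static void run_event_loop(struct pollfd *fds, nfds_t nb_fds,
		struct loop_state *state)
{
	for (;;) {
		nfds_t i;

		if (poll(fds, nb_fds, -1) < 0) {
			break;
		}

		for (i = 0; i < nb_fds; i++) {
			if (!fds[i].revents) {
				continue;
			}

			handle_event(&fds[i], state);
			if (state->restart_poll) {
				/*
				 * A handler consumed data out-of-band; the
				 * remaining revents may be stale, so drop them
				 * and poll again for a fresh view.
				 */
				state->restart_poll = false;
				break;
			}
		}
	}
}
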
+int handle_notification_thread_remove_tracer_event_source_no_result(
+               struct notification_thread_state *state,
+               int tracer_event_source_fd)
+{
+       int ret;
+       enum lttng_error_code cmd_result;
+
+       ret = handle_notification_thread_command_remove_tracer_event_source(
+                       state, tracer_event_source_fd, &cmd_result);
+       (void) cmd_result;
+       return ret;
+}
+
+static int handle_notification_thread_command_list_triggers(
+               struct notification_thread_handle *handle,
+               struct notification_thread_state *state,
+               uid_t client_uid,
+               struct lttng_triggers **triggers,
+               enum lttng_error_code *_cmd_result)
+{
+       int ret = 0;
+       enum lttng_error_code cmd_result = LTTNG_OK;
+       struct cds_lfht_iter iter;
+       struct lttng_trigger_ht_element *trigger_ht_element;
+       struct lttng_triggers *local_triggers = NULL;
+       const struct lttng_credentials *creds;
+
+       rcu_read_lock();
+
+       local_triggers = lttng_triggers_create();
+       if (!local_triggers) {
+               /* Not a fatal error. */
+               cmd_result = LTTNG_ERR_NOMEM;
+               goto end;
+       }
+
+       cds_lfht_for_each_entry(state->triggers_ht, &iter,
+                       trigger_ht_element, node) {
+               /*
+                * Only return the triggers to which the client has access.
+                * The root user has visibility over all triggers.
+                */
+               creds = lttng_trigger_get_credentials(trigger_ht_element->trigger);
+               if (client_uid != lttng_credentials_get_uid(creds) && client_uid != 0) {
+                       continue;
+               }
+
+               ret = lttng_triggers_add(local_triggers,
+                               trigger_ht_element->trigger);
+               if (ret < 0) {
+                       /* Not a fatal error. */
+                       ret = 0;
+                       cmd_result = LTTNG_ERR_NOMEM;
+                       goto end;
+               }
+       }
+
+       /* Transferring ownership to the caller. */
+       *triggers = local_triggers;
+       local_triggers = NULL;
+
+end:
+       rcu_read_unlock();
+       lttng_triggers_destroy(local_triggers);
+       *_cmd_result = cmd_result;
+       return ret;
+}
+
+static
+bool condition_is_supported(struct lttng_condition *condition)
+{
+       bool is_supported;
+
+       switch (lttng_condition_get_type(condition)) {
+       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
+       case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
+       {
+               int ret;
+               enum lttng_domain_type domain;
+
+               ret = lttng_condition_buffer_usage_get_domain_type(condition,
+                               &domain);
+               assert(ret == 0);
+
                if (domain != LTTNG_DOMAIN_KERNEL) {
-                       ret = 1;
+                       is_supported = true;
                        goto end;
                }
 
@@ -1852,20 +2216,48 @@ int condition_is_supported(struct lttng_condition *condition)
                 * Older kernel tracers don't expose the API to monitor their
                 * buffers. Therefore, we reject triggers that require that
                 * mechanism to be available to be evaluated.
+                *
+                * Assume unsupported on error.
+                */
+               is_supported = kernel_supports_ring_buffer_snapshot_sample_positions() == 1;
+               break;
+       }
+       case LTTNG_CONDITION_TYPE_EVENT_RULE_HIT:
+       {
+               const struct lttng_event_rule *event_rule;
+               enum lttng_domain_type domain;
+               const enum lttng_condition_status status =
+                               lttng_condition_event_rule_get_rule(
+                                               condition, &event_rule);
+
+               assert(status == LTTNG_CONDITION_STATUS_OK);
+
+               domain = lttng_event_rule_get_domain_type(event_rule);
+               if (domain != LTTNG_DOMAIN_KERNEL) {
+                       is_supported = true;
+                       goto end;
+               }
+
+               /*
+                * Older kernel tracers can't emit notifications. Therefore, we
+                * reject triggers that require that mechanism to be available
+                * to be evaluated.
+                *
+                * Assume unsupported on error.
                 */
-               ret = kernel_supports_ring_buffer_snapshot_sample_positions();
+               is_supported = kernel_supports_event_notifiers() == 1;
                break;
        }
        default:
-               ret = 1;
+               is_supported = true;
        }
 end:
-       return ret;
+       return is_supported;
 }
 
 /* Must be called with RCU read lock held. */
 static
-int bind_trigger_to_matching_session(const struct lttng_trigger *trigger,
+int bind_trigger_to_matching_session(struct lttng_trigger *trigger,
                struct notification_thread_state *state)
 {
        int ret = 0;
@@ -1911,7 +2303,7 @@ end:
 
 /* Must be called with RCU read lock held. */
 static
-int bind_trigger_to_matching_channels(const struct lttng_trigger *trigger,
+int bind_trigger_to_matching_channels(struct lttng_trigger *trigger,
                struct notification_thread_state *state)
 {
        int ret = 0;
@@ -1955,9 +2347,90 @@ end:
        return ret;
 }
 
+static
+bool is_trigger_action_notify(const struct lttng_trigger *trigger)
+{
+       bool is_notify = false;
+       unsigned int i, count;
+       enum lttng_action_status action_status;
+       const struct lttng_action *action =
+                       lttng_trigger_get_const_action(trigger);
+       enum lttng_action_type action_type;
+
+       assert(action);
+       action_type = lttng_action_get_type(action);
+       if (action_type == LTTNG_ACTION_TYPE_NOTIFY) {
+               is_notify = true;
+               goto end;
+       } else if (action_type != LTTNG_ACTION_TYPE_GROUP) {
+               goto end;
+       }
+
+       action_status = lttng_action_group_get_count(action, &count);
+       assert(action_status == LTTNG_ACTION_STATUS_OK);
+
+       for (i = 0; i < count; i++) {
+               const struct lttng_action *inner_action =
+                               lttng_action_group_get_at_index(
+                                               action, i);
+
+               action_type = lttng_action_get_type(inner_action);
+               if (action_type == LTTNG_ACTION_TYPE_NOTIFY) {
+                       is_notify = true;
+                       goto end;
+               }
+       }
+
+end:
+       return is_notify;
+}
+
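
A trigger only needs a client list if at least one of its actions can notify a client, which is presumably what this helper is used to decide. A hypothetical guard in the registration path could look like the following; whether the real code skips the allocation this way is an assumption:

	/* Hypothetical: only notify-capable triggers need a client list. */
	if (is_trigger_action_notify(trigger)) {
		client_list = notification_client_list_create(trigger);
		if (!client_list) {
			ret = -1;
			goto error;
		}

		publish_notification_client_list(state, client_list);
	}
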
+static bool trigger_name_taken(struct notification_thread_state *state,
+               const struct lttng_trigger *trigger)
+{
+       struct cds_lfht_iter iter;
+
+       /*
+        * Duplicates are not allowed in the triggers_by_name_uid_ht.
+        * The match is done against the trigger name and uid.
+        */
+       cds_lfht_lookup(state->triggers_by_name_uid_ht,
+                       hash_trigger_by_name_uid(trigger),
+                       match_trigger_by_name_uid,
+                       trigger,
+                       &iter);
+       return !!cds_lfht_iter_get_node(&iter);
+}
+
+static
+enum lttng_error_code generate_trigger_name(
+               struct notification_thread_state *state,
+               struct lttng_trigger *trigger, const char **name)
+{
+       enum lttng_error_code ret_code = LTTNG_OK;
+       bool taken = false;
+       enum lttng_trigger_status status;
+
+       do {
+               const int ret = lttng_trigger_generate_name(trigger,
+                               state->trigger_id.name_offset++);
+               if (ret) {
+                       /* Running out of memory is the only reason this can fail right now. */
+                       ret_code = LTTNG_ERR_NOMEM;
+                       break;
+               }
+
+               status = lttng_trigger_get_name(trigger, name);
+               assert(status == LTTNG_TRIGGER_STATUS_OK);
+
+               taken = trigger_name_taken(state, trigger);
+       } while (taken || state->trigger_id.name_offset == UINT64_MAX);
+
+       return ret_code;
+}
+
 /*
- * FIXME A client's credentials are not checked when registering a trigger, nor
- *       are they stored alongside with the trigger.
+ * FIXME A client's credentials are not checked when registering a trigger.
  *
  * The effects of this are benign since:
  *     - The client will succeed in registering the trigger, as it is valid,
@@ -1982,25 +2455,50 @@ int handle_notification_thread_command_register_trigger(
        struct notification_client *client;
        struct notification_client_list *client_list = NULL;
        struct lttng_trigger_ht_element *trigger_ht_element = NULL;
-       struct notification_client_list_element *client_list_element, *tmp;
+       struct notification_client_list_element *client_list_element;
+       struct notification_trigger_tokens_ht_element *trigger_tokens_ht_element = NULL;
        struct cds_lfht_node *node;
        struct cds_lfht_iter iter;
+       const char* trigger_name;
        bool free_trigger = true;
+       struct lttng_evaluation *evaluation = NULL;
+       struct lttng_credentials object_creds;
+       uid_t object_uid;
+       gid_t object_gid;
+       enum action_executor_status executor_status;
+       const uint64_t trigger_tracer_token =
+                       state->trigger_id.next_tracer_token++;
 
        rcu_read_lock();
 
+       /* Set the trigger's tracer token. */
+       lttng_trigger_set_tracer_token(trigger, trigger_tracer_token);
+
+       if (lttng_trigger_get_name(trigger, &trigger_name) ==
+                       LTTNG_TRIGGER_STATUS_UNSET) {
+               const enum lttng_error_code ret_code = generate_trigger_name(
+                               state, trigger, &trigger_name);
+
+               if (ret_code != LTTNG_OK) {
+                       /* Fatal error. */
+                       ret = -1;
+                       *cmd_result = ret_code;
+                       goto error;
+               }
+       } else if (trigger_name_taken(state, trigger)) {
+               /* Not a fatal error. */
+               *cmd_result = LTTNG_ERR_TRIGGER_EXISTS;
+               ret = 0;
+               goto error;
+       }
+
        condition = lttng_trigger_get_condition(trigger);
        assert(condition);
 
-       ret = condition_is_supported(condition);
-       if (ret < 0) {
-               goto error;
-       } else if (ret == 0) {
+       /* Some conditions require tracers to implement a minimal ABI version. */
+       if (!condition_is_supported(condition)) {
                *cmd_result = LTTNG_ERR_NOT_SUPPORTED;
                goto error;
-       } else {
-               /* Feature is supported, continue. */
-               ret = 0;
        }
 
        trigger_ht_element = zmalloc(sizeof(*trigger_ht_element));
@@ -2011,12 +2509,13 @@ int handle_notification_thread_command_register_trigger(
 
        /* Add trigger to the trigger_ht. */
        cds_lfht_node_init(&trigger_ht_element->node);
+       cds_lfht_node_init(&trigger_ht_element->node_by_name_uid);
        trigger_ht_element->trigger = trigger;
 
        node = cds_lfht_add_unique(state->triggers_ht,
                        lttng_condition_hash(condition),
-                       match_condition,
-                       condition,
+                       match_trigger,
+                       trigger,
                        &trigger_ht_element->node);
        if (node != &trigger_ht_element->node) {
                /* Not a fatal error, simply report it to the client. */
@@ -2024,10 +2523,59 @@ int handle_notification_thread_command_register_trigger(
                goto error_free_ht_element;
        }
 
+       node = cds_lfht_add_unique(state->triggers_by_name_uid_ht,
+                       hash_trigger_by_name_uid(trigger),
+                       match_trigger_by_name_uid,
+                       trigger,
+                       &trigger_ht_element->node_by_name_uid);
+       if (node != &trigger_ht_element->node_by_name_uid) {
+               /* Not a fatal error, simply report it to the client. */
+               cds_lfht_del(state->triggers_ht, &trigger_ht_element->node);
+               *cmd_result = LTTNG_ERR_TRIGGER_EXISTS;
+               goto error_free_ht_element;
+       }
+
+       if (lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_EVENT_RULE_HIT) {
+               trigger_tokens_ht_element = zmalloc(sizeof(*trigger_tokens_ht_element));
+               if (!trigger_tokens_ht_element) {
+                       /* Fatal error. */
+                       ret = -1;
+                       cds_lfht_del(state->triggers_ht,
+                                       &trigger_ht_element->node);
+                       cds_lfht_del(state->triggers_by_name_uid_ht,
+                                       &trigger_ht_element->node_by_name_uid);
+                       goto error_free_ht_element;
+               }
+
+               /* Add trigger token to the trigger_tokens_ht. */
+               cds_lfht_node_init(&trigger_tokens_ht_element->node);
+               trigger_tokens_ht_element->token =
+                               LTTNG_OPTIONAL_GET(trigger->tracer_token);
+               trigger_tokens_ht_element->trigger = trigger;
+
+               node = cds_lfht_add_unique(state->trigger_tokens_ht,
+                               hash_key_u64(&trigger_tokens_ht_element->token,
+                                               lttng_ht_seed),
+                               match_trigger_token,
+                               &trigger_tokens_ht_element->token,
+                               &trigger_tokens_ht_element->node);
+               if (node != &trigger_tokens_ht_element->node) {
+                       /* Internal corruption, fatal error. */
+                       ret = -1;
+                       *cmd_result = LTTNG_ERR_TRIGGER_EXISTS;
+                       cds_lfht_del(state->triggers_ht,
+                                       &trigger_ht_element->node);
+                       cds_lfht_del(state->triggers_by_name_uid_ht,
+                                       &trigger_ht_element->node_by_name_uid);
+                       goto error_free_ht_element;
+               }
+       }
+
        /*
         * Ownership of the trigger and of its wrapper was transferred to
-        * the triggers_ht.
+        * the triggers_ht. The same applies to the token ht element, when present.
         */
+       trigger_tokens_ht_element = NULL;
        trigger_ht_element = NULL;
        free_trigger = false;
 
@@ -2036,42 +2584,46 @@ int handle_notification_thread_command_register_trigger(
         * It is not skipped as this is the only action type currently
         * supported.
         */
-       client_list = zmalloc(sizeof(*client_list));
-       if (!client_list) {
-               ret = -1;
-               goto error_free_ht_element;
-       }
-       cds_lfht_node_init(&client_list->notification_trigger_ht_node);
-       CDS_INIT_LIST_HEAD(&client_list->list);
-       client_list->trigger = trigger;
-
-       /* Build a list of clients to which this new trigger applies. */
-       cds_lfht_for_each_entry(state->client_socket_ht, &iter, client,
-                       client_socket_ht_node) {
-               if (!trigger_applies_to_client(trigger, client)) {
-                       continue;
+       if (is_trigger_action_notify(trigger)) {
+               client_list = notification_client_list_create(trigger);
+               if (!client_list) {
+                       ret = -1;
+                       goto error_free_ht_element;
                }
 
-               client_list_element = zmalloc(sizeof(*client_list_element));
-               if (!client_list_element) {
-                       ret = -1;
-                       goto error_free_client_list;
+               /* Build a list of clients to which this new trigger applies. */
+               cds_lfht_for_each_entry (state->client_socket_ht, &iter, client,
+                               client_socket_ht_node) {
+                       if (!trigger_applies_to_client(trigger, client)) {
+                               continue;
+                       }
+
+                       client_list_element =
+                                       zmalloc(sizeof(*client_list_element));
+                       if (!client_list_element) {
+                               ret = -1;
+                               goto error_put_client_list;
+                       }
+
+                       CDS_INIT_LIST_HEAD(&client_list_element->node);
+                       client_list_element->client = client;
+                       cds_list_add(&client_list_element->node,
+                                       &client_list->list);
                }
-               CDS_INIT_LIST_HEAD(&client_list_element->node);
-               client_list_element->client = client;
-               cds_list_add(&client_list_element->node, &client_list->list);
-       }
 
-       cds_lfht_add(state->notification_trigger_clients_ht,
-                       lttng_condition_hash(condition),
-                       &client_list->notification_trigger_ht_node);
+               /*
+                * Client list ownership transferred to the
+                * notification_trigger_clients_ht.
+                */
+               publish_notification_client_list(state, client_list);
+       }
 
        switch (get_condition_binding_object(condition)) {
        case LTTNG_OBJECT_TYPE_SESSION:
                /* Add the trigger to the list if it matches a known session. */
                ret = bind_trigger_to_matching_session(trigger, state);
                if (ret) {
-                       goto error_free_client_list;
+                       goto error_put_client_list;
                }
                break;
        case LTTNG_OBJECT_TYPE_CHANNEL:
@@ -2081,21 +2633,24 @@ int handle_notification_thread_command_register_trigger(
                 */
                ret = bind_trigger_to_matching_channels(trigger, state);
                if (ret) {
-                       goto error_free_client_list;
+                       goto error_put_client_list;
                }
                break;
        case LTTNG_OBJECT_TYPE_NONE:
                break;
        default:
-               ERR("[notification-thread] Unknown object type on which to bind a newly registered trigger was encountered");
+               ERR("Unknown object type on which to bind a newly registered trigger was encountered");
                ret = -1;
-               goto error_free_client_list;
+               goto error_put_client_list;
        }
 
        /*
-        * Since there is nothing preventing clients from subscribing to a
-        * condition before the corresponding trigger is registered, we have
-        * to evaluate this new condition right away.
+        * The new trigger's condition must be evaluated against the current
+        * state.
+        *
+        * In the case of a `notify` action, since nothing prevents clients
+        * from subscribing to a condition before the corresponding trigger is
+        * registered, we have to evaluate this new condition right away.
         *
         * At some point, we were waiting for the next "evaluation" (e.g. on
         * reception of a channel sample) to evaluate this new condition, but
@@ -2118,32 +2673,92 @@ int handle_notification_thread_command_register_trigger(
         * that the evaluations remain the same (true for samples n-1 and n) and
         * the client will never know that the condition has been met.
         */
-       cds_list_for_each_entry_safe(client_list_element, tmp,
-                       &client_list->list, node) {
-               ret = evaluate_condition_for_client(trigger, condition,
-                               client_list_element->client, state);
-               if (ret) {
-                       goto error_free_client_list;
-               }
-       }
-
-       /*
-        * Client list ownership transferred to the
-        * notification_trigger_clients_ht.
-        */
-       client_list = NULL;
-
-       *cmd_result = LTTNG_OK;
-error_free_client_list:
-       if (client_list) {
-               cds_list_for_each_entry_safe(client_list_element, tmp,
-                               &client_list->list, node) {
-                       free(client_list_element);
-               }
-               free(client_list);
+       switch (get_condition_binding_object(condition)) {
+       case LTTNG_OBJECT_TYPE_SESSION:
+               ret = evaluate_session_condition_for_client(condition, state,
+                               &evaluation, &object_uid,
+                               &object_gid);
+               LTTNG_OPTIONAL_SET(&object_creds.uid, object_uid);
+               LTTNG_OPTIONAL_SET(&object_creds.gid, object_gid);
+               break;
+       case LTTNG_OBJECT_TYPE_CHANNEL:
+               ret = evaluate_channel_condition_for_client(condition, state,
+                               &evaluation, &object_uid,
+                               &object_gid);
+               LTTNG_OPTIONAL_SET(&object_creds.uid, object_uid);
+               LTTNG_OPTIONAL_SET(&object_creds.gid, object_gid);
+               break;
+       case LTTNG_OBJECT_TYPE_NONE:
+               ret = 0;
+               break;
+       case LTTNG_OBJECT_TYPE_UNKNOWN:
+       default:
+               ret = -1;
+               break;
+       }
+
+       if (ret) {
+               /* Fatal error. */
+               goto error_put_client_list;
+       }
+
+       DBG("Newly registered trigger's condition evaluated to %s",
+                       evaluation ? "true" : "false");
+       if (!evaluation) {
+               /* Evaluation yielded nothing. Normal exit. */
+               ret = 0;
+               goto end;
        }
+
+       /*
+        * Ownership of `evaluation` transferred to the action executor
+        * no matter the result.
+        */
+       executor_status = action_executor_enqueue(state->executor, trigger,
+                       evaluation, &object_creds, client_list);
+       evaluation = NULL;
+       switch (executor_status) {
+       case ACTION_EXECUTOR_STATUS_OK:
+               break;
+       case ACTION_EXECUTOR_STATUS_ERROR:
+       case ACTION_EXECUTOR_STATUS_INVALID:
+               /*
+                * TODO Add trigger identification (name/id) when
+                * it is added to the API.
+                */
+               ERR("Fatal error occurred while enqueuing action associated to newly registered trigger");
+               ret = -1;
+               goto error_put_client_list;
+       case ACTION_EXECUTOR_STATUS_OVERFLOW:
+               /*
+                * TODO Add trigger identification (name/id) when
+                * it is added to the API.
+                *
+                * Not a fatal error.
+                */
+               WARN("No space left when enqueuing action associated to newly registered trigger");
+               ret = 0;
+               goto end;
+       default:
+               abort();
+       }
+
+end:
+       *cmd_result = LTTNG_OK;
+       DBG("Registered trigger: name = `%s`, tracer token = %" PRIu64,
+                       trigger_name, trigger_tracer_token);
+
+error_put_client_list:
+       notification_client_list_put(client_list);
+
 error_free_ht_element:
-       free(trigger_ht_element);
+       if (trigger_ht_element) {
+               /* Delayed removal due to RCU constraint on delete. */
+               call_rcu(&trigger_ht_element->rcu_node,
+                               free_lttng_trigger_ht_element_rcu);
+       }
+
+       free(trigger_tokens_ht_element);
 error:
        if (free_trigger) {
                lttng_trigger_destroy(trigger);
@@ -2153,32 +2768,31 @@ error:
 }
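
The hunk above is the core of this change: every `enum action_executor_status` returned by `action_executor_enqueue()` now has an explicit outcome. As a rough illustration, the mapping could be factored out as below; this is only a sketch that assumes the status values visible in this patch (`action-executor.h`) and the public `lttng_error_code` values, and the helper name is hypothetical.

#include <stdlib.h>

static int handle_enqueue_status(enum action_executor_status status,
		enum lttng_error_code *cmd_result)
{
	int ret;

	switch (status) {
	case ACTION_EXECUTOR_STATUS_OK:
		/* Evaluation successfully enqueued. */
		*cmd_result = LTTNG_OK;
		ret = 0;
		break;
	case ACTION_EXECUTOR_STATUS_OVERFLOW:
		/* Work queue is full; the evaluation is dropped, not fatal. */
		*cmd_result = LTTNG_OK;
		ret = 0;
		break;
	case ACTION_EXECUTOR_STATUS_ERROR:
	case ACTION_EXECUTOR_STATUS_INVALID:
		/* Fatal for the notification thread; cmd_result is left as-is. */
		ret = -1;
		break;
	default:
		abort();
	}

	return ret;
}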
 
 static
-void free_notification_client_list_rcu(struct rcu_head *node)
+void free_lttng_trigger_ht_element_rcu(struct rcu_head *node)
 {
-       free(caa_container_of(node, struct notification_client_list,
+       free(caa_container_of(node, struct lttng_trigger_ht_element,
                        rcu_node));
 }
 
 static
-void free_lttng_trigger_ht_element_rcu(struct rcu_head *node)
+void free_notification_trigger_tokens_ht_element_rcu(struct rcu_head *node)
 {
-       free(caa_container_of(node, struct lttng_trigger_ht_element,
+       free(caa_container_of(node, struct notification_trigger_tokens_ht_element,
                        rcu_node));
 }
 
 static
 int handle_notification_thread_command_unregister_trigger(
                struct notification_thread_state *state,
-               struct lttng_trigger *trigger,
+               const struct lttng_trigger *trigger,
                enum lttng_error_code *_cmd_reply)
 {
        struct cds_lfht_iter iter;
        struct cds_lfht_node *triggers_ht_node;
        struct lttng_channel_trigger_list *trigger_list;
        struct notification_client_list *client_list;
-       struct notification_client_list_element *client_list_element, *tmp;
        struct lttng_trigger_ht_element *trigger_ht_element = NULL;
-       struct lttng_condition *condition = lttng_trigger_get_condition(
+       const struct lttng_condition *condition = lttng_trigger_get_const_condition(
                        trigger);
        enum lttng_error_code cmd_reply;
 
@@ -2186,8 +2800,8 @@ int handle_notification_thread_command_unregister_trigger(
 
        cds_lfht_lookup(state->triggers_ht,
                        lttng_condition_hash(condition),
-                       match_condition,
-                       condition,
+                       match_trigger,
+                       trigger,
                        &iter);
        triggers_ht_node = cds_lfht_iter_get_node(&iter);
        if (!triggers_ht_node) {
@@ -2204,13 +2818,7 @@ int handle_notification_thread_command_unregister_trigger(
 
                cds_list_for_each_entry_safe(trigger_element, tmp,
                                &trigger_list->list, node) {
-                       const struct lttng_condition *current_condition =
-                                       lttng_trigger_get_const_condition(
-                                               trigger_element->trigger);
-
-                       assert(current_condition);
-                       if (!lttng_condition_is_equal(condition,
-                                       current_condition)) {
+                       if (!lttng_trigger_is_equal(trigger, trigger_element->trigger)) {
                                continue;
                        }
 
@@ -2221,24 +2829,46 @@ int handle_notification_thread_command_unregister_trigger(
                }
        }
 
-       /*
-        * Remove and release the client list from
-        * notification_trigger_clients_ht.
-        */
-       client_list = get_client_list_from_condition(state, condition);
-       assert(client_list);
+       if (lttng_condition_get_type(condition) ==
+                       LTTNG_CONDITION_TYPE_EVENT_RULE_HIT) {
+               struct notification_trigger_tokens_ht_element
+                               *trigger_tokens_ht_element;
 
-       cds_list_for_each_entry_safe(client_list_element, tmp,
-                       &client_list->list, node) {
-               free(client_list_element);
+               cds_lfht_for_each_entry (state->trigger_tokens_ht, &iter,
+                               trigger_tokens_ht_element, node) {
+                       if (!lttng_trigger_is_equal(trigger,
+                                           trigger_tokens_ht_element->trigger)) {
+                               continue;
+                       }
+
+                       DBG("[notification-thread] Removed trigger from tokens_ht");
+                       cds_lfht_del(state->trigger_tokens_ht,
+                                       &trigger_tokens_ht_element->node);
+                       call_rcu(&trigger_tokens_ht_element->rcu_node,
+                                       free_notification_trigger_tokens_ht_element_rcu);
+
+                       break;
+               }
+       }
+
+       if (is_trigger_action_notify(trigger)) {
+               /*
+                * Remove and release the client list from
+                * notification_trigger_clients_ht.
+                */
+               client_list = get_client_list_from_condition(state, condition);
+               assert(client_list);
+
+               /* Put new reference and the hashtable's reference. */
+               notification_client_list_put(client_list);
+               notification_client_list_put(client_list);
+               client_list = NULL;
        }
-       cds_lfht_del(state->notification_trigger_clients_ht,
-                       &client_list->notification_trigger_ht_node);
-       call_rcu(&client_list->rcu_node, free_notification_client_list_rcu);
 
        /* Remove trigger from triggers_ht. */
        trigger_ht_element = caa_container_of(triggers_ht_node,
                        struct lttng_trigger_ht_element, node);
+       cds_lfht_del(state->triggers_by_name_uid_ht, &trigger_ht_element->node_by_name_uid);
        cds_lfht_del(state->triggers_ht, triggers_ht_node);
 
        /* Release the ownership of the trigger. */
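
On the `notify` path above, `get_client_list_from_condition()` returns a new reference while the hash table keeps its own, hence the two back-to-back `notification_client_list_put()` calls. A tiny sketch of that double-put, using liburcu's `urcu_ref` as a stand-in for the client list's actual reference counting (the types and helper names are hypothetical):

#include <stdlib.h>
#include <urcu/compiler.h>
#include <urcu/ref.h>

struct refcounted_list {
	struct urcu_ref ref;
	/* ... list contents ... */
};

static void release_list(struct urcu_ref *ref)
{
	free(caa_container_of(ref, struct refcounted_list, ref));
}

static void unpublish_list(struct refcounted_list *list)
{
	/* Drop the reference returned by the lookup... */
	urcu_ref_put(&list->ref, release_list);
	/* ...and the reference owned by the hash table itself. */
	urcu_ref_put(&list->ref, release_list);
}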
@@ -2271,21 +2901,24 @@ int handle_notification_thread_command(
        pthread_mutex_lock(&handle->cmd_queue.lock);
        cmd = cds_list_first_entry(&handle->cmd_queue.list,
                        struct notification_thread_command, cmd_list_node);
+       cds_list_del(&cmd->cmd_list_node);
+       pthread_mutex_unlock(&handle->cmd_queue.lock);
+
+       DBG("[notification-thread] Received `%s` command",
+                       notification_command_type_str(cmd->type));
        switch (cmd->type) {
        case NOTIFICATION_COMMAND_TYPE_REGISTER_TRIGGER:
-               DBG("[notification-thread] Received register trigger command");
-               ret = handle_notification_thread_command_register_trigger(
-                               state, cmd->parameters.trigger,
+               ret = handle_notification_thread_command_register_trigger(state,
+                               cmd->parameters.register_trigger.trigger,
                                &cmd->reply_code);
                break;
        case NOTIFICATION_COMMAND_TYPE_UNREGISTER_TRIGGER:
-               DBG("[notification-thread] Received unregister trigger command");
                ret = handle_notification_thread_command_unregister_trigger(
-                               state, cmd->parameters.trigger,
+                               state,
+                               cmd->parameters.unregister_trigger.trigger,
                                &cmd->reply_code);
                break;
        case NOTIFICATION_COMMAND_TYPE_ADD_CHANNEL:
-               DBG("[notification-thread] Received add channel command");
                ret = handle_notification_thread_command_add_channel(
                                state,
                                cmd->parameters.add_channel.session.name,
@@ -2298,7 +2931,6 @@ int handle_notification_thread_command(
                                &cmd->reply_code);
                break;
        case NOTIFICATION_COMMAND_TYPE_REMOVE_CHANNEL:
-               DBG("[notification-thread] Received remove channel command");
                ret = handle_notification_thread_command_remove_channel(
                                state, cmd->parameters.remove_channel.key,
                                cmd->parameters.remove_channel.domain,
@@ -2306,9 +2938,6 @@ int handle_notification_thread_command(
                break;
        case NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING:
        case NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_COMPLETED:
-               DBG("[notification-thread] Received session rotation %s command",
-                               cmd->type == NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING ?
-                               "ongoing" : "completed");
                ret = handle_notification_thread_command_session_rotation(
                                state,
                                cmd->type,
@@ -2319,11 +2948,65 @@ int handle_notification_thread_command(
                                cmd->parameters.session_rotation.location,
                                &cmd->reply_code);
                break;
+       case NOTIFICATION_COMMAND_TYPE_ADD_TRACER_EVENT_SOURCE:
+               ret = handle_notification_thread_command_add_tracer_event_source(
+                               state,
+                               cmd->parameters.tracer_event_source.tracer_event_source_fd,
+                               cmd->parameters.tracer_event_source.domain,
+                               &cmd->reply_code);
+               break;
+       case NOTIFICATION_COMMAND_TYPE_REMOVE_TRACER_EVENT_SOURCE:
+               ret = handle_notification_thread_command_remove_tracer_event_source(
+                               state,
+                               cmd->parameters.tracer_event_source.tracer_event_source_fd,
+                               &cmd->reply_code);
+               break;
+       case NOTIFICATION_COMMAND_TYPE_LIST_TRIGGERS:
+       {
+               struct lttng_triggers *triggers = NULL;
+
+               ret = handle_notification_thread_command_list_triggers(
+                               handle,
+                               state,
+                               cmd->parameters.list_triggers.uid,
+                               &triggers,
+                               &cmd->reply_code);
+               cmd->reply.list_triggers.triggers = triggers;
+               ret = 0;
+               break;
+       }
        case NOTIFICATION_COMMAND_TYPE_QUIT:
-               DBG("[notification-thread] Received quit command");
                cmd->reply_code = LTTNG_OK;
                ret = 1;
                goto end;
+       case NOTIFICATION_COMMAND_TYPE_CLIENT_COMMUNICATION_UPDATE:
+       {
+               const enum client_transmission_status client_status =
+                               cmd->parameters.client_communication_update
+                                               .status;
+               const notification_client_id client_id =
+                               cmd->parameters.client_communication_update.id;
+               struct notification_client *client;
+
+               rcu_read_lock();
+               client = get_client_from_id(client_id, state);
+
+               if (!client) {
+                       /*
+                        * The client's error was probably already picked up
+                        * by the notification thread, or the client
+                        * disconnected gracefully while this command was
+                        * queued.
+                        */
+                       DBG("Failed to find notification client to update communication status, client id = %" PRIu64,
+                                       client_id);
+                       ret = 0;
+               } else {
+                       ret = client_handle_transmission_status(
+                                       client, client_status, state);
+               }
+               rcu_read_unlock();
+               break;
+       }
        default:
                ERR("[notification-thread] Unknown internal command received");
                goto error_unlock;
@@ -2333,26 +3016,22 @@ int handle_notification_thread_command(
                goto error_unlock;
        }
 end:
-       cds_list_del(&cmd->cmd_list_node);
-       lttng_waiter_wake_up(&cmd->reply_waiter);
-       pthread_mutex_unlock(&handle->cmd_queue.lock);
+       if (cmd->is_async) {
+               free(cmd);
+               cmd = NULL;
+       } else {
+               lttng_waiter_wake_up(&cmd->reply_waiter);
+       }
        return ret;
 error_unlock:
        /* Wake-up and return a fatal error to the calling thread. */
        lttng_waiter_wake_up(&cmd->reply_waiter);
-       pthread_mutex_unlock(&handle->cmd_queue.lock);
        cmd->reply_code = LTTNG_ERR_FATAL;
 error:
        /* Indicate a fatal error to the caller. */
        return -1;
 }
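
Note that the command is now removed from the queue, and the queue lock dropped, before it is handled; completion then depends on `cmd->is_async`. A self-contained sketch of that consume-then-complete pattern follows; the `command` and `command_queue` types are hypothetical stand-ins, and a condition variable replaces the `lttng_waiter` used by the real code.

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <urcu/list.h>

struct command {
	struct cds_list_head node;
	bool is_async;
	pthread_mutex_t reply_lock;
	pthread_cond_t reply_cond;
	bool replied;
};

struct command_queue {
	pthread_mutex_t lock;
	struct cds_list_head list;
};

static void handle_one_command(struct command_queue *queue)
{
	struct command *cmd;

	/* Dequeue under the queue lock, then release it. */
	pthread_mutex_lock(&queue->lock);
	cmd = cds_list_first_entry(&queue->list, struct command, node);
	cds_list_del(&cmd->node);
	pthread_mutex_unlock(&queue->lock);

	/* ... execute the command without holding the queue lock ... */

	if (cmd->is_async) {
		/* Nobody waits for a reply; the handler owns the command. */
		free(cmd);
	} else {
		/* Wake up the caller blocked on the reply. */
		pthread_mutex_lock(&cmd->reply_lock);
		cmd->replied = true;
		pthread_cond_signal(&cmd->reply_cond);
		pthread_mutex_unlock(&cmd->reply_lock);
	}
}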
 
-static
-unsigned long hash_client_socket(int socket)
-{
-       return hash_key_ulong((void *) (unsigned long) socket, lttng_ht_seed);
-}
-
 static
 int socket_set_non_blocking(int socket)
 {
@@ -2381,9 +3060,8 @@ int client_reset_inbound_state(struct notification_client *client)
 {
        int ret;
 
-       ret = lttng_dynamic_buffer_set_size(
-                       &client->communication.inbound.buffer, 0);
-       assert(!ret);
+
+       lttng_payload_clear(&client->communication.inbound.payload);
 
        client->communication.inbound.bytes_to_receive =
                        sizeof(struct lttng_notification_channel_message);
@@ -2392,8 +3070,9 @@ int client_reset_inbound_state(struct notification_client *client)
        LTTNG_SOCK_SET_UID_CRED(&client->communication.inbound.creds, -1);
        LTTNG_SOCK_SET_GID_CRED(&client->communication.inbound.creds, -1);
        ret = lttng_dynamic_buffer_set_size(
-                       &client->communication.inbound.buffer,
+                       &client->communication.inbound.payload.buffer,
                        client->communication.inbound.bytes_to_receive);
+
        return ret;
 }
 
@@ -2411,10 +3090,14 @@ int handle_notification_thread_client_connect(
                ret = -1;
                goto error;
        }
+
+       pthread_mutex_init(&client->lock, NULL);
+       client->id = state->next_notification_client_id++;
        CDS_INIT_LIST_HEAD(&client->condition_list);
-       lttng_dynamic_buffer_init(&client->communication.inbound.buffer);
-       lttng_dynamic_buffer_init(&client->communication.outbound.buffer);
+       lttng_payload_init(&client->communication.inbound.payload);
+       lttng_payload_init(&client->communication.outbound.payload);
        client->communication.inbound.expect_creds = true;
+
        ret = client_reset_inbound_state(client);
        if (ret) {
                ERR("[notification-thread] Failed to reset client communication's inbound state");
@@ -2459,17 +3142,60 @@ int handle_notification_thread_client_connect(
        cds_lfht_add(state->client_socket_ht,
                        hash_client_socket(client->socket),
                        &client->client_socket_ht_node);
+       cds_lfht_add(state->client_id_ht,
+                       hash_client_id(client->id),
+                       &client->client_id_ht_node);
        rcu_read_unlock();
 
        return ret;
+
 error:
        notification_client_destroy(client, state);
        return ret;
 }
 
-int handle_notification_thread_client_disconnect(
-               int client_socket,
+/*
+ * RCU read-lock must be held by the caller.
+ * Client lock must _not_ be held by the caller.
+ */
+static
+int notification_thread_client_disconnect(
+               struct notification_client *client,
                struct notification_thread_state *state)
+{
+       int ret;
+       struct lttng_condition_list_element *condition_list_element, *tmp;
+
+       /* Acquire the client lock to disable its communication atomically. */
+       pthread_mutex_lock(&client->lock);
+       client->communication.active = false;
+       cds_lfht_del(state->client_socket_ht, &client->client_socket_ht_node);
+       cds_lfht_del(state->client_id_ht, &client->client_id_ht_node);
+       pthread_mutex_unlock(&client->lock);
+
+       ret = lttng_poll_del(&state->events, client->socket);
+       if (ret) {
+               ERR("[notification-thread] Failed to remove client socket %d from poll set",
+                               client->socket);
+       }
+
+       /* Release all conditions to which the client was subscribed. */
+       cds_list_for_each_entry_safe(condition_list_element, tmp,
+                       &client->condition_list, node) {
+               (void) notification_thread_client_unsubscribe(client,
+                               condition_list_element->condition, state, NULL);
+       }
+
+       /*
+        * Client no longer accessible to other threads (through the
+        * client lists).
+        */
+       notification_client_destroy(client, state);
+       return ret;
+}
+
+int handle_notification_thread_client_disconnect(
+               int client_socket, struct notification_thread_state *state)
 {
        int ret = 0;
        struct notification_client *client;
@@ -2486,13 +3212,7 @@ int handle_notification_thread_client_disconnect(
                goto end;
        }
 
-       ret = lttng_poll_del(&state->events, client_socket);
-       if (ret) {
-               ERR("[notification-thread] Failed to remove client socket from poll set");
-       }
-       cds_lfht_del(state->client_socket_ht,
-                       &client->client_socket_ht_node);
-       notification_client_destroy(client, state);
+       ret = notification_thread_client_disconnect(client, state);
 end:
        rcu_read_unlock();
        return ret;
@@ -2508,11 +3228,11 @@ int handle_notification_thread_client_disconnect_all(
        rcu_read_lock();
        DBG("[notification-thread] Closing all client connections");
        cds_lfht_for_each_entry(state->client_socket_ht, &iter, client,
-               client_socket_ht_node) {
+                       client_socket_ht_node) {
                int ret;
 
-               ret = handle_notification_thread_client_disconnect(
-                               client->socket, state);
+               ret = notification_thread_client_disconnect(
+                               client, state);
                if (ret) {
                        error_encoutered = true;
                }
@@ -2542,76 +3262,185 @@ int handle_notification_thread_trigger_unregister_all(
 }
 
 static
-int client_flush_outgoing_queue(struct notification_client *client,
+int client_handle_transmission_status(
+               struct notification_client *client,
+               enum client_transmission_status transmission_status,
                struct notification_thread_state *state)
+{
+       int ret = 0;
+
+       switch (transmission_status) {
+       case CLIENT_TRANSMISSION_STATUS_COMPLETE:
+               ret = lttng_poll_mod(&state->events, client->socket,
+                               CLIENT_POLL_MASK_IN);
+               if (ret) {
+                       goto end;
+               }
+
+               break;
+       case CLIENT_TRANSMISSION_STATUS_QUEUED:
+               /*
+                * We want to be notified whenever there is buffer space
+                * available to send the rest of the payload.
+                */
+               ret = lttng_poll_mod(&state->events, client->socket,
+                               CLIENT_POLL_MASK_IN_OUT);
+               if (ret) {
+                       goto end;
+               }
+               break;
+       case CLIENT_TRANSMISSION_STATUS_FAIL:
+               ret = notification_thread_client_disconnect(client, state);
+               if (ret) {
+                       goto end;
+               }
+               break;
+       case CLIENT_TRANSMISSION_STATUS_ERROR:
+               ret = -1;
+               goto end;
+       default:
+               abort();
+       }
+end:
+       return ret;
+}
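
`client_flush_outgoing_queue()` now requires the client lock and only reports a transmission status; acting on that status (poll-mask update or disconnection) is left to `client_handle_transmission_status()` once the lock is released, as `client_send_command_reply()` does below. A sketch of a caller following that convention, assuming the static helpers and types of this file (`queue_and_flush()` and its parameters are hypothetical):

static int queue_and_flush(struct notification_client *client,
		struct notification_thread_state *state,
		const char *data, size_t size)
{
	int ret;
	enum client_transmission_status transmission_status;

	pthread_mutex_lock(&client->lock);
	ret = lttng_dynamic_buffer_append(
			&client->communication.outbound.payload.buffer,
			data, size);
	if (ret) {
		pthread_mutex_unlock(&client->lock);
		goto end;
	}

	/* Requires the client lock; only reports a status. */
	transmission_status = client_flush_outgoing_queue(client);
	pthread_mutex_unlock(&client->lock);

	/*
	 * Poll-mask updates and disconnections are performed with the
	 * client lock released.
	 */
	ret = client_handle_transmission_status(
			client, transmission_status, state);
end:
	return ret;
}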
+
+/* Client lock must be acquired by caller. */
+static
+enum client_transmission_status client_flush_outgoing_queue(
+               struct notification_client *client)
 {
        ssize_t ret;
        size_t to_send_count;
+       enum client_transmission_status status;
+       struct lttng_payload_view pv = lttng_payload_view_from_payload(
+                       &client->communication.outbound.payload, 0, -1);
+       const int fds_to_send_count =
+                       lttng_payload_view_get_fd_handle_count(&pv);
+
+       ASSERT_LOCKED(client->lock);
+
+       if (!client->communication.active) {
+               status = CLIENT_TRANSMISSION_STATUS_FAIL;
+               goto end;
+       }
 
-       assert(client->communication.outbound.buffer.size != 0);
-       to_send_count = client->communication.outbound.buffer.size;
+       if (pv.buffer.size == 0) {
+               /*
+                * If both data and fds are equal to zero, we are in an invalid
+                * state.
+                */
+               assert(fds_to_send_count != 0);
+               goto send_fds;
+       }
+
+       /* Send data. */
+       to_send_count = pv.buffer.size;
        DBG("[notification-thread] Flushing client (socket fd = %i) outgoing queue",
                        client->socket);
 
        ret = lttcomm_send_unix_sock_non_block(client->socket,
-                       client->communication.outbound.buffer.data,
+                       pv.buffer.data,
                        to_send_count);
        if ((ret >= 0 && ret < to_send_count)) {
                DBG("[notification-thread] Client (socket fd = %i) outgoing queue could not be completely flushed",
                                client->socket);
                to_send_count -= max(ret, 0);
 
-               memcpy(client->communication.outbound.buffer.data,
-                               client->communication.outbound.buffer.data +
-                               client->communication.outbound.buffer.size - to_send_count,
+               memmove(client->communication.outbound.payload.buffer.data,
+                               pv.buffer.data +
+                               pv.buffer.size - to_send_count,
                                to_send_count);
                ret = lttng_dynamic_buffer_set_size(
-                               &client->communication.outbound.buffer,
+                               &client->communication.outbound.payload.buffer,
                                to_send_count);
                if (ret) {
                        goto error;
                }
 
-               /*
-                * We want to be notified whenever there is buffer space
-                * available to send the rest of the payload.
-                */
-               ret = lttng_poll_mod(&state->events, client->socket,
-                               CLIENT_POLL_MASK_IN_OUT);
-               if (ret) {
-                       goto error;
-               }
+               status = CLIENT_TRANSMISSION_STATUS_QUEUED;
+               goto end;
        } else if (ret < 0) {
-               /* Generic error, disconnect the client. */
-               ERR("[notification-thread] Failed to send flush outgoing queue, disconnecting client (socket fd = %i)",
+               /* Generic error, disable the client's communication. */
+               ERR("[notification-thread] Failed to flush outgoing queue, disconnecting client (socket fd = %i)",
                                client->socket);
-               ret = handle_notification_thread_client_disconnect(
-                               client->socket, state);
-               if (ret) {
-                       goto error;
-               }
+               client->communication.active = false;
+               status = CLIENT_TRANSMISSION_STATUS_FAIL;
+               goto end;
        } else {
-               /* No error and flushed the queue completely. */
+               /*
+                * No error and flushed the queue completely.
+                *
+                * The payload buffer size is used later to
+                * check if there are queued notifications. Even though the
+                * direct caller knows that the transmission is complete, we
+                * need to set the buffer size to zero.
+                */
                ret = lttng_dynamic_buffer_set_size(
-                               &client->communication.outbound.buffer, 0);
-               if (ret) {
-                       goto error;
-               }
-               ret = lttng_poll_mod(&state->events, client->socket,
-                               CLIENT_POLL_MASK_IN);
+                               &client->communication.outbound.payload.buffer, 0);
                if (ret) {
                        goto error;
                }
+       }
 
+send_fds:
+       /* No fds to send, transmission is complete. */
+       if (fds_to_send_count == 0) {
+               status = CLIENT_TRANSMISSION_STATUS_COMPLETE;
+               goto end;
+       }
+
+       ret = lttcomm_send_payload_view_fds_unix_sock_non_block(
+                       client->socket, &pv);
+       if (ret < 0) {
+               /* Generic error, disable the client's communication. */
+               ERR("[notification-thread] Failed to flush outgoing fds queue, disconnecting client (socket fd = %i)",
+                               client->socket);
+               client->communication.active = false;
+               status = CLIENT_TRANSMISSION_STATUS_FAIL;
+               goto end;
+       } else if (ret == 0) {
+               /* Nothing could be sent. */
+               status = CLIENT_TRANSMISSION_STATUS_QUEUED;
+       } else {
+               /* Fd passing is all-or-nothing: either all fds are sent or none. */
+               status = CLIENT_TRANSMISSION_STATUS_COMPLETE;
+               /*
+                * The payload _fd_array count is used later to
+                * check if there are queued notifications. Although the
+                * direct caller knows that the transmission is complete, we
+                * need to clear the _fd_array for the queuing check.
+                */
+               lttng_dynamic_pointer_array_clear(
+                               &client->communication.outbound.payload
+                                                ._fd_handles);
+       }
+
+end:
+       if (status == CLIENT_TRANSMISSION_STATUS_COMPLETE) {
                client->communication.outbound.queued_command_reply = false;
                client->communication.outbound.dropped_notification = false;
+               lttng_payload_clear(&client->communication.outbound.payload);
        }
 
-       return 0;
+       return status;
 error:
-       return -1;
+       return CLIENT_TRANSMISSION_STATUS_ERROR;
+}
+
+static
+bool client_has_outbound_data_left(
+               const struct notification_client *client)
+{
+       const struct lttng_payload_view pv = lttng_payload_view_from_payload(
+                       &client->communication.outbound.payload, 0, -1);
+       const bool has_data = pv.buffer.size != 0;
+       const bool has_fds = lttng_payload_view_get_fd_handle_count(&pv);
+
+       return has_data || has_fds;
 }
 
+/* Client lock must _not_ be held by the caller. */
 static
 int client_send_command_reply(struct notification_client *client,
                struct notification_thread_state *state,
@@ -2626,210 +3455,266 @@ int client_send_command_reply(struct notification_client *client,
                .size = sizeof(reply),
        };
        char buffer[sizeof(msg) + sizeof(reply)];
-
-       if (client->communication.outbound.queued_command_reply) {
-               /* Protocol error. */
-               goto error;
-       }
+       enum client_transmission_status transmission_status;
 
        memcpy(buffer, &msg, sizeof(msg));
        memcpy(buffer + sizeof(msg), &reply, sizeof(reply));
        DBG("[notification-thread] Send command reply (%i)", (int) status);
 
+       pthread_mutex_lock(&client->lock);
+       if (client->communication.outbound.queued_command_reply) {
+               /* Protocol error. */
+               goto error_unlock;
+       }
+
        /* Enqueue buffer to outgoing queue and flush it. */
        ret = lttng_dynamic_buffer_append(
-                       &client->communication.outbound.buffer,
+                       &client->communication.outbound.payload.buffer,
                        buffer, sizeof(buffer));
        if (ret) {
-               goto error;
+               goto error_unlock;
        }
 
-       ret = client_flush_outgoing_queue(client, state);
-       if (ret) {
-               goto error;
-       }
+       transmission_status = client_flush_outgoing_queue(client);
 
-       if (client->communication.outbound.buffer.size != 0) {
+       if (client_has_outbound_data_left(client)) {
                /* Queue could not be emptied. */
                client->communication.outbound.queued_command_reply = true;
        }
 
+       pthread_mutex_unlock(&client->lock);
+       ret = client_handle_transmission_status(
+                       client, transmission_status, state);
+       if (ret) {
+               goto error;
+       }
+
        return 0;
+error_unlock:
+       pthread_mutex_unlock(&client->lock);
 error:
        return -1;
 }
 
 static
-int client_dispatch_message(struct notification_client *client,
+int client_handle_message_unknown(struct notification_client *client,
                struct notification_thread_state *state)
 {
-       int ret = 0;
+       int ret;
+       /*
+        * Receiving the message header. This function will be called again
+        * once the rest of the message has been received and can be
+        * interpreted.
+        */
+       const struct lttng_notification_channel_message *msg;
 
-       if (client->communication.inbound.msg_type !=
-                       LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE &&
-                       client->communication.inbound.msg_type !=
-                               LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNKNOWN &&
-                       !client->validated) {
-               WARN("[notification-thread] client attempted a command before handshake");
+       assert(sizeof(*msg) == client->communication.inbound.payload.buffer.size);
+       msg = (const struct lttng_notification_channel_message *)
+                             client->communication.inbound.payload.buffer.data;
+
+       if (msg->size == 0 ||
+                       msg->size > DEFAULT_MAX_NOTIFICATION_CLIENT_MESSAGE_PAYLOAD_SIZE) {
+               ERR("[notification-thread] Invalid notification channel message: length = %u",
+                               msg->size);
                ret = -1;
                goto end;
        }
 
-       switch (client->communication.inbound.msg_type) {
-       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNKNOWN:
-       {
-               /*
-                * Receiving message header. The function will be called again
-                * once the rest of the message as been received and can be
-                * interpreted.
-                */
-               const struct lttng_notification_channel_message *msg;
-
-               assert(sizeof(*msg) ==
-                               client->communication.inbound.buffer.size);
-               msg = (const struct lttng_notification_channel_message *)
-                               client->communication.inbound.buffer.data;
+       switch (msg->type) {
+       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE:
+       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNSUBSCRIBE:
+       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE:
+               break;
+       default:
+               ret = -1;
+               ERR("[notification-thread] Invalid notification channel message: unexpected message type");
+               goto end;
+       }
 
-               if (msg->size == 0 || msg->size > DEFAULT_MAX_NOTIFICATION_CLIENT_MESSAGE_PAYLOAD_SIZE) {
-                       ERR("[notification-thread] Invalid notification channel message: length = %u", msg->size);
-                       ret = -1;
-                       goto end;
-               }
+       client->communication.inbound.bytes_to_receive = msg->size;
+       client->communication.inbound.fds_to_receive = msg->fds;
+       client->communication.inbound.msg_type =
+                       (enum lttng_notification_channel_message_type) msg->type;
+       ret = lttng_dynamic_buffer_set_size(
+                       &client->communication.inbound.payload.buffer, msg->size);
 
-               switch (msg->type) {
-               case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE:
-               case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNSUBSCRIBE:
-               case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE:
-                       break;
-               default:
-                       ret = -1;
-                       ERR("[notification-thread] Invalid notification channel message: unexpected message type");
-                       goto end;
-               }
+       /* msg is not valid anymore due to lttng_dynamic_buffer_set_size. */
+       msg = NULL;
+end:
+       return ret;
+}
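
`client_handle_message_unknown()` implements the header phase of a length-prefixed protocol: the fixed-size header is validated and the reception buffer is resized to `msg->size` so that the next reception pass reads the body. Below is a self-contained sketch of the same two-phase scheme over a plain blocking socket; the header layout is a simplified stand-in for `struct lttng_notification_channel_message`, and the arbitrary 4096-byte cap stands in for DEFAULT_MAX_NOTIFICATION_CLIENT_MESSAGE_PAYLOAD_SIZE.

#include <stdint.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <sys/types.h>

struct msg_header {
	uint32_t type;
	uint32_t size; /* Size of the payload that follows. */
};

/* Returns a malloc'd payload of header->size bytes, or NULL on error. */
static char *receive_message(int sock, struct msg_header *header)
{
	char *payload = NULL;

	/* Phase 1: receive the fixed-size header. */
	if (recv(sock, header, sizeof(*header), MSG_WAITALL) !=
			(ssize_t) sizeof(*header)) {
		goto end;
	}

	/* Validate the announced size before allocating. */
	if (header->size == 0 || header->size > 4096) {
		goto end;
	}

	/* Phase 2: size the reception buffer and receive the body. */
	payload = malloc(header->size);
	if (!payload) {
		goto end;
	}

	if (recv(sock, payload, header->size, MSG_WAITALL) !=
			(ssize_t) header->size) {
		free(payload);
		payload = NULL;
	}
end:
	return payload;
}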
 
-               client->communication.inbound.bytes_to_receive = msg->size;
-               client->communication.inbound.msg_type =
-                               (enum lttng_notification_channel_message_type) msg->type;
-               ret = lttng_dynamic_buffer_set_size(
-                               &client->communication.inbound.buffer, msg->size);
-               if (ret) {
-                       goto end;
-               }
-               break;
-       }
-       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE:
-       {
-               struct lttng_notification_channel_command_handshake *handshake_client;
-               struct lttng_notification_channel_command_handshake handshake_reply = {
+static
+int client_handle_message_handshake(struct notification_client *client,
+               struct notification_thread_state *state)
+{
+       int ret;
+       struct lttng_notification_channel_command_handshake *handshake_client;
+       const struct lttng_notification_channel_command_handshake handshake_reply = {
                        .major = LTTNG_NOTIFICATION_CHANNEL_VERSION_MAJOR,
                        .minor = LTTNG_NOTIFICATION_CHANNEL_VERSION_MINOR,
-               };
-               struct lttng_notification_channel_message msg_header = {
+       };
+       const struct lttng_notification_channel_message msg_header = {
                        .type = LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE,
                        .size = sizeof(handshake_reply),
-               };
-               enum lttng_notification_channel_status status =
-                               LTTNG_NOTIFICATION_CHANNEL_STATUS_OK;
-               char send_buffer[sizeof(msg_header) + sizeof(handshake_reply)];
-
-               memcpy(send_buffer, &msg_header, sizeof(msg_header));
-               memcpy(send_buffer + sizeof(msg_header), &handshake_reply,
-                               sizeof(handshake_reply));
-
-               handshake_client =
-                               (struct lttng_notification_channel_command_handshake *)
-                                       client->communication.inbound.buffer.data;
-               client->major = handshake_client->major;
-               client->minor = handshake_client->minor;
-               if (!client->communication.inbound.creds_received) {
-                       ERR("[notification-thread] No credentials received from client");
-                       ret = -1;
-                       goto end;
-               }
+       };
+       enum lttng_notification_channel_status status =
+                       LTTNG_NOTIFICATION_CHANNEL_STATUS_OK;
+       char send_buffer[sizeof(msg_header) + sizeof(handshake_reply)];
+
+       memcpy(send_buffer, &msg_header, sizeof(msg_header));
+       memcpy(send_buffer + sizeof(msg_header), &handshake_reply,
+                       sizeof(handshake_reply));
+
+       handshake_client =
+                       (struct lttng_notification_channel_command_handshake *)
+                                       client->communication.inbound.payload.buffer
+                                                       .data;
+       client->major = handshake_client->major;
+       client->minor = handshake_client->minor;
+       if (!client->communication.inbound.creds_received) {
+               ERR("[notification-thread] No credentials received from client");
+               ret = -1;
+               goto end;
+       }
 
-               client->uid = LTTNG_SOCK_GET_UID_CRED(
-                               &client->communication.inbound.creds);
-               client->gid = LTTNG_SOCK_GET_GID_CRED(
-                               &client->communication.inbound.creds);
-               DBG("[notification-thread] Received handshake from client (uid = %u, gid = %u) with version %i.%i",
-                               client->uid, client->gid, (int) client->major,
-                               (int) client->minor);
+       client->uid = LTTNG_SOCK_GET_UID_CRED(
+                       &client->communication.inbound.creds);
+       client->gid = LTTNG_SOCK_GET_GID_CRED(
+                       &client->communication.inbound.creds);
+       DBG("[notification-thread] Received handshake from client (uid = %u, gid = %u) with version %i.%i",
+                       client->uid, client->gid, (int) client->major,
+                       (int) client->minor);
 
-               if (handshake_client->major != LTTNG_NOTIFICATION_CHANNEL_VERSION_MAJOR) {
-                       status = LTTNG_NOTIFICATION_CHANNEL_STATUS_UNSUPPORTED_VERSION;
-               }
+       if (handshake_client->major !=
+                       LTTNG_NOTIFICATION_CHANNEL_VERSION_MAJOR) {
+               status = LTTNG_NOTIFICATION_CHANNEL_STATUS_UNSUPPORTED_VERSION;
+       }
 
-               ret = lttng_dynamic_buffer_append(&client->communication.outbound.buffer,
-                               send_buffer, sizeof(send_buffer));
-               if (ret) {
-                       ERR("[notification-thread] Failed to send protocol version to notification channel client");
-                       goto end;
-               }
+       pthread_mutex_lock(&client->lock);
+       /* Outgoing queue will be flushed when the command reply is sent. */
+       ret = lttng_dynamic_buffer_append(
+                       &client->communication.outbound.payload.buffer, send_buffer,
+                       sizeof(send_buffer));
+       if (ret) {
+               ERR("[notification-thread] Failed to send protocol version to notification channel client");
+               goto end_unlock;
+       }
 
-               ret = client_flush_outgoing_queue(client, state);
-               if (ret) {
-                       goto end;
-               }
+       client->validated = true;
+       client->communication.active = true;
+       pthread_mutex_unlock(&client->lock);
 
-               ret = client_send_command_reply(client, state, status);
-               if (ret) {
-                       ERR("[notification-thread] Failed to send reply to notification channel client");
-                       goto end;
-               }
+       /* Set reception state to receive the next message header. */
+       ret = client_reset_inbound_state(client);
+       if (ret) {
+               ERR("[notification-thread] Failed to reset client communication's inbound state");
+               goto end;
+       }
 
-               /* Set reception state to receive the next message header. */
-               ret = client_reset_inbound_state(client);
-               if (ret) {
-                       ERR("[notification-thread] Failed to reset client communication's inbound state");
-                       goto end;
-               }
-               client->validated = true;
-               break;
+       /* Flushes the outgoing queue. */
+       ret = client_send_command_reply(client, state, status);
+       if (ret) {
+               ERR("[notification-thread] Failed to send reply to notification channel client");
+               goto end;
        }
-       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE:
-       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNSUBSCRIBE:
-       {
-               struct lttng_condition *condition;
-               enum lttng_notification_channel_status status =
-                               LTTNG_NOTIFICATION_CHANNEL_STATUS_OK;
-               struct lttng_payload_view condition_view =
-                               lttng_payload_view_from_dynamic_buffer(
-                                       &client->communication.inbound.buffer,
+
+       goto end;
+end_unlock:
+       pthread_mutex_unlock(&client->lock);
+end:
+       return ret;
+}
+
+static
+int client_handle_message_subscription(
+               struct notification_client *client,
+               enum lttng_notification_channel_message_type msg_type,
+               struct notification_thread_state *state)
+{
+       int ret;
+       struct lttng_condition *condition;
+       enum lttng_notification_channel_status status =
+                       LTTNG_NOTIFICATION_CHANNEL_STATUS_OK;
+       struct lttng_payload_view condition_view =
+                       lttng_payload_view_from_payload(
+                                       &client->communication.inbound.payload,
                                        0, -1);
-               size_t expected_condition_size =
-                               client->communication.inbound.buffer.size;
+       size_t expected_condition_size;
 
-               ret = lttng_condition_create_from_payload(&condition_view,
-                               &condition);
-               if (ret != expected_condition_size) {
-                       ERR("[notification-thread] Malformed condition received from client");
-                       goto end;
-               }
+       /*
+        * There is no need to lock the client to sample the inbound state:
+        * the only other thread accessing clients (the action executor) only
+        * uses the outbound state.
+        */
+       expected_condition_size = client->communication.inbound.payload.buffer.size;
+       ret = lttng_condition_create_from_payload(&condition_view, &condition);
+       if (ret != expected_condition_size) {
+               ERR("[notification-thread] Malformed condition received from client");
+               goto end;
+       }
 
-               if (client->communication.inbound.msg_type ==
-                               LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE) {
-                       ret = notification_thread_client_subscribe(client,
-                                       condition, state, &status);
-               } else {
-                       ret = notification_thread_client_unsubscribe(client,
-                                       condition, state, &status);
-               }
-               if (ret) {
-                       goto end;
-               }
+       if (msg_type == LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE) {
+               ret = notification_thread_client_subscribe(
+                               client, condition, state, &status);
+       } else {
+               ret = notification_thread_client_unsubscribe(
+                               client, condition, state, &status);
+       }
 
-               ret = client_send_command_reply(client, state, status);
-               if (ret) {
-                       ERR("[notification-thread] Failed to send reply to notification channel client");
-                       goto end;
-               }
+       if (ret) {
+               goto end;
+       }
 
-               /* Set reception state to receive the next message header. */
-               ret = client_reset_inbound_state(client);
-               if (ret) {
-                       ERR("[notification-thread] Failed to reset client communication's inbound state");
-                       goto end;
-               }
+       /* Set reception state to receive the next message header. */
+       ret = client_reset_inbound_state(client);
+       if (ret) {
+               ERR("[notification-thread] Failed to reset client communication's inbound state");
+               goto end;
+       }
+
+       ret = client_send_command_reply(client, state, status);
+       if (ret) {
+               ERR("[notification-thread] Failed to send reply to notification channel client");
+               goto end;
+       }
+
+end:
+       return ret;
+}
+
+static
+int client_dispatch_message(struct notification_client *client,
+               struct notification_thread_state *state)
+{
+       int ret = 0;
+
+       if (client->communication.inbound.msg_type !=
+                       LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE &&
+                       client->communication.inbound.msg_type !=
+                               LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNKNOWN &&
+                       !client->validated) {
+               WARN("[notification-thread] client attempted a command before handshake");
+               ret = -1;
+               goto end;
+       }
+
+       switch (client->communication.inbound.msg_type) {
+       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNKNOWN:
+       {
+               ret = client_handle_message_unknown(client, state);
+               break;
+       }
+       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE:
+       {
+               ret = client_handle_message_handshake(client, state);
+               break;
+       }
+       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE:
+       case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNSUBSCRIBE:
+       {
+               ret = client_handle_message_subscription(client,
+                               client->communication.inbound.msg_type, state);
                break;
        }
        default:
@@ -2848,6 +3733,7 @@ int handle_notification_thread_client_in(
        ssize_t recv_ret;
        size_t offset;
 
+       rcu_read_lock();
        client = get_client_from_socket(socket, state);
        if (!client) {
                /* Internal error, abort. */
@@ -2855,11 +3741,11 @@ int handle_notification_thread_client_in(
                goto end;
        }
 
-       offset = client->communication.inbound.buffer.size -
+       offset = client->communication.inbound.payload.buffer.size -
                        client->communication.inbound.bytes_to_receive;
        if (client->communication.inbound.expect_creds) {
                recv_ret = lttcomm_recv_creds_unix_sock(socket,
-                               client->communication.inbound.buffer.data + offset,
+                               client->communication.inbound.payload.buffer.data + offset,
                                client->communication.inbound.bytes_to_receive,
                                &client->communication.inbound.creds);
                if (recv_ret > 0) {
@@ -2868,31 +3754,69 @@ int handle_notification_thread_client_in(
                }
        } else {
                recv_ret = lttcomm_recv_unix_sock_non_block(socket,
-                               client->communication.inbound.buffer.data + offset,
+                               client->communication.inbound.payload.buffer.data + offset,
                                client->communication.inbound.bytes_to_receive);
        }
-       if (recv_ret < 0) {
+       if (recv_ret >= 0) {
+               client->communication.inbound.bytes_to_receive -= recv_ret;
+       } else {
                goto error_disconnect_client;
        }
 
-       client->communication.inbound.bytes_to_receive -= recv_ret;
-       if (client->communication.inbound.bytes_to_receive == 0) {
-               ret = client_dispatch_message(client, state);
-               if (ret) {
+       if (client->communication.inbound.bytes_to_receive != 0) {
+               /* Message incomplete; wait for more data. */
+               ret = 0;
+               goto end;
+       }
+
+       assert(client->communication.inbound.bytes_to_receive == 0);
+
+       /* Receive fds. */
+       if (client->communication.inbound.fds_to_receive != 0) {
+               ret = lttcomm_recv_payload_fds_unix_sock_non_block(
+                               client->socket,
+                               client->communication.inbound.fds_to_receive,
+                               &client->communication.inbound.payload);
+               if (ret > 0) {
                        /*
-                        * Only returns an error if this client must be
-                        * disconnected.
+                        * Fds received. Non-blocking fd passing is all
+                        * or nothing.
                         */
+                       ssize_t expected_size;
+
+                       expected_size = sizeof(int) *
+                                       client->communication.inbound
+                                                       .fds_to_receive;
+                       assert(ret == expected_size);
+                       client->communication.inbound.fds_to_receive = 0;
+               } else if (ret == 0) {
+                       /* Received nothing. */
+                       ret = 0;
+                       goto end;
+               } else {
                        goto error_disconnect_client;
                }
-       } else {
-               goto end;
        }
+
+       /* At this point, the message is complete. */
+       assert(client->communication.inbound.bytes_to_receive == 0 &&
+                       client->communication.inbound.fds_to_receive == 0);
+       ret = client_dispatch_message(client, state);
+       if (ret) {
+               /*
+                * Only returns an error if this client must be
+                * disconnected.
+                */
+               goto error_disconnect_client;
+       }
+
 end:
+       rcu_read_unlock();
        return ret;
+
 error_disconnect_client:
-       ret = handle_notification_thread_client_disconnect(socket, state);
-       return ret;
+       ret = notification_thread_client_disconnect(client, state);
+       goto end;
 }
 
 /* Client ready to receive outgoing data. */
@@ -2901,7 +3825,9 @@ int handle_notification_thread_client_out(
 {
        int ret;
        struct notification_client *client;
+       enum client_transmission_status transmission_status;
 
+       rcu_read_lock();
        client = get_client_from_socket(socket, state);
        if (!client) {
                /* Internal error, abort. */
@@ -2909,11 +3835,17 @@ int handle_notification_thread_client_out(
                goto end;
        }
 
-       ret = client_flush_outgoing_queue(client, state);
+       pthread_mutex_lock(&client->lock);
+       transmission_status = client_flush_outgoing_queue(client);
+       pthread_mutex_unlock(&client->lock);
+
+       ret = client_handle_transmission_status(
+                       client, transmission_status, state);
        if (ret) {
                goto end;
        }
 end:
+       rcu_read_unlock();
        return ret;
 }
 
@@ -3080,32 +4012,114 @@ end:
 }
 
 static
-int client_enqueue_dropped_notification(struct notification_client *client)
+int client_notification_overflow(struct notification_client *client)
 {
-       int ret;
-       struct lttng_notification_channel_message msg = {
+       int ret = 0;
+       const struct lttng_notification_channel_message msg = {
                .type = (int8_t) LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_NOTIFICATION_DROPPED,
-               .size = 0,
        };
 
+       ASSERT_LOCKED(client->lock);
+
+       DBG("Dropping notification addressed to client (socket fd = %i)",
+                       client->socket);
+       if (client->communication.outbound.dropped_notification) {
+               /*
+                * The client already has a "notification dropped" message
+                * in its outgoing queue. Nothing to do since all
+                * of those messages are coalesced.
+                */
+               goto end;
+       }
+
+       client->communication.outbound.dropped_notification = true;
        ret = lttng_dynamic_buffer_append(
-                       &client->communication.outbound.buffer, &msg,
+                       &client->communication.outbound.payload.buffer, &msg,
                        sizeof(msg));
+       if (ret) {
+               PERROR("Failed to enqueue \"dropped notification\" message in client's (socket fd = %i) outgoing queue",
+                               client->socket);
+       }
+end:
        return ret;
 }
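+/*
+ * Adapter passing client_handle_transmission_status() as the
+ * report_client_transmission_result_cb callback expected by
+ * notification_client_list_send_evaluation().
+ */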
 
+static int client_handle_transmission_status_wrapper(
+               struct notification_client *client,
+               enum client_transmission_status status,
+               void *user_data)
+{
+       return client_handle_transmission_status(client, status,
+                       (struct notification_thread_state *) user_data);
+}
+
 static
 int send_evaluation_to_clients(const struct lttng_trigger *trigger,
                const struct lttng_evaluation *evaluation,
                struct notification_client_list* client_list,
                struct notification_thread_state *state,
-               uid_t channel_uid, gid_t channel_gid)
+               uid_t object_uid, gid_t object_gid)
+{
+       const struct lttng_credentials creds = {
+               .uid = LTTNG_OPTIONAL_INIT_VALUE(object_uid),
+               .gid = LTTNG_OPTIONAL_INIT_VALUE(object_gid),
+       };
+
+       return notification_client_list_send_evaluation(client_list,
+                       lttng_trigger_get_const_condition(trigger), evaluation,
+                       lttng_trigger_get_credentials(trigger),
+                       &creds,
+                       client_handle_transmission_status_wrapper, state);
+}
+
+/*
+ * Permission checks relative to notification channel clients are performed
+ * here. Notice how object, client, and trigger credentials are involved in
+ * this check.
+ *
+ * The `object` credentials are the credentials associated with the "subject"
+ * of a condition. For instance, a `rotation completed` condition applies
+ * to a session. When that condition is met, it will produce an evaluation
+ * against a session. Hence, in this case, the `object` credentials are the
+ * credentials of the "subject" session.
+ *
+ * The `trigger` credentials are the credentials of the user that registered the
+ * trigger.
+ *
+ * The `client` credentials are the credentials of the user that created a given
+ * notification channel.
+ *
+ * In terms of visibility, it is expected that non-privileged users can only
+ * register triggers against "their" objects (their own sessions and
+ * applications they are allowed to interact with). They can then open a
+ * notification channel and subscribe to notifications associated with those
+ * triggers.
+ *
+ * As for privileged users, they can register triggers against the objects of
+ * other users. They can then subscribe to the notifications associated to their
+ * triggers. Privileged users _can't_ subscribe to the notifications of
+ * triggers owned by other users; they must create their own triggers.
+ *
+ * This is more a concern of usability than security. It would be difficult for
+ * a root user to reliably subscribe to a specific set of conditions without
+ * interference from external users (those could, for instance, unregister
+ * their triggers).
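+ *
+ * For example: a non-privileged user who registers a trigger on one of their
+ * own sessions receives its notifications on notification channels opened as
+ * that same user; the root user cannot subscribe to that trigger's
+ * notifications and must register an equivalent trigger of its own.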
+ */
+LTTNG_HIDDEN
+int notification_client_list_send_evaluation(
+               struct notification_client_list *client_list,
+               const struct lttng_condition *condition,
+               const struct lttng_evaluation *evaluation,
+               const struct lttng_credentials *trigger_creds,
+               const struct lttng_credentials *source_object_creds,
+               report_client_transmission_result_cb client_report,
+               void *user_data)
 {
        int ret = 0;
        struct lttng_payload msg_payload;
        struct notification_client_list_element *client_list_element, *tmp;
        const struct lttng_notification notification = {
-               .condition = (struct lttng_condition *) lttng_trigger_get_const_condition(trigger),
+               .condition = (struct lttng_condition *) condition,
                .evaluation = (struct lttng_evaluation *) evaluation,
        };
        struct lttng_notification_channel_message msg_header = {
@@ -3128,24 +4142,60 @@ int send_evaluation_to_clients(const struct lttng_trigger *trigger,
        }
 
        /* Update payload size. */
-       ((struct lttng_notification_channel_message * ) msg_payload.buffer.data)->size =
-                       (uint32_t) (msg_payload.buffer.size - sizeof(msg_header));
+       ((struct lttng_notification_channel_message *) msg_payload.buffer.data)
+                       ->size = (uint32_t)(
+                       msg_payload.buffer.size - sizeof(msg_header));
+
+       /* Update the payload number of fds. */
+       {
+               const struct lttng_payload_view pv = lttng_payload_view_from_payload(
+                               &msg_payload, 0, -1);
 
+               ((struct lttng_notification_channel_message *)
+                               msg_payload.buffer.data)->fds = (uint32_t)
+                               lttng_payload_view_get_fd_handle_count(&pv);
+       }
+
+       pthread_mutex_lock(&client_list->lock);
        cds_list_for_each_entry_safe(client_list_element, tmp,
                        &client_list->list, node) {
+               enum client_transmission_status transmission_status;
                struct notification_client *client =
                                client_list_element->client;
 
-               if (client->uid != channel_uid && client->gid != channel_gid &&
-                               client->uid != 0) {
-                       /* Client is not allowed to monitor this channel. */
-                       DBG("[notification-thread] Skipping client at it does not have the permission to receive notification for this channel");
-                       continue;
+               ret = 0;
+               pthread_mutex_lock(&client->lock);
+               if (!client->communication.active) {
+                       /*
+                        * Skip inactive client (protocol error or
+                        * disconnecting).
+                        */
+                       DBG("Skipping client as it is marked as inactive");
+                       goto skip_client;
+               }
+
+               if (source_object_creds) {
+                       if (client->uid != lttng_credentials_get_uid(source_object_creds) &&
+                                       client->gid != lttng_credentials_get_gid(source_object_creds) &&
+                                       client->uid != 0) {
+                               /*
+                                * Client is not allowed to monitor this
+                                * object.
+                                */
+                               DBG("[notification-thread] Skipping client as it does not have the object permission to receive notification for this trigger");
+                               goto skip_client;
+                       }
+               }
+
+               if (client->uid != lttng_credentials_get_uid(trigger_creds) &&
+                               client->gid != lttng_credentials_get_gid(trigger_creds)) {
+                       DBG("[notification-thread] Skipping client as it does not have permission to receive notification for this trigger");
+                       goto skip_client;
                }
 
                DBG("[notification-thread] Sending notification to client (fd = %i, %zu bytes)",
                                client->socket, msg_payload.buffer.size);
-               if (client->communication.outbound.buffer.size) {
+
+               if (client_has_outbound_data_left(client)) {
                        /*
                         * Outgoing data is already buffered for this client;
                         * drop the notification and enqueue a "dropped
@@ -3153,37 +4203,325 @@ int send_evaluation_to_clients(const struct lttng_trigger *trigger,
                         * notification since the socket spilled-over to the
                         * queue.
                         */
-                       DBG("[notification-thread] Dropping notification addressed to client (socket fd = %i)",
-                                       client->socket);
-                       if (!client->communication.outbound.dropped_notification) {
-                               client->communication.outbound.dropped_notification = true;
-                               ret = client_enqueue_dropped_notification(
-                                               client);
-                               if (ret) {
-                                       goto end;
-                               }
+                       ret = client_notification_overflow(client);
+                       if (ret) {
+                               /* Fatal error. */
+                               goto skip_client;
                        }
-                       continue;
                }
 
-               ret = lttng_dynamic_buffer_append_buffer(
-                               &client->communication.outbound.buffer,
-                               &msg_payload.buffer);
+               ret = lttng_payload_copy(&msg_payload, &client->communication.outbound.payload);
                if (ret) {
-                       goto end;
+                       /* Fatal error. */
+                       goto skip_client;
                }
 
-               ret = client_flush_outgoing_queue(client, state);
+               transmission_status = client_flush_outgoing_queue(client);
+               pthread_mutex_unlock(&client->lock);
+               ret = client_report(client, transmission_status, user_data);
                if (ret) {
-                       goto end;
+                       /* Fatal error. */
+                       goto end_unlock_list;
+               }
+
+               continue;
+
+skip_client:
+               pthread_mutex_unlock(&client->lock);
+               if (ret) {
+                       /* Fatal error. */
+                       goto end_unlock_list;
                }
        }
        ret = 0;
+
+end_unlock_list:
+       pthread_mutex_unlock(&client_list->lock);
 end:
        lttng_payload_reset(&msg_payload);
        return ret;
 }
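+/*
+ * Read a single tracer notification (fixed-size header followed by an
+ * optional capture payload) from an event notifier notification pipe.
+ * Returns NULL if the notification could not be received or allocated.
+ */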
 
+static
+struct lttng_event_notifier_notification *recv_one_event_notifier_notification(
+               int notification_pipe_read_fd, enum lttng_domain_type domain)
+{
+       int ret;
+       uint64_t token;
+       struct lttng_event_notifier_notification *notification = NULL;
+       char *capture_buffer = NULL;
+       size_t capture_buffer_size;
+       void *reception_buffer;
+       size_t reception_size;
+
+       struct lttng_ust_abi_event_notifier_notification ust_notification;
+       struct lttng_kernel_event_notifier_notification kernel_notification;
+
+       /* Select the reception buffer matching the tracer's domain. */
+       switch (domain) {
+       case LTTNG_DOMAIN_UST:
+               reception_buffer = (void *) &ust_notification;
+               reception_size = sizeof(ust_notification);
+               break;
+       case LTTNG_DOMAIN_KERNEL:
+               reception_buffer = (void *) &kernel_notification;
+               reception_size = sizeof(kernel_notification);
+               break;
+       default:
+               abort();
+       }
+
+       /*
+        * The monitoring pipe only holds messages smaller than PIPE_BUF,
+        * ensuring that read/write of tracer notifications are atomic.
+        */
+       ret = lttng_read(notification_pipe_read_fd, reception_buffer,
+                       reception_size);
+       if (ret != reception_size) {
+               PERROR("Failed to read from event source notification pipe: fd = %d, size to read = %zu, ret = %d",
+                               notification_pipe_read_fd, reception_size, ret);
+               ret = -1;
+               goto end;
+       }
+
+       switch (domain) {
+       case LTTNG_DOMAIN_UST:
+               token = ust_notification.token;
+               capture_buffer_size = ust_notification.capture_buf_size;
+               break;
+       case LTTNG_DOMAIN_KERNEL:
+               token = kernel_notification.token;
+               capture_buffer_size = 0;
+               break;
+       default:
+               abort();
+       }
+
+       if (capture_buffer_size == 0) {
+               capture_buffer = NULL;
+               goto skip_capture;
+       }
+
+       if (capture_buffer_size > MAX_CAPTURE_SIZE) {
+               ERR("[notification-thread] Event notifier has a capture payload size which exceeds the maximum allowed size: capture_payload_size = %zu bytes, max allowed size = %d bytes",
+                               capture_buffer_size, MAX_CAPTURE_SIZE);
+               goto end;
+       }
+
+       capture_buffer = zmalloc(capture_buffer_size);
+       if (!capture_buffer) {
+               ERR("[notification-thread] Failed to allocate capture buffer");
+               goto end;
+       }
+
+       /* Fetch additional payload (capture). */
+       ret = lttng_read(notification_pipe_read_fd, capture_buffer, capture_buffer_size);
+       if (ret != capture_buffer_size) {
+               ERR("[notification-thread] Failed to read from event source pipe (fd = %i)",
+                               notification_pipe_read_fd);
+               goto end;
+       }
+
+skip_capture:
+       notification = lttng_event_notifier_notification_create(token, domain,
+                       capture_buffer, capture_buffer_size);
+       if (notification == NULL) {
+               goto end;
+       }
+
+       /*
+        * Ownership transferred to the lttng_event_notifier_notification object.
+        */
+       capture_buffer = NULL;
+
+end:
+       free(capture_buffer);
+       return notification;
+}
+
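+/*
+ * Look up the trigger owning the notification's tracer token, build the
+ * corresponding evaluation, and enqueue the trigger's actions on the action
+ * executor. An unknown token is not an error: the trigger may have been
+ * unregistered while notifications were still queued in the pipe.
+ */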
+static
+int dispatch_one_event_notifier_notification(struct notification_thread_state *state,
+               struct lttng_event_notifier_notification *notification)
+{
+       struct cds_lfht_node *node;
+       struct cds_lfht_iter iter;
+       struct notification_trigger_tokens_ht_element *element;
+       enum lttng_trigger_status trigger_status;
+       struct lttng_evaluation *evaluation = NULL;
+       enum action_executor_status executor_status;
+       struct notification_client_list *client_list = NULL;
+       const char *trigger_name;
+       int ret;
+       unsigned int capture_count = 0;
+
+       /* Find triggers associated with this token. */
+       rcu_read_lock();
+       cds_lfht_lookup(state->trigger_tokens_ht,
+                       hash_key_u64(&notification->tracer_token, lttng_ht_seed),
+                       match_trigger_token, &notification->tracer_token, &iter);
+       node = cds_lfht_iter_get_node(&iter);
+       if (caa_unlikely(!node)) {
+               /*
+                * This is not an error, slow consumption of the tracer
+                * notifications can lead to situations where a trigger is
+                * removed but we still get tracer notifications matching a
+                * trigger that no longer exists.
+                */
+               ret = 0;
+               goto end_unlock;
+       }
+
+       element = caa_container_of(node,
+                       struct notification_trigger_tokens_ht_element,
+                       node);
+
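+       /*
+        * lttng_trigger_should_fire()/lttng_trigger_fire() implement the
+        * trigger's firing policy; skip triggers that should not fire for
+        * this occurrence.
+        */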
+       if (!lttng_trigger_should_fire(element->trigger)) {
+               ret = 0;
+               goto end_unlock;
+       }
+
+       lttng_trigger_fire(element->trigger);
+
+       trigger_status = lttng_trigger_get_name(element->trigger, &trigger_name);
+       assert(trigger_status == LTTNG_TRIGGER_STATUS_OK);
+
+       if (lttng_condition_event_rule_get_capture_descriptor_count(
+                           lttng_trigger_get_const_condition(element->trigger),
+                           &capture_count) != LTTNG_CONDITION_STATUS_OK) {
+               ERR("Failed to get capture count");
+               ret = -1;
+               goto end_unlock;
+       }
+
+       if (!notification->capture_buffer && capture_count != 0) {
+               ERR("Expected capture but capture buffer is null");
+               ret = -1;
+               goto end_unlock;
+       }
+
+       evaluation = lttng_evaluation_event_rule_create(
+                       container_of(lttng_trigger_get_const_condition(
+                                                    element->trigger),
+                                       struct lttng_condition_event_rule,
+                                       parent),
+                       trigger_name,
+                       notification->capture_buffer,
+                       notification->capture_buf_size, false);
+
+       if (evaluation == NULL) {
+               ERR("[notification-thread] Failed to create event rule hit evaluation while creating and enqueuing action executor job");
+               ret = -1;
+               goto end_unlock;
+       }
+       client_list = get_client_list_from_condition(state,
+                       lttng_trigger_get_const_condition(element->trigger));
+       executor_status = action_executor_enqueue(state->executor,
+                       element->trigger, evaluation, NULL, client_list);
+       switch (executor_status) {
+       case ACTION_EXECUTOR_STATUS_OK:
+               ret = 0;
+               break;
+       case ACTION_EXECUTOR_STATUS_OVERFLOW:
+       {
+               struct notification_client_list_element *client_list_element,
+                               *tmp;
+
+               /*
+                * Not a fatal error; this is expected and simply means the
+                * executor has too much work queued already.
+                */
+               ret = 0;
+
+               /* No clients subscribed to notifications for this trigger. */
+               if (!client_list) {
+                       break;
+               }
+
+               /* Warn clients that a notification (or more) was dropped. */
+               pthread_mutex_lock(&client_list->lock);
+               cds_list_for_each_entry_safe(client_list_element, tmp,
+                               &client_list->list, node) {
+                       enum client_transmission_status transmission_status;
+                       struct notification_client *client =
+                                       client_list_element->client;
+
+                       pthread_mutex_lock(&client->lock);
+                       ret = client_notification_overflow(client);
+                       if (ret) {
+                               /* Fatal error. */
+                               goto next_client;
+                       }
+
+                       transmission_status =
+                                       client_flush_outgoing_queue(client);
+                       ret = client_handle_transmission_status(
+                                       client, transmission_status, state);
+                       if (ret) {
+                               /* Fatal error. */
+                               goto next_client;
+                       }
+next_client:
+                       pthread_mutex_unlock(&client->lock);
+                       if (ret) {
+                               break;
+                       }
+               }
+
+               pthread_mutex_unlock(&client_list->lock);
+               break;
+       }
+       case ACTION_EXECUTOR_STATUS_INVALID:
+       case ACTION_EXECUTOR_STATUS_ERROR:
+               /* Fatal error, shut down everything. */
+               ERR("Fatal error encountered while enqueuing action to the action executor");
+               ret = -1;
+               goto end_unlock;
+       default:
+               /* Unhandled error. */
+               abort();
+       }
+
+end_unlock:
+       notification_client_list_put(client_list);
+       rcu_read_unlock();
+end:
+       return ret;
+}
+
+static
+int handle_one_event_notifier_notification(
+               struct notification_thread_state *state,
+               int pipe, enum lttng_domain_type domain)
+{
+       int ret = 0;
+       struct lttng_event_notifier_notification *notification = NULL;
+
+       notification = recv_one_event_notifier_notification(pipe, domain);
+       if (notification == NULL) {
+               /* Reception failed, don't consider it fatal. */
+               ERR("[notification-thread] Error receiving an event notifier notification from tracer: fd = %i, domain = %s",
+                               pipe, lttng_domain_type_str(domain));
+               goto end;
+       }
+
+       ret = dispatch_one_event_notifier_notification(state, notification);
+       if (ret) {
+               ERR("[notification-thread] Error dispatching an event notifier notification from tracer: fd = %i, domain = %s",
+                               pipe, lttng_domain_type_str(domain));
+               goto end;
+       }
+
+end:
+       lttng_event_notifier_notification_destroy(notification);
+       return ret;
+}
+
+int handle_notification_thread_event_notification(struct notification_thread_state *state,
+               int pipe, enum lttng_domain_type domain)
+{
+       return handle_one_event_notifier_notification(state, pipe, domain);
+}
+
 int handle_notification_thread_channel_sample(
                struct notification_thread_state *state, int pipe,
                enum lttng_domain_type domain)
@@ -3198,6 +4536,7 @@ int handle_notification_thread_channel_sample(
        bool previous_sample_available = false;
        struct channel_state_sample previous_sample, latest_sample;
        uint64_t previous_session_consumed_total, latest_session_consumed_total;
+       struct lttng_credentials channel_creds;
 
        /*
         * The monitoring pipe only holds messages smaller than PIPE_BUF,
@@ -3236,8 +4575,7 @@ int handle_notification_thread_channel_sample(
                 */
                DBG("[notification-thread] Received a sample for an unknown channel from consumerd, key = %" PRIu64 " in %s domain",
                                latest_sample.key.key,
-                               domain == LTTNG_DOMAIN_KERNEL ? "kernel" :
-                                       "user space");
+                               lttng_domain_type_str(domain));
                goto end_unlock;
        }
        channel_info = caa_container_of(node, struct channel_info,
@@ -3316,38 +4654,31 @@ int handle_notification_thread_channel_sample(
                goto end_unlock;
        }
 
+       channel_creds = (typeof(channel_creds)) {
+               .uid = LTTNG_OPTIONAL_INIT_VALUE(channel_info->session_info->uid),
+               .gid = LTTNG_OPTIONAL_INIT_VALUE(channel_info->session_info->gid),
+       };
+
        trigger_list = caa_container_of(node, struct lttng_channel_trigger_list,
                        channel_triggers_ht_node);
        cds_list_for_each_entry(trigger_list_element, &trigger_list->list,
                        node) {
                const struct lttng_condition *condition;
-               const struct lttng_action *action;
-               const struct lttng_trigger *trigger;
-               struct notification_client_list *client_list;
+               struct lttng_trigger *trigger;
+               struct notification_client_list *client_list = NULL;
                struct lttng_evaluation *evaluation = NULL;
+               enum action_executor_status executor_status;
 
+               ret = 0;
                trigger = trigger_list_element->trigger;
                condition = lttng_trigger_get_const_condition(trigger);
                assert(condition);
-               action = lttng_trigger_get_const_action(trigger);
-
-               /* Notify actions are the only type currently supported. */
-               assert(lttng_action_get_type_const(action) ==
-                               LTTNG_ACTION_TYPE_NOTIFY);
 
                /*
                 * Check if any client is subscribed to the result of this
                 * evaluation.
                 */
                client_list = get_client_list_from_condition(state, condition);
-               assert(client_list);
-               if (cds_list_empty(&client_list->list)) {
-                       /*
-                        * No clients interested in the evaluation's result,
-                        * skip it.
-                        */
-                       continue;
-               }
 
                ret = evaluate_buffer_condition(condition, &evaluation, state,
                                previous_sample_available ? &previous_sample : NULL,
@@ -3356,21 +4687,57 @@ int handle_notification_thread_channel_sample(
                                latest_session_consumed_total,
                                channel_info);
                if (caa_unlikely(ret)) {
-                       goto end_unlock;
+                       goto put_list;
                }
 
                if (caa_likely(!evaluation)) {
-                       continue;
+                       goto put_list;
+               }
+
+               if (!lttng_trigger_should_fire(trigger)) {
+                       goto put_list;
                }
 
-               /* Dispatch evaluation result to all clients. */
-               ret = send_evaluation_to_clients(trigger_list_element->trigger,
-                               evaluation, client_list, state,
-                               channel_info->session_info->uid,
-                               channel_info->session_info->gid);
-               lttng_evaluation_destroy(evaluation);
+               lttng_trigger_fire(trigger);
+
+               /*
+                * Ownership of `evaluation` transferred to the action executor
+                * no matter the result.
+                */
+               executor_status = action_executor_enqueue(state->executor,
+                               trigger, evaluation, &channel_creds,
+                               client_list);
+               evaluation = NULL;
+               switch (executor_status) {
+               case ACTION_EXECUTOR_STATUS_OK:
+                       break;
+               case ACTION_EXECUTOR_STATUS_ERROR:
+               case ACTION_EXECUTOR_STATUS_INVALID:
+                       /*
+                        * TODO Add trigger identification (name/id) when
+                        * it is added to the API.
+                        */
+                       ERR("Fatal error occurred while enqueuing action associated with buffer-condition trigger");
+                       ret = -1;
+                       goto put_list;
+               case ACTION_EXECUTOR_STATUS_OVERFLOW:
+                       /*
+                        * TODO Add trigger identification (name/id) when
+                        * it is added to the API.
+                        *
+                        * Not a fatal error.
+                        */
+                       WARN("No space left when enqueuing action associated with buffer-condition trigger");
+                       ret = 0;
+                       goto put_list;
+               default:
+                       abort();
+               }
+
+put_list:
+               notification_client_list_put(client_list);
                if (caa_unlikely(ret)) {
-                       goto end_unlock;
+                       break;
                }
        }
 end_unlock: