X-Git-Url: https://git.lttng.org/?p=lttng-tools.git;a=blobdiff_plain;f=src%2Fbin%2Flttng-sessiond%2Fnotification-thread-events.c;h=86a114c07ddb0cafc9acd5533d1315bce10d7343;hp=9d82b8313bfc59701619378a22cb69550bfc5eb4;hb=882093eef6fdd658833928a62be5d42fc0cdcb00;hpb=a5d64ae7893cc1d2b595614a8f8e3d03c401cf39 diff --git a/src/bin/lttng-sessiond/notification-thread-events.c b/src/bin/lttng-sessiond/notification-thread-events.c index 9d82b8313..86a114c07 100644 --- a/src/bin/lttng-sessiond/notification-thread-events.c +++ b/src/bin/lttng-sessiond/notification-thread-events.c @@ -1,20 +1,12 @@ /* - * Copyright (C) 2017 - Jérémie Galarneau + * Copyright (C) 2017 Jérémie Galarneau * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License, version 2 only, as - * published by the Free Software Foundation. + * SPDX-License-Identifier: GPL-2.0-only * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., 51 - * Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ +#include "lttng/action/action.h" +#include "lttng/trigger/trigger-internal.h" #define _LGPL_SOURCE #include #include @@ -60,7 +52,7 @@ enum lttng_object_type { struct lttng_trigger_list_element { /* No ownership of the trigger object is assumed. */ - const struct lttng_trigger *trigger; + struct lttng_trigger *trigger; struct cds_list_head node; }; @@ -127,87 +119,6 @@ struct lttng_condition_list_element { struct cds_list_head node; }; -struct notification_client_list_element { - struct notification_client *client; - struct cds_list_head node; -}; - -struct notification_client_list { - const struct lttng_trigger *trigger; - struct cds_list_head list; - struct cds_lfht_node notification_trigger_ht_node; - /* call_rcu delayed reclaim. */ - struct rcu_head rcu_node; -}; - -struct notification_client { - int socket; - /* Client protocol version. */ - uint8_t major, minor; - uid_t uid; - gid_t gid; - /* - * Indicates if the credentials and versions of the client have been - * checked. - */ - bool validated; - /* - * Conditions to which the client's notification channel is subscribed. - * List of struct lttng_condition_list_node. The condition member is - * owned by the client. - */ - struct cds_list_head condition_list; - struct cds_lfht_node client_socket_ht_node; - struct { - struct { - /* - * During the reception of a message, the reception - * buffers' "size" is set to contain the current - * message's complete payload. - */ - struct lttng_dynamic_buffer buffer; - /* Bytes left to receive for the current message. */ - size_t bytes_to_receive; - /* Type of the message being received. */ - enum lttng_notification_channel_message_type msg_type; - /* - * Indicates whether or not credentials are expected - * from the client. - */ - bool expect_creds; - /* - * Indicates whether or not credentials were received - * from the client. - */ - bool creds_received; - /* Only used during credentials reception. */ - lttng_sock_cred creds; - } inbound; - struct { - /* - * Indicates whether or not a notification addressed to - * this client was dropped because a command reply was - * already buffered. 
- * - * A notification is dropped whenever the buffer is not - * empty. - */ - bool dropped_notification; - /* - * Indicates whether or not a command reply is already - * buffered. In this case, it means that the client is - * not consuming command replies before emitting a new - * one. This could be caused by a protocol error or a - * misbehaving/malicious client. - */ - bool queued_command_reply; - struct lttng_dynamic_buffer buffer; - } outbound; - } communication; - /* call_rcu delayed reclaim. */ - struct rcu_head rcu_node; -}; - struct channel_state_sample { struct channel_key key; struct cds_lfht_node channel_state_ht_node; @@ -268,20 +179,34 @@ void lttng_session_trigger_list_destroy( struct lttng_session_trigger_list *list); static int lttng_session_trigger_list_add(struct lttng_session_trigger_list *list, - const struct lttng_trigger *trigger); + struct lttng_trigger *trigger); +static +int client_handle_transmission_status( + struct notification_client *client, + enum client_transmission_status transmission_status, + struct notification_thread_state *state); static -int match_client(struct cds_lfht_node *node, const void *key) +int match_client_socket(struct cds_lfht_node *node, const void *key) { /* This double-cast is intended to supress pointer-to-cast warning. */ - int socket = (int) (intptr_t) key; - struct notification_client *client; + const int socket = (int) (intptr_t) key; + const struct notification_client *client = caa_container_of(node, + struct notification_client, client_socket_ht_node); - client = caa_container_of(node, struct notification_client, - client_socket_ht_node); + return client->socket == socket; +} + +static +int match_client_id(struct cds_lfht_node *node, const void *key) +{ + /* This double-cast is intended to supress pointer-to-cast warning. */ + const notification_client_id id = *((notification_client_id *) key); + const struct notification_client *client = caa_container_of( + node, struct notification_client, client_id_ht_node); - return !!(client->socket == socket); + return client->id == id; } static @@ -360,7 +285,7 @@ int match_client_list_condition(struct cds_lfht_node *node, const void *key) assert(condition_key); client_list = caa_container_of(node, struct notification_client_list, - notification_trigger_ht_node); + notification_trigger_clients_ht_node); condition = lttng_trigger_get_const_condition(client_list->trigger); return !!lttng_condition_is_equal(condition_key, condition); @@ -485,6 +410,18 @@ unsigned long hash_channel_key(struct channel_key *key) return key_hash ^ domain_hash; } +static +unsigned long hash_client_socket(int socket) +{ + return hash_key_ulong((void *) (unsigned long) socket, lttng_ht_seed); +} + +static +unsigned long hash_client_id(notification_client_id id) +{ + return hash_key_u64(&id, lttng_ht_seed); +} + /* * Get the type of object to which a given condition applies. Bindings let * the notification system evaluate a trigger's condition when a given @@ -501,7 +438,7 @@ enum lttng_object_type get_condition_binding_object( case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW: case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH: case LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE: - return LTTNG_OBJECT_TYPE_CHANNEL; + return LTTNG_OBJECT_TYPE_CHANNEL; case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING: case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED: return LTTNG_OBJECT_TYPE_SESSION; @@ -676,7 +613,91 @@ error: return NULL; } -/* RCU read lock must be held by the caller. 
*/ +LTTNG_HIDDEN +bool notification_client_list_get(struct notification_client_list *list) +{ + return urcu_ref_get_unless_zero(&list->ref); +} + +static +void free_notification_client_list_rcu(struct rcu_head *node) +{ + free(caa_container_of(node, struct notification_client_list, + rcu_node)); +} + +static +void notification_client_list_release(struct urcu_ref *list_ref) +{ + struct notification_client_list *list = + container_of(list_ref, typeof(*list), ref); + struct notification_client_list_element *client_list_element, *tmp; + + if (list->notification_trigger_clients_ht) { + rcu_read_lock(); + cds_lfht_del(list->notification_trigger_clients_ht, + &list->notification_trigger_clients_ht_node); + rcu_read_unlock(); + list->notification_trigger_clients_ht = NULL; + } + cds_list_for_each_entry_safe(client_list_element, tmp, + &list->list, node) { + free(client_list_element); + } + pthread_mutex_destroy(&list->lock); + call_rcu(&list->rcu_node, free_notification_client_list_rcu); +} + +static +struct notification_client_list *notification_client_list_create( + const struct lttng_trigger *trigger) +{ + struct notification_client_list *client_list = + zmalloc(sizeof(*client_list)); + + if (!client_list) { + goto error; + } + pthread_mutex_init(&client_list->lock, NULL); + urcu_ref_init(&client_list->ref); + cds_lfht_node_init(&client_list->notification_trigger_clients_ht_node); + CDS_INIT_LIST_HEAD(&client_list->list); + client_list->trigger = trigger; +error: + return client_list; +} + +static +void publish_notification_client_list( + struct notification_thread_state *state, + struct notification_client_list *list) +{ + const struct lttng_condition *condition = + lttng_trigger_get_const_condition(list->trigger); + + assert(!list->notification_trigger_clients_ht); + notification_client_list_get(list); + + list->notification_trigger_clients_ht = + state->notification_trigger_clients_ht; + + rcu_read_lock(); + cds_lfht_add(state->notification_trigger_clients_ht, + lttng_condition_hash(condition), + &list->notification_trigger_clients_ht_node); + rcu_read_unlock(); +} + +LTTNG_HIDDEN +void notification_client_list_put(struct notification_client_list *list) +{ + if (!list) { + return; + } + return urcu_ref_put(&list->ref, notification_client_list_release); +} + +/* Provides a reference to the returned list. */ static struct notification_client_list *get_client_list_from_condition( struct notification_thread_state *state, @@ -684,20 +705,25 @@ struct notification_client_list *get_client_list_from_condition( { struct cds_lfht_node *node; struct cds_lfht_iter iter; + struct notification_client_list *list = NULL; + rcu_read_lock(); cds_lfht_lookup(state->notification_trigger_clients_ht, lttng_condition_hash(condition), match_client_list_condition, condition, &iter); node = cds_lfht_iter_get_node(&iter); + if (node) { + list = container_of(node, struct notification_client_list, + notification_trigger_clients_ht_node); + list = notification_client_list_get(list) ? list : NULL; + } - return node ? caa_container_of(node, - struct notification_client_list, - notification_trigger_ht_node) : NULL; + rcu_read_unlock(); + return list; } -/* This function must be called with the RCU read lock held. 
*/ static int evaluate_channel_condition_for_client( const struct lttng_condition *condition, @@ -713,6 +739,8 @@ int evaluate_channel_condition_for_client( struct channel_state_sample *last_sample = NULL; struct lttng_channel_trigger_list *channel_trigger_list = NULL; + rcu_read_lock(); + /* Find the channel associated with the condition. */ cds_lfht_for_each_entry(state->channel_triggers_ht, &iter, channel_trigger_list, channel_triggers_ht_node) { @@ -787,6 +815,7 @@ int evaluate_channel_condition_for_client( *session_uid = channel_info->session_info->uid; *session_gid = channel_info->session_info->gid; end: + rcu_read_unlock(); return ret; } @@ -822,7 +851,6 @@ end: return session_name; } -/* This function must be called with the RCU read lock held. */ static int evaluate_session_condition_for_client( const struct lttng_condition *condition, @@ -836,6 +864,7 @@ int evaluate_session_condition_for_client( const char *session_name; struct session_info *session_info = NULL; + rcu_read_lock(); session_name = get_condition_session_name(condition); /* Find the session associated with the trigger. */ @@ -889,10 +918,10 @@ int evaluate_session_condition_for_client( end_session_put: session_info_put(session_info); end: + rcu_read_unlock(); return ret; } -/* This function must be called with the RCU read lock held. */ static int evaluate_condition_for_client(const struct lttng_trigger *trigger, const struct lttng_condition *condition, @@ -901,7 +930,9 @@ int evaluate_condition_for_client(const struct lttng_trigger *trigger, { int ret; struct lttng_evaluation *evaluation = NULL; - struct notification_client_list client_list = { 0 }; + struct notification_client_list client_list = { + .lock = PTHREAD_MUTEX_INITIALIZER, + }; struct notification_client_list_element client_list_element = { 0 }; uid_t object_uid = 0; gid_t object_gid = 0; @@ -928,7 +959,10 @@ int evaluate_condition_for_client(const struct lttng_trigger *trigger, ret = -1; goto end; } - + if (ret) { + /* Fatal error. */ + goto end; + } if (!evaluation) { /* Evaluation yielded nothing. Normal exit. */ DBG("[notification-thread] Newly subscribed-to condition evaluated to false, nothing to report to client"); @@ -940,7 +974,7 @@ int evaluate_condition_for_client(const struct lttng_trigger *trigger, * Create a temporary client list with the client currently * subscribing. */ - cds_lfht_node_init(&client_list.notification_trigger_ht_node); + cds_lfht_node_init(&client_list.notification_trigger_clients_ht_node); CDS_INIT_LIST_HEAD(&client_list.list); client_list.trigger = trigger; @@ -964,7 +998,7 @@ int notification_thread_client_subscribe(struct notification_client *client, enum lttng_notification_channel_status *_status) { int ret = 0; - struct notification_client_list *client_list; + struct notification_client_list *client_list = NULL; struct lttng_condition_list_element *condition_list_element = NULL; struct notification_client_list_element *client_list_element = NULL; enum lttng_notification_channel_status status = @@ -993,8 +1027,6 @@ int notification_thread_client_subscribe(struct notification_client *client, goto error; } - rcu_read_lock(); - /* * Add the newly-subscribed condition to the client's subscription list. */ @@ -1010,20 +1042,24 @@ int notification_thread_client_subscribe(struct notification_client *client, * since this trigger is not registered yet. 
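 *
 * The subscription itself remains recorded in the client's condition
 * list; the condition will be evaluated against the client when a
 * matching trigger is eventually registered (see
 * handle_notification_thread_command_register_trigger()).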
*/ free(client_list_element); - goto end_unlock; + goto end; } /* * The condition to which the client just subscribed is evaluated * at this point so that conditions that are already TRUE result * in a notification being sent out. + * + * The client_list's trigger is used without locking the list itself. + * This is correct since the list doesn't own the trigger and the + * object is immutable. */ if (evaluate_condition_for_client(client_list->trigger, condition, client, state)) { WARN("[notification-thread] Evaluation of a condition on client subscription failed, aborting."); ret = -1; free(client_list_element); - goto end_unlock; + goto end; } /* @@ -1033,13 +1069,17 @@ int notification_thread_client_subscribe(struct notification_client *client, */ client_list_element->client = client; CDS_INIT_LIST_HEAD(&client_list_element->node); + + pthread_mutex_lock(&client_list->lock); cds_list_add(&client_list_element->node, &client_list->list); -end_unlock: - rcu_read_unlock(); + pthread_mutex_unlock(&client_list->lock); end: if (_status) { *_status = status; } + if (client_list) { + notification_client_list_put(client_list); + } return ret; error: free(condition_list_element); @@ -1095,23 +1135,24 @@ int notification_thread_client_unsubscribe( * Remove the client from the list of clients interested the trigger * matching the condition. */ - rcu_read_lock(); client_list = get_client_list_from_condition(state, condition); if (!client_list) { - goto end_unlock; + goto end; } + pthread_mutex_lock(&client_list->lock); cds_list_for_each_entry_safe(client_list_element, client_tmp, &client_list->list, node) { - if (client_list_element->client->socket != client->socket) { + if (client_list_element->client->id != client->id) { continue; } cds_list_del(&client_list_element->node); free(client_list_element); break; } -end_unlock: - rcu_read_unlock(); + pthread_mutex_unlock(&client_list->lock); + notification_client_list_put(client_list); + client_list = NULL; end: lttng_condition_destroy(condition); if (_status) { @@ -1130,24 +1171,22 @@ static void notification_client_destroy(struct notification_client *client, struct notification_thread_state *state) { - struct lttng_condition_list_element *condition_list_element, *tmp; - if (!client) { return; } - /* Release all conditions to which the client was subscribed. */ - cds_list_for_each_entry_safe(condition_list_element, tmp, - &client->condition_list, node) { - (void) notification_thread_client_unsubscribe(client, - condition_list_element->condition, state, NULL); - } - + /* + * The client object is not reachable by other threads, no need to lock + * the client here. 
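+	 *
+	 * At this point the client has either never been published to the
+	 * client_socket_ht/client_id_ht hash tables (connection error path)
+	 * or has already been removed from them by
+	 * notification_thread_client_disconnect(). Pre-existing RCU readers
+	 * may still hold a pointer to it, which is why the final reclamation
+	 * is deferred through call_rcu() below.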
+ */ if (client->socket >= 0) { (void) lttcomm_close_unix_sock(client->socket); + client->socket = -1; } - lttng_dynamic_buffer_reset(&client->communication.inbound.buffer); - lttng_dynamic_buffer_reset(&client->communication.outbound.buffer); + client->communication.active = false; + lttng_payload_reset(&client->communication.inbound.payload); + lttng_payload_reset(&client->communication.outbound.payload); + pthread_mutex_destroy(&client->lock); call_rcu(&client->rcu_node, free_notification_client_rcu); } @@ -1164,8 +1203,8 @@ struct notification_client *get_client_from_socket(int socket, struct notification_client *client = NULL; cds_lfht_lookup(state->client_socket_ht, - hash_key_ulong((void *) (unsigned long) socket, lttng_ht_seed), - match_client, + hash_client_socket(socket), + match_client_socket, (void *) (unsigned long) socket, &iter); node = cds_lfht_iter_get_node(&iter); @@ -1179,6 +1218,34 @@ end: return client; } +/* + * Call with rcu_read_lock held (and hold for the lifetime of the returned + * client pointer). + */ +static +struct notification_client *get_client_from_id(notification_client_id id, + struct notification_thread_state *state) +{ + struct cds_lfht_iter iter; + struct cds_lfht_node *node; + struct notification_client *client = NULL; + + cds_lfht_lookup(state->client_id_ht, + hash_client_id(id), + match_client_id, + &id, + &iter); + node = cds_lfht_iter_get_node(&iter); + if (!node) { + goto end; + } + + client = caa_container_of(node, struct notification_client, + client_id_ht_node); +end: + return client; +} + static bool buffer_usage_condition_applies_to_channel( const struct lttng_condition *condition, @@ -1313,7 +1380,7 @@ struct lttng_session_trigger_list *get_session_trigger_list( goto end; } - list = caa_container_of(node, + list = caa_container_of(node, struct lttng_session_trigger_list, session_triggers_ht_node); end: @@ -1382,7 +1449,7 @@ void lttng_session_trigger_list_destroy(struct lttng_session_trigger_list *list) static int lttng_session_trigger_list_add(struct lttng_session_trigger_list *list, - const struct lttng_trigger *trigger) + struct lttng_trigger *trigger) { int ret = 0; struct lttng_trigger_list_element *new_element = @@ -1739,6 +1806,10 @@ int handle_notification_thread_command_session_rotation( struct lttng_session_trigger_list *trigger_list; struct lttng_trigger_list_element *trigger_list_element; struct session_info *session_info; + const struct lttng_credentials session_creds = { + .uid = session_uid, + .gid = session_gid, + }; rcu_read_lock(); @@ -1764,11 +1835,11 @@ int handle_notification_thread_command_session_rotation( cds_list_for_each_entry(trigger_list_element, &trigger_list->list, node) { const struct lttng_condition *condition; - const struct lttng_action *action; - const struct lttng_trigger *trigger; + struct lttng_trigger *trigger; struct notification_client_list *client_list; struct lttng_evaluation *evaluation = NULL; enum lttng_condition_type condition_type; + enum action_executor_status executor_status; trigger = trigger_list_element->trigger; condition = lttng_trigger_get_const_condition(trigger); @@ -1783,23 +1854,7 @@ int handle_notification_thread_command_session_rotation( continue; } - action = lttng_trigger_get_const_action(trigger); - - /* Notify actions are the only type currently supported. 
*/ - assert(lttng_action_get_type_const(action) == - LTTNG_ACTION_TYPE_NOTIFY); - client_list = get_client_list_from_condition(state, condition); - assert(client_list); - - if (cds_list_empty(&client_list->list)) { - /* - * No clients interested in the evaluation's result, - * skip it. - */ - continue; - } - if (cmd_type == NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING) { evaluation = lttng_evaluation_session_rotation_ongoing_create( trace_archive_chunk_id); @@ -1812,17 +1867,47 @@ int handle_notification_thread_command_session_rotation( /* Internal error */ ret = -1; cmd_result = LTTNG_ERR_UNK; - goto end; + goto put_list; + } + + /* + * Ownership of `evaluation` transferred to the action executor + * no matter the result. + */ + executor_status = action_executor_enqueue(state->executor, + trigger, evaluation, &session_creds, + client_list); + evaluation = NULL; + switch (executor_status) { + case ACTION_EXECUTOR_STATUS_OK: + break; + case ACTION_EXECUTOR_STATUS_ERROR: + case ACTION_EXECUTOR_STATUS_INVALID: + /* + * TODO Add trigger identification (name/id) when + * it is added to the API. + */ + ERR("Fatal error occurred while enqueuing action associated with session rotation trigger"); + ret = -1; + goto put_list; + case ACTION_EXECUTOR_STATUS_OVERFLOW: + /* + * TODO Add trigger identification (name/id) when + * it is added to the API. + * + * Not a fatal error. + */ + WARN("No space left when enqueuing action associated with session rotation trigger"); + ret = 0; + goto put_list; + default: + abort(); } - /* Dispatch evaluation result to all clients. */ - ret = send_evaluation_to_clients(trigger_list_element->trigger, - evaluation, client_list, state, - session_info->uid, - session_info->gid); - lttng_evaluation_destroy(evaluation); +put_list: + notification_client_list_put(client_list); if (caa_unlikely(ret)) { - goto end; + break; } } end: @@ -1860,8 +1945,7 @@ int condition_is_supported(struct lttng_condition *condition) * buffers. Therefore, we reject triggers that require that * mechanism to be available to be evaluated. */ - ret = kernel_supports_ring_buffer_snapshot_sample_positions( - kernel_tracer_fd); + ret = kernel_supports_ring_buffer_snapshot_sample_positions(); break; } default: @@ -1873,7 +1957,7 @@ end: /* Must be called with RCU read lock held. */ static -int bind_trigger_to_matching_session(const struct lttng_trigger *trigger, +int bind_trigger_to_matching_session(struct lttng_trigger *trigger, struct notification_thread_state *state) { int ret = 0; @@ -1919,7 +2003,7 @@ end: /* Must be called with RCU read lock held. 
*/ static -int bind_trigger_to_matching_channels(const struct lttng_trigger *trigger, +int bind_trigger_to_matching_channels(struct lttng_trigger *trigger, struct notification_thread_state *state) { int ret = 0; @@ -1963,9 +2047,46 @@ end: return ret; } +static +bool is_trigger_action_notify(const struct lttng_trigger *trigger) +{ + bool is_notify = false; + unsigned int i, count; + enum lttng_action_status action_status; + const struct lttng_action *action = + lttng_trigger_get_const_action(trigger); + enum lttng_action_type action_type; + + assert(action); + action_type = lttng_action_get_type_const(action); + if (action_type == LTTNG_ACTION_TYPE_NOTIFY) { + is_notify = true; + goto end; + } else if (action_type != LTTNG_ACTION_TYPE_GROUP) { + goto end; + } + + action_status = lttng_action_group_get_count(action, &count); + assert(action_status == LTTNG_ACTION_STATUS_OK); + + for (i = 0; i < count; i++) { + const struct lttng_action *inner_action = + lttng_action_group_get_at_index( + action, i); + + action_type = lttng_action_get_type_const(inner_action); + if (action_type == LTTNG_ACTION_TYPE_NOTIFY) { + is_notify = true; + goto end; + } + } + +end: + return is_notify; +} + /* - * FIXME A client's credentials are not checked when registering a trigger, nor - * are they stored alongside with the trigger. + * FIXME A client's credentials are not checked when registering a trigger. * * The effects of this are benign since: * - The client will succeed in registering the trigger, as it is valid, @@ -1990,10 +2111,13 @@ int handle_notification_thread_command_register_trigger( struct notification_client *client; struct notification_client_list *client_list = NULL; struct lttng_trigger_ht_element *trigger_ht_element = NULL; - struct notification_client_list_element *client_list_element, *tmp; + struct notification_client_list_element *client_list_element; struct cds_lfht_node *node; struct cds_lfht_iter iter; bool free_trigger = true; + struct lttng_evaluation *evaluation = NULL; + struct lttng_credentials object_creds; + enum action_executor_status executor_status; rcu_read_lock(); @@ -2044,42 +2168,46 @@ int handle_notification_thread_command_register_trigger( * It is not skipped as this is the only action type currently * supported. */ - client_list = zmalloc(sizeof(*client_list)); - if (!client_list) { - ret = -1; - goto error_free_ht_element; - } - cds_lfht_node_init(&client_list->notification_trigger_ht_node); - CDS_INIT_LIST_HEAD(&client_list->list); - client_list->trigger = trigger; - - /* Build a list of clients to which this new trigger applies. */ - cds_lfht_for_each_entry(state->client_socket_ht, &iter, client, - client_socket_ht_node) { - if (!trigger_applies_to_client(trigger, client)) { - continue; + if (is_trigger_action_notify(trigger)) { + client_list = notification_client_list_create(trigger); + if (!client_list) { + ret = -1; + goto error_free_ht_element; } - client_list_element = zmalloc(sizeof(*client_list_element)); - if (!client_list_element) { - ret = -1; - goto error_free_client_list; + /* Build a list of clients to which this new trigger applies. 
*/ + cds_lfht_for_each_entry (state->client_socket_ht, &iter, client, + client_socket_ht_node) { + if (!trigger_applies_to_client(trigger, client)) { + continue; + } + + client_list_element = + zmalloc(sizeof(*client_list_element)); + if (!client_list_element) { + ret = -1; + goto error_put_client_list; + } + + CDS_INIT_LIST_HEAD(&client_list_element->node); + client_list_element->client = client; + cds_list_add(&client_list_element->node, + &client_list->list); } - CDS_INIT_LIST_HEAD(&client_list_element->node); - client_list_element->client = client; - cds_list_add(&client_list_element->node, &client_list->list); - } - cds_lfht_add(state->notification_trigger_clients_ht, - lttng_condition_hash(condition), - &client_list->notification_trigger_ht_node); + /* + * Client list ownership transferred to the + * notification_trigger_clients_ht. + */ + publish_notification_client_list(state, client_list); + } switch (get_condition_binding_object(condition)) { case LTTNG_OBJECT_TYPE_SESSION: /* Add the trigger to the list if it matches a known session. */ ret = bind_trigger_to_matching_session(trigger, state); if (ret) { - goto error_free_client_list; + goto error_put_client_list; } break; case LTTNG_OBJECT_TYPE_CHANNEL: @@ -2089,21 +2217,24 @@ int handle_notification_thread_command_register_trigger( */ ret = bind_trigger_to_matching_channels(trigger, state); if (ret) { - goto error_free_client_list; + goto error_put_client_list; } break; case LTTNG_OBJECT_TYPE_NONE: break; default: - ERR("[notification-thread] Unknown object type on which to bind a newly registered trigger was encountered"); + ERR("Unknown object type on which to bind a newly registered trigger was encountered"); ret = -1; - goto error_free_client_list; + goto error_put_client_list; } /* - * Since there is nothing preventing clients from subscribing to a - * condition before the corresponding trigger is registered, we have - * to evaluate this new condition right away. + * The new trigger's condition must be evaluated against the current + * state. + * + * In the case of `notify` action, nothing preventing clients from + * subscribing to a condition before the corresponding trigger is + * registered, we have to evaluate this new condition right away. * * At some point, we were waiting for the next "evaluation" (e.g. on * reception of a channel sample) to evaluate this new condition, but @@ -2126,51 +2257,87 @@ int handle_notification_thread_command_register_trigger( * that the evaluations remain the same (true for samples n-1 and n) and * the client will never know that the condition has been met. */ - cds_list_for_each_entry_safe(client_list_element, tmp, - &client_list->list, node) { - ret = evaluate_condition_for_client(trigger, condition, - client_list_element->client, state); - if (ret) { - goto error_free_client_list; - } + switch (get_condition_binding_object(condition)) { + case LTTNG_OBJECT_TYPE_SESSION: + ret = evaluate_session_condition_for_client(condition, state, + &evaluation, &object_creds.uid, + &object_creds.gid); + break; + case LTTNG_OBJECT_TYPE_CHANNEL: + ret = evaluate_channel_condition_for_client(condition, state, + &evaluation, &object_creds.uid, + &object_creds.gid); + break; + case LTTNG_OBJECT_TYPE_NONE: + ret = 0; + goto error_put_client_list; + case LTTNG_OBJECT_TYPE_UNKNOWN: + default: + ret = -1; + goto error_put_client_list; } - /* - * Client list ownership transferred to the - * notification_trigger_clients_ht. - */ - client_list = NULL; + if (ret) { + /* Fatal error. 
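+		 *
+		 * A non-zero return from the evaluation helpers denotes an
+		 * internal error; a condition that is simply not met is
+		 * reported with a zero return and a NULL evaluation, which is
+		 * handled below.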
*/ + goto error_put_client_list; + } - *cmd_result = LTTNG_OK; -error_free_client_list: - if (client_list) { - cds_list_for_each_entry_safe(client_list_element, tmp, - &client_list->list, node) { - free(client_list_element); - } - free(client_list); + DBG("Newly registered trigger's condition evaluated to %s", + evaluation ? "true" : "false"); + if (!evaluation) { + /* Evaluation yielded nothing. Normal exit. */ + ret = 0; + goto error_put_client_list; } + + /* + * Ownership of `evaluation` transferred to the action executor + * no matter the result. + */ + executor_status = action_executor_enqueue(state->executor, trigger, + evaluation, &object_creds, client_list); + evaluation = NULL; + switch (executor_status) { + case ACTION_EXECUTOR_STATUS_OK: + break; + case ACTION_EXECUTOR_STATUS_ERROR: + case ACTION_EXECUTOR_STATUS_INVALID: + /* + * TODO Add trigger identification (name/id) when + * it is added to the API. + */ + ERR("Fatal error occurred while enqueuing action associated to newly registered trigger"); + ret = -1; + goto error_put_client_list; + case ACTION_EXECUTOR_STATUS_OVERFLOW: + /* + * TODO Add trigger identification (name/id) when + * it is added to the API. + * + * Not a fatal error. + */ + WARN("No space left when enqueuing action associated to newly registered trigger"); + ret = 0; + goto error_put_client_list; + default: + abort(); + } + + *cmd_result = LTTNG_OK; + +error_put_client_list: + notification_client_list_put(client_list); + error_free_ht_element: free(trigger_ht_element); error: if (free_trigger) { - struct lttng_action *action = lttng_trigger_get_action(trigger); - - lttng_condition_destroy(condition); - lttng_action_destroy(action); lttng_trigger_destroy(trigger); } rcu_read_unlock(); return ret; } -static -void free_notification_client_list_rcu(struct rcu_head *node) -{ - free(caa_container_of(node, struct notification_client_list, - rcu_node)); -} - static void free_lttng_trigger_ht_element_rcu(struct rcu_head *node) { @@ -2188,11 +2355,9 @@ int handle_notification_thread_command_unregister_trigger( struct cds_lfht_node *triggers_ht_node; struct lttng_channel_trigger_list *trigger_list; struct notification_client_list *client_list; - struct notification_client_list_element *client_list_element, *tmp; struct lttng_trigger_ht_element *trigger_ht_element = NULL; struct lttng_condition *condition = lttng_trigger_get_condition( trigger); - struct lttng_action *action; enum lttng_error_code cmd_reply; rcu_read_lock(); @@ -2241,23 +2406,17 @@ int handle_notification_thread_command_unregister_trigger( client_list = get_client_list_from_condition(state, condition); assert(client_list); - cds_list_for_each_entry_safe(client_list_element, tmp, - &client_list->list, node) { - free(client_list_element); - } - cds_lfht_del(state->notification_trigger_clients_ht, - &client_list->notification_trigger_ht_node); - call_rcu(&client_list->rcu_node, free_notification_client_list_rcu); + /* Put new reference and the hashtable's reference. */ + notification_client_list_put(client_list); + notification_client_list_put(client_list); + client_list = NULL; /* Remove trigger from triggers_ht. 
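 *
 * The element is unlinked from the hash table immediately, but its
 * memory is only reclaimed once an RCU grace period has elapsed (see
 * the call_rcu() below).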
*/ trigger_ht_element = caa_container_of(triggers_ht_node, struct lttng_trigger_ht_element, node); cds_lfht_del(state->triggers_ht, triggers_ht_node); - condition = lttng_trigger_get_condition(trigger_ht_element->trigger); - lttng_condition_destroy(condition); - action = lttng_trigger_get_action(trigger_ht_element->trigger); - lttng_action_destroy(action); + /* Release the ownership of the trigger. */ lttng_trigger_destroy(trigger_ht_element->trigger); call_rcu(&trigger_ht_element->rcu_node, free_lttng_trigger_ht_element_rcu); end: @@ -2287,6 +2446,8 @@ int handle_notification_thread_command( pthread_mutex_lock(&handle->cmd_queue.lock); cmd = cds_list_first_entry(&handle->cmd_queue.list, struct notification_thread_command, cmd_list_node); + cds_list_del(&cmd->cmd_list_node); + pthread_mutex_unlock(&handle->cmd_queue.lock); switch (cmd->type) { case NOTIFICATION_COMMAND_TYPE_REGISTER_TRIGGER: DBG("[notification-thread] Received register trigger command"); @@ -2340,6 +2501,34 @@ int handle_notification_thread_command( cmd->reply_code = LTTNG_OK; ret = 1; goto end; + case NOTIFICATION_COMMAND_TYPE_CLIENT_COMMUNICATION_UPDATE: + { + const enum client_transmission_status client_status = + cmd->parameters.client_communication_update + .status; + const notification_client_id client_id = + cmd->parameters.client_communication_update.id; + struct notification_client *client; + + rcu_read_lock(); + client = get_client_from_id(client_id, state); + + if (!client) { + /* + * Client error was probably already picked-up by the + * notification thread or it has disconnected + * gracefully while this command was queued. + */ + DBG("Failed to find notification client to update communication status, client id = %" PRIu64, + client_id); + ret = 0; + } else { + ret = client_handle_transmission_status( + client, client_status, state); + } + rcu_read_unlock(); + break; + } default: ERR("[notification-thread] Unknown internal command received"); goto error_unlock; @@ -2349,26 +2538,22 @@ int handle_notification_thread_command( goto error_unlock; } end: - cds_list_del(&cmd->cmd_list_node); - lttng_waiter_wake_up(&cmd->reply_waiter); - pthread_mutex_unlock(&handle->cmd_queue.lock); + if (cmd->is_async) { + free(cmd); + cmd = NULL; + } else { + lttng_waiter_wake_up(&cmd->reply_waiter); + } return ret; error_unlock: /* Wake-up and return a fatal error to the calling thread. */ lttng_waiter_wake_up(&cmd->reply_waiter); - pthread_mutex_unlock(&handle->cmd_queue.lock); cmd->reply_code = LTTNG_ERR_FATAL; error: /* Indicate a fatal error to the caller. 
*/ return -1; } -static -unsigned long hash_client_socket(int socket) -{ - return hash_key_ulong((void *) (unsigned long) socket, lttng_ht_seed); -} - static int socket_set_non_blocking(int socket) { @@ -2397,9 +2582,8 @@ int client_reset_inbound_state(struct notification_client *client) { int ret; - ret = lttng_dynamic_buffer_set_size( - &client->communication.inbound.buffer, 0); - assert(!ret); + + lttng_payload_clear(&client->communication.inbound.payload); client->communication.inbound.bytes_to_receive = sizeof(struct lttng_notification_channel_message); @@ -2408,8 +2592,9 @@ int client_reset_inbound_state(struct notification_client *client) LTTNG_SOCK_SET_UID_CRED(&client->communication.inbound.creds, -1); LTTNG_SOCK_SET_GID_CRED(&client->communication.inbound.creds, -1); ret = lttng_dynamic_buffer_set_size( - &client->communication.inbound.buffer, + &client->communication.inbound.payload.buffer, client->communication.inbound.bytes_to_receive); + return ret; } @@ -2427,14 +2612,18 @@ int handle_notification_thread_client_connect( ret = -1; goto error; } + + pthread_mutex_init(&client->lock, NULL); + client->id = state->next_notification_client_id++; CDS_INIT_LIST_HEAD(&client->condition_list); - lttng_dynamic_buffer_init(&client->communication.inbound.buffer); - lttng_dynamic_buffer_init(&client->communication.outbound.buffer); + lttng_payload_init(&client->communication.inbound.payload); + lttng_payload_init(&client->communication.outbound.payload); client->communication.inbound.expect_creds = true; + ret = client_reset_inbound_state(client); if (ret) { ERR("[notification-thread] Failed to reset client communication's inbound state"); - ret = 0; + ret = 0; goto error; } @@ -2475,17 +2664,60 @@ int handle_notification_thread_client_connect( cds_lfht_add(state->client_socket_ht, hash_client_socket(client->socket), &client->client_socket_ht_node); + cds_lfht_add(state->client_id_ht, + hash_client_id(client->id), + &client->client_id_ht_node); rcu_read_unlock(); return ret; + error: notification_client_destroy(client, state); return ret; } -int handle_notification_thread_client_disconnect( - int client_socket, +/* + * RCU read-lock must be held by the caller. + * Client lock must _not_ be held by the caller. + */ +static +int notification_thread_client_disconnect( + struct notification_client *client, struct notification_thread_state *state) +{ + int ret; + struct lttng_condition_list_element *condition_list_element, *tmp; + + /* Acquire the client lock to disable its communication atomically. */ + pthread_mutex_lock(&client->lock); + client->communication.active = false; + cds_lfht_del(state->client_socket_ht, &client->client_socket_ht_node); + cds_lfht_del(state->client_id_ht, &client->client_id_ht_node); + pthread_mutex_unlock(&client->lock); + + ret = lttng_poll_del(&state->events, client->socket); + if (ret) { + ERR("[notification-thread] Failed to remove client socket %d from poll set", + client->socket); + } + + /* Release all conditions to which the client was subscribed. */ + cds_list_for_each_entry_safe(condition_list_element, tmp, + &client->condition_list, node) { + (void) notification_thread_client_unsubscribe(client, + condition_list_element->condition, state, NULL); + } + + /* + * Client no longer accessible to other threads (through the + * client lists). 
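+	 *
+	 * It was removed from both lookup hash tables and its communication
+	 * marked inactive under the client lock above, so other threads can
+	 * no longer reach it through a lookup.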
+ */ + notification_client_destroy(client, state); + return ret; +} + +int handle_notification_thread_client_disconnect( + int client_socket, struct notification_thread_state *state) { int ret = 0; struct notification_client *client; @@ -2502,13 +2734,7 @@ int handle_notification_thread_client_disconnect( goto end; } - ret = lttng_poll_del(&state->events, client_socket); - if (ret) { - ERR("[notification-thread] Failed to remove client socket from poll set"); - } - cds_lfht_del(state->client_socket_ht, - &client->client_socket_ht_node); - notification_client_destroy(client, state); + ret = notification_thread_client_disconnect(client, state); end: rcu_read_unlock(); return ret; @@ -2524,11 +2750,11 @@ int handle_notification_thread_client_disconnect_all( rcu_read_lock(); DBG("[notification-thread] Closing all client connections"); cds_lfht_for_each_entry(state->client_socket_ht, &iter, client, - client_socket_ht_node) { + client_socket_ht_node) { int ret; - ret = handle_notification_thread_client_disconnect( - client->socket, state); + ret = notification_thread_client_disconnect( + client, state); if (ret) { error_encoutered = true; } @@ -2544,6 +2770,7 @@ int handle_notification_thread_trigger_unregister_all( struct cds_lfht_iter iter; struct lttng_trigger_ht_element *trigger_ht_element; + rcu_read_lock(); cds_lfht_for_each_entry(state->triggers_ht, &iter, trigger_ht_element, node) { int ret = handle_notification_thread_command_unregister_trigger( @@ -2552,81 +2779,190 @@ int handle_notification_thread_trigger_unregister_all( error_occurred = true; } } + rcu_read_unlock(); return error_occurred ? -1 : 0; } static -int client_flush_outgoing_queue(struct notification_client *client, +int client_handle_transmission_status( + struct notification_client *client, + enum client_transmission_status transmission_status, struct notification_thread_state *state) +{ + int ret = 0; + + switch (transmission_status) { + case CLIENT_TRANSMISSION_STATUS_COMPLETE: + ret = lttng_poll_mod(&state->events, client->socket, + CLIENT_POLL_MASK_IN); + if (ret) { + goto end; + } + + break; + case CLIENT_TRANSMISSION_STATUS_QUEUED: + /* + * We want to be notified whenever there is buffer space + * available to send the rest of the payload. + */ + ret = lttng_poll_mod(&state->events, client->socket, + CLIENT_POLL_MASK_IN_OUT); + if (ret) { + goto end; + } + break; + case CLIENT_TRANSMISSION_STATUS_FAIL: + ret = notification_thread_client_disconnect(client, state); + if (ret) { + goto end; + } + break; + case CLIENT_TRANSMISSION_STATUS_ERROR: + ret = -1; + goto end; + default: + abort(); + } +end: + return ret; +} + +/* Client lock must be acquired by caller. */ +static +enum client_transmission_status client_flush_outgoing_queue( + struct notification_client *client) { ssize_t ret; size_t to_send_count; + enum client_transmission_status status; + struct lttng_payload_view pv = lttng_payload_view_from_payload( + &client->communication.outbound.payload, 0, -1); + const int fds_to_send_count = + lttng_payload_view_get_fd_handle_count(&pv); + + ASSERT_LOCKED(client->lock); - assert(client->communication.outbound.buffer.size != 0); - to_send_count = client->communication.outbound.buffer.size; + if (!client->communication.active) { + status = CLIENT_TRANSMISSION_STATUS_FAIL; + goto end; + } + + if (pv.buffer.size == 0) { + /* + * If both data and fds are equal to zero, we are in an invalid + * state. + */ + assert(fds_to_send_count != 0); + goto send_fds; + } + + /* Send data. 
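+	 *
+	 * The client socket is non-blocking: on a short write, the unsent
+	 * tail is moved to the head of the outbound buffer and the
+	 * transmission is reported as QUEUED so that the caller can wait for
+	 * the socket to become writable again (see
+	 * client_handle_transmission_status()) before flushing the
+	 * remainder.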
*/ + to_send_count = pv.buffer.size; DBG("[notification-thread] Flushing client (socket fd = %i) outgoing queue", client->socket); ret = lttcomm_send_unix_sock_non_block(client->socket, - client->communication.outbound.buffer.data, + pv.buffer.data, to_send_count); - if ((ret < 0 && (errno == EAGAIN || errno == EWOULDBLOCK)) || - (ret > 0 && ret < to_send_count)) { + if ((ret >= 0 && ret < to_send_count)) { DBG("[notification-thread] Client (socket fd = %i) outgoing queue could not be completely flushed", client->socket); to_send_count -= max(ret, 0); - memcpy(client->communication.outbound.buffer.data, - client->communication.outbound.buffer.data + - client->communication.outbound.buffer.size - to_send_count, + memcpy(client->communication.outbound.payload.buffer.data, + pv.buffer.data + + pv.buffer.size - to_send_count, to_send_count); ret = lttng_dynamic_buffer_set_size( - &client->communication.outbound.buffer, + &client->communication.outbound.payload.buffer, to_send_count); if (ret) { goto error; } - /* - * We want to be notified whenever there is buffer space - * available to send the rest of the payload. - */ - ret = lttng_poll_mod(&state->events, client->socket, - CLIENT_POLL_MASK_IN_OUT); - if (ret) { - goto error; - } + status = CLIENT_TRANSMISSION_STATUS_QUEUED; + goto end; } else if (ret < 0) { - /* Generic error, disconnect the client. */ - ERR("[notification-thread] Failed to send flush outgoing queue, disconnecting client (socket fd = %i)", + /* Generic error, disable the client's communication. */ + ERR("[notification-thread] Failed to flush outgoing queue, disconnecting client (socket fd = %i)", client->socket); - ret = handle_notification_thread_client_disconnect( - client->socket, state); - if (ret) { - goto error; - } + client->communication.active = false; + status = CLIENT_TRANSMISSION_STATUS_FAIL; + goto end; } else { - /* No error and flushed the queue completely. */ + /* + * No error and flushed the queue completely. + * + * The payload buffer size is used later to + * check if there is notifications queued. So albeit that the + * direct caller knows that the transmission is complete, we + * need to set the buffer size to zero. + */ ret = lttng_dynamic_buffer_set_size( - &client->communication.outbound.buffer, 0); - if (ret) { - goto error; - } - ret = lttng_poll_mod(&state->events, client->socket, - CLIENT_POLL_MASK_IN); + &client->communication.outbound.payload.buffer, 0); if (ret) { goto error; } + } + +send_fds: + /* No fds to send, transmission is complete. */ + if (fds_to_send_count == 0) { + status = CLIENT_TRANSMISSION_STATUS_COMPLETE; + goto end; + } + ret = lttcomm_send_payload_view_fds_unix_sock_non_block( + client->socket, &pv); + if (ret < 0) { + /* Generic error, disable the client's communication. */ + ERR("[notification-thread] Failed to flush outgoing fds queue, disconnecting client (socket fd = %i)", + client->socket); + client->communication.active = false; + status = CLIENT_TRANSMISSION_STATUS_FAIL; + goto end; + } else if (ret == 0) { + /* Nothing could be sent. */ + status = CLIENT_TRANSMISSION_STATUS_QUEUED; + } else { + /* Fd passing is an all or nothing kind of thing. */ + status = CLIENT_TRANSMISSION_STATUS_COMPLETE; + /* + * The payload _fd_array count is used later to + * check if there is notifications queued. So although the + * direct caller knows that the transmission is complete, we + * need to clear the _fd_array for the queuing check. 
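+		 *
+		 * client_has_outbound_data_left() considers both the buffer
+		 * size and the fd handle count when deciding whether data is
+		 * still queued.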
+ */ + lttng_dynamic_pointer_array_clear( + &client->communication.outbound.payload + ._fd_handles); + } + +end: + if (status == CLIENT_TRANSMISSION_STATUS_COMPLETE) { client->communication.outbound.queued_command_reply = false; client->communication.outbound.dropped_notification = false; + lttng_payload_clear(&client->communication.outbound.payload); } - return 0; + return status; error: - return -1; + return CLIENT_TRANSMISSION_STATUS_ERROR; +} + +static +bool client_has_outbound_data_left( + const struct notification_client *client) +{ + const struct lttng_payload_view pv = lttng_payload_view_from_payload( + &client->communication.outbound.payload, 0, -1); + const bool has_data = pv.buffer.size != 0; + const bool has_fds = lttng_payload_view_get_fd_handle_count(&pv); + + return has_data || has_fds; } +/* Client lock must _not_ be held by the caller. */ static int client_send_command_reply(struct notification_client *client, struct notification_thread_state *state, @@ -2641,39 +2977,234 @@ int client_send_command_reply(struct notification_client *client, .size = sizeof(reply), }; char buffer[sizeof(msg) + sizeof(reply)]; - - if (client->communication.outbound.queued_command_reply) { - /* Protocol error. */ - goto error; - } + enum client_transmission_status transmission_status; memcpy(buffer, &msg, sizeof(msg)); memcpy(buffer + sizeof(msg), &reply, sizeof(reply)); DBG("[notification-thread] Send command reply (%i)", (int) status); + pthread_mutex_lock(&client->lock); + if (client->communication.outbound.queued_command_reply) { + /* Protocol error. */ + goto error_unlock; + } + /* Enqueue buffer to outgoing queue and flush it. */ ret = lttng_dynamic_buffer_append( - &client->communication.outbound.buffer, + &client->communication.outbound.payload.buffer, buffer, sizeof(buffer)); if (ret) { - goto error; + goto error_unlock; } - ret = client_flush_outgoing_queue(client, state); - if (ret) { - goto error; - } + transmission_status = client_flush_outgoing_queue(client); - if (client->communication.outbound.buffer.size != 0) { + if (client_has_outbound_data_left(client)) { /* Queue could not be emptied. */ client->communication.outbound.queued_command_reply = true; } + pthread_mutex_unlock(&client->lock); + ret = client_handle_transmission_status( + client, transmission_status, state); + if (ret) { + goto error; + } + return 0; +error_unlock: + pthread_mutex_unlock(&client->lock); error: return -1; } +static +int client_handle_message_unknown(struct notification_client *client, + struct notification_thread_state *state) +{ + int ret; + /* + * Receiving message header. The function will be called again + * once the rest of the message as been received and can be + * interpreted. 
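+	 *
+	 * The fixed-size lttng_notification_channel_message header announces
+	 * the message type, the payload size and the number of file
+	 * descriptors to expect; the inbound reception state is updated
+	 * accordingly below.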
+ */ + const struct lttng_notification_channel_message *msg; + + assert(sizeof(*msg) == client->communication.inbound.payload.buffer.size); + msg = (const struct lttng_notification_channel_message *) + client->communication.inbound.payload.buffer.data; + + if (msg->size == 0 || + msg->size > DEFAULT_MAX_NOTIFICATION_CLIENT_MESSAGE_PAYLOAD_SIZE) { + ERR("[notification-thread] Invalid notification channel message: length = %u", + msg->size); + ret = -1; + goto end; + } + + switch (msg->type) { + case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE: + case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNSUBSCRIBE: + case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE: + break; + default: + ret = -1; + ERR("[notification-thread] Invalid notification channel message: unexpected message type"); + goto end; + } + + client->communication.inbound.bytes_to_receive = msg->size; + client->communication.inbound.fds_to_receive = msg->fds; + client->communication.inbound.msg_type = + (enum lttng_notification_channel_message_type) msg->type; + ret = lttng_dynamic_buffer_set_size( + &client->communication.inbound.payload.buffer, msg->size); + + /* msg is not valid anymore due to lttng_dynamic_buffer_set_size. */ + msg = NULL; +end: + return ret; +} + +static +int client_handle_message_handshake(struct notification_client *client, + struct notification_thread_state *state) +{ + int ret; + struct lttng_notification_channel_command_handshake *handshake_client; + const struct lttng_notification_channel_command_handshake handshake_reply = { + .major = LTTNG_NOTIFICATION_CHANNEL_VERSION_MAJOR, + .minor = LTTNG_NOTIFICATION_CHANNEL_VERSION_MINOR, + }; + const struct lttng_notification_channel_message msg_header = { + .type = LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE, + .size = sizeof(handshake_reply), + }; + enum lttng_notification_channel_status status = + LTTNG_NOTIFICATION_CHANNEL_STATUS_OK; + char send_buffer[sizeof(msg_header) + sizeof(handshake_reply)]; + + memcpy(send_buffer, &msg_header, sizeof(msg_header)); + memcpy(send_buffer + sizeof(msg_header), &handshake_reply, + sizeof(handshake_reply)); + + handshake_client = + (struct lttng_notification_channel_command_handshake *) + client->communication.inbound.payload.buffer + .data; + client->major = handshake_client->major; + client->minor = handshake_client->minor; + if (!client->communication.inbound.creds_received) { + ERR("[notification-thread] No credentials received from client"); + ret = -1; + goto end; + } + + client->uid = LTTNG_SOCK_GET_UID_CRED( + &client->communication.inbound.creds); + client->gid = LTTNG_SOCK_GET_GID_CRED( + &client->communication.inbound.creds); + DBG("[notification-thread] Received handshake from client (uid = %u, gid = %u) with version %i.%i", + client->uid, client->gid, (int) client->major, + (int) client->minor); + + if (handshake_client->major != + LTTNG_NOTIFICATION_CHANNEL_VERSION_MAJOR) { + status = LTTNG_NOTIFICATION_CHANNEL_STATUS_UNSUPPORTED_VERSION; + } + + pthread_mutex_lock(&client->lock); + /* Outgoing queue will be flushed when the command reply is sent. */ + ret = lttng_dynamic_buffer_append( + &client->communication.outbound.payload.buffer, send_buffer, + sizeof(send_buffer)); + if (ret) { + ERR("[notification-thread] Failed to send protocol version to notification channel client"); + goto end_unlock; + } + + client->validated = true; + client->communication.active = true; + pthread_mutex_unlock(&client->lock); + + /* Set reception state to receive the next message header. 
*/ + ret = client_reset_inbound_state(client); + if (ret) { + ERR("[notification-thread] Failed to reset client communication's inbound state"); + goto end; + } + + /* Flushes the outgoing queue. */ + ret = client_send_command_reply(client, state, status); + if (ret) { + ERR("[notification-thread] Failed to send reply to notification channel client"); + goto end; + } + + goto end; +end_unlock: + pthread_mutex_unlock(&client->lock); +end: + return ret; +} + +static +int client_handle_message_subscription( + struct notification_client *client, + enum lttng_notification_channel_message_type msg_type, + struct notification_thread_state *state) +{ + int ret; + struct lttng_condition *condition; + enum lttng_notification_channel_status status = + LTTNG_NOTIFICATION_CHANNEL_STATUS_OK; + struct lttng_payload_view condition_view = + lttng_payload_view_from_payload( + &client->communication.inbound.payload, + 0, -1); + size_t expected_condition_size; + + /* + * No need to lock client to sample the inbound state as the only + * other thread accessing clients (action executor) only uses the + * outbound state. + */ + expected_condition_size = client->communication.inbound.payload.buffer.size; + ret = lttng_condition_create_from_payload(&condition_view, &condition); + if (ret != expected_condition_size) { + ERR("[notification-thread] Malformed condition received from client"); + goto end; + } + + if (msg_type == LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE) { + ret = notification_thread_client_subscribe( + client, condition, state, &status); + } else { + ret = notification_thread_client_unsubscribe( + client, condition, state, &status); + } + + if (ret) { + goto end; + } + + /* Set reception state to receive the next message header. */ + ret = client_reset_inbound_state(client); + if (ret) { + ERR("[notification-thread] Failed to reset client communication's inbound state"); + goto end; + } + + ret = client_send_command_reply(client, state, status); + if (ret) { + ERR("[notification-thread] Failed to send reply to notification channel client"); + goto end; + } + +end: + return ret; +} + static int client_dispatch_message(struct notification_client *client, struct notification_thread_state *state) @@ -2693,158 +3224,19 @@ int client_dispatch_message(struct notification_client *client, switch (client->communication.inbound.msg_type) { case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNKNOWN: { - /* - * Receiving message header. The function will be called again - * once the rest of the message as been received and can be - * interpreted. 
- */ - const struct lttng_notification_channel_message *msg; - - assert(sizeof(*msg) == - client->communication.inbound.buffer.size); - msg = (const struct lttng_notification_channel_message *) - client->communication.inbound.buffer.data; - - if (msg->size == 0 || msg->size > DEFAULT_MAX_NOTIFICATION_CLIENT_MESSAGE_PAYLOAD_SIZE) { - ERR("[notification-thread] Invalid notification channel message: length = %u", msg->size); - ret = -1; - goto end; - } - - switch (msg->type) { - case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE: - case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNSUBSCRIBE: - case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE: - break; - default: - ret = -1; - ERR("[notification-thread] Invalid notification channel message: unexpected message type"); - goto end; - } - - client->communication.inbound.bytes_to_receive = msg->size; - client->communication.inbound.msg_type = - (enum lttng_notification_channel_message_type) msg->type; - ret = lttng_dynamic_buffer_set_size( - &client->communication.inbound.buffer, msg->size); - if (ret) { - goto end; - } + ret = client_handle_message_unknown(client, state); break; } case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE: { - struct lttng_notification_channel_command_handshake *handshake_client; - struct lttng_notification_channel_command_handshake handshake_reply = { - .major = LTTNG_NOTIFICATION_CHANNEL_VERSION_MAJOR, - .minor = LTTNG_NOTIFICATION_CHANNEL_VERSION_MINOR, - }; - struct lttng_notification_channel_message msg_header = { - .type = LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_HANDSHAKE, - .size = sizeof(handshake_reply), - }; - enum lttng_notification_channel_status status = - LTTNG_NOTIFICATION_CHANNEL_STATUS_OK; - char send_buffer[sizeof(msg_header) + sizeof(handshake_reply)]; - - memcpy(send_buffer, &msg_header, sizeof(msg_header)); - memcpy(send_buffer + sizeof(msg_header), &handshake_reply, - sizeof(handshake_reply)); - - handshake_client = - (struct lttng_notification_channel_command_handshake *) - client->communication.inbound.buffer.data; - client->major = handshake_client->major; - client->minor = handshake_client->minor; - if (!client->communication.inbound.creds_received) { - ERR("[notification-thread] No credentials received from client"); - ret = -1; - goto end; - } - - client->uid = LTTNG_SOCK_GET_UID_CRED( - &client->communication.inbound.creds); - client->gid = LTTNG_SOCK_GET_GID_CRED( - &client->communication.inbound.creds); - DBG("[notification-thread] Received handshake from client (uid = %u, gid = %u) with version %i.%i", - client->uid, client->gid, (int) client->major, - (int) client->minor); - - if (handshake_client->major != LTTNG_NOTIFICATION_CHANNEL_VERSION_MAJOR) { - status = LTTNG_NOTIFICATION_CHANNEL_STATUS_UNSUPPORTED_VERSION; - } - - ret = lttng_dynamic_buffer_append(&client->communication.outbound.buffer, - send_buffer, sizeof(send_buffer)); - if (ret) { - ERR("[notification-thread] Failed to send protocol version to notification channel client"); - goto end; - } - - ret = client_flush_outgoing_queue(client, state); - if (ret) { - goto end; - } - - ret = client_send_command_reply(client, state, status); - if (ret) { - ERR("[notification-thread] Failed to send reply to notification channel client"); - goto end; - } - - /* Set reception state to receive the next message header. 
*/ - ret = client_reset_inbound_state(client); - if (ret) { - ERR("[notification-thread] Failed to reset client communication's inbound state"); - goto end; - } - client->validated = true; + ret = client_handle_message_handshake(client, state); break; } case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE: case LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_UNSUBSCRIBE: { - struct lttng_condition *condition; - enum lttng_notification_channel_status status = - LTTNG_NOTIFICATION_CHANNEL_STATUS_OK; - const struct lttng_buffer_view condition_view = - lttng_buffer_view_from_dynamic_buffer( - &client->communication.inbound.buffer, - 0, -1); - size_t expected_condition_size = - client->communication.inbound.buffer.size; - - ret = lttng_condition_create_from_buffer(&condition_view, - &condition); - if (ret != expected_condition_size) { - ERR("[notification-thread] Malformed condition received from client"); - goto end; - } - - if (client->communication.inbound.msg_type == - LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_SUBSCRIBE) { - ret = notification_thread_client_subscribe(client, - condition, state, &status); - } else { - ret = notification_thread_client_unsubscribe(client, - condition, state, &status); - } - if (ret) { - goto end; - } - - ret = client_send_command_reply(client, state, status); - if (ret) { - ERR("[notification-thread] Failed to send reply to notification channel client"); - goto end; - } - - /* Set reception state to receive the next message header. */ - ret = client_reset_inbound_state(client); - if (ret) { - ERR("[notification-thread] Failed to reset client communication's inbound state"); - goto end; - } + ret = client_handle_message_subscription(client, + client->communication.inbound.msg_type, state); break; } default: @@ -2863,6 +3255,7 @@ int handle_notification_thread_client_in( ssize_t recv_ret; size_t offset; + rcu_read_lock(); client = get_client_from_socket(socket, state); if (!client) { /* Internal error, abort. */ @@ -2870,11 +3263,11 @@ int handle_notification_thread_client_in( goto end; } - offset = client->communication.inbound.buffer.size - + offset = client->communication.inbound.payload.buffer.size - client->communication.inbound.bytes_to_receive; if (client->communication.inbound.expect_creds) { recv_ret = lttcomm_recv_creds_unix_sock(socket, - client->communication.inbound.buffer.data + offset, + client->communication.inbound.payload.buffer.data + offset, client->communication.inbound.bytes_to_receive, &client->communication.inbound.creds); if (recv_ret > 0) { @@ -2883,31 +3276,69 @@ int handle_notification_thread_client_in( } } else { recv_ret = lttcomm_recv_unix_sock_non_block(socket, - client->communication.inbound.buffer.data + offset, + client->communication.inbound.payload.buffer.data + offset, client->communication.inbound.bytes_to_receive); } - if (recv_ret < 0) { + if (recv_ret >= 0) { + client->communication.inbound.bytes_to_receive -= recv_ret; + } else { goto error_disconnect_client; } - client->communication.inbound.bytes_to_receive -= recv_ret; - if (client->communication.inbound.bytes_to_receive == 0) { - ret = client_dispatch_message(client, state); - if (ret) { + if (client->communication.inbound.bytes_to_receive != 0) { + /* Message incomplete wait for more data. */ + ret = 0; + goto end; + } + + assert(client->communication.inbound.bytes_to_receive == 0); + + /* Receive fds. 
 */
+ if (client->communication.inbound.fds_to_receive != 0) {
+ ret = lttcomm_recv_payload_fds_unix_sock_non_block(
+ client->socket,
+ client->communication.inbound.fds_to_receive,
+ &client->communication.inbound.payload);
+ if (ret > 0) {
 /*
- * Only returns an error if this client must be
- * disconnected.
+ * Fds received. Non-blocking fd passing is all
+ * or nothing.
 */
+ ssize_t expected_size;
+
+ expected_size = sizeof(int) *
+ client->communication.inbound
+ .fds_to_receive;
+ assert(ret == expected_size);
+ client->communication.inbound.fds_to_receive = 0;
+ } else if (ret == 0) {
+ /* Received nothing. */
+ ret = 0;
+ goto end;
+ } else {
 goto error_disconnect_client;
 }
- } else {
- goto end;
 }
+
+ /* At this point the message is complete. */
+ assert(client->communication.inbound.bytes_to_receive == 0 &&
+ client->communication.inbound.fds_to_receive == 0);
+ ret = client_dispatch_message(client, state);
+ if (ret) {
+ /*
+ * Only returns an error if this client must be
+ * disconnected.
+ */
+ goto error_disconnect_client;
+ }
+
 end:
+ rcu_read_unlock();
 return ret;
+
 error_disconnect_client:
- ret = handle_notification_thread_client_disconnect(socket, state);
- return ret;
+ ret = notification_thread_client_disconnect(client, state);
+ goto end;
 }
 
 /* Client ready to receive outgoing data. */
@@ -2916,7 +3347,9 @@ int handle_notification_thread_client_out(
 {
 int ret;
 struct notification_client *client;
+ enum client_transmission_status transmission_status;
 
+ rcu_read_lock();
 client = get_client_from_socket(socket, state);
 if (!client) {
 /* Internal error, abort. */
@@ -2924,11 +3357,17 @@ int handle_notification_thread_client_out(
 goto end;
 }
 
- ret = client_flush_outgoing_queue(client, state);
+ pthread_mutex_lock(&client->lock);
+ transmission_status = client_flush_outgoing_queue(client);
+ pthread_mutex_unlock(&client->lock);
+
+ ret = client_handle_transmission_status(
+ client, transmission_status, state);
 if (ret) {
 goto end;
 }
 end:
+ rcu_read_unlock();
 return ret;
 }
 
@@ -2955,7 +3394,7 @@ bool evaluate_buffer_usage_condition(const struct lttng_condition *condition,
 * forego this double-multiplication or it could be performed
 * as fixed-point math.
 *
- * Note that caching should accomodate the case where the
+ * Note that caching should accommodate the case where the
 * condition applies to multiple channels (i.e. don't assume
 * that all channels matching my_chann* have the same size...)
 */
@@ -3095,48 +3534,125 @@ end:
 }
 
 static
-int client_enqueue_dropped_notification(struct notification_client *client,
- struct notification_thread_state *state)
+int client_notification_overflow(struct notification_client *client)
 {
- int ret;
- struct lttng_notification_channel_message msg = {
+ int ret = 0;
+ const struct lttng_notification_channel_message msg = {
 .type = (int8_t) LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_NOTIFICATION_DROPPED,
- .size = 0,
 };
 
+ ASSERT_LOCKED(client->lock);
+
+ DBG("Dropping notification addressed to client (socket fd = %i)",
+ client->socket);
+ if (client->communication.outbound.dropped_notification) {
+ /*
+ * The client already has a "notification dropped" message
+ * in its outgoing queue. Nothing to do since all
+ * of those messages are coalesced.
+ */ + goto end; + } + + client->communication.outbound.dropped_notification = true; ret = lttng_dynamic_buffer_append( - &client->communication.outbound.buffer, &msg, + &client->communication.outbound.payload.buffer, &msg, sizeof(msg)); + if (ret) { + PERROR("Failed to enqueue \"dropped notification\" message in client's (socket fd = %i) outgoing queue", + client->socket); + } +end: return ret; } +static int client_handle_transmission_status_wrapper( + struct notification_client *client, + enum client_transmission_status status, + void *user_data) +{ + return client_handle_transmission_status(client, status, + (struct notification_thread_state *) user_data); +} + static int send_evaluation_to_clients(const struct lttng_trigger *trigger, const struct lttng_evaluation *evaluation, struct notification_client_list* client_list, struct notification_thread_state *state, - uid_t channel_uid, gid_t channel_gid) + uid_t object_uid, gid_t object_gid) +{ + return notification_client_list_send_evaluation(client_list, + lttng_trigger_get_const_condition(trigger), evaluation, + lttng_trigger_get_credentials(trigger), + &(struct lttng_credentials){ + .uid = object_uid, .gid = object_gid}, + client_handle_transmission_status_wrapper, state); +} + +/* + * Permission checks relative to notification channel clients are performed + * here. Notice how object, client, and trigger credentials are involved in + * this check. + * + * The `object` credentials are the credentials associated with the "subject" + * of a condition. For instance, a `rotation completed` condition applies + * to a session. When that condition is met, it will produce an evaluation + * against a session. Hence, in this case, the `object` credentials are the + * credentials of the "subject" session. + * + * The `trigger` credentials are the credentials of the user that registered the + * trigger. + * + * The `client` credentials are the credentials of the user that created a given + * notification channel. + * + * In terms of visibility, it is expected that non-privilieged users can only + * register triggers against "their" objects (their own sessions and + * applications they are allowed to interact with). They can then open a + * notification channel and subscribe to notifications associated with those + * triggers. + * + * As for privilieged users, they can register triggers against the objects of + * other users. They can then subscribe to the notifications associated to their + * triggers. Privilieged users _can't_ subscribe to the notifications of + * triggers owned by other users; they must create their own triggers. + * + * This is more a concern of usability than security. It would be difficult for + * a root user reliably subscribe to a specific set of conditions without + * interference from external users (those could, for instance, unregister + * their triggers). 
+ */ +LTTNG_HIDDEN +int notification_client_list_send_evaluation( + struct notification_client_list *client_list, + const struct lttng_condition *condition, + const struct lttng_evaluation *evaluation, + const struct lttng_credentials *trigger_creds, + const struct lttng_credentials *source_object_creds, + report_client_transmission_result_cb client_report, + void *user_data) { int ret = 0; - struct lttng_dynamic_buffer msg_buffer; + struct lttng_payload msg_payload; struct notification_client_list_element *client_list_element, *tmp; const struct lttng_notification notification = { - .condition = (struct lttng_condition *) lttng_trigger_get_const_condition(trigger), + .condition = (struct lttng_condition *) condition, .evaluation = (struct lttng_evaluation *) evaluation, }; struct lttng_notification_channel_message msg_header = { .type = (int8_t) LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_NOTIFICATION, }; - lttng_dynamic_buffer_init(&msg_buffer); + lttng_payload_init(&msg_payload); - ret = lttng_dynamic_buffer_append(&msg_buffer, &msg_header, + ret = lttng_dynamic_buffer_append(&msg_payload.buffer, &msg_header, sizeof(msg_header)); if (ret) { goto end; } - ret = lttng_notification_serialize(¬ification, &msg_buffer); + ret = lttng_notification_serialize(¬ification, &msg_payload); if (ret) { ERR("[notification-thread] Failed to serialize notification"); ret = -1; @@ -3144,24 +3660,60 @@ int send_evaluation_to_clients(const struct lttng_trigger *trigger, } /* Update payload size. */ - ((struct lttng_notification_channel_message * ) msg_buffer.data)->size = - (uint32_t) (msg_buffer.size - sizeof(msg_header)); + ((struct lttng_notification_channel_message *) msg_payload.buffer.data) + ->size = (uint32_t)( + msg_payload.buffer.size - sizeof(msg_header)); + + /* Update the payload number of fds. */ + { + const struct lttng_payload_view pv = lttng_payload_view_from_payload( + &msg_payload, 0, -1); + + ((struct lttng_notification_channel_message *) + msg_payload.buffer.data)->fds = (uint32_t) + lttng_payload_view_get_fd_handle_count(&pv); + } + pthread_mutex_lock(&client_list->lock); cds_list_for_each_entry_safe(client_list_element, tmp, &client_list->list, node) { + enum client_transmission_status transmission_status; struct notification_client *client = client_list_element->client; - if (client->uid != channel_uid && client->gid != channel_gid && - client->uid != 0) { - /* Client is not allowed to monitor this channel. */ - DBG("[notification-thread] Skipping client at it does not have the permission to receive notification for this channel"); - continue; + ret = 0; + pthread_mutex_lock(&client->lock); + if (!client->communication.active) { + /* + * Skip inactive client (protocol error or + * disconnecting). + */ + DBG("Skipping client at it is marked as inactive"); + goto skip_client; + } + + if (source_object_creds) { + if (client->uid != source_object_creds->uid && + client->gid != source_object_creds->gid && + client->uid != 0) { + /* + * Client is not allowed to monitor this + * object. 
+ */ + DBG("[notification-thread] Skipping client at it does not have the object permission to receive notification for this trigger"); + goto skip_client; + } + } + + if (client->uid != trigger_creds->uid && client->gid != trigger_creds->gid) { + DBG("[notification-thread] Skipping client at it does not have the permission to receive notification for this trigger"); + goto skip_client; } DBG("[notification-thread] Sending notification to client (fd = %i, %zu bytes)", - client->socket, msg_buffer.size); - if (client->communication.outbound.buffer.size) { + client->socket, msg_payload.buffer.size); + + if (client_has_outbound_data_left(client)) { /* * Outgoing data is already buffered for this client; * drop the notification and enqueue a "dropped @@ -3169,34 +3721,42 @@ int send_evaluation_to_clients(const struct lttng_trigger *trigger, * notification since the socket spilled-over to the * queue. */ - DBG("[notification-thread] Dropping notification addressed to client (socket fd = %i)", - client->socket); - if (!client->communication.outbound.dropped_notification) { - client->communication.outbound.dropped_notification = true; - ret = client_enqueue_dropped_notification( - client, state); - if (ret) { - goto end; - } + ret = client_notification_overflow(client); + if (ret) { + /* Fatal error. */ + goto skip_client; } - continue; } - ret = lttng_dynamic_buffer_append_buffer( - &client->communication.outbound.buffer, - &msg_buffer); + ret = lttng_payload_copy(&msg_payload, &client->communication.outbound.payload); if (ret) { - goto end; + /* Fatal error. */ + goto skip_client; } - ret = client_flush_outgoing_queue(client, state); + transmission_status = client_flush_outgoing_queue(client); + pthread_mutex_unlock(&client->lock); + ret = client_report(client, transmission_status, user_data); if (ret) { - goto end; + /* Fatal error. */ + goto end_unlock_list; + } + + continue; + +skip_client: + pthread_mutex_unlock(&client->lock); + if (ret) { + /* Fatal error. */ + goto end_unlock_list; } } ret = 0; + +end_unlock_list: + pthread_mutex_unlock(&client_list->lock); end: - lttng_dynamic_buffer_reset(&msg_buffer); + lttng_payload_reset(&msg_payload); return ret; } @@ -3214,6 +3774,7 @@ int handle_notification_thread_channel_sample( bool previous_sample_available = false; struct channel_state_sample previous_sample, latest_sample; uint64_t previous_session_consumed_total, latest_session_consumed_total; + struct lttng_credentials channel_creds; /* * The monitoring pipe only holds messages smaller than PIPE_BUF, @@ -3332,38 +3893,31 @@ int handle_notification_thread_channel_sample( goto end_unlock; } + channel_creds = (typeof(channel_creds)) { + .uid = channel_info->session_info->uid, + .gid = channel_info->session_info->gid, + }; + trigger_list = caa_container_of(node, struct lttng_channel_trigger_list, channel_triggers_ht_node); cds_list_for_each_entry(trigger_list_element, &trigger_list->list, - node) { + node) { const struct lttng_condition *condition; - const struct lttng_action *action; - const struct lttng_trigger *trigger; - struct notification_client_list *client_list; + struct lttng_trigger *trigger; + struct notification_client_list *client_list = NULL; struct lttng_evaluation *evaluation = NULL; + enum action_executor_status executor_status; + ret = 0; trigger = trigger_list_element->trigger; condition = lttng_trigger_get_const_condition(trigger); assert(condition); - action = lttng_trigger_get_const_action(trigger); - - /* Notify actions are the only type currently supported. 
*/ - assert(lttng_action_get_type_const(action) == - LTTNG_ACTION_TYPE_NOTIFY); /* * Check if any client is subscribed to the result of this * evaluation. */ client_list = get_client_list_from_condition(state, condition); - assert(client_list); - if (cds_list_empty(&client_list->list)) { - /* - * No clients interested in the evaluation's result, - * skip it. - */ - continue; - } ret = evaluate_buffer_condition(condition, &evaluation, state, previous_sample_available ? &previous_sample : NULL, @@ -3372,21 +3926,51 @@ int handle_notification_thread_channel_sample( latest_session_consumed_total, channel_info); if (caa_unlikely(ret)) { - goto end_unlock; + goto put_list; } if (caa_likely(!evaluation)) { - continue; + goto put_list; } - /* Dispatch evaluation result to all clients. */ - ret = send_evaluation_to_clients(trigger_list_element->trigger, - evaluation, client_list, state, - channel_info->session_info->uid, - channel_info->session_info->gid); - lttng_evaluation_destroy(evaluation); + /* + * Ownership of `evaluation` transferred to the action executor + * no matter the result. + */ + executor_status = action_executor_enqueue(state->executor, + trigger, evaluation, &channel_creds, + client_list); + evaluation = NULL; + switch (executor_status) { + case ACTION_EXECUTOR_STATUS_OK: + break; + case ACTION_EXECUTOR_STATUS_ERROR: + case ACTION_EXECUTOR_STATUS_INVALID: + /* + * TODO Add trigger identification (name/id) when + * it is added to the API. + */ + ERR("Fatal error occurred while enqueuing action associated with buffer-condition trigger"); + ret = -1; + goto put_list; + case ACTION_EXECUTOR_STATUS_OVERFLOW: + /* + * TODO Add trigger identification (name/id) when + * it is added to the API. + * + * Not a fatal error. + */ + WARN("No space left when enqueuing action associated with buffer-condition trigger"); + ret = 0; + goto put_list; + default: + abort(); + } + +put_list: + notification_client_list_put(client_list); if (caa_unlikely(ret)) { - goto end_unlock; + break; } } end_unlock: