*
*/
+#include "lttng/action/action.h"
+#include "lttng/trigger/trigger-internal.h"
#define _LGPL_SOURCE
#include <urcu.h>
#include <urcu/rculfhash.h>
#include <lttng/condition/buffer-usage-internal.h>
#include <lttng/condition/session-consumed-size-internal.h>
#include <lttng/condition/session-rotation-internal.h>
+#include <lttng/condition/event-rule-internal.h>
+#include <lttng/domain-internal.h>
#include <lttng/notification/channel-internal.h>
+#include <lttng/trigger/trigger-internal.h>
+#include <lttng/event-rule/event-rule-internal.h>
#include <time.h>
#include <unistd.h>
#include <inttypes.h>
#include <fcntl.h>
+#include "condition-internal.h"
#include "notification-thread.h"
#include "notification-thread-events.h"
#include "notification-thread-commands.h"
struct lttng_trigger_list_element {
/* No ownership of the trigger object is assumed. */
- const struct lttng_trigger *trigger;
+ struct lttng_trigger *trigger;
struct cds_list_head node;
};
struct lttng_trigger_ht_element {
struct lttng_trigger *trigger;
struct cds_lfht_node node;
+ struct cds_lfht_node node_by_name_uid;
/* call_rcu delayed reclaim. */
struct rcu_head rcu_node;
};
struct cds_list_head node;
};
-struct notification_client_list_element {
- struct notification_client *client;
- struct cds_list_head node;
-};
-
/*
- * Thread safety of notification_client and notification_client_list.
- *
- * The notification thread (main thread) and the action executor
- * interact through client lists. Hence, when the action executor
- * thread looks-up the list of clients subscribed to a given
- * condition, it will acquire a reference to the list and lock it
- * while attempting to communicate with the various clients.
- *
- * It is not necessary to reference-count clients as they are guaranteed
- * to be 'alive' if they are present in a list and that list is locked. Indeed,
- * removing references to the client from those subscription lists is part of
- * the work performed on destruction of a client.
- *
- * No provision for other access scenarios are taken into account;
- * this is the bare minimum to make these accesses safe and the
- * notification thread's state is _not_ "thread-safe" in any general
- * sense.
+ * Facilities to carry the different notification types in the action processing
+ * code path.
*/
-struct notification_client_list {
- pthread_mutex_t lock;
- struct urcu_ref ref;
- const struct lttng_trigger *trigger;
- struct cds_list_head list;
- /* Weak reference to container. */
- struct cds_lfht *notification_trigger_clients_ht;
- struct cds_lfht_node notification_trigger_clients_ht_node;
- /* call_rcu delayed reclaim. */
- struct rcu_head rcu_node;
-};
-
-struct notification_client {
- /* Nests within the notification_client_list lock. */
- pthread_mutex_t lock;
- notification_client_id id;
- int socket;
- /* Client protocol version. */
- uint8_t major, minor;
- uid_t uid;
- gid_t gid;
- /*
- * Indicates if the credentials and versions of the client have been
- * checked.
- */
- bool validated;
- /*
- * Conditions to which the client's notification channel is subscribed.
- * List of struct lttng_condition_list_node. The condition member is
- * owned by the client.
- */
- struct cds_list_head condition_list;
- struct cds_lfht_node client_socket_ht_node;
- struct cds_lfht_node client_id_ht_node;
- struct {
- /*
- * If a client's communication is inactive, it means that a
- * fatal error has occurred (could be either a protocol error or
- * the socket API returned a fatal error). No further
- * communication should be attempted; the client is queued for
- * clean-up.
- */
- bool active;
- struct {
- /*
- * During the reception of a message, the reception
- * buffers' "size" is set to contain the current
- * message's complete payload.
- */
- struct lttng_dynamic_buffer buffer;
- /* Bytes left to receive for the current message. */
- size_t bytes_to_receive;
- /* Type of the message being received. */
- enum lttng_notification_channel_message_type msg_type;
- /*
- * Indicates whether or not credentials are expected
- * from the client.
- */
- bool expect_creds;
- /*
- * Indicates whether or not credentials were received
- * from the client.
- */
- bool creds_received;
- /* Only used during credentials reception. */
- lttng_sock_cred creds;
- } inbound;
- struct {
- /*
- * Indicates whether or not a notification addressed to
- * this client was dropped because a command reply was
- * already buffered.
- *
- * A notification is dropped whenever the buffer is not
- * empty.
- */
- bool dropped_notification;
- /*
- * Indicates whether or not a command reply is already
- * buffered. In this case, it means that the client is
- * not consuming command replies before emitting a new
- * one. This could be caused by a protocol error or a
- * misbehaving/malicious client.
- */
- bool queued_command_reply;
- struct lttng_dynamic_buffer buffer;
- } outbound;
- } communication;
- /* call_rcu delayed reclaim. */
- struct rcu_head rcu_node;
+struct lttng_event_notifier_notification {
+ union {
+ struct lttng_ust_event_notifier_notification *ust;
+ struct lttng_kernel_event_notifier_notification *kernel;
+ } notification;
+ uint64_t token;
+ enum lttng_domain_type type;
};
struct channel_state_sample {
struct lttng_session_trigger_list *list);
static
int lttng_session_trigger_list_add(struct lttng_session_trigger_list *list,
- const struct lttng_trigger *trigger);
+ struct lttng_trigger *trigger);
+static
+int client_handle_transmission_status(
+ struct notification_client *client,
+ enum client_transmission_status transmission_status,
+ struct notification_thread_state *state);
+
+static
+void free_lttng_trigger_ht_element_rcu(struct rcu_head *node);
static
int match_client_socket(struct cds_lfht_node *node, const void *key)
}
static
-int match_condition(struct cds_lfht_node *node, const void *key)
+int match_trigger(struct cds_lfht_node *node, const void *key)
{
- struct lttng_condition *condition_key = (struct lttng_condition *) key;
- struct lttng_trigger_ht_element *trigger;
- struct lttng_condition *condition;
+ struct lttng_trigger *trigger_key = (struct lttng_trigger *) key;
+ struct lttng_trigger_ht_element *trigger_ht_element;
- trigger = caa_container_of(node, struct lttng_trigger_ht_element,
+ trigger_ht_element = caa_container_of(node, struct lttng_trigger_ht_element,
node);
- condition = lttng_trigger_get_condition(trigger->trigger);
- assert(condition);
- return !!lttng_condition_is_equal(condition_key, condition);
+ return !!lttng_trigger_is_equal(trigger_key, trigger_ht_element->trigger);
+}
+
+static
+int match_trigger_token(struct cds_lfht_node *node, const void *key)
+{
+ const uint64_t *_key = key;
+ struct notification_trigger_tokens_ht_element *element;
+
+ element = caa_container_of(node,
+ struct notification_trigger_tokens_ht_element, node);
+ return *_key == element->token;
}
static
}
static
-unsigned long lttng_condition_buffer_usage_hash(
- const struct lttng_condition *_condition)
+const char *notification_command_type_str(
+ enum notification_thread_command_type type)
{
- unsigned long hash;
- unsigned long condition_type;
- struct lttng_condition_buffer_usage *condition;
-
- condition = container_of(_condition,
- struct lttng_condition_buffer_usage, parent);
-
- condition_type = (unsigned long) condition->parent.type;
- hash = hash_key_ulong((void *) condition_type, lttng_ht_seed);
- if (condition->session_name) {
- hash ^= hash_key_str(condition->session_name, lttng_ht_seed);
- }
- if (condition->channel_name) {
- hash ^= hash_key_str(condition->channel_name, lttng_ht_seed);
- }
- if (condition->domain.set) {
- hash ^= hash_key_ulong(
- (void *) condition->domain.type,
- lttng_ht_seed);
- }
- if (condition->threshold_ratio.set) {
- uint64_t val;
-
- val = condition->threshold_ratio.value * (double) UINT32_MAX;
- hash ^= hash_key_u64(&val, lttng_ht_seed);
- } else if (condition->threshold_bytes.set) {
- uint64_t val;
-
- val = condition->threshold_bytes.value;
- hash ^= hash_key_u64(&val, lttng_ht_seed);
+ switch (type) {
+ case NOTIFICATION_COMMAND_TYPE_REGISTER_TRIGGER:
+ return "REGISTER_TRIGGER";
+ case NOTIFICATION_COMMAND_TYPE_UNREGISTER_TRIGGER:
+ return "UNREGISTER_TRIGGER";
+ case NOTIFICATION_COMMAND_TYPE_ADD_CHANNEL:
+ return "ADD_CHANNEL";
+ case NOTIFICATION_COMMAND_TYPE_REMOVE_CHANNEL:
+ return "REMOVE_CHANNEL";
+ case NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING:
+ return "SESSION_ROTATION_ONGOING";
+ case NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_COMPLETED:
+ return "SESSION_ROTATION_COMPLETED";
+ case NOTIFICATION_COMMAND_TYPE_ADD_TRACER_EVENT_SOURCE:
+ return "ADD_TRACER_EVENT_SOURCE";
+ case NOTIFICATION_COMMAND_TYPE_REMOVE_TRACER_EVENT_SOURCE:
+ return "REMOVE_TRACER_EVENT_SOURCE";
+ case NOTIFICATION_COMMAND_TYPE_LIST_TRIGGERS:
+ return "LIST_TRIGGERS";
+ case NOTIFICATION_COMMAND_TYPE_QUIT:
+ return "QUIT";
+ case NOTIFICATION_COMMAND_TYPE_CLIENT_COMMUNICATION_UPDATE:
+ return "CLIENT_COMMUNICATION_UPDATE";
+ default:
+ abort();
}
- return hash;
}
+/*
+ * Match trigger based on name and credentials only.
+ * Name duplication is NOT allowed for the same uid.
+ */
static
-unsigned long lttng_condition_session_consumed_size_hash(
- const struct lttng_condition *_condition)
+int match_trigger_by_name_uid(struct cds_lfht_node *node,
+ const void *key)
{
- unsigned long hash;
- unsigned long condition_type =
- (unsigned long) LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE;
- struct lttng_condition_session_consumed_size *condition;
- uint64_t val;
-
- condition = container_of(_condition,
- struct lttng_condition_session_consumed_size, parent);
-
- hash = hash_key_ulong((void *) condition_type, lttng_ht_seed);
- if (condition->session_name) {
- hash ^= hash_key_str(condition->session_name, lttng_ht_seed);
+ bool match = false;
+ const char *name;
+ const char *key_name;
+ enum lttng_trigger_status status;
+ const struct lttng_credentials *key_creds;
+ const struct lttng_credentials *node_creds;
+ const struct lttng_trigger *trigger_key =
+ (const struct lttng_trigger *) key;
+ const struct lttng_trigger_ht_element *trigger_ht_element =
+ caa_container_of(node,
+ struct lttng_trigger_ht_element,
+ node_by_name_uid);
+
+ status = lttng_trigger_get_name(trigger_ht_element->trigger, &name);
+ assert(status == LTTNG_TRIGGER_STATUS_OK);
+
+ status = lttng_trigger_get_name(trigger_key, &key_name);
+ assert(status == LTTNG_TRIGGER_STATUS_OK);
+
+ /* Compare the names. */
+ if (strcmp(name, key_name) != 0) {
+ goto end;
}
- val = condition->consumed_threshold_bytes.value;
- hash ^= hash_key_u64(&val, lttng_ht_seed);
- return hash;
-}
-static
-unsigned long lttng_condition_session_rotation_hash(
- const struct lttng_condition *_condition)
-{
- unsigned long hash, condition_type;
- struct lttng_condition_session_rotation *condition;
+ /* Compare the owners' UIDs. */
+ key_creds = lttng_trigger_get_credentials(trigger_key);
+ node_creds = lttng_trigger_get_credentials(trigger_ht_element->trigger);
- condition = container_of(_condition,
- struct lttng_condition_session_rotation, parent);
- condition_type = (unsigned long) condition->parent.type;
- hash = hash_key_ulong((void *) condition_type, lttng_ht_seed);
- assert(condition->session_name);
- hash ^= hash_key_str(condition->session_name, lttng_ht_seed);
- return hash;
+ match = lttng_credentials_is_equal_uid(key_creds, node_creds);
+
+end:
+ return match;
}
/*
- * The lttng_condition hashing code is kept in this file (rather than
- * condition.c) since it makes use of GPLv2 code (hashtable utils), which we
- * don't want to link in liblttng-ctl.
+ * Hash trigger based on name and credentials only.
*/
static
-unsigned long lttng_condition_hash(const struct lttng_condition *condition)
+unsigned long hash_trigger_by_name_uid(const struct lttng_trigger *trigger)
{
- switch (condition->type) {
- case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
- case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
- return lttng_condition_buffer_usage_hash(condition);
- case LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE:
- return lttng_condition_session_consumed_size_hash(condition);
- case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
- case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
- return lttng_condition_session_rotation_hash(condition);
- default:
- ERR("[notification-thread] Unexpected condition type caught");
- abort();
+ unsigned long hash = 0;
+ const struct lttng_credentials *trigger_creds;
+ const char *trigger_name;
+ enum lttng_trigger_status status;
+
+ status = lttng_trigger_get_name(trigger, &trigger_name);
+ if (status == LTTNG_TRIGGER_STATUS_OK) {
+ hash = hash_key_str(trigger_name, lttng_ht_seed);
}
+
+ trigger_creds = lttng_trigger_get_credentials(trigger);
+ hash ^= hash_key_ulong((void *) (unsigned long) LTTNG_OPTIONAL_GET(trigger_creds->uid),
+ lttng_ht_seed);
+
+ return hash;
}
static
case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
return LTTNG_OBJECT_TYPE_SESSION;
+ case LTTNG_CONDITION_TYPE_EVENT_RULE_HIT:
+ return LTTNG_OBJECT_TYPE_NONE;
default:
return LTTNG_OBJECT_TYPE_UNKNOWN;
}
return NULL;
}
-static
+LTTNG_HIDDEN
bool notification_client_list_get(struct notification_client_list *list)
{
return urcu_ref_get_unless_zero(&list->ref);
lttng_trigger_get_const_condition(list->trigger);
assert(!list->notification_trigger_clients_ht);
+ notification_client_list_get(list);
list->notification_trigger_clients_ht =
state->notification_trigger_clients_ht;
rcu_read_unlock();
}
-static
+LTTNG_HIDDEN
void notification_client_list_put(struct notification_client_list *list)
{
if (!list) {
&evaluation, &object_uid, &object_gid);
break;
case LTTNG_OBJECT_TYPE_NONE:
+ DBG("[notification-thread] Newly subscribed-to condition not bound to object, nothing to evaluate");
ret = 0;
goto end;
case LTTNG_OBJECT_TYPE_UNKNOWN:
client->socket = -1;
}
client->communication.active = false;
- lttng_dynamic_buffer_reset(&client->communication.inbound.buffer);
- lttng_dynamic_buffer_reset(&client->communication.outbound.buffer);
+ lttng_payload_reset(&client->communication.inbound.payload);
+ lttng_payload_reset(&client->communication.outbound.payload);
pthread_mutex_destroy(&client->lock);
call_rcu(&client->rcu_node, free_notification_client_rcu);
}
return client;
}
+/*
+ * Call with rcu_read_lock held (and hold for the lifetime of the returned
+ * client pointer).
+ */
+static
+struct notification_client *get_client_from_id(notification_client_id id,
+ struct notification_thread_state *state)
+{
+ struct cds_lfht_iter iter;
+ struct cds_lfht_node *node;
+ struct notification_client *client = NULL;
+
+ cds_lfht_lookup(state->client_id_ht,
+ hash_client_id(id),
+ match_client_id,
+ &id,
+ &iter);
+ node = cds_lfht_iter_get_node(&iter);
+ if (!node) {
+ goto end;
+ }
+
+ client = caa_container_of(node, struct notification_client,
+ client_id_ht_node);
+end:
+ return client;
+}
+
static
bool buffer_usage_condition_applies_to_channel(
const struct lttng_condition *condition,
static
int lttng_session_trigger_list_add(struct lttng_session_trigger_list *list,
- const struct lttng_trigger *trigger)
+ struct lttng_trigger *trigger)
{
int ret = 0;
struct lttng_trigger_list_element *new_element =
struct lttng_session_trigger_list *trigger_list;
struct lttng_trigger_list_element *trigger_list_element;
struct session_info *session_info;
+ const struct lttng_credentials session_creds = {
+ .uid = LTTNG_OPTIONAL_INIT_VALUE(session_uid),
+ .gid = LTTNG_OPTIONAL_INIT_VALUE(session_gid),
+ };
rcu_read_lock();
cds_list_for_each_entry(trigger_list_element, &trigger_list->list,
node) {
const struct lttng_condition *condition;
- const struct lttng_action *action;
- const struct lttng_trigger *trigger;
+ struct lttng_trigger *trigger;
struct notification_client_list *client_list;
struct lttng_evaluation *evaluation = NULL;
enum lttng_condition_type condition_type;
- bool client_list_is_empty;
+ enum action_executor_status executor_status;
trigger = trigger_list_element->trigger;
condition = lttng_trigger_get_const_condition(trigger);
continue;
}
- action = lttng_trigger_get_const_action(trigger);
-
- /* Notify actions are the only type currently supported. */
- assert(lttng_action_get_type_const(action) ==
- LTTNG_ACTION_TYPE_NOTIFY);
-
client_list = get_client_list_from_condition(state, condition);
- assert(client_list);
-
- pthread_mutex_lock(&client_list->lock);
- client_list_is_empty = cds_list_empty(&client_list->list);
- pthread_mutex_unlock(&client_list->lock);
- if (client_list_is_empty) {
- /*
- * No clients interested in the evaluation's result,
- * skip it.
- */
- continue;
- }
-
if (cmd_type == NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING) {
evaluation = lttng_evaluation_session_rotation_ongoing_create(
trace_archive_chunk_id);
goto put_list;
}
- /* Dispatch evaluation result to all clients. */
- ret = send_evaluation_to_clients(trigger_list_element->trigger,
- evaluation, client_list, state,
- session_info->uid,
- session_info->gid);
- lttng_evaluation_destroy(evaluation);
+ /*
+ * Ownership of `evaluation` transferred to the action executor
+ * no matter the result.
+ */
+ executor_status = action_executor_enqueue(state->executor,
+ trigger, evaluation, &session_creds,
+ client_list);
+ evaluation = NULL;
+ switch (executor_status) {
+ case ACTION_EXECUTOR_STATUS_OK:
+ break;
+ case ACTION_EXECUTOR_STATUS_ERROR:
+ case ACTION_EXECUTOR_STATUS_INVALID:
+ /*
+ * TODO Add trigger identification (name/id) when
+ * it is added to the API.
+ */
+ ERR("Fatal error occurred while enqueuing action associated with session rotation trigger");
+ ret = -1;
+ goto put_list;
+ case ACTION_EXECUTOR_STATUS_OVERFLOW:
+ /*
+ * TODO Add trigger identification (name/id) when
+ * it is added to the API.
+ *
+ * Not a fatal error.
+ */
+ WARN("No space left when enqueuing action associated with session rotation trigger");
+ ret = 0;
+ goto put_list;
+ default:
+ abort();
+ }
+
put_list:
notification_client_list_put(client_list);
if (caa_unlikely(ret)) {
}
static
-int condition_is_supported(struct lttng_condition *condition)
+int handle_notification_thread_command_add_tracer_event_source(
+ struct notification_thread_state *state,
+ int tracer_event_source_fd,
+ enum lttng_domain_type domain_type,
+ enum lttng_error_code *_cmd_result)
+{
+ int ret = 0;
+ enum lttng_error_code cmd_result = LTTNG_OK;
+ struct notification_event_tracer_event_source_element *element = NULL;
+
+ element = zmalloc(sizeof(*element));
+ if (!element) {
+ cmd_result = LTTNG_ERR_NOMEM;
+ ret = -1;
+ goto end;
+ }
+
+ element->fd = tracer_event_source_fd;
+ element->domain = domain_type;
+
+ cds_list_add(&element->node, &state->tracer_event_sources_list);
+
+ DBG3("[notification-thread] Adding tracer event source fd to poll set: tracer_event_source_fd = %d, domain = '%s'",
+ tracer_event_source_fd,
+ lttng_domain_type_str(domain_type));
+
+ /* Adding the read side pipe to the event poll. */
+ ret = lttng_poll_add(&state->events, tracer_event_source_fd, LPOLLIN | LPOLLERR);
+ if (ret < 0) {
+ ERR("[notification-thread] Failed to add tracer event source to poll set: tracer_event_source_fd = %d, domain = '%s'",
+ tracer_event_source_fd,
+ lttng_domain_type_str(element->domain));
+ cds_list_del(&element->node);
+ free(element);
+ goto end;
+ }
+
+ element->is_fd_in_poll_set = true;
+
+end:
+ *_cmd_result = cmd_result;
+ return ret;
+}
+
+static
+int handle_notification_thread_command_remove_tracer_event_source(
+ struct notification_thread_state *state,
+ int tracer_event_source_fd,
+ enum lttng_error_code *_cmd_result)
+{
+ int ret = 0;
+ bool found = false;
+ enum lttng_error_code cmd_result = LTTNG_OK;
+ struct notification_event_tracer_event_source_element *source_element = NULL, *tmp;
+
+ cds_list_for_each_entry_safe(source_element, tmp,
+ &state->tracer_event_sources_list, node) {
+ if (source_element->fd != tracer_event_source_fd) {
+ continue;
+ }
+
+ DBG("[notification-thread] Removed tracer event source from poll set: tracer_event_source_fd = %d, domain = '%s'",
+ tracer_event_source_fd,
+ lttng_domain_type_str(source_element->domain));
+ cds_list_del(&source_element->node);
+ found = true;
+ break;
+ }
+
+ if (!found) {
+ /*
+ * This is temporarily allowed since the poll activity set is
+	 * not properly cleaned-up for the moment. This is addressed in
+ * an upcoming fix.
+ */
+ source_element = NULL;
+ goto end;
+ }
+
+ if (!source_element->is_fd_in_poll_set) {
+ /* Skip the poll set removal. */
+ goto end;
+ }
+
+ DBG3("[notification-thread] Removing tracer event source from poll set: tracer_event_source_fd = %d, domain = '%s'",
+ tracer_event_source_fd,
+ lttng_domain_type_str(source_element->domain));
+
+ /* Removing the fd from the event poll set. */
+ ret = lttng_poll_del(&state->events, tracer_event_source_fd);
+ if (ret < 0) {
+ ERR("[notification-thread] Failed to remove tracer event source from poll set: tracer_event_source_fd = %d, domain = '%s'",
+ tracer_event_source_fd,
+ lttng_domain_type_str(source_element->domain));
+ cmd_result = LTTNG_ERR_FATAL;
+ goto end;
+ }
+
+ source_element->is_fd_in_poll_set = false;
+
+end:
+ free(source_element);
+ *_cmd_result = cmd_result;
+ return ret;
+}
+
+int handle_notification_thread_remove_tracer_event_source_no_result(
+ struct notification_thread_state *state,
+ int tracer_event_source_fd)
{
int ret;
+ enum lttng_error_code cmd_result;
+
+ ret = handle_notification_thread_command_remove_tracer_event_source(
+ state, tracer_event_source_fd, &cmd_result);
+ (void) cmd_result;
+ return ret;
+}
+
+static int handle_notification_thread_command_list_triggers(
+ struct notification_thread_handle *handle,
+ struct notification_thread_state *state,
+ uid_t client_uid,
+ struct lttng_triggers **triggers,
+ enum lttng_error_code *_cmd_result)
+{
+ int ret = 0;
+ enum lttng_error_code cmd_result = LTTNG_OK;
+ struct cds_lfht_iter iter;
+ struct lttng_trigger_ht_element *trigger_ht_element;
+ struct lttng_triggers *local_triggers = NULL;
+ const struct lttng_credentials *creds;
+
+ rcu_read_lock();
+
+ local_triggers = lttng_triggers_create();
+ if (!local_triggers) {
+ /* Not a fatal error. */
+ cmd_result = LTTNG_ERR_NOMEM;
+ goto end;
+ }
+
+ cds_lfht_for_each_entry(state->triggers_ht, &iter,
+ trigger_ht_element, node) {
+ /*
+ * Only return the triggers to which the client has access.
+ * The root user has visibility over all triggers.
+ */
+ creds = lttng_trigger_get_credentials(trigger_ht_element->trigger);
+ if (client_uid != lttng_credentials_get_uid(creds) && client_uid != 0) {
+ continue;
+ }
+
+ ret = lttng_triggers_add(local_triggers,
+ trigger_ht_element->trigger);
+ if (ret < 0) {
+ /* Not a fatal error. */
+ ret = 0;
+ cmd_result = LTTNG_ERR_NOMEM;
+ goto end;
+ }
+ }
+
+ /* Transferring ownership to the caller. */
+ *triggers = local_triggers;
+ local_triggers = NULL;
+
+end:
+ rcu_read_unlock();
+ lttng_triggers_destroy(local_triggers);
+ *_cmd_result = cmd_result;
+ return ret;
+}
+
+static
+bool condition_is_supported(struct lttng_condition *condition)
+{
+ bool is_supported;
switch (lttng_condition_get_type(condition)) {
case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
{
+ int ret;
enum lttng_domain_type domain;
ret = lttng_condition_buffer_usage_get_domain_type(condition,
&domain);
- if (ret) {
- ret = -1;
- goto end;
- }
+ assert(ret == 0);
if (domain != LTTNG_DOMAIN_KERNEL) {
- ret = 1;
+ is_supported = true;
goto end;
}
* Older kernel tracers don't expose the API to monitor their
* buffers. Therefore, we reject triggers that require that
* mechanism to be available to be evaluated.
+ *
+ * Assume unsupported on error.
+ */
+ is_supported = kernel_supports_ring_buffer_snapshot_sample_positions() == 1;
+ break;
+ }
+ case LTTNG_CONDITION_TYPE_EVENT_RULE_HIT:
+ {
+ const struct lttng_event_rule *event_rule;
+ enum lttng_domain_type domain;
+ const enum lttng_condition_status status =
+ lttng_condition_event_rule_get_rule(
+ condition, &event_rule);
+
+ assert(status == LTTNG_CONDITION_STATUS_OK);
+
+ domain = lttng_event_rule_get_domain_type(event_rule);
+ if (domain != LTTNG_DOMAIN_KERNEL) {
+ is_supported = true;
+ goto end;
+ }
+
+ /*
+ * Older kernel tracers can't emit notification. Therefore, we
+ * reject triggers that require that mechanism to be available
+ * to be evaluated.
+ *
+ * Assume unsupported on error.
*/
- ret = kernel_supports_ring_buffer_snapshot_sample_positions();
+ is_supported = kernel_supports_event_notifiers() == 1;
break;
}
default:
- ret = 1;
+ is_supported = true;
}
end:
- return ret;
+ return is_supported;
}
/* Must be called with RCU read lock held. */
static
-int bind_trigger_to_matching_session(const struct lttng_trigger *trigger,
+int bind_trigger_to_matching_session(struct lttng_trigger *trigger,
struct notification_thread_state *state)
{
int ret = 0;
/* Must be called with RCU read lock held. */
static
-int bind_trigger_to_matching_channels(const struct lttng_trigger *trigger,
+int bind_trigger_to_matching_channels(struct lttng_trigger *trigger,
struct notification_thread_state *state)
{
int ret = 0;
return ret;
}
-/*
- * FIXME A client's credentials are not checked when registering a trigger, nor
- * are they stored alongside with the trigger.
- *
- * The effects of this are benign since:
- * - The client will succeed in registering the trigger, as it is valid,
- * - The trigger will, internally, be bound to the channel/session,
+static
+bool is_trigger_action_notify(const struct lttng_trigger *trigger)
+{
+ bool is_notify = false;
+ unsigned int i, count;
+ enum lttng_action_status action_status;
+ const struct lttng_action *action =
+ lttng_trigger_get_const_action(trigger);
+ enum lttng_action_type action_type;
+
+ assert(action);
+ action_type = lttng_action_get_type(action);
+ if (action_type == LTTNG_ACTION_TYPE_NOTIFY) {
+ is_notify = true;
+ goto end;
+ } else if (action_type != LTTNG_ACTION_TYPE_GROUP) {
+ goto end;
+ }
+
+ action_status = lttng_action_group_get_count(action, &count);
+ assert(action_status == LTTNG_ACTION_STATUS_OK);
+
+ for (i = 0; i < count; i++) {
+ const struct lttng_action *inner_action =
+ lttng_action_group_get_at_index(
+ action, i);
+
+ action_type = lttng_action_get_type(inner_action);
+ if (action_type == LTTNG_ACTION_TYPE_NOTIFY) {
+ is_notify = true;
+ goto end;
+ }
+ }
+
+end:
+ return is_notify;
+}
+
+static bool trigger_name_taken(struct notification_thread_state *state,
+ const struct lttng_trigger *trigger)
+{
+ struct cds_lfht_iter iter;
+
+ /*
+	 * No duplicate is allowed in the triggers_by_name_uid_ht.
+ * The match is done against the trigger name and uid.
+ */
+ cds_lfht_lookup(state->triggers_by_name_uid_ht,
+ hash_trigger_by_name_uid(trigger),
+ match_trigger_by_name_uid,
+ trigger,
+ &iter);
+ return !!cds_lfht_iter_get_node(&iter);
+}
+
+static
+enum lttng_error_code generate_trigger_name(
+ struct notification_thread_state *state,
+ struct lttng_trigger *trigger, const char **name)
+{
+ enum lttng_error_code ret_code = LTTNG_OK;
+ bool taken = false;
+ enum lttng_trigger_status status;
+
+ do {
+ const int ret = lttng_trigger_generate_name(trigger,
+ state->trigger_id.name_offset++);
+ if (ret) {
+ /* The only reason this can fail right now. */
+ ret_code = LTTNG_ERR_NOMEM;
+ break;
+ }
+
+ status = lttng_trigger_get_name(trigger, name);
+ assert(status == LTTNG_TRIGGER_STATUS_OK);
+
+ taken = trigger_name_taken(state, trigger);
+ } while (taken || state->trigger_id.name_offset == UINT64_MAX);
+
+ return ret_code;
+}
+
+/*
+ * FIXME A client's credentials are not checked when registering a trigger.
+ *
+ * The effects of this are benign since:
+ * - The client will succeed in registering the trigger, as it is valid,
+ * - The trigger will, internally, be bound to the channel/session,
* - The notifications will not be sent since the client's credentials
* are checked against the channel at that moment.
*
struct notification_client *client;
struct notification_client_list *client_list = NULL;
struct lttng_trigger_ht_element *trigger_ht_element = NULL;
- struct notification_client_list_element *client_list_element, *tmp;
+ struct notification_client_list_element *client_list_element;
+ struct notification_trigger_tokens_ht_element *trigger_tokens_ht_element = NULL;
struct cds_lfht_node *node;
struct cds_lfht_iter iter;
+ const char* trigger_name;
bool free_trigger = true;
+ struct lttng_evaluation *evaluation = NULL;
+ struct lttng_credentials object_creds;
+ uid_t object_uid;
+ gid_t object_gid;
+ enum action_executor_status executor_status;
+ const uint64_t trigger_tracer_token =
+ state->trigger_id.next_tracer_token++;
rcu_read_lock();
+ /* Set the trigger's tracer token. */
+ lttng_trigger_set_tracer_token(trigger, trigger_tracer_token);
+
+ if (lttng_trigger_get_name(trigger, &trigger_name) ==
+ LTTNG_TRIGGER_STATUS_UNSET) {
+ const enum lttng_error_code ret_code = generate_trigger_name(
+ state, trigger, &trigger_name);
+
+ if (ret_code != LTTNG_OK) {
+ /* Fatal error. */
+ ret = -1;
+ *cmd_result = ret_code;
+ goto error;
+ }
+ } else if (trigger_name_taken(state, trigger)) {
+ /* Not a fatal error. */
+ *cmd_result = LTTNG_ERR_TRIGGER_EXISTS;
+ ret = 0;
+ goto error;
+ }
+
condition = lttng_trigger_get_condition(trigger);
assert(condition);
- ret = condition_is_supported(condition);
- if (ret < 0) {
- goto error;
- } else if (ret == 0) {
+ /* Some conditions require tracers to implement a minimal ABI version. */
+ if (!condition_is_supported(condition)) {
*cmd_result = LTTNG_ERR_NOT_SUPPORTED;
goto error;
- } else {
- /* Feature is supported, continue. */
- ret = 0;
}
trigger_ht_element = zmalloc(sizeof(*trigger_ht_element));
/* Add trigger to the trigger_ht. */
cds_lfht_node_init(&trigger_ht_element->node);
+ cds_lfht_node_init(&trigger_ht_element->node_by_name_uid);
trigger_ht_element->trigger = trigger;
node = cds_lfht_add_unique(state->triggers_ht,
lttng_condition_hash(condition),
- match_condition,
- condition,
+ match_trigger,
+ trigger,
&trigger_ht_element->node);
if (node != &trigger_ht_element->node) {
/* Not a fatal error, simply report it to the client. */
goto error_free_ht_element;
}
+ node = cds_lfht_add_unique(state->triggers_by_name_uid_ht,
+ hash_trigger_by_name_uid(trigger),
+ match_trigger_by_name_uid,
+ trigger,
+ &trigger_ht_element->node_by_name_uid);
+ if (node != &trigger_ht_element->node_by_name_uid) {
+ /* Not a fatal error, simply report it to the client. */
+ cds_lfht_del(state->triggers_ht, &trigger_ht_element->node);
+ *cmd_result = LTTNG_ERR_TRIGGER_EXISTS;
+ goto error_free_ht_element;
+ }
+
+ if (lttng_condition_get_type(condition) == LTTNG_CONDITION_TYPE_EVENT_RULE_HIT) {
+ trigger_tokens_ht_element = zmalloc(sizeof(*trigger_tokens_ht_element));
+ if (!trigger_tokens_ht_element) {
+ /* Fatal error. */
+ ret = -1;
+ cds_lfht_del(state->triggers_ht,
+ &trigger_ht_element->node);
+ cds_lfht_del(state->triggers_by_name_uid_ht,
+ &trigger_ht_element->node_by_name_uid);
+ goto error_free_ht_element;
+ }
+
+ /* Add trigger token to the trigger_tokens_ht. */
+ cds_lfht_node_init(&trigger_tokens_ht_element->node);
+ trigger_tokens_ht_element->token =
+ LTTNG_OPTIONAL_GET(trigger->tracer_token);
+ trigger_tokens_ht_element->trigger = trigger;
+
+ node = cds_lfht_add_unique(state->trigger_tokens_ht,
+ hash_key_u64(&trigger_tokens_ht_element->token,
+ lttng_ht_seed),
+ match_trigger_token,
+ &trigger_tokens_ht_element->token,
+ &trigger_tokens_ht_element->node);
+ if (node != &trigger_tokens_ht_element->node) {
+ /* Internal corruption, fatal error. */
+ ret = -1;
+ *cmd_result = LTTNG_ERR_TRIGGER_EXISTS;
+ cds_lfht_del(state->triggers_ht,
+ &trigger_ht_element->node);
+ cds_lfht_del(state->triggers_by_name_uid_ht,
+ &trigger_ht_element->node_by_name_uid);
+ goto error_free_ht_element;
+ }
+ }
+
/*
* Ownership of the trigger and of its wrapper was transfered to
- * the triggers_ht.
+ * the triggers_ht. Same for token ht element if necessary.
*/
+ trigger_tokens_ht_element = NULL;
trigger_ht_element = NULL;
free_trigger = false;
* It is not skipped as this is the only action type currently
* supported.
*/
- client_list = notification_client_list_create(trigger);
- if (!client_list) {
- ret = -1;
- goto error_free_ht_element;
- }
-
- /* Build a list of clients to which this new trigger applies. */
- cds_lfht_for_each_entry(state->client_socket_ht, &iter, client,
- client_socket_ht_node) {
- if (!trigger_applies_to_client(trigger, client)) {
- continue;
+ if (is_trigger_action_notify(trigger)) {
+ client_list = notification_client_list_create(trigger);
+ if (!client_list) {
+ ret = -1;
+ goto error_free_ht_element;
}
- client_list_element = zmalloc(sizeof(*client_list_element));
- if (!client_list_element) {
- ret = -1;
- goto error_put_client_list;
+ /* Build a list of clients to which this new trigger applies. */
+ cds_lfht_for_each_entry (state->client_socket_ht, &iter, client,
+ client_socket_ht_node) {
+ if (!trigger_applies_to_client(trigger, client)) {
+ continue;
+ }
+
+ client_list_element =
+ zmalloc(sizeof(*client_list_element));
+ if (!client_list_element) {
+ ret = -1;
+ goto error_put_client_list;
+ }
+
+ CDS_INIT_LIST_HEAD(&client_list_element->node);
+ client_list_element->client = client;
+ cds_list_add(&client_list_element->node,
+ &client_list->list);
}
- CDS_INIT_LIST_HEAD(&client_list_element->node);
- client_list_element->client = client;
- cds_list_add(&client_list_element->node, &client_list->list);
+
+ /*
+ * Client list ownership transferred to the
+ * notification_trigger_clients_ht.
+ */
+ publish_notification_client_list(state, client_list);
}
switch (get_condition_binding_object(condition)) {
case LTTNG_OBJECT_TYPE_NONE:
break;
default:
- ERR("[notification-thread] Unknown object type on which to bind a newly registered trigger was encountered");
+ ERR("Unknown object type on which to bind a newly registered trigger was encountered");
ret = -1;
goto error_put_client_list;
}
/*
- * Since there is nothing preventing clients from subscribing to a
- * condition before the corresponding trigger is registered, we have
- * to evaluate this new condition right away.
+ * The new trigger's condition must be evaluated against the current
+ * state.
+ *
+ * In the case of `notify` action, nothing preventing clients from
+ * subscribing to a condition before the corresponding trigger is
+ * registered, we have to evaluate this new condition right away.
*
* At some point, we were waiting for the next "evaluation" (e.g. on
* reception of a channel sample) to evaluate this new condition, but
* current state. Otherwise, the next evaluation cycle may only see
* that the evaluations remain the same (true for samples n-1 and n) and
* the client will never know that the condition has been met.
- *
- * No need to lock the list here as it has not been published yet.
*/
- cds_list_for_each_entry_safe(client_list_element, tmp,
- &client_list->list, node) {
- ret = evaluate_condition_for_client(trigger, condition,
- client_list_element->client, state);
- if (ret) {
- goto error_put_client_list;
- }
+ switch (get_condition_binding_object(condition)) {
+ case LTTNG_OBJECT_TYPE_SESSION:
+ ret = evaluate_session_condition_for_client(condition, state,
+ &evaluation, &object_uid,
+ &object_gid);
+ break;
+ case LTTNG_OBJECT_TYPE_CHANNEL:
+ ret = evaluate_channel_condition_for_client(condition, state,
+ &evaluation, &object_uid,
+ &object_gid);
+ break;
+ case LTTNG_OBJECT_TYPE_NONE:
+ ret = 0;
+ break;
+ case LTTNG_OBJECT_TYPE_UNKNOWN:
+ default:
+ ret = -1;
+ break;
+ }
+
+ if (ret) {
+ /* Fatal error. */
+ goto error_put_client_list;
+ }
+
+ LTTNG_OPTIONAL_SET(&object_creds.uid, object_uid);
+ LTTNG_OPTIONAL_SET(&object_creds.gid, object_gid);
+
+ DBG("Newly registered trigger's condition evaluated to %s",
+ evaluation ? "true" : "false");
+ if (!evaluation) {
+ /* Evaluation yielded nothing. Normal exit. */
+ ret = 0;
+ goto end;
}
/*
- * Client list ownership transferred to the
- * notification_trigger_clients_ht.
+ * Ownership of `evaluation` transferred to the action executor
+ * no matter the result.
*/
- publish_notification_client_list(state, client_list);
- client_list = NULL;
+ executor_status = action_executor_enqueue(state->executor, trigger,
+ evaluation, &object_creds, client_list);
+ evaluation = NULL;
+ switch (executor_status) {
+ case ACTION_EXECUTOR_STATUS_OK:
+ break;
+ case ACTION_EXECUTOR_STATUS_ERROR:
+ case ACTION_EXECUTOR_STATUS_INVALID:
+ /*
+ * TODO Add trigger identification (name/id) when
+ * it is added to the API.
+ */
+ ERR("Fatal error occurred while enqueuing action associated to newly registered trigger");
+ ret = -1;
+ goto error_put_client_list;
+ case ACTION_EXECUTOR_STATUS_OVERFLOW:
+ /*
+ * TODO Add trigger identification (name/id) when
+ * it is added to the API.
+ *
+ * Not a fatal error.
+ */
+ WARN("No space left when enqueuing action associated to newly registered trigger");
+ ret = 0;
+ goto end;
+ default:
+ abort();
+ }
+end:
*cmd_result = LTTNG_OK;
+ DBG("Registered trigger: name = `%s`, tracer token = %" PRIu64,
+ trigger_name, trigger_tracer_token);
error_put_client_list:
notification_client_list_put(client_list);
error_free_ht_element:
- free(trigger_ht_element);
+ if (trigger_ht_element) {
+ /* Delayed removal due to RCU constraint on delete. */
+ call_rcu(&trigger_ht_element->rcu_node,
+ free_lttng_trigger_ht_element_rcu);
+ }
+
+ free(trigger_tokens_ht_element);
error:
if (free_trigger) {
lttng_trigger_destroy(trigger);
rcu_node));
}
+static
+void free_notification_trigger_tokens_ht_element_rcu(struct rcu_head *node)
+{
+ free(caa_container_of(node, struct notification_trigger_tokens_ht_element,
+ rcu_node));
+}
+
static
int handle_notification_thread_command_unregister_trigger(
struct notification_thread_state *state,
- struct lttng_trigger *trigger,
+ const struct lttng_trigger *trigger,
enum lttng_error_code *_cmd_reply)
{
struct cds_lfht_iter iter;
struct lttng_channel_trigger_list *trigger_list;
struct notification_client_list *client_list;
struct lttng_trigger_ht_element *trigger_ht_element = NULL;
- struct lttng_condition *condition = lttng_trigger_get_condition(
+ const struct lttng_condition *condition = lttng_trigger_get_const_condition(
trigger);
enum lttng_error_code cmd_reply;
cds_lfht_lookup(state->triggers_ht,
lttng_condition_hash(condition),
- match_condition,
- condition,
+ match_trigger,
+ trigger,
&iter);
triggers_ht_node = cds_lfht_iter_get_node(&iter);
if (!triggers_ht_node) {
cds_list_for_each_entry_safe(trigger_element, tmp,
&trigger_list->list, node) {
- const struct lttng_condition *current_condition =
- lttng_trigger_get_const_condition(
- trigger_element->trigger);
-
- assert(current_condition);
- if (!lttng_condition_is_equal(condition,
- current_condition)) {
+ if (!lttng_trigger_is_equal(trigger, trigger_element->trigger)) {
continue;
}
}
}
- /*
- * Remove and release the client list from
- * notification_trigger_clients_ht.
- */
- client_list = get_client_list_from_condition(state, condition);
- assert(client_list);
+ if (lttng_condition_get_type(condition) ==
+ LTTNG_CONDITION_TYPE_EVENT_RULE_HIT) {
+ struct notification_trigger_tokens_ht_element
+ *trigger_tokens_ht_element;
- /* Put new reference and the hashtable's reference. */
- notification_client_list_put(client_list);
- notification_client_list_put(client_list);
- client_list = NULL;
+ cds_lfht_for_each_entry (state->trigger_tokens_ht, &iter,
+ trigger_tokens_ht_element, node) {
+ if (!lttng_trigger_is_equal(trigger,
+ trigger_tokens_ht_element->trigger)) {
+ continue;
+ }
+
+ DBG("[notification-thread] Removed trigger from tokens_ht");
+ cds_lfht_del(state->trigger_tokens_ht,
+ &trigger_tokens_ht_element->node);
+ call_rcu(&trigger_tokens_ht_element->rcu_node,
+ free_notification_trigger_tokens_ht_element_rcu);
+
+ break;
+ }
+ }
+
+ if (is_trigger_action_notify(trigger)) {
+ /*
+ * Remove and release the client list from
+ * notification_trigger_clients_ht.
+ */
+ client_list = get_client_list_from_condition(state, condition);
+ assert(client_list);
+
+ /* Put new reference and the hashtable's reference. */
+ notification_client_list_put(client_list);
+ notification_client_list_put(client_list);
+ client_list = NULL;
+ }
/* Remove trigger from triggers_ht. */
trigger_ht_element = caa_container_of(triggers_ht_node,
struct lttng_trigger_ht_element, node);
+ cds_lfht_del(state->triggers_by_name_uid_ht, &trigger_ht_element->node_by_name_uid);
cds_lfht_del(state->triggers_ht, triggers_ht_node);
/* Release the ownership of the trigger. */
struct notification_thread_command, cmd_list_node);
cds_list_del(&cmd->cmd_list_node);
pthread_mutex_unlock(&handle->cmd_queue.lock);
+
+ DBG("[notification-thread] Received `%s` command",
+ notification_command_type_str(cmd->type));
switch (cmd->type) {
case NOTIFICATION_COMMAND_TYPE_REGISTER_TRIGGER:
- DBG("[notification-thread] Received register trigger command");
- ret = handle_notification_thread_command_register_trigger(
- state, cmd->parameters.trigger,
+ ret = handle_notification_thread_command_register_trigger(state,
+ cmd->parameters.register_trigger.trigger,
&cmd->reply_code);
break;
case NOTIFICATION_COMMAND_TYPE_UNREGISTER_TRIGGER:
- DBG("[notification-thread] Received unregister trigger command");
ret = handle_notification_thread_command_unregister_trigger(
- state, cmd->parameters.trigger,
+ state,
+ cmd->parameters.unregister_trigger.trigger,
&cmd->reply_code);
break;
case NOTIFICATION_COMMAND_TYPE_ADD_CHANNEL:
- DBG("[notification-thread] Received add channel command");
ret = handle_notification_thread_command_add_channel(
state,
cmd->parameters.add_channel.session.name,
&cmd->reply_code);
break;
case NOTIFICATION_COMMAND_TYPE_REMOVE_CHANNEL:
- DBG("[notification-thread] Received remove channel command");
ret = handle_notification_thread_command_remove_channel(
state, cmd->parameters.remove_channel.key,
cmd->parameters.remove_channel.domain,
break;
case NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING:
case NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_COMPLETED:
- DBG("[notification-thread] Received session rotation %s command",
- cmd->type == NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING ?
- "ongoing" : "completed");
ret = handle_notification_thread_command_session_rotation(
state,
cmd->type,
cmd->parameters.session_rotation.location,
&cmd->reply_code);
break;
+ case NOTIFICATION_COMMAND_TYPE_ADD_TRACER_EVENT_SOURCE:
+ ret = handle_notification_thread_command_add_tracer_event_source(
+ state,
+ cmd->parameters.tracer_event_source.tracer_event_source_fd,
+ cmd->parameters.tracer_event_source.domain,
+ &cmd->reply_code);
+ break;
+ case NOTIFICATION_COMMAND_TYPE_REMOVE_TRACER_EVENT_SOURCE:
+ ret = handle_notification_thread_command_remove_tracer_event_source(
+ state,
+ cmd->parameters.tracer_event_source.tracer_event_source_fd,
+ &cmd->reply_code);
+ break;
+ case NOTIFICATION_COMMAND_TYPE_LIST_TRIGGERS:
+ {
+ struct lttng_triggers *triggers = NULL;
+
+ ret = handle_notification_thread_command_list_triggers(
+ handle,
+ state,
+ cmd->parameters.list_triggers.uid,
+ &triggers,
+ &cmd->reply_code);
+ cmd->reply.list_triggers.triggers = triggers;
+ ret = 0;
+ break;
+ }
case NOTIFICATION_COMMAND_TYPE_QUIT:
- DBG("[notification-thread] Received quit command");
cmd->reply_code = LTTNG_OK;
ret = 1;
goto end;
+ case NOTIFICATION_COMMAND_TYPE_CLIENT_COMMUNICATION_UPDATE:
+ {
+ const enum client_transmission_status client_status =
+ cmd->parameters.client_communication_update
+ .status;
+ const notification_client_id client_id =
+ cmd->parameters.client_communication_update.id;
+ struct notification_client *client;
+
+ rcu_read_lock();
+ client = get_client_from_id(client_id, state);
+
+ if (!client) {
+ /*
+ * Client error was probably already picked-up by the
+ * notification thread or it has disconnected
+ * gracefully while this command was queued.
+ */
+ DBG("Failed to find notification client to update communication status, client id = %" PRIu64,
+ client_id);
+ ret = 0;
+ } else {
+ ret = client_handle_transmission_status(
+ client, client_status, state);
+ }
+ rcu_read_unlock();
+ break;
+ }
default:
ERR("[notification-thread] Unknown internal command received");
goto error_unlock;
return ret;
}
-/* Client lock must be acquired by caller. */
static
int client_reset_inbound_state(struct notification_client *client)
{
int ret;
- ASSERT_LOCKED(client->lock);
- ret = lttng_dynamic_buffer_set_size(
- &client->communication.inbound.buffer, 0);
- assert(!ret);
+ lttng_payload_clear(&client->communication.inbound.payload);
client->communication.inbound.bytes_to_receive =
sizeof(struct lttng_notification_channel_message);
LTTNG_SOCK_SET_UID_CRED(&client->communication.inbound.creds, -1);
LTTNG_SOCK_SET_GID_CRED(&client->communication.inbound.creds, -1);
ret = lttng_dynamic_buffer_set_size(
- &client->communication.inbound.buffer,
+ &client->communication.inbound.payload.buffer,
client->communication.inbound.bytes_to_receive);
+
return ret;
}
ret = -1;
goto error;
}
+
pthread_mutex_init(&client->lock, NULL);
client->id = state->next_notification_client_id++;
CDS_INIT_LIST_HEAD(&client->condition_list);
- lttng_dynamic_buffer_init(&client->communication.inbound.buffer);
- lttng_dynamic_buffer_init(&client->communication.outbound.buffer);
+ lttng_payload_init(&client->communication.inbound.payload);
+ lttng_payload_init(&client->communication.outbound.payload);
client->communication.inbound.expect_creds = true;
- pthread_mutex_lock(&client->lock);
ret = client_reset_inbound_state(client);
- pthread_mutex_unlock(&client->lock);
if (ret) {
ERR("[notification-thread] Failed to reset client communication's inbound state");
ret = 0;
rcu_read_unlock();
return ret;
+
error:
notification_client_destroy(client, state);
return ret;
}
-/* RCU read-lock must be held by the caller. */
-/* Client lock must be held by the caller */
+/*
+ * RCU read-lock must be held by the caller.
+ * Client lock must _not_ be held by the caller.
+ */
static
int notification_thread_client_disconnect(
struct notification_client *client,
struct lttng_condition_list_element *condition_list_element, *tmp;
/* Acquire the client lock to disable its communication atomically. */
+ pthread_mutex_lock(&client->lock);
client->communication.active = false;
+ cds_lfht_del(state->client_socket_ht, &client->client_socket_ht_node);
+ cds_lfht_del(state->client_id_ht, &client->client_id_ht_node);
+ pthread_mutex_unlock(&client->lock);
+
ret = lttng_poll_del(&state->events, client->socket);
if (ret) {
ERR("[notification-thread] Failed to remove client socket %d from poll set",
client->socket);
}
- cds_lfht_del(state->client_socket_ht, &client->client_socket_ht_node);
- cds_lfht_del(state->client_id_ht, &client->client_id_ht_node);
-
/* Release all conditions to which the client was subscribed. */
cds_list_for_each_entry_safe(condition_list_element, tmp,
&client->condition_list, node) {
goto end;
}
- pthread_mutex_lock(&client->lock);
ret = notification_thread_client_disconnect(client, state);
- pthread_mutex_unlock(&client->lock);
end:
rcu_read_unlock();
return ret;
client_socket_ht_node) {
int ret;
- pthread_mutex_lock(&client->lock);
ret = notification_thread_client_disconnect(
client, state);
- pthread_mutex_unlock(&client->lock);
if (ret) {
error_encoutered = true;
}
goto end;
}
- client->communication.outbound.queued_command_reply = false;
- client->communication.outbound.dropped_notification = false;
break;
case CLIENT_TRANSMISSION_STATUS_QUEUED:
/*
/* Client lock must be acquired by caller. */
static
enum client_transmission_status client_flush_outgoing_queue(
- struct notification_client *client,
- struct notification_thread_state *state)
+ struct notification_client *client)
{
ssize_t ret;
size_t to_send_count;
enum client_transmission_status status;
+ struct lttng_payload_view pv = lttng_payload_view_from_payload(
+ &client->communication.outbound.payload, 0, -1);
+ const int fds_to_send_count =
+ lttng_payload_view_get_fd_handle_count(&pv);
ASSERT_LOCKED(client->lock);
- assert(client->communication.outbound.buffer.size != 0);
- to_send_count = client->communication.outbound.buffer.size;
+ if (!client->communication.active) {
+ status = CLIENT_TRANSMISSION_STATUS_FAIL;
+ goto end;
+ }
+
+ if (pv.buffer.size == 0) {
+ /*
+ * If both data and fds are equal to zero, we are in an invalid
+ * state.
+ */
+ assert(fds_to_send_count != 0);
+ goto send_fds;
+ }
+
+ /* Send data. */
+ to_send_count = pv.buffer.size;
DBG("[notification-thread] Flushing client (socket fd = %i) outgoing queue",
client->socket);
ret = lttcomm_send_unix_sock_non_block(client->socket,
- client->communication.outbound.buffer.data,
+ pv.buffer.data,
to_send_count);
if ((ret >= 0 && ret < to_send_count)) {
DBG("[notification-thread] Client (socket fd = %i) outgoing queue could not be completely flushed",
client->socket);
to_send_count -= max(ret, 0);
- memcpy(client->communication.outbound.buffer.data,
- client->communication.outbound.buffer.data +
- client->communication.outbound.buffer.size - to_send_count,
+ memmove(client->communication.outbound.payload.buffer.data,
+ pv.buffer.data +
+ pv.buffer.size - to_send_count,
to_send_count);
ret = lttng_dynamic_buffer_set_size(
- &client->communication.outbound.buffer,
+ &client->communication.outbound.payload.buffer,
to_send_count);
if (ret) {
- status = CLIENT_TRANSMISSION_STATUS_ERROR;
goto error;
}
+
status = CLIENT_TRANSMISSION_STATUS_QUEUED;
+ goto end;
} else if (ret < 0) {
- /* Generic error, disconnect the client. */
+ /* Generic error, disable the client's communication. */
ERR("[notification-thread] Failed to flush outgoing queue, disconnecting client (socket fd = %i)",
client->socket);
+ client->communication.active = false;
status = CLIENT_TRANSMISSION_STATUS_FAIL;
+ goto end;
} else {
- /* No error and flushed the queue completely. */
+ /*
+ * No error and flushed the queue completely.
+ *
+	 * The payload buffer size is used later to
+	 * check whether notifications are queued. So although the
+	 * direct caller knows that the transmission is complete, we
+	 * need to set the buffer size to zero.
+ */
ret = lttng_dynamic_buffer_set_size(
- &client->communication.outbound.buffer, 0);
+ &client->communication.outbound.payload.buffer, 0);
if (ret) {
- status = CLIENT_TRANSMISSION_STATUS_ERROR;
goto error;
}
+ }
+
+send_fds:
+ /* No fds to send, transmission is complete. */
+ if (fds_to_send_count == 0) {
status = CLIENT_TRANSMISSION_STATUS_COMPLETE;
+ goto end;
}
- ret = client_handle_transmission_status(client, status, state);
- if (ret) {
- goto error;
+ ret = lttcomm_send_payload_view_fds_unix_sock_non_block(
+ client->socket, &pv);
+ if (ret < 0) {
+ /* Generic error, disable the client's communication. */
+ ERR("[notification-thread] Failed to flush outgoing fds queue, disconnecting client (socket fd = %i)",
+ client->socket);
+ client->communication.active = false;
+ status = CLIENT_TRANSMISSION_STATUS_FAIL;
+ goto end;
+ } else if (ret == 0) {
+ /* Nothing could be sent. */
+ status = CLIENT_TRANSMISSION_STATUS_QUEUED;
+ } else {
+ /* Fd passing is an all or nothing kind of thing. */
+ status = CLIENT_TRANSMISSION_STATUS_COMPLETE;
+ /*
+ * The payload _fd_array count is used later to
+	 * check whether notifications are queued. So although the
+ * direct caller knows that the transmission is complete, we
+ * need to clear the _fd_array for the queuing check.
+ */
+ lttng_dynamic_pointer_array_clear(
+ &client->communication.outbound.payload
+ ._fd_handles);
}
- return 0;
+end:
+ if (status == CLIENT_TRANSMISSION_STATUS_COMPLETE) {
+ client->communication.outbound.queued_command_reply = false;
+ client->communication.outbound.dropped_notification = false;
+ lttng_payload_clear(&client->communication.outbound.payload);
+ }
+
+ return status;
error:
- return -1;
+ return CLIENT_TRANSMISSION_STATUS_ERROR;
}
-/* Client lock must be acquired by caller. */
+static
+bool client_has_outbound_data_left(
+ const struct notification_client *client)
+{
+ const struct lttng_payload_view pv = lttng_payload_view_from_payload(
+ &client->communication.outbound.payload, 0, -1);
+ const bool has_data = pv.buffer.size != 0;
+ const bool has_fds = lttng_payload_view_get_fd_handle_count(&pv);
+
+ return has_data || has_fds;
+}
+
+/* Client lock must _not_ be held by the caller. */
static
int client_send_command_reply(struct notification_client *client,
struct notification_thread_state *state,
.size = sizeof(reply),
};
char buffer[sizeof(msg) + sizeof(reply)];
+ enum client_transmission_status transmission_status;
- ASSERT_LOCKED(client->lock);
+ memcpy(buffer, &msg, sizeof(msg));
+ memcpy(buffer + sizeof(msg), &reply, sizeof(reply));
+ DBG("[notification-thread] Send command reply (%i)", (int) status);
+ pthread_mutex_lock(&client->lock);
if (client->communication.outbound.queued_command_reply) {
/* Protocol error. */
- goto error;
+ goto error_unlock;
}
- memcpy(buffer, &msg, sizeof(msg));
- memcpy(buffer + sizeof(msg), &reply, sizeof(reply));
- DBG("[notification-thread] Send command reply (%i)", (int) status);
-
/* Enqueue buffer to outgoing queue and flush it. */
ret = lttng_dynamic_buffer_append(
- &client->communication.outbound.buffer,
+ &client->communication.outbound.payload.buffer,
buffer, sizeof(buffer));
if (ret) {
- goto error;
+ goto error_unlock;
}
- ret = client_flush_outgoing_queue(client, state);
- if (ret) {
- goto error;
- }
+ transmission_status = client_flush_outgoing_queue(client);
- if (client->communication.outbound.buffer.size != 0) {
+ if (client_has_outbound_data_left(client)) {
/* Queue could not be emptied. */
client->communication.outbound.queued_command_reply = true;
}
+ pthread_mutex_unlock(&client->lock);
+ ret = client_handle_transmission_status(
+ client, transmission_status, state);
+ if (ret) {
+ goto error;
+ }
+
return 0;
+error_unlock:
+ pthread_mutex_unlock(&client->lock);
error:
return -1;
}
struct notification_thread_state *state)
{
int ret;
-
- pthread_mutex_lock(&client->lock);
-
/*
* Receiving message header. The function will be called again
* once the rest of the message as been received and can be
*/
const struct lttng_notification_channel_message *msg;
- assert(sizeof(*msg) == client->communication.inbound.buffer.size);
+ assert(sizeof(*msg) == client->communication.inbound.payload.buffer.size);
msg = (const struct lttng_notification_channel_message *)
- client->communication.inbound.buffer.data;
+ client->communication.inbound.payload.buffer.data;
if (msg->size == 0 ||
msg->size > DEFAULT_MAX_NOTIFICATION_CLIENT_MESSAGE_PAYLOAD_SIZE) {
}
client->communication.inbound.bytes_to_receive = msg->size;
+ client->communication.inbound.fds_to_receive = msg->fds;
client->communication.inbound.msg_type =
(enum lttng_notification_channel_message_type) msg->type;
ret = lttng_dynamic_buffer_set_size(
- &client->communication.inbound.buffer, msg->size);
+ &client->communication.inbound.payload.buffer, msg->size);
+
+ /* msg is not valid anymore due to lttng_dynamic_buffer_set_size. */
+ msg = NULL;
end:
- pthread_mutex_unlock(&client->lock);
return ret;
}
LTTNG_NOTIFICATION_CHANNEL_STATUS_OK;
char send_buffer[sizeof(msg_header) + sizeof(handshake_reply)];
- pthread_mutex_lock(&client->lock);
-
memcpy(send_buffer, &msg_header, sizeof(msg_header));
memcpy(send_buffer + sizeof(msg_header), &handshake_reply,
sizeof(handshake_reply));
handshake_client =
(struct lttng_notification_channel_command_handshake *)
- client->communication.inbound.buffer
+ client->communication.inbound.payload.buffer
.data;
client->major = handshake_client->major;
client->minor = handshake_client->minor;
status = LTTNG_NOTIFICATION_CHANNEL_STATUS_UNSUPPORTED_VERSION;
}
+ pthread_mutex_lock(&client->lock);
+ /* Outgoing queue will be flushed when the command reply is sent. */
ret = lttng_dynamic_buffer_append(
- &client->communication.outbound.buffer, send_buffer,
+ &client->communication.outbound.payload.buffer, send_buffer,
sizeof(send_buffer));
if (ret) {
ERR("[notification-thread] Failed to send protocol version to notification channel client");
- goto end;
+ goto end_unlock;
}
client->validated = true;
client->communication.active = true;
+ pthread_mutex_unlock(&client->lock);
- ret = client_flush_outgoing_queue(client, state);
+ /* Set reception state to receive the next message header. */
+ ret = client_reset_inbound_state(client);
if (ret) {
+ ERR("[notification-thread] Failed to reset client communication's inbound state");
goto end;
}
+ /* Flushes the outgoing queue. */
ret = client_send_command_reply(client, state, status);
if (ret) {
ERR("[notification-thread] Failed to send reply to notification channel client");
goto end;
}
- /* Set reception state to receive the next message header. */
- ret = client_reset_inbound_state(client);
- if (ret) {
- ERR("[notification-thread] Failed to reset client communication's inbound state");
- goto end;
- }
-
-end:
+ goto end;
+end_unlock:
pthread_mutex_unlock(&client->lock);
+end:
return ret;
}
enum lttng_notification_channel_status status =
LTTNG_NOTIFICATION_CHANNEL_STATUS_OK;
struct lttng_payload_view condition_view =
- lttng_payload_view_from_dynamic_buffer(
- &client->communication.inbound.buffer,
+ lttng_payload_view_from_payload(
+ &client->communication.inbound.payload,
0, -1);
size_t expected_condition_size;
- pthread_mutex_lock(&client->lock);
- expected_condition_size = client->communication.inbound.buffer.size;
- pthread_mutex_unlock(&client->lock);
-
+ /*
+ * No need to lock client to sample the inbound state as the only
+ * other thread accessing clients (action executor) only uses the
+ * outbound state.
+ */
+ expected_condition_size = client->communication.inbound.payload.buffer.size;
ret = lttng_condition_create_from_payload(&condition_view, &condition);
if (ret != expected_condition_size) {
ERR("[notification-thread] Malformed condition received from client");
ret = notification_thread_client_unsubscribe(
client, condition, state, &status);
}
- if (ret) {
- goto end;
- }
- pthread_mutex_lock(&client->lock);
- ret = client_send_command_reply(client, state, status);
if (ret) {
- ERR("[notification-thread] Failed to send reply to notification channel client");
- goto end_unlock;
+ goto end;
}
/* Set reception state to receive the next message header. */
ret = client_reset_inbound_state(client);
if (ret) {
ERR("[notification-thread] Failed to reset client communication's inbound state");
- goto end_unlock;
+ goto end;
+ }
+
+ ret = client_send_command_reply(client, state, status);
+ if (ret) {
+ ERR("[notification-thread] Failed to send reply to notification channel client");
+ goto end;
}
-end_unlock:
- pthread_mutex_unlock(&client->lock);
end:
return ret;
}
struct notification_client *client;
ssize_t recv_ret;
size_t offset;
- bool message_is_complete = false;
+ rcu_read_lock();
client = get_client_from_socket(socket, state);
if (!client) {
/* Internal error, abort. */
goto end;
}
- pthread_mutex_lock(&client->lock);
- offset = client->communication.inbound.buffer.size -
+ offset = client->communication.inbound.payload.buffer.size -
client->communication.inbound.bytes_to_receive;
if (client->communication.inbound.expect_creds) {
recv_ret = lttcomm_recv_creds_unix_sock(socket,
- client->communication.inbound.buffer.data + offset,
+ client->communication.inbound.payload.buffer.data + offset,
client->communication.inbound.bytes_to_receive,
&client->communication.inbound.creds);
if (recv_ret > 0) {
}
} else {
recv_ret = lttcomm_recv_unix_sock_non_block(socket,
- client->communication.inbound.buffer.data + offset,
+ client->communication.inbound.payload.buffer.data + offset,
client->communication.inbound.bytes_to_receive);
}
if (recv_ret >= 0) {
client->communication.inbound.bytes_to_receive -= recv_ret;
- message_is_complete = client->communication.inbound
- .bytes_to_receive == 0;
- }
- pthread_mutex_unlock(&client->lock);
- if (recv_ret < 0) {
+ } else {
goto error_disconnect_client;
}
- if (message_is_complete) {
- ret = client_dispatch_message(client, state);
- if (ret) {
+ if (client->communication.inbound.bytes_to_receive != 0) {
+		/* Message incomplete; wait for more data. */
+ ret = 0;
+ goto end;
+ }
+
+ assert(client->communication.inbound.bytes_to_receive == 0);
+
+ /* Receive fds. */
+ if (client->communication.inbound.fds_to_receive != 0) {
+ ret = lttcomm_recv_payload_fds_unix_sock_non_block(
+ client->socket,
+ client->communication.inbound.fds_to_receive,
+ &client->communication.inbound.payload);
+ if (ret > 0) {
/*
- * Only returns an error if this client must be
- * disconnected.
+ * Fds received. non blocking fds passing is all
+ * or nothing.
*/
+ ssize_t expected_size;
+
+ expected_size = sizeof(int) *
+ client->communication.inbound
+ .fds_to_receive;
+ assert(ret == expected_size);
+ client->communication.inbound.fds_to_receive = 0;
+ } else if (ret == 0) {
+ /* Received nothing. */
+ ret = 0;
+ goto end;
+ } else {
goto error_disconnect_client;
}
}
+
+	/* At this point, the message is complete. */
+ assert(client->communication.inbound.bytes_to_receive == 0 &&
+ client->communication.inbound.fds_to_receive == 0);
+ ret = client_dispatch_message(client, state);
+ if (ret) {
+ /*
+ * Only returns an error if this client must be
+ * disconnected.
+ */
+ goto error_disconnect_client;
+ }
+
end:
+ rcu_read_unlock();
return ret;
+
error_disconnect_client:
- pthread_mutex_lock(&client->lock);
ret = notification_thread_client_disconnect(client, state);
- pthread_mutex_unlock(&client->lock);
- return ret;
+ goto end;
}
/* Client ready to receive outgoing data. */
{
int ret;
struct notification_client *client;
+ enum client_transmission_status transmission_status;
+ rcu_read_lock();
client = get_client_from_socket(socket, state);
if (!client) {
/* Internal error, abort. */
}
pthread_mutex_lock(&client->lock);
- ret = client_flush_outgoing_queue(client, state);
+ transmission_status = client_flush_outgoing_queue(client);
pthread_mutex_unlock(&client->lock);
+
+ ret = client_handle_transmission_status(
+ client, transmission_status, state);
if (ret) {
goto end;
}
end:
+ rcu_read_unlock();
return ret;
}
}
static
-int client_enqueue_dropped_notification(struct notification_client *client)
+int client_notification_overflow(struct notification_client *client)
{
- int ret;
- struct lttng_notification_channel_message msg = {
+ int ret = 0;
+ const struct lttng_notification_channel_message msg = {
.type = (int8_t) LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_NOTIFICATION_DROPPED,
- .size = 0,
};
ASSERT_LOCKED(client->lock);
+ DBG("Dropping notification addressed to client (socket fd = %i)",
+ client->socket);
+ if (client->communication.outbound.dropped_notification) {
+ /*
+ * The client already has a "notification dropped" message
+ * in its outgoing queue. Nothing to do since all
+ * of those messages are coalesced.
+ */
+ goto end;
+ }
+
+ client->communication.outbound.dropped_notification = true;
ret = lttng_dynamic_buffer_append(
- &client->communication.outbound.buffer, &msg,
+ &client->communication.outbound.payload.buffer, &msg,
sizeof(msg));
+ if (ret) {
+ PERROR("Failed to enqueue \"dropped notification\" message in client's (socket fd = %i) outgoing queue",
+ client->socket);
+ }
+end:
return ret;
}
+static int client_handle_transmission_status_wrapper(
+ struct notification_client *client,
+ enum client_transmission_status status,
+ void *user_data)
+{
+ return client_handle_transmission_status(client, status,
+ (struct notification_thread_state *) user_data);
+}
+
+static
+int send_evaluation_to_clients(const struct lttng_trigger *trigger,
+ const struct lttng_evaluation *evaluation,
+ struct notification_client_list* client_list,
+ struct notification_thread_state *state,
+ uid_t object_uid, gid_t object_gid)
+{
+ const struct lttng_credentials creds = {
+ .uid = LTTNG_OPTIONAL_INIT_VALUE(object_uid),
+ .gid = LTTNG_OPTIONAL_INIT_VALUE(object_gid),
+ };
+
+ return notification_client_list_send_evaluation(client_list,
+ lttng_trigger_get_const_condition(trigger), evaluation,
+ lttng_trigger_get_credentials(trigger),
+ &creds,
+ client_handle_transmission_status_wrapper, state);
+}
+
/*
* Permission checks relative to notification channel clients are performed
* here. Notice how object, client, and trigger credentials are involved in
* interference from external users (those could, for instance, unregister
* their triggers).
*/
-static
-int send_evaluation_to_clients(const struct lttng_trigger *trigger,
+LTTNG_HIDDEN
+int notification_client_list_send_evaluation(
+ struct notification_client_list *client_list,
+ const struct lttng_condition *condition,
const struct lttng_evaluation *evaluation,
- struct notification_client_list* client_list,
- struct notification_thread_state *state,
- uid_t object_uid, gid_t object_gid)
+ const struct lttng_credentials *trigger_creds,
+ const struct lttng_credentials *source_object_creds,
+ report_client_transmission_result_cb client_report,
+ void *user_data)
{
int ret = 0;
struct lttng_payload msg_payload;
struct notification_client_list_element *client_list_element, *tmp;
const struct lttng_notification notification = {
- .condition = (struct lttng_condition *) lttng_trigger_get_const_condition(trigger),
+ .condition = (struct lttng_condition *) condition,
.evaluation = (struct lttng_evaluation *) evaluation,
};
struct lttng_notification_channel_message msg_header = {
.type = (int8_t) LTTNG_NOTIFICATION_CHANNEL_MESSAGE_TYPE_NOTIFICATION,
};
- const struct lttng_credentials *trigger_creds = lttng_trigger_get_credentials(trigger);
lttng_payload_init(&msg_payload);
->size = (uint32_t)(
msg_payload.buffer.size - sizeof(msg_header));
+ /* Update the number of fds announced in the payload's header. */
+ {
+ const struct lttng_payload_view pv = lttng_payload_view_from_payload(
+ &msg_payload, 0, -1);
+
+ ((struct lttng_notification_channel_message *)
+ msg_payload.buffer.data)->fds = (uint32_t)
+ lttng_payload_view_get_fd_handle_count(&pv);
+ }
+
pthread_mutex_lock(&client_list->lock);
cds_list_for_each_entry_safe(client_list_element, tmp,
&client_list->list, node) {
+ enum client_transmission_status transmission_status;
struct notification_client *client =
client_list_element->client;
ret = 0;
pthread_mutex_lock(&client->lock);
- if (client->uid != object_uid && client->gid != object_gid &&
- client->uid != 0) {
- /* Client is not allowed to monitor this channel. */
- DBG("[notification-thread] Skipping client at it does not have the object permission to receive notification for this trigger");
- goto unlock_client;
+ if (!client->communication.active) {
+ /*
+ * Skip inactive client (protocol error or
+ * disconnecting).
+ */
+ DBG("Skipping client at it is marked as inactive");
+ goto skip_client;
+ }
+
+ if (source_object_creds) {
+ if (client->uid != lttng_credentials_get_uid(source_object_creds) &&
+ client->gid != lttng_credentials_get_gid(source_object_creds) &&
+ client->uid != 0) {
+ /*
+ * Client is not allowed to monitor this
+ * object.
+ */
+ DBG("[notification-thread] Skipping client at it does not have the object permission to receive notification for this trigger");
+ goto skip_client;
+ }
}
- if (client->uid != trigger_creds->uid && client->gid != trigger_creds->gid) {
+ if (client->uid != lttng_credentials_get_uid(trigger_creds) && client->gid != lttng_credentials_get_gid(trigger_creds)) {
DBG("[notification-thread] Skipping client at it does not have the permission to receive notification for this trigger");
- goto unlock_client;
+ goto skip_client;
}
DBG("[notification-thread] Sending notification to client (fd = %i, %zu bytes)",
client->socket, msg_payload.buffer.size);
- if (client->communication.outbound.buffer.size) {
+
+ if (client_has_outbound_data_left(client)) {
/*
* Outgoing data is already buffered for this client;
* drop the notification and enqueue a "dropped
* notification since the socket spilled-over to the
* queue.
*/
- DBG("[notification-thread] Dropping notification addressed to client (socket fd = %i)",
- client->socket);
- if (!client->communication.outbound.dropped_notification) {
- client->communication.outbound.dropped_notification = true;
- ret = client_enqueue_dropped_notification(
- client);
- if (ret) {
- goto unlock_client;
- }
+ ret = client_notification_overflow(client);
+ if (ret) {
+ /* Fatal error. */
+ goto skip_client;
}
- goto unlock_client;
}
- ret = lttng_dynamic_buffer_append_buffer(
- &client->communication.outbound.buffer,
- &msg_payload.buffer);
+ ret = lttng_payload_copy(&msg_payload, &client->communication.outbound.payload);
if (ret) {
- goto unlock_client;
+ /* Fatal error. */
+ goto skip_client;
}
- ret = client_flush_outgoing_queue(client, state);
+ transmission_status = client_flush_outgoing_queue(client);
+ pthread_mutex_unlock(&client->lock);
+ ret = client_report(client, transmission_status, user_data);
if (ret) {
- goto unlock_client;
+ /* Fatal error. */
+ goto end_unlock_list;
}
-unlock_client:
+
+ continue;
+
+skip_client:
pthread_mutex_unlock(&client->lock);
if (ret) {
+ /* Fatal error. */
goto end_unlock_list;
}
}
return ret;
}
+int handle_notification_thread_event_notification(struct notification_thread_state *state,
+ int notification_pipe_read_fd,
+ enum lttng_domain_type domain)
+{
+ int ret;
+ struct lttng_ust_event_notifier_notification ust_notification;
+ struct lttng_kernel_event_notifier_notification kernel_notification;
+ struct cds_lfht_node *node;
+ struct cds_lfht_iter iter;
+ struct notification_trigger_tokens_ht_element *element;
+ enum lttng_action_type action_type;
+ const struct lttng_action *action;
+ struct lttng_event_notifier_notification notification;
+ void *reception_buffer;
+ size_t reception_size;
+
+ notification.type = domain;
+
+ switch(domain) {
+ case LTTNG_DOMAIN_UST:
+ reception_buffer = (void *) &ust_notification;
+ reception_size = sizeof(ust_notification);
+ notification.notification.ust = &ust_notification;
+ break;
+ case LTTNG_DOMAIN_KERNEL:
+ reception_buffer = (void *) &kernel_notification;
+ reception_size = sizeof(kernel_notification);
+ notification.notification.kernel = &kernel_notification;
+ break;
+ default:
+ abort();
+ }
+
+ /*
+ * The monitoring pipe only holds messages smaller than PIPE_BUF,
+ * ensuring that read/write of tracer notifications are atomic.
+ */
+ ret = lttng_read(notification_pipe_read_fd, reception_buffer,
+ reception_size);
+ if (ret != reception_size) {
+ PERROR("Failed to read from event source notification pipe: fd = %d, size to read = %zu, ret = %d",
+ notification_pipe_read_fd, reception_size, ret);
+ ret = -1;
+ goto end;
+ }
+
+ switch(domain) {
+ case LTTNG_DOMAIN_UST:
+ notification.token = ust_notification.token;
+ break;
+ case LTTNG_DOMAIN_KERNEL:
+ notification.token = kernel_notification.token;
+ break;
+ default:
+ abort();
+ }
+
+ /* Find triggers associated with this token. */
+ rcu_read_lock();
+ cds_lfht_lookup(state->trigger_tokens_ht,
+ hash_key_u64(¬ification.token, lttng_ht_seed),
+ match_trigger_token, ¬ification.token, &iter);
+ node = cds_lfht_iter_get_node(&iter);
+ if (caa_likely(!node)) {
+ /*
+ * This is not an error; slow consumption of the pipe can lead
+ * to situations where a trigger is removed, but we still get
+ * tracer notifications matching a previous trigger.
+ */
+ ret = 0;
+ goto end_unlock;
+ }
+
+ element = caa_container_of(node,
+ struct notification_trigger_tokens_ht_element,
+ node);
+
+ action = lttng_trigger_get_const_action(element->trigger);
+ action_type = lttng_action_get_type(action);
+ DBG("Received message from tracer event source: event source fd = %d, token = %" PRIu64 ", action type = '%s'",
+ notification_pipe_read_fd, notification.token,
+ lttng_action_type_string(action_type));
+
+ /* TODO: Perform actions */
+
+ ret = 0;
+
+end_unlock:
+ rcu_read_unlock();
+end:
+ return ret;
+}
+
int handle_notification_thread_channel_sample(
struct notification_thread_state *state, int pipe,
enum lttng_domain_type domain)
bool previous_sample_available = false;
struct channel_state_sample previous_sample, latest_sample;
uint64_t previous_session_consumed_total, latest_session_consumed_total;
+ struct lttng_credentials channel_creds;
/*
* The monitoring pipe only holds messages smaller than PIPE_BUF,
goto end_unlock;
}
+ channel_creds = (typeof(channel_creds)) {
+ .uid = LTTNG_OPTIONAL_INIT_VALUE(channel_info->session_info->uid),
+ .gid = LTTNG_OPTIONAL_INIT_VALUE(channel_info->session_info->gid),
+ };
+
trigger_list = caa_container_of(node, struct lttng_channel_trigger_list,
channel_triggers_ht_node);
cds_list_for_each_entry(trigger_list_element, &trigger_list->list,
node) {
const struct lttng_condition *condition;
- const struct lttng_action *action;
- const struct lttng_trigger *trigger;
+ struct lttng_trigger *trigger;
struct notification_client_list *client_list = NULL;
struct lttng_evaluation *evaluation = NULL;
- bool client_list_is_empty;
+ enum action_executor_status executor_status;
ret = 0;
trigger = trigger_list_element->trigger;
condition = lttng_trigger_get_const_condition(trigger);
assert(condition);
- action = lttng_trigger_get_const_action(trigger);
-
- /* Notify actions are the only type currently supported. */
- assert(lttng_action_get_type_const(action) ==
- LTTNG_ACTION_TYPE_NOTIFY);
/*
* Check if any client is subscribed to the result of this
* evaluation.
*/
client_list = get_client_list_from_condition(state, condition);
- assert(client_list);
- client_list_is_empty = cds_list_empty(&client_list->list);
- if (client_list_is_empty) {
- /*
- * No clients interested in the evaluation's result,
- * skip it.
- */
- goto put_list;
- }
ret = evaluate_buffer_condition(condition, &evaluation, state,
previous_sample_available ? &previous_sample : NULL,
goto put_list;
}
- /* Dispatch evaluation result to all clients. */
- ret = send_evaluation_to_clients(trigger_list_element->trigger,
- evaluation, client_list, state,
- channel_info->session_info->uid,
- channel_info->session_info->gid);
- lttng_evaluation_destroy(evaluation);
+ if (!lttng_trigger_should_fire(trigger)) {
+ goto put_list;
+ }
+
+ lttng_trigger_fire(trigger);
+
+ /*
+ * Ownership of `evaluation` is transferred to the action
+ * executor no matter the result.
+ */
+ executor_status = action_executor_enqueue(state->executor,
+ trigger, evaluation, &channel_creds,
+ client_list);
+ evaluation = NULL;
+ switch (executor_status) {
+ case ACTION_EXECUTOR_STATUS_OK:
+ break;
+ case ACTION_EXECUTOR_STATUS_ERROR:
+ case ACTION_EXECUTOR_STATUS_INVALID:
+ /*
+ * TODO Add trigger identification (name/id) when
+ * it is added to the API.
+ */
+ ERR("Fatal error occurred while enqueuing action associated with buffer-condition trigger");
+ ret = -1;
+ goto put_list;
+ case ACTION_EXECUTOR_STATUS_OVERFLOW:
+ /*
+ * TODO Add trigger identification (name/id) when
+ * it is added to the API.
+ *
+ * Not a fatal error.
+ */
+ WARN("No space left when enqueuing action associated with buffer-condition trigger");
+ ret = 0;
+ goto put_list;
+ default:
+ abort();
+ }
+
put_list:
notification_client_list_put(client_list);
if (caa_unlikely(ret)) {