+ assert(trigger);
+ assert(condition);
+ assert(client);
+ assert(state);
+
+ switch (get_condition_binding_object(condition)) {
+ case LTTNG_OBJECT_TYPE_SESSION:
+ ret = evaluate_session_condition_for_client(condition, state,
+ &evaluation, &object_uid, &object_gid);
+ break;
+ case LTTNG_OBJECT_TYPE_CHANNEL:
+ ret = evaluate_channel_condition_for_client(condition, state,
+ &evaluation, &object_uid, &object_gid);
+ break;
+ case LTTNG_OBJECT_TYPE_NONE:
+ DBG("[notification-thread] Newly subscribed-to condition not bound to object, nothing to evaluate");
+ ret = 0;
+ goto end;
+ case LTTNG_OBJECT_TYPE_UNKNOWN:
+ default:
+ ret = -1;
+ goto end;
+ }
+ if (ret) {
+ /* Fatal error. */
+ goto end;
+ }
+ if (!evaluation) {
+ /* Evaluation yielded nothing. Normal exit. */
+ DBG("[notification-thread] Newly subscribed-to condition evaluated to false, nothing to report to client");
+ ret = 0;
+ goto end;
+ }
+
+ /*
+ * Create a temporary client list with the client currently
+ * subscribing.
+ */
+ cds_lfht_node_init(&client_list.notification_trigger_clients_ht_node);
+ CDS_INIT_LIST_HEAD(&client_list.list);
+ client_list.trigger = trigger;
+
+ CDS_INIT_LIST_HEAD(&client_list_element.node);
+ client_list_element.client = client;
+ cds_list_add(&client_list_element.node, &client_list.list);
+
+ /* Send evaluation result to the newly-subscribed client. */
+ DBG("[notification-thread] Newly subscribed-to condition evaluated to true, notifying client");
+ ret = send_evaluation_to_clients(trigger, evaluation, &client_list,
+ state, object_uid, object_gid);
+
+end:
+ return ret;
+}
+
/*
 * Subscribe a client to a condition.
 *
 * The condition is stored by reference (no copy is made) in the client's
 * condition list. If a notification-emitting trigger with an equal
 * condition is already registered, the condition is evaluated immediately
 * (so that an already-true condition results in a notification being sent
 * to the subscriber) and the client is added to that trigger's client
 * list.
 *
 * Returns 0 on success — including the "already subscribed" case, which
 * is reported through `_status` — and -1 on allocation or evaluation
 * failure.
 */
static
int notification_thread_client_subscribe(struct notification_client *client,
		struct lttng_condition *condition,
		struct notification_thread_state *state,
		enum lttng_notification_channel_status *_status)
{
	int ret = 0;
	struct notification_client_list *client_list = NULL;
	struct lttng_condition_list_element *condition_list_element = NULL;
	struct notification_client_list_element *client_list_element = NULL;
	enum lttng_notification_channel_status status =
			LTTNG_NOTIFICATION_CHANNEL_STATUS_OK;

	/*
	 * Ensure that the client has not already subscribed to this condition
	 * before.
	 */
	cds_list_for_each_entry(condition_list_element, &client->condition_list, node) {
		if (lttng_condition_is_equal(condition_list_element->condition,
				condition)) {
			status = LTTNG_NOTIFICATION_CHANNEL_STATUS_ALREADY_SUBSCRIBED;
			goto end;
		}
	}

	condition_list_element = zmalloc(sizeof(*condition_list_element));
	if (!condition_list_element) {
		ret = -1;
		goto error;
	}
	client_list_element = zmalloc(sizeof(*client_list_element));
	if (!client_list_element) {
		ret = -1;
		goto error;
	}

	/*
	 * Add the newly-subscribed condition to the client's subscription list.
	 */
	CDS_INIT_LIST_HEAD(&condition_list_element->node);
	condition_list_element->condition = condition;
	cds_list_add(&condition_list_element->node, &client->condition_list);

	client_list = get_client_list_from_condition(state, condition);
	if (!client_list) {
		/*
		 * No notification-emiting trigger registered with this
		 * condition. We don't evaluate the condition right away
		 * since this trigger is not registered yet.
		 */
		free(client_list_element);
		goto end;
	}

	/*
	 * The condition to which the client just subscribed is evaluated
	 * at this point so that conditions that are already TRUE result
	 * in a notification being sent out.
	 *
	 * The client_list's trigger is used without locking the list itself.
	 * This is correct since the list doesn't own the trigger and the
	 * object is immutable.
	 */
	if (evaluate_condition_for_client(client_list->trigger, condition,
			client, state)) {
		WARN("[notification-thread] Evaluation of a condition on client subscription failed, aborting.");
		ret = -1;
		free(client_list_element);
		goto end;
	}

	/*
	 * Add the client to the list of clients interested in a given trigger
	 * if a "notification" trigger with a corresponding condition was
	 * added prior.
	 */
	client_list_element->client = client;
	CDS_INIT_LIST_HEAD(&client_list_element->node);

	pthread_mutex_lock(&client_list->lock);
	cds_list_add(&client_list_element->node, &client_list->list);
	pthread_mutex_unlock(&client_list->lock);
end:
	if (_status) {
		*_status = status;
	}
	if (client_list) {
		/* Release the reference taken by get_client_list_from_condition(). */
		notification_client_list_put(client_list);
	}
	return ret;
error:
	/* Neither element was published to a list at this point. */
	free(condition_list_element);
	free(client_list_element);
	return ret;
}
+
/*
 * Unsubscribe a client from a condition.
 *
 * Removes the first condition equal to `condition` from the client's
 * subscription list and, if a trigger client list exists for that
 * condition, removes the client from it.
 *
 * Ownership of `condition` is taken from the caller: it is always
 * destroyed before returning, whether or not a matching subscription was
 * found.
 *
 * Always returns 0; a missing subscription is reported through `_status`
 * as LTTNG_NOTIFICATION_CHANNEL_STATUS_UNKNOWN_CONDITION.
 */
static
int notification_thread_client_unsubscribe(
		struct notification_client *client,
		struct lttng_condition *condition,
		struct notification_thread_state *state,
		enum lttng_notification_channel_status *_status)
{
	struct notification_client_list *client_list;
	struct lttng_condition_list_element *condition_list_element,
			*condition_tmp;
	struct notification_client_list_element *client_list_element,
			*client_tmp;
	bool condition_found = false;
	enum lttng_notification_channel_status status =
			LTTNG_NOTIFICATION_CHANNEL_STATUS_OK;

	/* Remove the condition from the client's condition list. */
	cds_list_for_each_entry_safe(condition_list_element, condition_tmp,
			&client->condition_list, node) {
		if (!lttng_condition_is_equal(condition_list_element->condition,
				condition)) {
			continue;
		}

		cds_list_del(&condition_list_element->node);
		/*
		 * The caller may be iterating on the client's conditions to
		 * tear down a client's connection. In this case, the condition
		 * will be destroyed at the end.
		 */
		if (condition != condition_list_element->condition) {
			lttng_condition_destroy(
					condition_list_element->condition);
		}
		free(condition_list_element);
		condition_found = true;
		break;
	}

	if (!condition_found) {
		status = LTTNG_NOTIFICATION_CHANNEL_STATUS_UNKNOWN_CONDITION;
		goto end;
	}

	/*
	 * Remove the client from the list of clients interested the trigger
	 * matching the condition.
	 */
	client_list = get_client_list_from_condition(state, condition);
	if (!client_list) {
		/* No trigger registered with this condition; nothing to do. */
		goto end;
	}

	pthread_mutex_lock(&client_list->lock);
	cds_list_for_each_entry_safe(client_list_element, client_tmp,
			&client_list->list, node) {
		if (client_list_element->client->id != client->id) {
			continue;
		}
		cds_list_del(&client_list_element->node);
		free(client_list_element);
		break;
	}
	pthread_mutex_unlock(&client_list->lock);
	/* Release the reference taken by get_client_list_from_condition(). */
	notification_client_list_put(client_list);
	client_list = NULL;
end:
	/* The caller's condition is consumed in all cases. */
	lttng_condition_destroy(condition);
	if (_status) {
		*_status = status;
	}
	return 0;
}
+
+static
+void free_notification_client_rcu(struct rcu_head *node)
+{
+ free(caa_container_of(node, struct notification_client, rcu_node));
+}
+
/*
 * Tear down a client: close its socket, mark its communication as
 * inactive, reset the in/out payload buffers, and hand the client
 * structure to RCU for deferred reclamation.
 *
 * NOTE(review): `state` is not used in this function's body — presumably
 * kept for signature symmetry with other client operations; confirm
 * before removing.
 */
static
void notification_client_destroy(struct notification_client *client,
		struct notification_thread_state *state)
{
	if (!client) {
		return;
	}

	/*
	 * The client object is not reachable by other threads, no need to lock
	 * the client here.
	 */
	if (client->socket >= 0) {
		(void) lttcomm_close_unix_sock(client->socket);
		client->socket = -1;
	}
	client->communication.active = false;
	lttng_payload_reset(&client->communication.inbound.payload);
	lttng_payload_reset(&client->communication.outbound.payload);
	pthread_mutex_destroy(&client->lock);
	/* Actual free is deferred until all RCU readers are done. */
	call_rcu(&client->rcu_node, free_notification_client_rcu);
}
+
+/*
+ * Call with rcu_read_lock held (and hold for the lifetime of the returned
+ * client pointer).
+ */
+static
+struct notification_client *get_client_from_socket(int socket,
+ struct notification_thread_state *state)
+{
+ struct cds_lfht_iter iter;
+ struct cds_lfht_node *node;
+ struct notification_client *client = NULL;
+
+ cds_lfht_lookup(state->client_socket_ht,
+ hash_client_socket(socket),
+ match_client_socket,
+ (void *) (unsigned long) socket,
+ &iter);
+ node = cds_lfht_iter_get_node(&iter);
+ if (!node) {
+ goto end;
+ }
+
+ client = caa_container_of(node, struct notification_client,
+ client_socket_ht_node);
+end:
+ return client;
+}
+
+/*
+ * Call with rcu_read_lock held (and hold for the lifetime of the returned
+ * client pointer).
+ */
+static
+struct notification_client *get_client_from_id(notification_client_id id,
+ struct notification_thread_state *state)
+{
+ struct cds_lfht_iter iter;
+ struct cds_lfht_node *node;
+ struct notification_client *client = NULL;
+
+ cds_lfht_lookup(state->client_id_ht,
+ hash_client_id(id),
+ match_client_id,
+ &id,
+ &iter);
+ node = cds_lfht_iter_get_node(&iter);
+ if (!node) {
+ goto end;
+ }
+
+ client = caa_container_of(node, struct notification_client,
+ client_id_ht_node);
+end:
+ return client;
+}
+
+static
+bool buffer_usage_condition_applies_to_channel(
+ const struct lttng_condition *condition,
+ const struct channel_info *channel_info)
+{
+ enum lttng_condition_status status;
+ enum lttng_domain_type condition_domain;
+ const char *condition_session_name = NULL;
+ const char *condition_channel_name = NULL;
+
+ status = lttng_condition_buffer_usage_get_domain_type(condition,
+ &condition_domain);
+ assert(status == LTTNG_CONDITION_STATUS_OK);
+ if (channel_info->key.domain != condition_domain) {
+ goto fail;
+ }
+
+ status = lttng_condition_buffer_usage_get_session_name(
+ condition, &condition_session_name);
+ assert((status == LTTNG_CONDITION_STATUS_OK) && condition_session_name);
+
+ status = lttng_condition_buffer_usage_get_channel_name(
+ condition, &condition_channel_name);
+ assert((status == LTTNG_CONDITION_STATUS_OK) && condition_channel_name);
+
+ if (strcmp(channel_info->session_info->name, condition_session_name)) {
+ goto fail;
+ }
+ if (strcmp(channel_info->name, condition_channel_name)) {
+ goto fail;
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+static
+bool session_consumed_size_condition_applies_to_channel(
+ const struct lttng_condition *condition,
+ const struct channel_info *channel_info)
+{
+ enum lttng_condition_status status;
+ const char *condition_session_name = NULL;
+
+ status = lttng_condition_session_consumed_size_get_session_name(
+ condition, &condition_session_name);
+ assert((status == LTTNG_CONDITION_STATUS_OK) && condition_session_name);
+
+ if (strcmp(channel_info->session_info->name, condition_session_name)) {
+ goto fail;
+ }
+
+ return true;
+fail:
+ return false;
+}
+
+static
+bool trigger_applies_to_channel(const struct lttng_trigger *trigger,
+ const struct channel_info *channel_info)
+{
+ const struct lttng_condition *condition;
+ bool trigger_applies;
+
+ condition = lttng_trigger_get_const_condition(trigger);
+ if (!condition) {
+ goto fail;
+ }
+
+ switch (lttng_condition_get_type(condition)) {
+ case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
+ case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
+ trigger_applies = buffer_usage_condition_applies_to_channel(
+ condition, channel_info);
+ break;
+ case LTTNG_CONDITION_TYPE_SESSION_CONSUMED_SIZE:
+ trigger_applies = session_consumed_size_condition_applies_to_channel(
+ condition, channel_info);
+ break;
+ default:
+ goto fail;
+ }
+
+ return trigger_applies;
+fail:
+ return false;
+}
+
+static
+bool trigger_applies_to_client(struct lttng_trigger *trigger,
+ struct notification_client *client)
+{
+ bool applies = false;
+ struct lttng_condition_list_element *condition_list_element;
+
+ cds_list_for_each_entry(condition_list_element, &client->condition_list,
+ node) {
+ applies = lttng_condition_is_equal(
+ condition_list_element->condition,
+ lttng_trigger_get_condition(trigger));
+ if (applies) {
+ break;
+ }
+ }
+ return applies;
+}
+
+/* Must be called with RCU read lock held. */
+static
+struct lttng_session_trigger_list *get_session_trigger_list(
+ struct notification_thread_state *state,
+ const char *session_name)
+{
+ struct lttng_session_trigger_list *list = NULL;
+ struct cds_lfht_node *node;
+ struct cds_lfht_iter iter;
+
+ cds_lfht_lookup(state->session_triggers_ht,
+ hash_key_str(session_name, lttng_ht_seed),
+ match_session_trigger_list,
+ session_name,
+ &iter);
+ node = cds_lfht_iter_get_node(&iter);
+ if (!node) {
+ /*
+ * Not an error, the list of triggers applying to that session
+ * will be initialized when the session is created.
+ */
+ DBG("[notification-thread] No trigger list found for session \"%s\" as it is not yet known to the notification system",
+ session_name);
+ goto end;
+ }
+
+ list = caa_container_of(node,
+ struct lttng_session_trigger_list,
+ session_triggers_ht_node);
+end:
+ return list;
+}
+
/*
 * Allocate an empty lttng_session_trigger_list for the session named
 * 'session_name'.
 *
 * No ownership of 'session_name' is assumed by the session trigger list.
 * It is the caller's responsability to ensure the session name is alive
 * for as long as this list is.
 *
 * The list is published in 'session_triggers_ht' before this function
 * returns. Returns NULL on allocation failure.
 */
static
struct lttng_session_trigger_list *lttng_session_trigger_list_create(
		const char *session_name,
		struct cds_lfht *session_triggers_ht)
{
	struct lttng_session_trigger_list *list;

	list = zmalloc(sizeof(*list));
	if (!list) {
		goto end;
	}
	list->session_name = session_name;
	CDS_INIT_LIST_HEAD(&list->list);
	cds_lfht_node_init(&list->session_triggers_ht_node);
	/* Keep a back-reference for unpublication on destruction. */
	list->session_triggers_ht = session_triggers_ht;

	rcu_read_lock();
	/* Publish the list through the session_triggers_ht. */
	cds_lfht_add(session_triggers_ht,
			hash_key_str(session_name, lttng_ht_seed),
			&list->session_triggers_ht_node);
	rcu_read_unlock();
end:
	return list;
}
+
+static
+void free_session_trigger_list_rcu(struct rcu_head *node)
+{
+ free(caa_container_of(node, struct lttng_session_trigger_list,
+ rcu_node));
+}
+
/*
 * Destroy a session trigger list: free each list element immediately,
 * unpublish the list from the session_triggers_ht, and defer the free of
 * the list structure itself to RCU.
 */
static
void lttng_session_trigger_list_destroy(struct lttng_session_trigger_list *list)
{
	struct lttng_trigger_list_element *trigger_list_element, *tmp;

	/* Empty the list element by element, and then free the list itself. */
	cds_list_for_each_entry_safe(trigger_list_element, tmp,
			&list->list, node) {
		cds_list_del(&trigger_list_element->node);
		free(trigger_list_element);
	}
	rcu_read_lock();
	/* Unpublish the list from the session_triggers_ht. */
	cds_lfht_del(list->session_triggers_ht,
			&list->session_triggers_ht_node);
	rcu_read_unlock();
	/* Readers may still hold a reference; free after a grace period. */
	call_rcu(&list->rcu_node, free_session_trigger_list_rcu);
}
+
+static
+int lttng_session_trigger_list_add(struct lttng_session_trigger_list *list,
+ struct lttng_trigger *trigger)
+{
+ int ret = 0;
+ struct lttng_trigger_list_element *new_element =
+ zmalloc(sizeof(*new_element));
+
+ if (!new_element) {
+ ret = -1;
+ goto end;
+ }
+ CDS_INIT_LIST_HEAD(&new_element->node);
+ new_element->trigger = trigger;
+ cds_list_add(&new_element->node, &list->list);
+end:
+ return ret;
+}
+
+static
+bool trigger_applies_to_session(const struct lttng_trigger *trigger,
+ const char *session_name)
+{
+ bool applies = false;
+ const struct lttng_condition *condition;
+
+ condition = lttng_trigger_get_const_condition(trigger);
+ switch (lttng_condition_get_type(condition)) {
+ case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
+ case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
+ {
+ enum lttng_condition_status condition_status;
+ const char *condition_session_name;
+
+ condition_status = lttng_condition_session_rotation_get_session_name(
+ condition, &condition_session_name);
+ if (condition_status != LTTNG_CONDITION_STATUS_OK) {
+ ERR("[notification-thread] Failed to retrieve session rotation condition's session name");
+ goto end;
+ }
+
+ assert(condition_session_name);
+ applies = !strcmp(condition_session_name, session_name);
+ break;
+ }
+ default:
+ goto end;
+ }
+end:
+ return applies;
+}
+
+/*
+ * Allocate and initialize an lttng_session_trigger_list which contains
+ * all triggers that apply to the session named 'session_name'.
+ *
+ * No ownership of 'session_name' is assumed by the session trigger list.
+ * It is the caller's responsability to ensure the session name is alive
+ * for as long as this list is.
+ */
+static
+struct lttng_session_trigger_list *lttng_session_trigger_list_build(
+ const struct notification_thread_state *state,
+ const char *session_name)
+{
+ int trigger_count = 0;
+ struct lttng_session_trigger_list *session_trigger_list = NULL;
+ struct lttng_trigger_ht_element *trigger_ht_element = NULL;
+ struct cds_lfht_iter iter;
+
+ session_trigger_list = lttng_session_trigger_list_create(session_name,
+ state->session_triggers_ht);
+
+ /* Add all triggers applying to the session named 'session_name'. */
+ cds_lfht_for_each_entry(state->triggers_ht, &iter, trigger_ht_element,
+ node) {
+ int ret;
+
+ if (!trigger_applies_to_session(trigger_ht_element->trigger,
+ session_name)) {
+ continue;
+ }
+
+ ret = lttng_session_trigger_list_add(session_trigger_list,
+ trigger_ht_element->trigger);
+ if (ret) {
+ goto error;
+ }
+
+ trigger_count++;
+ }
+
+ DBG("[notification-thread] Found %i triggers that apply to newly created session",
+ trigger_count);
+ return session_trigger_list;
+error:
+ lttng_session_trigger_list_destroy(session_trigger_list);
+ return NULL;
+}
+
+static
+struct session_info *find_or_create_session_info(
+ struct notification_thread_state *state,
+ const char *name, uid_t uid, gid_t gid)
+{
+ struct session_info *session = NULL;
+ struct cds_lfht_node *node;
+ struct cds_lfht_iter iter;
+ struct lttng_session_trigger_list *trigger_list;
+
+ rcu_read_lock();
+ cds_lfht_lookup(state->sessions_ht,
+ hash_key_str(name, lttng_ht_seed),
+ match_session,
+ name,
+ &iter);
+ node = cds_lfht_iter_get_node(&iter);
+ if (node) {
+ DBG("[notification-thread] Found session info of session \"%s\" (uid = %i, gid = %i)",
+ name, uid, gid);
+ session = caa_container_of(node, struct session_info,
+ sessions_ht_node);
+ assert(session->uid == uid);
+ assert(session->gid == gid);
+ session_info_get(session);
+ goto end;
+ }
+
+ trigger_list = lttng_session_trigger_list_build(state, name);
+ if (!trigger_list) {
+ goto error;
+ }
+
+ session = session_info_create(name, uid, gid, trigger_list,
+ state->sessions_ht);
+ if (!session) {
+ ERR("[notification-thread] Failed to allocation session info for session \"%s\" (uid = %i, gid = %i)",
+ name, uid, gid);
+ lttng_session_trigger_list_destroy(trigger_list);
+ goto error;
+ }
+ trigger_list = NULL;
+
+ cds_lfht_add(state->sessions_ht, hash_key_str(name, lttng_ht_seed),
+ &session->sessions_ht_node);
+end:
+ rcu_read_unlock();
+ return session;
+error:
+ rcu_read_unlock();
+ session_info_put(session);
+ return NULL;
+}
+
+static
+int handle_notification_thread_command_add_channel(
+ struct notification_thread_state *state,
+ const char *session_name, uid_t session_uid, gid_t session_gid,
+ const char *channel_name, enum lttng_domain_type channel_domain,
+ uint64_t channel_key_int, uint64_t channel_capacity,
+ enum lttng_error_code *cmd_result)
+{
+ struct cds_list_head trigger_list;
+ struct channel_info *new_channel_info = NULL;
+ struct channel_key channel_key = {
+ .key = channel_key_int,
+ .domain = channel_domain,
+ };
+ struct lttng_channel_trigger_list *channel_trigger_list = NULL;
+ struct lttng_trigger_ht_element *trigger_ht_element = NULL;
+ int trigger_count = 0;
+ struct cds_lfht_iter iter;
+ struct session_info *session_info = NULL;
+
+ DBG("[notification-thread] Adding channel %s from session %s, channel key = %" PRIu64 " in %s domain",
+ channel_name, session_name, channel_key_int,
+ lttng_domain_type_str(channel_domain));
+
+ CDS_INIT_LIST_HEAD(&trigger_list);
+
+ session_info = find_or_create_session_info(state, session_name,
+ session_uid, session_gid);
+ if (!session_info) {
+ /* Allocation error or an internal error occurred. */
+ goto error;
+ }
+
+ new_channel_info = channel_info_create(channel_name, &channel_key,
+ channel_capacity, session_info);
+ if (!new_channel_info) {
+ goto error;
+ }
+
+ rcu_read_lock();
+ /* Build a list of all triggers applying to the new channel. */
+ cds_lfht_for_each_entry(state->triggers_ht, &iter, trigger_ht_element,
+ node) {
+ struct lttng_trigger_list_element *new_element;
+
+ if (!trigger_applies_to_channel(trigger_ht_element->trigger,
+ new_channel_info)) {
+ continue;
+ }
+
+ new_element = zmalloc(sizeof(*new_element));
+ if (!new_element) {
+ rcu_read_unlock();
+ goto error;
+ }
+ CDS_INIT_LIST_HEAD(&new_element->node);
+ new_element->trigger = trigger_ht_element->trigger;
+ cds_list_add(&new_element->node, &trigger_list);
+ trigger_count++;
+ }
+ rcu_read_unlock();
+
+ DBG("[notification-thread] Found %i triggers that apply to newly added channel",
+ trigger_count);
+ channel_trigger_list = zmalloc(sizeof(*channel_trigger_list));
+ if (!channel_trigger_list) {
+ goto error;
+ }
+ channel_trigger_list->channel_key = new_channel_info->key;
+ CDS_INIT_LIST_HEAD(&channel_trigger_list->list);
+ cds_lfht_node_init(&channel_trigger_list->channel_triggers_ht_node);
+ cds_list_splice(&trigger_list, &channel_trigger_list->list);
+
+ rcu_read_lock();
+ /* Add channel to the channel_ht which owns the channel_infos. */
+ cds_lfht_add(state->channels_ht,
+ hash_channel_key(&new_channel_info->key),
+ &new_channel_info->channels_ht_node);
+ /*
+ * Add the list of triggers associated with this channel to the
+ * channel_triggers_ht.
+ */
+ cds_lfht_add(state->channel_triggers_ht,
+ hash_channel_key(&new_channel_info->key),
+ &channel_trigger_list->channel_triggers_ht_node);
+ rcu_read_unlock();
+ session_info_put(session_info);
+ *cmd_result = LTTNG_OK;
+ return 0;
+error:
+ channel_info_destroy(new_channel_info);
+ session_info_put(session_info);
+ return 1;
+}
+
+static
+void free_channel_trigger_list_rcu(struct rcu_head *node)
+{
+ free(caa_container_of(node, struct lttng_channel_trigger_list,
+ rcu_node));
+}
+
+static
+void free_channel_state_sample_rcu(struct rcu_head *node)
+{
+ free(caa_container_of(node, struct channel_state_sample,
+ rcu_node));
+}
+
/*
 * Handle a "remove channel" command.
 *
 * Tears down, in order: the channel's trigger list (elements freed
 * immediately, list structure reclaimed through RCU), the channel's last
 * state sample (if one was received), and the channel_info itself.
 *
 * Always returns 0.
 *
 * NOTE(review): *cmd_result is set to LTTNG_OK even on the unknown-channel
 * path below, where only an error is logged — confirm this is intended.
 */
static
int handle_notification_thread_command_remove_channel(
		struct notification_thread_state *state,
		uint64_t channel_key, enum lttng_domain_type domain,
		enum lttng_error_code *cmd_result)
{
	struct cds_lfht_node *node;
	struct cds_lfht_iter iter;
	struct lttng_channel_trigger_list *trigger_list;
	struct lttng_trigger_list_element *trigger_list_element, *tmp;
	struct channel_key key = { .key = channel_key, .domain = domain };
	struct channel_info *channel_info;

	DBG("[notification-thread] Removing channel key = %" PRIu64 " in %s domain",
			channel_key, lttng_domain_type_str(domain));

	rcu_read_lock();

	cds_lfht_lookup(state->channel_triggers_ht,
			hash_channel_key(&key),
			match_channel_trigger_list,
			&key,
			&iter);
	node = cds_lfht_iter_get_node(&iter);
	/*
	 * There is a severe internal error if we are being asked to remove a
	 * channel that doesn't exist.
	 */
	if (!node) {
		ERR("[notification-thread] Channel being removed is unknown to the notification thread");
		goto end;
	}

	/* Free the list of triggers associated with this channel. */
	trigger_list = caa_container_of(node, struct lttng_channel_trigger_list,
			channel_triggers_ht_node);
	cds_list_for_each_entry_safe(trigger_list_element, tmp,
			&trigger_list->list, node) {
		cds_list_del(&trigger_list_element->node);
		free(trigger_list_element);
	}
	cds_lfht_del(state->channel_triggers_ht, node);
	call_rcu(&trigger_list->rcu_node, free_channel_trigger_list_rcu);

	/* Free sampled channel state. */
	cds_lfht_lookup(state->channel_state_ht,
			hash_channel_key(&key),
			match_channel_state_sample,
			&key,
			&iter);
	node = cds_lfht_iter_get_node(&iter);
	/*
	 * This is expected to be NULL if the channel is destroyed before we
	 * received a sample.
	 */
	if (node) {
		struct channel_state_sample *sample = caa_container_of(node,
				struct channel_state_sample,
				channel_state_ht_node);

		cds_lfht_del(state->channel_state_ht, node);
		call_rcu(&sample->rcu_node, free_channel_state_sample_rcu);
	}

	/* Remove the channel from the channels_ht and free it. */
	cds_lfht_lookup(state->channels_ht,
			hash_channel_key(&key),
			match_channel_info,
			&key,
			&iter);
	node = cds_lfht_iter_get_node(&iter);
	/* A channel with a trigger list must exist in channels_ht. */
	assert(node);
	channel_info = caa_container_of(node, struct channel_info,
			channels_ht_node);
	cds_lfht_del(state->channels_ht, node);
	channel_info_destroy(channel_info);
end:
	rcu_read_unlock();
	*cmd_result = LTTNG_OK;
	return 0;
}
+
/*
 * Handle a "session rotation ongoing/completed" command.
 *
 * Updates the session's rotation state (ongoing flag and chunk id) and,
 * for each trigger registered against the session whose condition type
 * matches the command type, creates a rotation evaluation and enqueues
 * the trigger on the action executor.
 *
 * Returns 0 on success (including "no triggers apply"), -1 on fatal
 * error; the finer-grained result is reported through _cmd_result.
 */
static
int handle_notification_thread_command_session_rotation(
		struct notification_thread_state *state,
		enum notification_thread_command_type cmd_type,
		const char *session_name, uid_t session_uid, gid_t session_gid,
		uint64_t trace_archive_chunk_id,
		struct lttng_trace_archive_location *location,
		enum lttng_error_code *_cmd_result)
{
	int ret = 0;
	enum lttng_error_code cmd_result = LTTNG_OK;
	struct lttng_session_trigger_list *trigger_list;
	struct lttng_trigger_list_element *trigger_list_element;
	struct session_info *session_info;
	const struct lttng_credentials session_creds = {
		.uid = LTTNG_OPTIONAL_INIT_VALUE(session_uid),
		.gid = LTTNG_OPTIONAL_INIT_VALUE(session_gid),
	};

	rcu_read_lock();

	session_info = find_or_create_session_info(state, session_name,
			session_uid, session_gid);
	if (!session_info) {
		/* Allocation error or an internal error occurred. */
		ret = -1;
		cmd_result = LTTNG_ERR_NOMEM;
		goto end;
	}

	session_info->rotation.ongoing =
			cmd_type == NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING;
	session_info->rotation.id = trace_archive_chunk_id;
	trigger_list = get_session_trigger_list(state, session_name);
	if (!trigger_list) {
		DBG("[notification-thread] No triggers applying to session \"%s\" found",
				session_name);
		goto end;
	}

	cds_list_for_each_entry(trigger_list_element, &trigger_list->list,
			node) {
		const struct lttng_condition *condition;
		struct lttng_trigger *trigger;
		struct notification_client_list *client_list;
		struct lttng_evaluation *evaluation = NULL;
		enum lttng_condition_type condition_type;
		enum action_executor_status executor_status;

		trigger = trigger_list_element->trigger;
		condition = lttng_trigger_get_const_condition(trigger);
		assert(condition);
		condition_type = lttng_condition_get_type(condition);

		/* Skip triggers whose condition does not match the command. */
		if (condition_type == LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING &&
				cmd_type != NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING) {
			continue;
		} else if (condition_type == LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED &&
				cmd_type != NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_COMPLETED) {
			continue;
		}

		/*
		 * NOTE(review): client_list may be NULL here and is passed
		 * as-is to action_executor_enqueue() and
		 * notification_client_list_put() — presumably both tolerate
		 * NULL; confirm.
		 */
		client_list = get_client_list_from_condition(state, condition);
		if (cmd_type == NOTIFICATION_COMMAND_TYPE_SESSION_ROTATION_ONGOING) {
			evaluation = lttng_evaluation_session_rotation_ongoing_create(
					trace_archive_chunk_id);
		} else {
			evaluation = lttng_evaluation_session_rotation_completed_create(
					trace_archive_chunk_id, location);
		}

		if (!evaluation) {
			/* Internal error */
			ret = -1;
			cmd_result = LTTNG_ERR_UNK;
			goto put_list;
		}

		/*
		 * Ownership of `evaluation` transferred to the action executor
		 * no matter the result.
		 */
		executor_status = action_executor_enqueue(state->executor,
				trigger, evaluation, &session_creds,
				client_list);
		evaluation = NULL;
		switch (executor_status) {
		case ACTION_EXECUTOR_STATUS_OK:
			break;
		case ACTION_EXECUTOR_STATUS_ERROR:
		case ACTION_EXECUTOR_STATUS_INVALID:
			/*
			 * TODO Add trigger identification (name/id) when
			 * it is added to the API.
			 */
			ERR("Fatal error occurred while enqueuing action associated with session rotation trigger");
			ret = -1;
			goto put_list;
		case ACTION_EXECUTOR_STATUS_OVERFLOW:
			/*
			 * TODO Add trigger identification (name/id) when
			 * it is added to the API.
			 *
			 * Not a fatal error.
			 */
			WARN("No space left when enqueuing action associated with session rotation trigger");
			ret = 0;
			goto put_list;
		default:
			abort();
		}

put_list:
		/* Release the reference taken for this iteration. */
		notification_client_list_put(client_list);
		if (caa_unlikely(ret)) {
			break;
		}
	}
end:
	session_info_put(session_info);
	*_cmd_result = cmd_result;
	rcu_read_unlock();
	return ret;
}
+
+static
+int handle_notification_thread_command_add_tracer_event_source(
+ struct notification_thread_state *state,
+ int tracer_event_source_fd,
+ enum lttng_domain_type domain_type,
+ enum lttng_error_code *_cmd_result)
+{
+ int ret = 0;
+ enum lttng_error_code cmd_result = LTTNG_OK;
+ struct notification_event_tracer_event_source_element *element = NULL;
+
+ element = zmalloc(sizeof(*element));
+ if (!element) {
+ cmd_result = LTTNG_ERR_NOMEM;
+ ret = -1;
+ goto end;
+ }
+
+ element->fd = tracer_event_source_fd;
+ element->domain = domain_type;
+
+ cds_list_add(&element->node, &state->tracer_event_sources_list);
+
+ DBG3("[notification-thread] Adding tracer event source fd to poll set: tracer_event_source_fd = %d, domain = '%s'",
+ tracer_event_source_fd,
+ lttng_domain_type_str(domain_type));
+
+ /* Adding the read side pipe to the event poll. */
+ ret = lttng_poll_add(&state->events, tracer_event_source_fd, LPOLLIN | LPOLLERR);
+ if (ret < 0) {
+ ERR("[notification-thread] Failed to add tracer event source to poll set: tracer_event_source_fd = %d, domain = '%s'",
+ tracer_event_source_fd,
+ lttng_domain_type_str(element->domain));
+ cds_list_del(&element->node);
+ free(element);
+ goto end;
+ }
+
+ element->is_fd_in_poll_set = true;
+
+end:
+ *_cmd_result = cmd_result;
+ return ret;
+}
+
/*
 * Consume all pending event notifier notifications from `pipe`.
 *
 * Polls the pipe with a zero timeout and reads one notification per
 * iteration until no more input is available. Consumption errors are
 * logged but do not abort the drain; only poll failures yield -1.
 * Returns 0 once the pipe is drained.
 */
static
int drain_event_notifier_notification_pipe(
		struct notification_thread_state *state,
		int pipe, enum lttng_domain_type domain)
{
	struct lttng_poll_event events = {0};
	int ret;

	/* A dedicated single-fd poll set is used to probe the pipe. */
	ret = lttng_poll_create(&events, 1, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("[notification-thread] Error creating lttng_poll_event");
		goto end;
	}

	ret = lttng_poll_add(&events, pipe, LPOLLIN);
	if (ret < 0) {
		ERR("[notification-thread] Error adding fd event notifier notification pipe to lttng_poll_event: fd = %d",
				pipe);
		goto end;
	}

	while (true) {
		/*
		 * Continue to consume notifications as long as there are new
		 * ones coming in. The tracer has been asked to stop producing
		 * them.
		 *
		 * LPOLLIN is explicitly checked since LPOLLHUP is implicitly
		 * monitored (on Linux, at least) and will be returned when
		 * the pipe is closed but empty.
		 */
		ret = lttng_poll_wait_interruptible(&events, 0);
		if (ret == 0 || (LTTNG_POLL_GETEV(&events, 0) & LPOLLIN) == 0) {
			/* No more notification to be read on this pipe. */
			ret = 0;
			goto end;
		} else if (ret < 0) {
			PERROR("Failed on lttng_poll_wait_interruptible() call");
			ret = -1;
			goto end;
		}

		ret = handle_one_event_notifier_notification(state, pipe, domain);
		if (ret) {
			ERR("[notification-thread] Error consuming an event notifier notification from pipe: fd = %d",
					pipe);
		}
	}
end:
	lttng_poll_clean(&events);
	return ret;
}
+
/*
 * Handle a "remove tracer event source" command.
 *
 * Unlinks the source matching `tracer_event_source_fd` from the state's
 * source list, removes its fd from the poll set (if present), drains any
 * pending notifications from the pipe and frees the source element.
 *
 * Returns 0 on success, non-zero on fatal error; the detailed result is
 * reported through _cmd_result.
 */
static
int handle_notification_thread_command_remove_tracer_event_source(
		struct notification_thread_state *state,
		int tracer_event_source_fd,
		enum lttng_error_code *_cmd_result)
{
	int ret = 0;
	bool found = false;
	enum lttng_error_code cmd_result = LTTNG_OK;
	struct notification_event_tracer_event_source_element *source_element = NULL, *tmp;

	cds_list_for_each_entry_safe(source_element, tmp,
			&state->tracer_event_sources_list, node) {
		if (source_element->fd != tracer_event_source_fd) {
			continue;
		}

		DBG("[notification-thread] Removed tracer event source from poll set: tracer_event_source_fd = %d, domain = '%s'",
				tracer_event_source_fd,
				lttng_domain_type_str(source_element->domain));
		cds_list_del(&source_element->node);
		found = true;
		break;
	}

	if (!found) {
		/*
		 * This is temporarily allowed since the poll activity set is
		 * not properly cleaned-up for the moment. This is adressed in
		 * an upcoming fix.
		 */
		/* NULLed so the free() at `end` is a no-op. */
		source_element = NULL;
		goto end;
	}

	if (!source_element->is_fd_in_poll_set) {
		/* Skip the poll set removal. */
		goto end;
	}

	DBG3("[notification-thread] Removing tracer event source from poll set: tracer_event_source_fd = %d, domain = '%s'",
			tracer_event_source_fd,
			lttng_domain_type_str(source_element->domain));

	/* Removing the fd from the event poll set. */
	ret = lttng_poll_del(&state->events, tracer_event_source_fd);
	if (ret < 0) {
		ERR("[notification-thread] Failed to remove tracer event source from poll set: tracer_event_source_fd = %d, domain = '%s'",
				tracer_event_source_fd,
				lttng_domain_type_str(source_element->domain));
		cmd_result = LTTNG_ERR_FATAL;
		goto end;
	}

	source_element->is_fd_in_poll_set = false;

	ret = drain_event_notifier_notification_pipe(state, tracer_event_source_fd,
			source_element->domain);
	if (ret) {
		ERR("[notification-thread] Error draining event notifier notification: tracer_event_source_fd = %d, domain = %s",
				tracer_event_source_fd,
				lttng_domain_type_str(source_element->domain));
		cmd_result = LTTNG_ERR_FATAL;
		goto end;
	}

	/*
	 * The drain_event_notifier_notification_pipe() call might have read
	 * data from an fd that we received in event in the latest _poll_wait()
	 * call. Make sure the thread call poll_wait() again to ensure we have
	 * a clean state.
	 */
	state->restart_poll = true;

end:
	free(source_element);
	*_cmd_result = cmd_result;
	return ret;
}
+
+/*
+ * Convenience wrapper around
+ * handle_notification_thread_command_remove_tracer_event_source() for
+ * callers that have no use for the detailed command result code and only
+ * care about the return value.
+ */
+int handle_notification_thread_remove_tracer_event_source_no_result(
+		struct notification_thread_state *state,
+		int tracer_event_source_fd)
+{
+	/* Required by the callee's signature, but deliberately discarded. */
+	enum lttng_error_code discarded_cmd_result;
+
+	return handle_notification_thread_command_remove_tracer_event_source(
+			state, tracer_event_source_fd,
+			&discarded_cmd_result);
+}
+
+/*
+ * Build the list of triggers visible to a client.
+ *
+ * Iterates over every registered trigger and copies those owned by
+ * `client_uid` into a newly-allocated lttng_triggers set; the root user
+ * (uid 0) has visibility over all triggers.
+ *
+ * On success, ownership of the set is transferred to the caller through
+ * `triggers`. Allocation failures are reported as LTTNG_ERR_NOMEM via
+ * `_cmd_result` but are not treated as fatal (the function still returns
+ * 0). A non-zero return indicates a fatal error.
+ *
+ * NOTE(review): `handle` is unused in this function — presumably kept for
+ * signature consistency with the other command handlers; confirm.
+ */
+static int handle_notification_thread_command_list_triggers(
+		struct notification_thread_handle *handle,
+		struct notification_thread_state *state,
+		uid_t client_uid,
+		struct lttng_triggers **triggers,
+		enum lttng_error_code *_cmd_result)
+{
+	int ret = 0;
+	enum lttng_error_code cmd_result = LTTNG_OK;
+	struct cds_lfht_iter iter;
+	struct lttng_trigger_ht_element *trigger_ht_element;
+	struct lttng_triggers *local_triggers = NULL;
+	const struct lttng_credentials *creds;
+
+	/* The whole triggers_ht traversal must occur under the RCU read lock. */
+	rcu_read_lock();
+
+	local_triggers = lttng_triggers_create();
+	if (!local_triggers) {
+		/* Not a fatal error. */
+		cmd_result = LTTNG_ERR_NOMEM;
+		goto end;
+	}
+
+	cds_lfht_for_each_entry(state->triggers_ht, &iter,
+			trigger_ht_element, node) {
+		/*
+		 * Only return the triggers to which the client has access.
+		 * The root user has visibility over all triggers.
+		 */
+		creds = lttng_trigger_get_credentials(trigger_ht_element->trigger);
+		if (client_uid != lttng_credentials_get_uid(creds) && client_uid != 0) {
+			continue;
+		}
+
+		ret = lttng_triggers_add(local_triggers,
+				trigger_ht_element->trigger);
+		if (ret < 0) {
+			/* Not a fatal error. */
+			ret = 0;
+			cmd_result = LTTNG_ERR_NOMEM;
+			goto end;
+		}
+	}
+
+	/* Transferring ownership to the caller. */
+	*triggers = local_triggers;
+	local_triggers = NULL;
+
+end:
+	rcu_read_unlock();
+	/* NULL on the success path (ownership transferred); no-op then. */
+	lttng_triggers_destroy(local_triggers);
+	*_cmd_result = cmd_result;
+	return ret;
+}
+
+/*
+ * Check whether the running tracers can evaluate a given condition.
+ *
+ * Only kernel-domain conditions may be unsupported: older kernel tracers
+ * expose neither the ring-buffer snapshot sample positions API (needed by
+ * buffer usage conditions) nor event notifiers (needed by event-rule hit
+ * conditions). Every other condition is assumed supported. On a probing
+ * error, the condition is conservatively reported as unsupported.
+ */
+static
+bool condition_is_supported(struct lttng_condition *condition)
+{
+	/* Condition types not handled below are always supported. */
+	bool supported = true;
+
+	switch (lttng_condition_get_type(condition)) {
+	case LTTNG_CONDITION_TYPE_BUFFER_USAGE_LOW:
+	case LTTNG_CONDITION_TYPE_BUFFER_USAGE_HIGH:
+	{
+		int ret;
+		enum lttng_domain_type domain;
+
+		ret = lttng_condition_buffer_usage_get_domain_type(condition,
+				&domain);
+		assert(ret == 0);
+
+		/*
+		 * Older kernel tracers don't expose the API to monitor their
+		 * buffers. Therefore, we reject triggers that require that
+		 * mechanism to be available to be evaluated.
+		 *
+		 * Assume unsupported on error.
+		 */
+		if (domain == LTTNG_DOMAIN_KERNEL) {
+			supported = kernel_supports_ring_buffer_snapshot_sample_positions() == 1;
+		}
+		break;
+	}
+	case LTTNG_CONDITION_TYPE_EVENT_RULE_HIT:
+	{
+		const struct lttng_event_rule *rule;
+		const enum lttng_condition_status status =
+				lttng_condition_event_rule_get_rule(
+						condition, &rule);
+
+		assert(status == LTTNG_CONDITION_STATUS_OK);
+
+		/*
+		 * Older kernel tracers can't emit notification. Therefore, we
+		 * reject triggers that require that mechanism to be available
+		 * to be evaluated.
+		 *
+		 * Assume unsupported on error.
+		 */
+		if (lttng_event_rule_get_domain_type(rule) == LTTNG_DOMAIN_KERNEL) {
+			supported = kernel_supports_event_notifiers() == 1;
+		}
+		break;
+	}
+	default:
+		break;
+	}
+
+	return supported;
+}
+
+/* Must be called with RCU read lock held. */
+static
+int bind_trigger_to_matching_session(struct lttng_trigger *trigger,
+ struct notification_thread_state *state)
+{
+ int ret = 0;
+ const struct lttng_condition *condition;
+ const char *session_name;
+ struct lttng_session_trigger_list *trigger_list;
+
+ condition = lttng_trigger_get_const_condition(trigger);
+ switch (lttng_condition_get_type(condition)) {
+ case LTTNG_CONDITION_TYPE_SESSION_ROTATION_ONGOING:
+ case LTTNG_CONDITION_TYPE_SESSION_ROTATION_COMPLETED:
+ {
+ enum lttng_condition_status status;
+
+ status = lttng_condition_session_rotation_get_session_name(
+ condition, &session_name);
+ if (status != LTTNG_CONDITION_STATUS_OK) {
+ ERR("[notification-thread] Failed to bind trigger to session: unable to get 'session_rotation' condition's session name");
+ ret = -1;
+ goto end;
+ }
+ break;
+ }
+ default:
+ ret = -1;
+ goto end;