Clean-up: sessiond: prepend `the_` to global variable names
[lttng-tools.git] / src/bin/lttng-sessiond/notification-thread.c
index ba308bc831d7e26802f6995ae4fa433f89e1ced1..56f1124fbfbfbde93c7a1ba9959182221e58095d 100644 (file)
 #include "lttng-sessiond.h"
 #include "health-sessiond.h"
 #include "thread.h"
+#include "testpoint.h"
+
+#include "kernel.h"
+#include <common/kernel-ctl/kernel-ctl.h>
 
 #include <urcu.h>
 #include <urcu/list.h>
 #include <urcu/rculfhash.h>
 
+
+int notifier_consumption_paused;
 /*
  * Destroy the thread data previously created by the init function.
  */
@@ -69,6 +75,7 @@ void notification_thread_handle_destroy(
                        PERROR("close kernel consumer channel monitoring pipe");
                }
        }
+
 end:
        free(handle);
 }
@@ -134,6 +141,7 @@ struct notification_thread_handle *notification_thread_handle_create(
        } else {
                handle->channel_monitoring_pipes.kernel_consumer = -1;
        }
+
 end:
        return handle;
 error:
@@ -230,8 +238,8 @@ int notification_channel_socket_create(void)
        if (getuid() == 0) {
                gid_t gid;
 
-               ret =  utils_get_group_id(config.tracing_group_name.value, true,
-                               &gid);
+               ret = utils_get_group_id(the_config.tracing_group_name.value,
+                               true, &gid);
                if (ret) {
                        /* Default to root group. */
                        gid = 0;
@@ -362,6 +370,14 @@ void fini_thread_state(struct notification_thread_state *state)
                ret = cds_lfht_destroy(state->sessions_ht, NULL);
                assert(!ret);
        }
+       if (state->triggers_by_name_uid_ht) {
+               ret = cds_lfht_destroy(state->triggers_by_name_uid_ht, NULL);
+               assert(!ret);
+       }
+       if (state->trigger_tokens_ht) {
+               ret = cds_lfht_destroy(state->trigger_tokens_ht, NULL);
+               assert(!ret);
+       }
        /*
         * Must be destroyed after all channels have been destroyed.
         * See comment in struct lttng_session_trigger_list.
@@ -374,6 +390,9 @@ void fini_thread_state(struct notification_thread_state *state)
                notification_channel_socket_destroy(
                                state->notification_channel_socket);
        }
+
+       assert(cds_list_empty(&state->tracer_event_sources_list));
+
        if (state->executor) {
                action_executor_destroy(state->executor);
        }
@@ -403,6 +422,7 @@ int init_thread_state(struct notification_thread_handle *handle,
 
        memset(state, 0, sizeof(*state));
        state->notification_channel_socket = -1;
+       state->trigger_id.next_tracer_token = 1;
        lttng_poll_init(&state->events);
 
        ret = notification_channel_socket_create();
@@ -475,11 +495,27 @@ int init_thread_state(struct notification_thread_handle *handle,
        if (!state->triggers_ht) {
                goto error;
        }
+       state->triggers_by_name_uid_ht = cds_lfht_new(DEFAULT_HT_SIZE,
+                       1, 0, CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
+       if (!state->triggers_by_name_uid_ht) {
+               goto error;
+       }
+
+       state->trigger_tokens_ht = cds_lfht_new(DEFAULT_HT_SIZE,
+                       1, 0, CDS_LFHT_AUTO_RESIZE | CDS_LFHT_ACCOUNTING, NULL);
+       if (!state->trigger_tokens_ht) {
+               goto error;
+       }
+
+       CDS_INIT_LIST_HEAD(&state->tracer_event_sources_list);
 
        state->executor = action_executor_create(handle);
        if (!state->executor) {
                goto error;
        }
+
+       state->restart_poll = false;
+
        mark_thread_as_ready(handle);
 end:
        return 0;
@@ -524,6 +560,71 @@ end:
        return ret;
 }
 
+static int handle_event_notification_pipe(int event_source_fd,
+               enum lttng_domain_type domain,
+               uint32_t revents,
+               struct notification_thread_state *state)
+{
+       int ret = 0;
+
+       if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
+               ret = handle_notification_thread_remove_tracer_event_source_no_result(
+                               state, event_source_fd);
+               if (ret) {
+                       ERR("[notification-thread] Failed to remove event notification pipe from poll set: fd = %d",
+                                       event_source_fd);
+               }
+               goto end;
+       }
+
+       if (testpoint(sessiond_handle_notifier_event_pipe)) {
+               ret = 0;
+               goto end;
+       }
+
+       if (caa_unlikely(notifier_consumption_paused)) {
+               DBG("Event notifier notification consumption paused, sleeping...");
+               sleep(1);
+               goto end;
+       }
+
+       ret = handle_notification_thread_event_notification(
+                       state, event_source_fd, domain);
+       if (ret) {
+               ERR("[notification-thread] Event notification handling error occurred for fd: %d",
+                               event_source_fd);
+               ret = -1;
+               goto end;
+       }
+
+end:
+       return ret;
+}
+
+/*
+ * Check whether a file descriptor is a registered tracer event source.
+ * If it is, return the domain of the source through the `domain`
+ * output parameter.
+ */
+static bool fd_is_event_notification_source(const struct notification_thread_state *state,
+               int fd,
+               enum lttng_domain_type *domain)
+{
+       struct notification_event_tracer_event_source_element *source_element;
+
+       assert(domain);
+
+       cds_list_for_each_entry(source_element,
+                       &state->tracer_event_sources_list, node) {
+               if (source_element->fd != fd) {
+                       continue;
+               }
+
+               *domain = source_element->domain;
+               return true;
+       }
+
+       return false;
+}
+
 /*
  * This thread services notification channel clients and commands received
  * from various lttng-sessiond components over a command queue.
@@ -534,10 +635,11 @@ void *thread_notification(void *data)
        int ret;
        struct notification_thread_handle *handle = data;
        struct notification_thread_state state;
+       enum lttng_domain_type domain;
 
        DBG("[notification-thread] Started notification thread");
 
-       health_register(health_sessiond, HEALTH_SESSIOND_TYPE_NOTIFICATION);
+       health_register(the_health_sessiond, HEALTH_SESSIOND_TYPE_NOTIFICATION);
        rcu_register_thread();
        rcu_thread_online();
 
@@ -553,6 +655,10 @@ void *thread_notification(void *data)
                goto end;
        }
 
+       if (testpoint(sessiond_thread_notification)) {
+               goto end;
+       }
+
        while (true) {
                int fd_count, i;
 
@@ -572,6 +678,12 @@ void *thread_notification(void *data)
                        goto error;
                }
 
+               /*
+                * Reset the restart_poll flag so that the handlers invoked
+                * below can set it.
+                */
+               state.restart_poll = false;
+
                fd_count = ret;
                for (i = 0; i < fd_count; i++) {
                        int fd = LTTNG_POLL_GETFD(&state.events, i);
@@ -611,6 +723,11 @@ void *thread_notification(void *data)
                                if (ret) {
                                        goto error;
                                }
+                       } else if (fd_is_event_notification_source(&state, fd, &domain)) {
+                               ret = handle_event_notification_pipe(fd, domain, revents, &state);
+                               if (ret) {
+                                       goto error;
+                               }
                        } else {
                                /* Activity on a client's socket. */
                                if (revents & (LPOLLERR | LPOLLHUP | LPOLLRDHUP)) {
@@ -644,6 +761,15 @@ void *thread_notification(void *data)
                                        }
                                }
                        }
+
+                       /*
+                        * Calls above might have changed the state of the
+                        * FDs in `state.events`. Call _poll_wait() again to
+                        * ensure we have a consistent state.
+                        */
+                       if (state.restart_poll) {
+                               break;
+                       }
                }
        }
 exit:
@@ -652,7 +778,7 @@ error:
 end:
        rcu_thread_offline();
        rcu_unregister_thread();
-       health_unregister(health_sessiond);
+       health_unregister(the_health_sessiond);
        return NULL;
 }
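
The dispatch added to the poll loop rests on a simple lookup: the notification thread keeps a list of the tracer event source pipes it has registered and, for every fd reported by poll, first checks whether it belongs to that list before falling back to treating it as a client socket. The standalone sketch below illustrates that pattern; the types and names used here (event_source, fd_is_event_source, domain_type) are simplified stand-ins invented for illustration, not the actual lttng-tools structures.

/*
 * Minimal sketch of the fd-to-domain lookup performed by the notification
 * thread's poll loop.  All types and names here are illustrative
 * simplifications, not the lttng-tools API.
 */
#include <stdbool.h>
#include <stdio.h>

enum domain_type { DOMAIN_KERNEL, DOMAIN_UST };

struct event_source {
	int fd;                  /* event notifier notification pipe */
	enum domain_type domain; /* tracer domain owning the pipe */
	struct event_source *next;
};

/* Walk the registered sources; report the owning domain on a match. */
static bool fd_is_event_source(const struct event_source *sources, int fd,
		enum domain_type *domain)
{
	const struct event_source *s;

	for (s = sources; s; s = s->next) {
		if (s->fd == fd) {
			*domain = s->domain;
			return true;
		}
	}
	return false;
}

int main(void)
{
	struct event_source ust = { .fd = 8, .domain = DOMAIN_UST, .next = NULL };
	struct event_source kernel = { .fd = 5, .domain = DOMAIN_KERNEL, .next = &ust };
	enum domain_type domain;
	int fd;

	/* Mimic the poll loop: only registered fds are dispatched as sources. */
	for (fd = 4; fd <= 9; fd++) {
		if (fd_is_event_source(&kernel, fd, &domain)) {
			printf("fd %d: event notifier pipe (domain %d)\n", fd, domain);
		} else {
			printf("fd %d: client or command socket\n", fd);
		}
	}
	return 0;
}

In the patch itself the list is walked the same way (cds_list_for_each_entry over state->tracer_event_sources_list), and the outer loop breaks out of fd processing whenever state.restart_poll is set, so that lttng_poll_wait() is called again before touching fds whose registration may have changed.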
 