Cleanup: ust_session_id unused by buffer_reg_uid_consumer_channel_key
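The change named in the title is visible in the ust_app_uid_get_channel_runtime_stats hunk near the end of this diff: the call to buffer_reg_uid_consumer_channel_key no longer passes ust_session_id, since the lookup only needs the channel id. Below is a minimal sketch of the implied prototype change, inferred from that call site; the authoritative declaration lives in buffer-registry.h (not part of this diff), so the parameter names and the list type shown here are assumptions.

/*
 * Before (sketch): ust_session_id was accepted but never used.
 *
 * int buffer_reg_uid_consumer_channel_key(
 *                 struct cds_list_head *buffer_reg_uid_list,
 *                 uint64_t ust_session_id, uint64_t uchan_id,
 *                 uint64_t *consumer_chan_key);
 */

/* After (sketch): resolve the consumer channel key from the channel id alone. */
int buffer_reg_uid_consumer_channel_key(
		struct cds_list_head *buffer_reg_uid_list,
		uint64_t uchan_id, uint64_t *consumer_chan_key);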
[lttng-tools.git] src/bin/lttng-sessiond/ust-app.c
index 9a766e4ab68e628cbdec4d9f0432ec0b83dea547..97083e15a95f7c5b1c919143e9724ff51fd4cbcc 100644
--- a/src/bin/lttng-sessiond/ust-app.c
+++ b/src/bin/lttng-sessiond/ust-app.c
@@ -41,6 +41,9 @@
 #include "ust-ctl.h"
 #include "utils.h"
 #include "session.h"
+#include "lttng-sessiond.h"
+#include "notification-thread-commands.h"
+#include "rotate.h"
 
 static
 int ust_app_flush_app_session(struct ust_app *app, struct ust_app_session *ua_sess);
@@ -90,6 +93,7 @@ static void copy_channel_attr_to_ustctl(
        attr->switch_timer_interval = uattr->switch_timer_interval;
        attr->read_timer_interval = uattr->read_timer_interval;
        attr->output = uattr->output;
+       attr->blocking_timeout = uattr->u.s.blocking_timeout;
 }
 
 /*
@@ -482,7 +486,8 @@ void delete_ust_app_channel(int sock, struct ust_app_channel *ua_chan,
                /* Wipe and free registry from session registry. */
                registry = get_session_registry(ua_chan->session);
                if (registry) {
-                       ust_registry_channel_del_free(registry, ua_chan->key);
+                       ust_registry_channel_del_free(registry, ua_chan->key,
+                               true);
                }
                save_per_pid_lost_discarded_counters(ua_chan);
        }
@@ -566,18 +571,6 @@ ssize_t ust_app_push_metadata(struct ust_registry_session *registry,
                return 0;
        }
 
-       /*
-        * On a push metadata error either the consumer is dead or the
-        * metadata channel has been destroyed because its endpoint
-        * might have died (e.g: relayd), or because the application has
-        * exited. If so, the metadata closed flag is set to 1 so we
-        * deny pushing metadata again which is not valid anymore on the
-        * consumer side.
-        */
-       if (registry->metadata_closed) {
-               return -EPIPE;
-       }
-
        offset = registry->metadata_len_sent;
        len = registry->metadata_len - registry->metadata_len_sent;
        new_metadata_len_sent = registry->metadata_len;
@@ -822,6 +815,7 @@ void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
        ua_sess->deleted = true;
 
        registry = get_session_registry(ua_sess);
+       /* Registry can be null on error path during initialization. */
        if (registry) {
                /* Push metadata for application before freeing the application. */
                (void) push_metadata(registry, ua_sess->consumer);
@@ -849,6 +843,10 @@ void delete_ust_app_session(int sock, struct ust_app_session *ua_sess,
        if (ua_sess->buffer_type == LTTNG_BUFFER_PER_PID) {
                struct buffer_reg_pid *reg_pid = buffer_reg_pid_find(ua_sess->id);
                if (reg_pid) {
+                       /*
+                        * Registry can be null on error path during
+                        * initialization.
+                        */
                        buffer_reg_pid_remove(reg_pid);
                        buffer_reg_pid_destroy(reg_pid);
                }
@@ -1044,6 +1042,7 @@ struct ust_app_channel *alloc_ust_app_channel(char *name,
                ua_chan->attr.switch_timer_interval = attr->switch_timer_interval;
                ua_chan->attr.read_timer_interval = attr->read_timer_interval;
                ua_chan->attr.output = attr->output;
+               ua_chan->attr.blocking_timeout = attr->u.s.blocking_timeout;
        }
        /* By default, the channel is a per cpu channel. */
        ua_chan->attr.type = LTTNG_UST_CHAN_PER_CPU;
@@ -1805,7 +1804,10 @@ static void shadow_copy_channel(struct ust_app_channel *ua_chan,
        ua_chan->attr.overwrite = uchan->attr.overwrite;
        ua_chan->attr.switch_timer_interval = uchan->attr.switch_timer_interval;
        ua_chan->attr.read_timer_interval = uchan->attr.read_timer_interval;
+       ua_chan->monitor_timer_interval = uchan->monitor_timer_interval;
        ua_chan->attr.output = uchan->attr.output;
+       ua_chan->attr.blocking_timeout = uchan->attr.u.s.blocking_timeout;
+
        /*
         * Note that the attribute channel type is not set since the channel on the
         * tracing registry side does not have this information.
@@ -2152,7 +2154,7 @@ error:
  * Returns 0 on success or else a negative code which is either -ENOMEM or
  * -ENOTCONN which is the default code if the ustctl_create_session fails.
  */
-static int create_ust_app_session(struct ltt_ust_session *usess,
+static int find_or_create_ust_app_session(struct ltt_ust_session *usess,
                struct ust_app *app, struct ust_app_session **ua_sess_ptr,
                int *is_created)
 {
@@ -2364,7 +2366,7 @@ int create_ust_app_channel_context(struct ust_app_session *ua_sess,
        ua_ctx = alloc_ust_app_ctx(uctx);
        if (ua_ctx == NULL) {
                /* malloc failed */
-               ret = -1;
+               ret = -ENOMEM;
                goto error;
        }
 
@@ -2476,6 +2478,8 @@ error:
 /*
  * Ask the consumer to create a channel and get it if successful.
  *
+ * Called with UST app session lock held.
+ *
  * Return 0 on success or else a negative value.
  */
 static int do_consumer_create_channel(struct ltt_ust_session *usess,
@@ -2821,9 +2825,6 @@ static int send_channel_uid_to_ust(struct buffer_reg_channel *reg_chan,
                        (void) release_ust_app_stream(-1, &stream, app);
                        if (ret == -EPIPE || ret == -LTTNG_UST_ERR_EXITING) {
                                ret = -ENOTCONN; /* Caused by app exiting. */
-                               goto error_stream_unlock;
-                       } else if (ret < 0) {
-                               goto error_stream_unlock;
                        }
                        goto error_stream_unlock;
                }
@@ -2845,6 +2846,9 @@ error:
 /*
  * Create and send to the application the created buffers with per UID buffers.
  *
+ * This MUST be called with a RCU read side lock acquired.
+ * The session list lock and the session's lock must be acquired.
+ *
  * Return 0 on success else a negative value.
  */
 static int create_channel_per_uid(struct ust_app *app,
@@ -2872,50 +2876,85 @@ static int create_channel_per_uid(struct ust_app *app,
 
        reg_chan = buffer_reg_channel_find(ua_chan->tracing_channel_id,
                        reg_uid);
-       if (!reg_chan) {
-               /* Create the buffer registry channel object. */
-               ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
-               if (ret < 0) {
-                       ERR("Error creating the UST channel \"%s\" registry instance",
-                               ua_chan->name);
-                       goto error;
-               }
-               assert(reg_chan);
+       if (reg_chan) {
+               goto send_channel;
+       }
 
-               /*
-                * Create the buffers on the consumer side. This call populates the
-                * ust app channel object with all streams and data object.
-                */
-               ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
-                               app->bits_per_long, reg_uid->registry->reg.ust);
-               if (ret < 0) {
-                       ERR("Error creating UST channel \"%s\" on the consumer daemon",
+       /* Create the buffer registry channel object. */
+       ret = create_buffer_reg_channel(reg_uid->registry, ua_chan, &reg_chan);
+       if (ret < 0) {
+               ERR("Error creating the UST channel \"%s\" registry instance",
                                ua_chan->name);
+               goto error;
+       }
 
-                       /*
-                        * Let's remove the previously created buffer registry channel so
-                        * it's not visible anymore in the session registry.
-                        */
-                       ust_registry_channel_del_free(reg_uid->registry->reg.ust,
-                                       ua_chan->tracing_channel_id);
-                       buffer_reg_channel_remove(reg_uid->registry, reg_chan);
-                       buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
-                       goto error;
-               }
+       /*
+        * Create the buffers on the consumer side. This call populates the
+        * ust app channel object with all streams and data object.
+        */
+       ret = do_consumer_create_channel(usess, ua_sess, ua_chan,
+                       app->bits_per_long, reg_uid->registry->reg.ust);
+       if (ret < 0) {
+               ERR("Error creating UST channel \"%s\" on the consumer daemon",
+                               ua_chan->name);
 
                /*
-                * Setup the streams and add it to the session registry.
+                * Let's remove the previously created buffer registry channel so
+                * it's not visible anymore in the session registry.
                 */
-               ret = setup_buffer_reg_channel(reg_uid->registry,
-                               ua_chan, reg_chan, app);
-               if (ret < 0) {
-                       ERR("Error setting up UST channel \"%s\"",
-                               ua_chan->name);
+               ust_registry_channel_del_free(reg_uid->registry->reg.ust,
+                               ua_chan->tracing_channel_id, false);
+               buffer_reg_channel_remove(reg_uid->registry, reg_chan);
+               buffer_reg_channel_destroy(reg_chan, LTTNG_DOMAIN_UST);
+               goto error;
+       }
+
+       /*
+        * Setup the streams and add it to the session registry.
+        */
+       ret = setup_buffer_reg_channel(reg_uid->registry,
+                       ua_chan, reg_chan, app);
+       if (ret < 0) {
+               ERR("Error setting up UST channel \"%s\"", ua_chan->name);
+               goto error;
+       }
+
+       {
+               enum lttng_error_code cmd_ret;
+               struct ltt_session *session;
+               uint64_t chan_reg_key;
+               struct ust_registry_channel *chan_reg;
+
+               chan_reg_key = ua_chan->tracing_channel_id;
+
+               pthread_mutex_lock(&reg_uid->registry->reg.ust->lock);
+               chan_reg = ust_registry_channel_find(reg_uid->registry->reg.ust,
+                               chan_reg_key);
+               assert(chan_reg);
+               chan_reg->consumer_key = ua_chan->key;
+               chan_reg = NULL;
+               pthread_mutex_unlock(&reg_uid->registry->reg.ust->lock);
+
+               session = session_find_by_id(ua_sess->tracing_id);
+               assert(session);
+
+               assert(pthread_mutex_trylock(&session->lock));
+               assert(session_trylock_list());
+               cmd_ret = notification_thread_command_add_channel(
+                               notification_thread_handle, session->name,
+                               ua_sess->euid, ua_sess->egid,
+                               ua_chan->name,
+                               ua_chan->key,
+                               LTTNG_DOMAIN_UST,
+                               ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
+               if (cmd_ret != LTTNG_OK) {
+                       ret = - (int) cmd_ret;
+                       ERR("Failed to add channel to notification thread");
                        goto error;
                }
-
        }
 
+send_channel:
        /* Send buffers to the application. */
        ret = send_channel_uid_to_ust(reg_chan, app, ua_sess, ua_chan);
        if (ret < 0) {
@@ -2932,6 +2971,9 @@ error:
 /*
  * Create and send to the application the created buffers with per PID buffers.
  *
+ * Called with UST app session lock held.
+ * The session list lock and the session's lock must be acquired.
+ *
  * Return 0 on success else a negative value.
  */
 static int create_channel_per_pid(struct ust_app *app,
@@ -2940,6 +2982,10 @@ static int create_channel_per_pid(struct ust_app *app,
 {
        int ret;
        struct ust_registry_session *registry;
+       enum lttng_error_code cmd_ret;
+       struct ltt_session *session;
+       uint64_t chan_reg_key;
+       struct ust_registry_channel *chan_reg;
 
        assert(app);
        assert(usess);
@@ -2951,6 +2997,7 @@ static int create_channel_per_pid(struct ust_app *app,
        rcu_read_lock();
 
        registry = get_session_registry(ua_sess);
+       /* The UST app session lock is held, registry shall not be null. */
        assert(registry);
 
        /* Create and add a new channel registry to session. */
@@ -2978,6 +3025,32 @@ static int create_channel_per_pid(struct ust_app *app,
                goto error;
        }
 
+       session = session_find_by_id(ua_sess->tracing_id);
+       assert(session);
+
+       chan_reg_key = ua_chan->key;
+       pthread_mutex_lock(&registry->lock);
+       chan_reg = ust_registry_channel_find(registry, chan_reg_key);
+       assert(chan_reg);
+       chan_reg->consumer_key = ua_chan->key;
+       pthread_mutex_unlock(&registry->lock);
+
+       assert(pthread_mutex_trylock(&session->lock));
+       assert(session_trylock_list());
+
+       cmd_ret = notification_thread_command_add_channel(
+                       notification_thread_handle, session->name,
+                       ua_sess->euid, ua_sess->egid,
+                       ua_chan->name,
+                       ua_chan->key,
+                       LTTNG_DOMAIN_UST,
+                       ua_chan->attr.subbuf_size * ua_chan->attr.num_subbuf);
+       if (cmd_ret != LTTNG_OK) {
+               ret = - (int) cmd_ret;
+               ERR("Failed to add channel to notification thread");
+               goto error;
+       }
+
 error:
        rcu_read_unlock();
        return ret;
@@ -2988,6 +3061,8 @@ error:
  * need and send it to the application. This MUST be called with a RCU read
  * side lock acquired.
  *
+ * Called with UST app session lock held.
+ *
  * Return 0 on success or else a negative value. Returns -ENOTCONN if
  * the application exited concurrently.
  */
@@ -3090,7 +3165,6 @@ static int create_ust_app_channel(struct ust_app_session *ua_sess,
 
        /* Only add the channel if successful on the tracer side. */
        lttng_ht_add_unique_str(ua_sess->channels, &ua_chan->node);
-
 end:
        if (ua_chanp) {
                *ua_chanp = ua_chan;
@@ -3175,6 +3249,7 @@ static int create_ust_app_metadata(struct ust_app_session *ua_sess,
        assert(consumer);
 
        registry = get_session_registry(ua_sess);
+       /* The UST app session lock is held, registry shall not be null. */
        assert(registry);
 
        pthread_mutex_lock(&registry->lock);
@@ -3509,8 +3584,8 @@ void ust_app_unregister(int sock)
        /*
         * Remove application from notify hash table. The thread handling the
         * notify socket could have deleted the node so ignore on error because
-        * either way it's valid. The close of that socket is handled by the other
-        * thread.
+        * either way it's valid. The close of that socket is handled by the
+        * apps_notify_thread.
         */
        iter.iter.node = &lta->notify_sock_n.node;
        (void) lttng_ht_del(ust_app_ht_by_notify_sock, &iter);
@@ -4096,7 +4171,7 @@ int ust_app_create_channel_glb(struct ltt_ust_session *usess,
                 * that if session exist, it will simply return a pointer to the ust
                 * app session.
                 */
-               ret = create_ust_app_session(usess, app, &ua_sess, &created);
+               ret = find_or_create_ust_app_session(usess, app, &ua_sess, &created);
                if (ret < 0) {
                        switch (ret) {
                        case -ENOTCONN:
@@ -4311,6 +4386,9 @@ int ust_app_create_event_glb(struct ltt_ust_session *usess,
 
 /*
  * Start tracing for a specific UST session and app.
+ *
+ * Called with UST app session lock held.
+ *
  */
 static
 int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
@@ -4346,9 +4424,32 @@ int ust_app_start_trace(struct ltt_ust_session *usess, struct ust_app *app)
 
        /* Create directories if consumer is LOCAL and has a path defined. */
        if (usess->consumer->type == CONSUMER_DST_LOCAL &&
-                       strlen(usess->consumer->dst.trace_path) > 0) {
-               ret = run_as_mkdir_recursive(usess->consumer->dst.trace_path,
-                               S_IRWXU | S_IRWXG, ua_sess->euid, ua_sess->egid);
+                       usess->consumer->dst.session_root_path[0] != '\0') {
+               char *tmp_path;
+
+               tmp_path = zmalloc(LTTNG_PATH_MAX);
+               if (!tmp_path) {
+                       ERR("Alloc tmp_path");
+                       goto error_unlock;
+               }
+               ret = snprintf(tmp_path, LTTNG_PATH_MAX, "%s%s%s",
+                               usess->consumer->dst.session_root_path,
+                               usess->consumer->chunk_path,
+                               usess->consumer->subdir);
+               if (ret >= LTTNG_PATH_MAX) {
+                       ERR("Local destination path exceeds the maximal allowed length of %i bytes (needs %i bytes) with path = \"%s%s%s\"",
+                                       LTTNG_PATH_MAX, ret,
+                                       usess->consumer->dst.session_root_path,
+                                       usess->consumer->chunk_path,
+                                       usess->consumer->subdir);
+                       goto error_unlock;
+               }
+
+               DBG("Creating directory path for local tracing: \"%s\"",
+                               tmp_path);
+               ret = run_as_mkdir_recursive(tmp_path, S_IRWXU | S_IRWXG,
+                               ua_sess->euid, ua_sess->egid);
+               free(tmp_path);
                if (ret < 0) {
                        if (errno != EEXIST) {
                                ERR("Trace directory creation error");
@@ -4494,6 +4595,8 @@ int ust_app_stop_trace(struct ltt_ust_session *usess, struct ust_app *app)
        health_code_update();
 
        registry = get_session_registry(ua_sess);
+
+       /* The UST app session lock is held, registry shall not be null. */
        assert(registry);
 
        /* Push metadata for application before freeing the application. */
@@ -4650,6 +4753,155 @@ int ust_app_flush_session(struct ltt_ust_session *usess)
        return ret;
 }
 
+static
+int ust_app_clear_quiescent_app_session(struct ust_app *app,
+               struct ust_app_session *ua_sess)
+{
+       int ret = 0;
+       struct lttng_ht_iter iter;
+       struct ust_app_channel *ua_chan;
+       struct consumer_socket *socket;
+
+       DBG("Clearing stream quiescent state for ust app pid %d", app->pid);
+
+       rcu_read_lock();
+
+       if (!app->compatible) {
+               goto end_not_compatible;
+       }
+
+       pthread_mutex_lock(&ua_sess->lock);
+
+       if (ua_sess->deleted) {
+               goto end_unlock;
+       }
+
+       health_code_update();
+
+       socket = consumer_find_socket_by_bitness(app->bits_per_long,
+                       ua_sess->consumer);
+       if (!socket) {
+               ERR("Failed to find consumer (%" PRIu32 ") socket",
+                               app->bits_per_long);
+               ret = -1;
+               goto end_unlock;
+       }
+
+       /* Clear quiescent state. */
+       switch (ua_sess->buffer_type) {
+       case LTTNG_BUFFER_PER_PID:
+               cds_lfht_for_each_entry(ua_sess->channels->ht, &iter.iter,
+                               ua_chan, node.node) {
+                       health_code_update();
+                       ret = consumer_clear_quiescent_channel(socket,
+                                       ua_chan->key);
+                       if (ret) {
+                               ERR("Error clearing quiescent state for consumer channel");
+                               ret = -1;
+                               continue;
+                       }
+               }
+               break;
+       case LTTNG_BUFFER_PER_UID:
+       default:
+               assert(0);
+               ret = -1;
+               break;
+       }
+
+       health_code_update();
+
+end_unlock:
+       pthread_mutex_unlock(&ua_sess->lock);
+
+end_not_compatible:
+       rcu_read_unlock();
+       health_code_update();
+       return ret;
+}
+
+/*
+ * Clear quiescent state in each stream for all applications for a
+ * specific UST session.
+ * Called with UST session lock held.
+ */
+static
+int ust_app_clear_quiescent_session(struct ltt_ust_session *usess)
+
+{
+       int ret = 0;
+
+       DBG("Clearing stream quiescent state for all ust apps");
+
+       rcu_read_lock();
+
+       switch (usess->buffer_type) {
+       case LTTNG_BUFFER_PER_UID:
+       {
+               struct lttng_ht_iter iter;
+               struct buffer_reg_uid *reg;
+
+               /*
+                * Clear quiescent for all per UID buffers associated to
+                * that session.
+                */
+               cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+                       struct consumer_socket *socket;
+                       struct buffer_reg_channel *reg_chan;
+
+                       /* Get associated consumer socket.*/
+                       socket = consumer_find_socket_by_bitness(
+                                       reg->bits_per_long, usess->consumer);
+                       if (!socket) {
+                               /*
+                                * Ignore request if no consumer is found for
+                                * the session.
+                                */
+                               continue;
+                       }
+
+                       cds_lfht_for_each_entry(reg->registry->channels->ht,
+                                       &iter.iter, reg_chan, node.node) {
+                               /*
+                                * The following call will print error values so
+                                * the return code is of little importance
+                                * because whatever happens, we have to try them
+                                * all.
+                                */
+                               (void) consumer_clear_quiescent_channel(socket,
+                                               reg_chan->consumer_key);
+                       }
+               }
+               break;
+       }
+       case LTTNG_BUFFER_PER_PID:
+       {
+               struct ust_app_session *ua_sess;
+               struct lttng_ht_iter iter;
+               struct ust_app *app;
+
+               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app,
+                               pid_n.node) {
+                       ua_sess = lookup_session_by_app(usess, app);
+                       if (ua_sess == NULL) {
+                               continue;
+                       }
+                       (void) ust_app_clear_quiescent_app_session(app,
+                                       ua_sess);
+               }
+               break;
+       }
+       default:
+               ret = -1;
+               assert(0);
+               break;
+       }
+
+       rcu_read_unlock();
+       health_code_update();
+       return ret;
+}
+
 /*
  * Destroy a specific UST session in apps.
  */
@@ -4708,6 +4960,14 @@ int ust_app_start_trace_all(struct ltt_ust_session *usess)
 
        rcu_read_lock();
 
+       /*
+        * In a start-stop-start use-case, we need to clear the quiescent state
+        * of each channel set by the prior stop command, thus ensuring that a
+        * following stop or destroy is sure to grab a timestamp_end near those
+        * operations, even if the packet is empty.
+        */
+       (void) ust_app_clear_quiescent_session(usess);
+
        cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
                ret = ust_app_start_trace(usess, app);
                if (ret < 0) {
@@ -4787,7 +5047,7 @@ void ust_app_global_create(struct ltt_ust_session *usess, struct ust_app *app)
        struct ust_app_ctx *ua_ctx;
        int is_created = 0;
 
-       ret = create_ust_app_session(usess, app, &ua_sess, &is_created);
+       ret = find_or_create_ust_app_session(usess, app, &ua_sess, &is_created);
        if (ret < 0) {
                /* Tracer is probably gone or ENOMEM. */
                goto error;
@@ -5049,54 +5309,6 @@ end:
        return ret;
 }
 
-/*
- * Calibrate registered applications.
- */
-int ust_app_calibrate_glb(struct lttng_ust_calibrate *calibrate)
-{
-       int ret = 0;
-       struct lttng_ht_iter iter;
-       struct ust_app *app;
-
-       rcu_read_lock();
-
-       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
-               if (!app->compatible) {
-                       /*
-                        * TODO: In time, we should notice the caller of this error by
-                        * telling him that this is a version error.
-                        */
-                       continue;
-               }
-
-               health_code_update();
-
-               pthread_mutex_lock(&app->sock_lock);
-               ret = ustctl_calibrate(app->sock, calibrate);
-               pthread_mutex_unlock(&app->sock_lock);
-               if (ret < 0) {
-                       switch (ret) {
-                       case -ENOSYS:
-                               /* Means that it's not implemented on the tracer side. */
-                               ret = 0;
-                               break;
-                       default:
-                               DBG2("Calibrate app PID %d returned with error %d",
-                                               app->pid, ret);
-                               break;
-                       }
-               }
-       }
-
-       DBG("UST app global domain calibration finished");
-
-       rcu_read_unlock();
-
-       health_code_update();
-
-       return ret;
-}
-
 /*
  * Receive registration and populate the given msg structure.
  *
@@ -5226,19 +5438,17 @@ static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
        /* Lookup application. If not found, there is a code flow error. */
        app = find_app_by_notify_sock(sock);
        if (!app) {
-               DBG("Application socket %d is being teardown. Abort event notify",
+               DBG("Application socket %d is being torn down. Abort event notify",
                                sock);
                ret = 0;
-               free(fields);
                goto error_rcu_unlock;
        }
 
        /* Lookup channel by UST object descriptor. */
        ua_chan = find_channel_by_objd(app, cobjd);
        if (!ua_chan) {
-               DBG("Application channel is being teardown. Abort event notify");
+               DBG("Application channel is being torn down. Abort event notify");
                ret = 0;
-               free(fields);
                goto error_rcu_unlock;
        }
 
@@ -5247,7 +5457,11 @@ static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
 
        /* Get right session registry depending on the session buffer type. */
        registry = get_session_registry(ua_sess);
-       assert(registry);
+       if (!registry) {
+               DBG("Application session is being torn down. Abort event notify");
+               ret = 0;
+               goto error_rcu_unlock;
+       }
 
        /* Depending on the buffer type, a different channel key is used. */
        if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
@@ -5271,13 +5485,11 @@ static int reply_ust_register_channel(int sock, int sobjd, int cobjd,
 
                chan_reg->nr_ctx_fields = nr_fields;
                chan_reg->ctx_fields = fields;
+               fields = NULL;
                chan_reg->header_type = type;
        } else {
                /* Get current already assigned values. */
                type = chan_reg->header_type;
-               free(fields);
-               /* Set to NULL so the error path does not do a double free. */
-               fields = NULL;
        }
        /* Channel id is set during the object creation. */
        chan_id = chan_reg->chan_id;
@@ -5313,9 +5525,7 @@ error:
        pthread_mutex_unlock(&registry->lock);
 error_rcu_unlock:
        rcu_read_unlock();
-       if (ret) {
-               free(fields);
-       }
+       free(fields);
        return ret;
 }
 
@@ -5345,23 +5555,17 @@ static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
        /* Lookup application. If not found, there is a code flow error. */
        app = find_app_by_notify_sock(sock);
        if (!app) {
-               DBG("Application socket %d is being teardown. Abort event notify",
+               DBG("Application socket %d is being torn down. Abort event notify",
                                sock);
                ret = 0;
-               free(sig);
-               free(fields);
-               free(model_emf_uri);
                goto error_rcu_unlock;
        }
 
        /* Lookup channel by UST object descriptor. */
        ua_chan = find_channel_by_objd(app, cobjd);
        if (!ua_chan) {
-               DBG("Application channel is being teardown. Abort event notify");
+               DBG("Application channel is being torn down. Abort event notify");
                ret = 0;
-               free(sig);
-               free(fields);
-               free(model_emf_uri);
                goto error_rcu_unlock;
        }
 
@@ -5369,7 +5573,11 @@ static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
        ua_sess = ua_chan->session;
 
        registry = get_session_registry(ua_sess);
-       assert(registry);
+       if (!registry) {
+               DBG("Application session is being torn down. Abort event notify");
+               ret = 0;
+               goto error_rcu_unlock;
+       }
 
        if (ua_sess->buffer_type == LTTNG_BUFFER_PER_UID) {
                chan_reg_key = ua_chan->tracing_channel_id;
@@ -5388,6 +5596,9 @@ static int add_event_ust_registry(int sock, int sobjd, int cobjd, char *name,
                        sobjd, cobjd, name, sig, nr_fields, fields,
                        loglevel_value, model_emf_uri, ua_sess->buffer_type,
                        &event_id, app);
+       sig = NULL;
+       fields = NULL;
+       model_emf_uri = NULL;
 
        /*
         * The return value is returned to ustctl so in case of an error, the
@@ -5415,6 +5626,9 @@ error:
        pthread_mutex_unlock(&registry->lock);
 error_rcu_unlock:
        rcu_read_unlock();
+       free(sig);
+       free(fields);
+       free(model_emf_uri);
        return ret;
 }
 
@@ -5451,13 +5665,17 @@ static int add_enum_ust_registry(int sock, int sobjd, char *name,
        ua_sess = find_session_by_objd(app, sobjd);
        if (!ua_sess) {
                /* Do not return an error since this is not an error. */
-               DBG("Application session is being torn down. Aborting enum registration.");
+               DBG("Application session is being torn down (session not found). Aborting enum registration.");
                free(entries);
                goto error_rcu_unlock;
        }
 
        registry = get_session_registry(ua_sess);
-       assert(registry);
+       if (!registry) {
+               DBG("Application session is being torn down (registry not found). Aborting enum registration.");
+               free(entries);
+               goto error_rcu_unlock;
+       }
 
        pthread_mutex_lock(&registry->lock);
 
@@ -5726,7 +5944,6 @@ int ust_app_snapshot_record(struct ltt_ust_session *usess,
                uint64_t nb_packets_per_stream)
 {
        int ret = 0;
-       unsigned int snapshot_done = 0;
        struct lttng_ht_iter iter;
        struct ust_app *app;
        char pathname[PATH_MAX];
@@ -5778,7 +5995,6 @@ int ust_app_snapshot_record(struct ltt_ust_session *usess,
                        if (ret < 0) {
                                goto error;
                        }
-                       snapshot_done = 1;
                }
                break;
        }
@@ -5825,13 +6041,16 @@ int ust_app_snapshot_record(struct ltt_ust_session *usess,
                        }
 
                        registry = get_session_registry(ua_sess);
-                       assert(registry);
+                       if (!registry) {
+                               DBG("Application session is being torn down. Abort snapshot record.");
+                               ret = -1;
+                               goto error;
+                       }
                        ret = consumer_snapshot_channel(socket, registry->metadata_key, output,
                                        1, ua_sess->euid, ua_sess->egid, pathname, wait, 0);
                        if (ret < 0) {
                                goto error;
                        }
-                       snapshot_done = 1;
                }
                break;
        }
@@ -5840,15 +6059,6 @@ int ust_app_snapshot_record(struct ltt_ust_session *usess,
                break;
        }
 
-       if (!snapshot_done) {
-               /*
-                * If no snapshot was made and we are not in the error path, this means
-                * that there are no buffers thus no (prior) application to snapshot
-                * data from so we have simply NO data.
-                */
-               ret = -ENODATA;
-       }
-
 error:
        rcu_read_unlock();
        return ret;
@@ -5935,10 +6145,14 @@ int ust_app_uid_get_channel_runtime_stats(uint64_t ust_session_id,
        int ret;
        uint64_t consumer_chan_key;
 
+       *discarded = 0;
+       *lost = 0;
+
        ret = buffer_reg_uid_consumer_channel_key(
-                       buffer_reg_uid_list, ust_session_id,
-                       uchan_id, &consumer_chan_key);
+                       buffer_reg_uid_list, uchan_id, &consumer_chan_key);
        if (ret < 0) {
+               /* Not found */
+               ret = 0;
                goto end;
        }
 
@@ -5966,10 +6180,13 @@ int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
        struct ust_app_session *ua_sess;
        struct ust_app_channel *ua_chan;
 
+       *discarded = 0;
+       *lost = 0;
+
        rcu_read_lock();
        /*
-        * Iterate over every registered applications, return when we
-        * found one in the right session and channel.
+        * Iterate over every registered application. Sum the counters of
+        * all applications containing the requested session and channel.
         */
        cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
                struct lttng_ht_iter uiter;
@@ -5988,17 +6205,292 @@ int ust_app_pid_get_channel_runtime_stats(struct ltt_ust_session *usess,
                ua_chan = caa_container_of(ua_chan_node, struct ust_app_channel, node);
 
                if (overwrite) {
+                       uint64_t _lost;
+
                        ret = consumer_get_lost_packets(usess->id, ua_chan->key,
-                                       consumer, lost);
-                       goto end;
+                                       consumer, &_lost);
+                       if (ret < 0) {
+                               break;
+                       }
+                       (*lost) += _lost;
                } else {
+                       uint64_t _discarded;
+
                        ret = consumer_get_discarded_events(usess->id,
-                                       ua_chan->key, consumer, discarded);
-                       goto end;
+                                       ua_chan->key, consumer, &_discarded);
+                       if (ret < 0) {
+                               break;
+                       }
+                       (*discarded) += _discarded;
                }
        }
 
+       rcu_read_unlock();
+       return ret;
+}
+
+static
+int ust_app_regenerate_statedump(struct ltt_ust_session *usess,
+               struct ust_app *app)
+{
+       int ret = 0;
+       struct ust_app_session *ua_sess;
+
+       DBG("Regenerating the metadata for ust app pid %d", app->pid);
+
+       rcu_read_lock();
+
+       ua_sess = lookup_session_by_app(usess, app);
+       if (ua_sess == NULL) {
+               /* The session is in teardown process. Ignore and continue. */
+               goto end;
+       }
+
+       pthread_mutex_lock(&ua_sess->lock);
+
+       if (ua_sess->deleted) {
+               goto end_unlock;
+       }
+
+       pthread_mutex_lock(&app->sock_lock);
+       ret = ustctl_regenerate_statedump(app->sock, ua_sess->handle);
+       pthread_mutex_unlock(&app->sock_lock);
+
+end_unlock:
+       pthread_mutex_unlock(&ua_sess->lock);
+
 end:
+       rcu_read_unlock();
+       health_code_update();
+       return ret;
+}
+
+/*
+ * Regenerate the statedump for each app in the session.
+ */
+int ust_app_regenerate_statedump_all(struct ltt_ust_session *usess)
+{
+       int ret = 0;
+       struct lttng_ht_iter iter;
+       struct ust_app *app;
+
+       DBG("Regenerating the metadata for all UST apps");
+
+       rcu_read_lock();
+
+       cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+               if (!app->compatible) {
+                       continue;
+               }
+
+               ret = ust_app_regenerate_statedump(usess, app);
+               if (ret < 0) {
+                       /* Continue to the next app even on error */
+                       continue;
+               }
+       }
+
+       rcu_read_unlock();
+
+       return 0;
+}
+
+/*
+ * Rotate all the channels of a session.
+ *
+ * Return 0 on success or else a negative value.
+ */
+int ust_app_rotate_session(struct ltt_session *session, bool *ust_active)
+{
+       int ret = 0;
+       struct lttng_ht_iter iter;
+       struct ust_app *app;
+       struct ltt_ust_session *usess = session->ust_session;
+       char pathname[LTTNG_PATH_MAX];
+
+       assert(usess);
+
+       rcu_read_lock();
+
+       switch (usess->buffer_type) {
+       case LTTNG_BUFFER_PER_UID:
+       {
+               struct buffer_reg_uid *reg;
+
+               cds_list_for_each_entry(reg, &usess->buffer_reg_uid_list, lnode) {
+                       struct buffer_reg_channel *reg_chan;
+                       struct consumer_socket *socket;
+
+                       /* Get consumer socket to use to push the metadata.*/
+                       socket = consumer_find_socket_by_bitness(reg->bits_per_long,
+                                       usess->consumer);
+                       if (!socket) {
+                               ret = -EINVAL;
+                               goto error;
+                       }
+
+                       /*
+                        * Account the metadata channel first to make sure the
+                        * number of channels waiting for a rotation cannot
+                        * reach 0 before we complete the iteration over all
+                        * the channels.
+                        */
+                       ret = rotate_add_channel_pending(
+                                       reg->registry->reg.ust->metadata_key,
+                                       LTTNG_DOMAIN_UST, session);
+                       if (ret < 0) {
+                               ret = reg->bits_per_long == 32 ?
+                                               -LTTNG_ERR_UST_CONSUMER32_FAIL :
+                                               -LTTNG_ERR_UST_CONSUMER64_FAIL;
+                               goto error;
+                       }
+
+                       ret = snprintf(pathname, sizeof(pathname),
+                                       DEFAULT_UST_TRACE_DIR "/" DEFAULT_UST_TRACE_UID_PATH,
+                                       reg->uid, reg->bits_per_long);
+                       if (ret < 0 || ret == sizeof(pathname)) {
+                               PERROR("Failed to format rotation path");
+                               goto error;
+                       }
+
+                       /* Rotate the data channels. */
+                       cds_lfht_for_each_entry(reg->registry->channels->ht, &iter.iter,
+                                       reg_chan, node.node) {
+                               ret = rotate_add_channel_pending(
+                                               reg_chan->consumer_key,
+                                               LTTNG_DOMAIN_UST, session);
+                               if (ret < 0) {
+                                       ret = reg->bits_per_long == 32 ?
+                                                       -LTTNG_ERR_UST_CONSUMER32_FAIL :
+                                                       -LTTNG_ERR_UST_CONSUMER64_FAIL;
+                                       goto error;
+                               }
+                               ret = consumer_rotate_channel(socket,
+                                               reg_chan->consumer_key,
+                                               usess->uid, usess->gid,
+                                               usess->consumer, pathname,
+                                               /* is_metadata_channel */ false,
+                                               session->current_archive_id,
+                                               &session->rotate_pending_relay);
+                               if (ret < 0) {
+                                       goto error;
+                               }
+                       }
+
+                       (void) push_metadata(reg->registry->reg.ust, usess->consumer);
+
+                       ret = consumer_rotate_channel(socket,
+                                       reg->registry->reg.ust->metadata_key,
+                                       usess->uid, usess->gid,
+                                       usess->consumer, pathname,
+                                       /* is_metadata_channel */ true,
+                                       session->current_archive_id,
+                                       &session->rotate_pending_relay);
+                       if (ret < 0) {
+                               goto error;
+                       }
+                       *ust_active = true;
+               }
+               break;
+       }
+       case LTTNG_BUFFER_PER_PID:
+       {
+               cds_lfht_for_each_entry(ust_app_ht->ht, &iter.iter, app, pid_n.node) {
+                       struct consumer_socket *socket;
+                       struct lttng_ht_iter chan_iter;
+                       struct ust_app_channel *ua_chan;
+                       struct ust_app_session *ua_sess;
+                       struct ust_registry_session *registry;
+
+                       ua_sess = lookup_session_by_app(usess, app);
+                       if (!ua_sess) {
+                               /* Session not associated with this app. */
+                               continue;
+                       }
+                       ret = snprintf(pathname, sizeof(pathname),
+                                       DEFAULT_UST_TRACE_DIR "/%s",
+                                       ua_sess->path);
+                       if (ret < 0 || ret == sizeof(pathname)) {
+                               PERROR("Failed to format rotation path");
+                               goto error;
+                       }
+
+                       /* Get the right consumer socket for the application. */
+                       socket = consumer_find_socket_by_bitness(app->bits_per_long,
+                                       usess->consumer);
+                       if (!socket) {
+                               ret = -EINVAL;
+                               goto error;
+                       }
+
+                       registry = get_session_registry(ua_sess);
+                       if (!registry) {
+                               DBG("Application session is being torn down. Abort session rotation.");
+                               ret = -1;
+                               goto error;
+                       }
+
+                       /*
+                        * Account the metadata channel first to make sure the
+                        * number of channels waiting for a rotation cannot
+                        * reach 0 before we complete the iteration over all
+                        * the channels.
+                        */
+                       ret = rotate_add_channel_pending(registry->metadata_key,
+                                       LTTNG_DOMAIN_UST, session);
+                       if (ret < 0) {
+                               ret = app->bits_per_long == 32 ?
+                                               -LTTNG_ERR_UST_CONSUMER32_FAIL :
+                                               -LTTNG_ERR_UST_CONSUMER64_FAIL;
+                               goto error;
+                       }
+
+                       /* Rotate the data channels. */
+                       cds_lfht_for_each_entry(ua_sess->channels->ht, &chan_iter.iter,
+                                       ua_chan, node.node) {
+                               ret = rotate_add_channel_pending(
+                                               ua_chan->key, LTTNG_DOMAIN_UST,
+                                               session);
+                               if (ret < 0) {
+                                       ret = app->bits_per_long == 32 ?
+                                                       -LTTNG_ERR_UST_CONSUMER32_FAIL :
+                                                       -LTTNG_ERR_UST_CONSUMER64_FAIL;
+                                       goto error;
+                               }
+                               ret = consumer_rotate_channel(socket, ua_chan->key,
+                                               ua_sess->euid, ua_sess->egid,
+                                               ua_sess->consumer, pathname,
+                                               /* is_metadata_channel */ false,
+                                               session->current_archive_id,
+                                               &session->rotate_pending_relay);
+                               if (ret < 0) {
+                                       goto error;
+                               }
+                       }
+
+                       /* Rotate the metadata channel. */
+                       (void) push_metadata(registry, usess->consumer);
+                       ret = consumer_rotate_channel(socket, registry->metadata_key,
+                                       ua_sess->euid, ua_sess->egid,
+                                       ua_sess->consumer, pathname,
+                                       /* is_metadata_channel */ true,
+                                       session->current_archive_id,
+                                       &session->rotate_pending_relay);
+                       if (ret < 0) {
+                               goto error;
+                       }
+                       *ust_active = true;
+               }
+               break;
+       }
+       default:
+               assert(0);
+               break;
+       }
+
+       ret = LTTNG_OK;
+
+error:
        rcu_read_unlock();
        return ret;
 }
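
Beyond the cleanup, the runtime-stats hunks above also tighten the getters' contract: ust_app_uid_get_channel_runtime_stats and ust_app_pid_get_channel_runtime_stats now zero-initialize their output counters, report a missing per-UID buffer registry entry as zero counters instead of an error, and sum the per-PID counters across all matching applications rather than returning after the first hit. The standalone sketch below only illustrates that output-parameter pattern with made-up names; it is not lttng-tools code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical helper (illustration only, not an lttng-tools API): zero the
 * output parameters up front so a "not found" lookup reports zero counters
 * instead of leaving the caller's variables uninitialized, and accumulate
 * per-application values instead of returning after the first match.
 */
static int get_channel_stats(int nr_apps_found, uint64_t *discarded,
		uint64_t *lost)
{
	int i;

	*discarded = 0;
	*lost = 0;

	if (nr_apps_found == 0) {
		/* Not an error: no buffers were ever allocated. */
		return 0;
	}

	for (i = 0; i < nr_apps_found; i++) {
		/* Stand-ins for the per-app consumer counter queries. */
		*discarded += 1;
		*lost += 1;
	}
	return 0;
}

int main(void)
{
	uint64_t discarded, lost;

	if (get_channel_stats(0, &discarded, &lost) == 0) {
		printf("discarded=%" PRIu64 ", lost=%" PRIu64 "\n",
				discarded, lost);
	}
	return 0;
}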