Fix: correctly close metadata on sessiond thread shutdown
diff --git a/src/common/ust-consumer/ust-consumer.c b/src/common/ust-consumer/ust-consumer.c
index 18eb507dec003a08381f6c6a17f6da9997769850..6a692b9ba5a8bffd7826bd164dfc3f1de9f8c9df 100644
@@ -37,6 +37,7 @@
 #include <common/relayd/relayd.h>
 #include <common/compat/fcntl.h>
 #include <common/consumer-metadata-cache.h>
+#include <common/consumer-stream.h>
 #include <common/consumer-timer.h>
 #include <common/utils.h>
 
@@ -151,7 +152,8 @@ static struct lttng_consumer_stream *allocate_stream(int cpu, int key,
                        channel->session_id,
                        cpu,
                        &alloc_ret,
-                       channel->type);
+                       channel->type,
+                       channel->monitor);
        if (stream == NULL) {
                switch (alloc_ret) {
                case -ENOENT:
@@ -192,53 +194,40 @@ static int send_stream_to_thread(struct lttng_consumer_stream *stream,
 
        /* Get the right pipe where the stream will be sent. */
        if (stream->metadata_flag) {
+               ret = consumer_add_metadata_stream(stream);
+               if (ret) {
+                       ERR("Consumer add metadata stream %" PRIu64 " failed.",
+                                       stream->key);
+                       goto error;
+               }
                stream_pipe = ctx->consumer_metadata_pipe;
        } else {
+               ret = consumer_add_data_stream(stream);
+               if (ret) {
+                       ERR("Consumer add stream %" PRIu64 " failed.",
+                                       stream->key);
+                       goto error;
+               }
                stream_pipe = ctx->consumer_data_pipe;
        }
 
+       /*
+        * From this point on, the stream's ownership has been transferred away
+        * from the channel; the stream is now globally visible.
+        */
+       stream->globally_visible = 1;
+
        ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream));
        if (ret < 0) {
                ERR("Consumer write %s stream to pipe %d",
                                stream->metadata_flag ? "metadata" : "data",
                                lttng_pipe_get_writefd(stream_pipe));
-       }
-
-       return ret;
-}
-
-/*
- * Search for a relayd object related to the stream. If found, send the stream
- * to the relayd.
- *
- * On success, returns 0 else a negative value.
- */
-static int send_stream_to_relayd(struct lttng_consumer_stream *stream)
-{
-       int ret = 0;
-       struct consumer_relayd_sock_pair *relayd;
-
-       assert(stream);
-
-       relayd = consumer_find_relayd(stream->net_seq_idx);
-       if (relayd != NULL) {
-               pthread_mutex_lock(&relayd->ctrl_sock_mutex);
-               /* Add stream on the relayd */
-               ret = relayd_add_stream(&relayd->control_sock, stream->name,
-                               stream->chan->pathname, &stream->relayd_stream_id,
-                               stream->chan->tracefile_size,
-                               stream->chan->tracefile_count);
-               pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
-               if (ret < 0) {
-                       goto error;
+               if (stream->metadata_flag) {
+                       consumer_del_stream_for_metadata(stream);
+               } else {
+                       consumer_del_stream_for_data(stream);
                }
-       } else if (stream->net_seq_idx != (uint64_t) -1ULL) {
-               ERR("Network sequence index %" PRIu64 " unknown. Not adding stream.",
-                               stream->net_seq_idx);
-               ret = -1;
-               goto error;
        }
-
 error:
        return ret;
 }
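
The reordering in this hunk is the core of the change: a stream is now registered with its owning thread's hash table before it is written to that thread's wakeup pipe, and the registration is undone if the pipe write fails. Below is a minimal sketch of that publish-with-rollback pattern; register_stream, unregister_stream and notify_thread are hypothetical stand-ins for consumer_add_*_stream(), consumer_del_stream_for_*() and lttng_pipe_write(), not actual lttng-tools APIs.

#include <stdint.h>

struct stream {
        uint64_t key;
        int globally_visible;
};

/* Hypothetical stand-ins; trivially succeed for the sake of the sketch. */
static int register_stream(struct stream *s) { (void) s; return 0; }
static void unregister_stream(struct stream *s) { (void) s; }
static int notify_thread(struct stream *s) { (void) s; return 0; }

/*
 * Register first, mark globally visible, then wake the thread; on wakeup
 * failure, take the stream back out so no thread is left referencing a
 * stream that will never be serviced.
 */
static int publish_stream(struct stream *s)
{
        int ret = register_stream(s);

        if (ret) {
                return ret;
        }
        s->globally_visible = 1;
        ret = notify_thread(s);
        if (ret < 0) {
                unregister_stream(s);
        }
        return ret;
}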
@@ -264,8 +253,18 @@ static int create_ust_streams(struct lttng_consumer_channel *channel,
         */
        while ((ustream = ustctl_create_stream(channel->uchan, cpu))) {
                int wait_fd;
+               int ust_metadata_pipe[2];
 
-               wait_fd = ustctl_stream_get_wait_fd(ustream);
+               if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && channel->monitor) {
+                       ret = utils_create_pipe_cloexec_nonblock(ust_metadata_pipe);
+                       if (ret < 0) {
+                               ERR("Create ust metadata poll pipe");
+                               goto error;
+                       }
+                       wait_fd = ust_metadata_pipe[0];
+               } else {
+                       wait_fd = ustctl_stream_get_wait_fd(ustream);
+               }
 
                /* Allocate consumer stream object. */
                stream = allocate_stream(cpu, wait_fd, channel, ctx, &ret);
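
For monitored metadata channels, the wait fd handed to the stream is now the read side of a dedicated pipe instead of the ustctl wait fd, so the consumer itself can wake (or, at teardown, terminate) the metadata poll thread by writing to or closing the pipe. A plausible sketch of a helper in the spirit of utils_create_pipe_cloexec_nonblock() follows; the real helper lives in common/utils.c and may differ, and this sketch assumes Linux's pipe2().

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Both ends close-on-exec so forked children never inherit them, and
 * non-blocking so draining a wakeup byte can never stall the poll thread.
 */
static int create_pipe_cloexec_nonblock(int fds[2])
{
        int ret = pipe2(fds, O_CLOEXEC | O_NONBLOCK);

        if (ret < 0) {
                perror("pipe2");
        }
        return ret;
}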
@@ -284,7 +283,9 @@ static int create_ust_streams(struct lttng_consumer_channel *channel,
                 * Increment channel refcount since the channel reference has now been
                 * assigned in the allocation process above.
                 */
-               uatomic_inc(&stream->chan->refcount);
+               if (stream->chan->monitor) {
+                       uatomic_inc(&stream->chan->refcount);
+               }
 
                /*
                 * Order is important this is why a list is used. On error, the caller
@@ -317,6 +318,8 @@ static int create_ust_streams(struct lttng_consumer_channel *channel,
                /* Keep stream reference when creating metadata. */
                if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
                        channel->metadata_stream = stream;
+                       stream->ust_metadata_poll_pipe[0] = ust_metadata_pipe[0];
+                       stream->ust_metadata_poll_pipe[1] = ust_metadata_pipe[1];
                }
        }
 
@@ -375,7 +378,7 @@ static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream)
        assert(stream);
        assert(sock >= 0);
 
-       DBG2("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);
+       DBG("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);
 
        /* Send stream to session daemon. */
        ret = ustctl_send_stream_to_sessiond(sock, stream->ustream);
@@ -405,18 +408,20 @@ static int send_sessiond_channel(int sock,
 
        DBG("UST consumer sending channel %s to sessiond", channel->name);
 
-       cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
-               /* Try to send the stream to the relayd if one is available. */
-               ret = send_stream_to_relayd(stream);
-               if (ret < 0) {
-                       /*
-                        * Flag that the relayd was the problem here probably due to a
-                        * communicaton error on the socket.
-                        */
-                       if (relayd_error) {
-                               *relayd_error = 1;
+       if (channel->relayd_id != (uint64_t) -1ULL) {
+               cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
+                       /* Try to send the stream to the relayd if one is available. */
+                       ret = consumer_send_relayd_stream(stream, stream->chan->pathname);
+                       if (ret < 0) {
+                               /*
+                                * Flag that the relayd was the problem here, probably due
+                                * to a communication error on the socket.
+                                */
+                               if (relayd_error) {
+                                       *relayd_error = 1;
+                               }
+                               ret_code = LTTNG_ERR_RELAYD_CONNECT_FAIL;
                        }
-                       ret_code = LTTNG_ERR_RELAYD_CONNECT_FAIL;
                }
        }
 
@@ -503,18 +508,27 @@ static int ask_channel(struct lttng_consumer_local_data *ctx, int sock,
        /* The reply msg status is handled in the following call. */
        ret = create_ust_channel(attr, &channel->uchan);
        if (ret < 0) {
-               goto error;
+               goto end;
        }
 
        channel->wait_fd = ustctl_channel_get_wait_fd(channel->uchan);
 
+       /*
+        * For snapshot sessions (no monitor), the metadata streams are created
+        * on demand, not during channel creation.
+        */
+       if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && !channel->monitor) {
+               ret = 0;
+               goto end;
+       }
+
        /* Open all streams for this channel. */
        ret = create_ust_streams(channel, ctx);
        if (ret < 0) {
-               goto error;
+               goto end;
        }
 
-error:
+end:
        return ret;
 }
 
@@ -542,50 +556,16 @@ static int send_streams_to_thread(struct lttng_consumer_channel *channel,
                         * If we are unable to send the stream to the thread, there is
                         * a big problem so just stop everything.
                         */
+                       /* Remove node from the channel stream list. */
+                       cds_list_del(&stream->send_node);
                        goto error;
                }
 
                /* Remove node from the channel stream list. */
                cds_list_del(&stream->send_node);
-       }
-
-error:
-       return ret;
-}
-
-/*
- * Write metadata to the given channel using ustctl to convert the string to
- * the ringbuffer.
- * Called only from consumer_metadata_cache_write.
- * The metadata cache lock MUST be acquired to write in the cache.
- *
- * Return 0 on success else a negative value.
- */
-int lttng_ustconsumer_push_metadata(struct lttng_consumer_channel *metadata,
-               const char *metadata_str, uint64_t target_offset, uint64_t len)
-{
-       int ret;
-
-       assert(metadata);
-       assert(metadata_str);
 
-       DBG("UST consumer writing metadata to channel %s", metadata->name);
-
-       if (!metadata->metadata_stream) {
-               ret = 0;
-               goto error;
        }
 
-       assert(target_offset <= metadata->metadata_cache->max_offset);
-       ret = ustctl_write_metadata_to_channel(metadata->uchan,
-                       metadata_str + target_offset, len);
-       if (ret < 0) {
-               ERR("ustctl write metadata fail with ret %d, len %" PRIu64, ret, len);
-               goto error;
-       }
-
-       ustctl_flush_buffer(metadata->metadata_stream->ustream, 1);
-
 error:
        return ret;
 }
@@ -619,12 +599,51 @@ static int flush_channel(uint64_t chan_key)
        cds_lfht_for_each_entry_duplicate(ht->ht,
                        ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
                        &channel->key, &iter.iter, stream, node_channel_id.node) {
-                       ustctl_flush_buffer(stream->ustream, 1);
+               ustctl_flush_buffer(stream->ustream, 1);
        }
 error:
        rcu_read_unlock();
        return ret;
 }
+
+/*
+ * Close the metadata stream's wakeup_fd of the given metadata channel.
+ * RCU read side lock MUST be acquired before calling this function.
+ *
+ * NOTE: This function does NOT take any channel nor stream lock.
+ *
+ * Return LTTNG_OK on success else an LTTng error code.
+ */
+static int _close_metadata(struct lttng_consumer_channel *channel)
+{
+       int ret = LTTNG_OK;
+
+       assert(channel);
+       assert(channel->type == CONSUMER_CHANNEL_TYPE_METADATA);
+
+       if (channel->switch_timer_enabled == 1) {
+               DBG("Deleting timer on metadata channel");
+               consumer_timer_switch_stop(channel);
+       }
+
+       if (channel->metadata_stream) {
+               ret = ustctl_stream_close_wakeup_fd(channel->metadata_stream->ustream);
+               if (ret < 0) {
+                       ERR("UST consumer unable to close fd of metadata (ret: %d)", ret);
+                       ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
+               }
+
+               if (channel->monitor) {
+                       /*
+                        * The read-side of this pipe is closed in
+                        * consumer_del_metadata_stream(); only close the
+                        * write-side here.
+                        */
+                       ret = close(channel->metadata_stream->ust_metadata_poll_pipe[1]);
+                       if (ret < 0) {
+                               PERROR("Close UST metadata write-side poll pipe");
+                               ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
+                       }
+               }
+       }
+
+       return ret;
+}
 
 /*
  * Close metadata stream wakeup_fd using the given key to retrieve the channel.
@@ -653,26 +672,16 @@ static int close_metadata(uint64_t chan_key)
        }
 
        pthread_mutex_lock(&consumer_data.lock);
+       pthread_mutex_lock(&channel->lock);
 
        if (cds_lfht_is_node_deleted(&channel->node.node)) {
                goto error_unlock;
        }
 
-       if (channel->switch_timer_enabled == 1) {
-               DBG("Deleting timer on metadata channel");
-               consumer_timer_switch_stop(channel);
-       }
-
-       if (channel->metadata_stream) {
-               ret = ustctl_stream_close_wakeup_fd(channel->metadata_stream->ustream);
-               if (ret < 0) {
-                       ERR("UST consumer unable to close fd of metadata (ret: %d)", ret);
-                       ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
-                       goto error_unlock;
-               }
-       }
+       ret = _close_metadata(channel);
 
 error_unlock:
+       pthread_mutex_unlock(&channel->lock);
        pthread_mutex_unlock(&consumer_data.lock);
 error:
        return ret;
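
Note the lock nesting introduced here: close_metadata() takes the coarse consumer_data.lock first, then the per-channel lock. Every path that touches the metadata stream must follow that same order, otherwise two threads can deadlock ABBA-style. A minimal illustration of the discipline (the mutex names below are illustrative, not the actual globals):

#include <pthread.h>

static pthread_mutex_t consumer_data_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t channel_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Coarse lock before fine lock, released in reverse order; a code path
 * acquiring only the channel lock remains safe, but one acquiring the two
 * locks in the opposite order would deadlock against this function.
 */
static void locked_channel_op(void (*op)(void))
{
        pthread_mutex_lock(&consumer_data_lock);
        pthread_mutex_lock(&channel_lock);
        op();
        pthread_mutex_unlock(&channel_lock);
        pthread_mutex_unlock(&consumer_data_lock);
}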
@@ -694,7 +703,17 @@ static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
        if (!metadata) {
                ERR("UST consumer push metadata %" PRIu64 " not found", key);
                ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
-               goto error_find;
+               goto end;
+       }
+
+       /*
+        * In no monitor mode, the metadata channel has no stream(s) so skip the
+        * ownership transfer to the metadata thread.
+        */
+       if (!metadata->monitor) {
+               DBG("Metadata channel in no monitor");
+               ret = 0;
+               goto end;
        }
 
        /*
@@ -704,14 +723,17 @@ static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
        if (cds_list_empty(&metadata->streams.head)) {
                ERR("Metadata channel key %" PRIu64 ", no stream available.", key);
                ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
-               goto error;
+               goto error_no_stream;
        }
 
        /* Send metadata stream to relayd if needed. */
-       ret = send_stream_to_relayd(metadata->metadata_stream);
-       if (ret < 0) {
-               ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
-               goto error;
+       if (metadata->metadata_stream->net_seq_idx != (uint64_t) -1ULL) {
+               ret = consumer_send_relayd_stream(metadata->metadata_stream,
+                               metadata->pathname);
+               if (ret < 0) {
+                       ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
+                       goto error;
+               }
        }
 
        ret = send_streams_to_thread(metadata, ctx);
@@ -726,7 +748,8 @@ static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
        /* List MUST be empty after or else it could be reused. */
        assert(cds_list_empty(&metadata->streams.head));
 
-       return 0;
+       ret = 0;
+       goto end;
 
 error:
        /*
@@ -735,8 +758,262 @@ error:
         * the stream is still in the local stream list of the channel. This call
         * will make sure to clean that list.
         */
-       consumer_del_channel(metadata);
-error_find:
+       cds_list_del(&metadata->metadata_stream->send_node);
+       consumer_stream_destroy(metadata->metadata_stream, NULL);
+error_no_stream:
+end:
+       return ret;
+}
+
+/*
+ * Snapshot the whole metadata.
+ *
+ * Returns 0 on success, < 0 on error
+ */
+static int snapshot_metadata(uint64_t key, char *path, uint64_t relayd_id,
+               struct lttng_consumer_local_data *ctx)
+{
+       int ret = 0;
+       struct lttng_consumer_channel *metadata_channel;
+       struct lttng_consumer_stream *metadata_stream;
+
+       assert(path);
+       assert(ctx);
+
+       DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s",
+                       key, path);
+
+       rcu_read_lock();
+
+       metadata_channel = consumer_find_channel(key);
+       if (!metadata_channel) {
+               ERR("UST snapshot metadata channel not found for key %" PRIu64,
+                       key);
+               ret = -1;
+               goto error;
+       }
+       assert(!metadata_channel->monitor);
+
+       /*
+        * Ask the sessiond if we have new metadata waiting and update the
+        * consumer metadata cache.
+        */
+       ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0);
+       if (ret < 0) {
+               goto error;
+       }
+
+       /*
+        * In no monitor mode, the metadata stream is NOT created when the
+        * channel is created by a sessiond ask-channel command, so create it
+        * here on demand.
+        */
+       ret = create_ust_streams(metadata_channel, ctx);
+       if (ret < 0) {
+               goto error;
+       }
+
+       metadata_stream = metadata_channel->metadata_stream;
+       assert(metadata_stream);
+
+       if (relayd_id != (uint64_t) -1ULL) {
+               metadata_stream->net_seq_idx = relayd_id;
+               ret = consumer_send_relayd_stream(metadata_stream, path);
+               if (ret < 0) {
+                       goto error_stream;
+               }
+       } else {
+               ret = utils_create_stream_file(path, metadata_stream->name,
+                               metadata_stream->chan->tracefile_size,
+                               metadata_stream->tracefile_count_current,
+                               metadata_stream->uid, metadata_stream->gid);
+               if (ret < 0) {
+                       goto error_stream;
+               }
+               metadata_stream->out_fd = ret;
+               metadata_stream->tracefile_size_current = 0;
+       }
+
+       pthread_mutex_lock(&metadata_channel->metadata_cache->lock);
+
+       do {
+               ret = lttng_consumer_read_subbuffer(metadata_stream, ctx);
+               if (ret < 0) {
+                       goto error_unlock;
+               }
+       } while (ret > 0);
+
+error_unlock:
+       pthread_mutex_unlock(&metadata_channel->metadata_cache->lock);
+
+error_stream:
+       /*
+        * Clean up the stream completely because the next snapshot will use a
+        * new metadata stream.
+        */
+       cds_list_del(&metadata_stream->send_node);
+       consumer_stream_destroy(metadata_stream, NULL);
+       metadata_channel->metadata_stream = NULL;
+
+error:
+       rcu_read_unlock();
+       return ret;
+}
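
snapshot_metadata() ends with a drain loop: lttng_consumer_read_subbuffer() is called until it stops returning a positive byte count, at which point the whole metadata cache has been written out. The idiom reduced to its shape, with read_fn as a hypothetical stand-in for the real read function:

/*
 * Keep reading until a read returns 0 (fully drained) or a negative
 * error; a positive return means more subbuffers may still be pending.
 */
static int drain_stream(int (*read_fn)(void *stream), void *stream)
{
        int ret;

        do {
                ret = read_fn(stream);
        } while (ret > 0);

        return ret;
}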
+
+/*
+ * Take a snapshot of all the streams of a channel.
+ *
+ * Returns 0 on success, < 0 on error
+ */
+static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
+               uint64_t max_stream_size, struct lttng_consumer_local_data *ctx)
+{
+       int ret;
+       unsigned use_relayd = 0;
+       unsigned long consumed_pos, produced_pos;
+       struct lttng_consumer_channel *channel;
+       struct lttng_consumer_stream *stream;
+
+       assert(path);
+       assert(ctx);
+
+       rcu_read_lock();
+
+       if (relayd_id != (uint64_t) -1ULL) {
+               use_relayd = 1;
+       }
+
+       channel = consumer_find_channel(key);
+       if (!channel) {
+               ERR("UST snapshot channel not found for key %" PRIu64, key);
+               ret = -1;
+               goto error;
+       }
+       assert(!channel->monitor);
+       DBG("UST consumer snapshot channel %" PRIu64, key);
+
+       cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
+               /* Lock stream because we are about to change its state. */
+               pthread_mutex_lock(&stream->lock);
+               stream->net_seq_idx = relayd_id;
+
+               if (use_relayd) {
+                       ret = consumer_send_relayd_stream(stream, path);
+                       if (ret < 0) {
+                               goto error_unlock;
+                       }
+               } else {
+                       ret = utils_create_stream_file(path, stream->name,
+                                       stream->chan->tracefile_size,
+                                       stream->tracefile_count_current,
+                                       stream->uid, stream->gid);
+                       if (ret < 0) {
+                               goto error_unlock;
+                       }
+                       stream->out_fd = ret;
+                       stream->tracefile_size_current = 0;
+
+                       DBG("UST consumer snapshot stream %s/%s (%" PRIu64 ")", path,
+                                       stream->name, stream->key);
+               }
+
+               ustctl_flush_buffer(stream->ustream, 1);
+
+               ret = lttng_ustconsumer_take_snapshot(stream);
+               if (ret < 0) {
+                       ERR("Taking UST snapshot");
+                       goto error_unlock;
+               }
+
+               ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
+               if (ret < 0) {
+                       ERR("Produced UST snapshot position");
+                       goto error_unlock;
+               }
+
+               ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
+               if (ret < 0) {
+                       ERR("Consumerd UST snapshot position");
+                       goto error_unlock;
+               }
+
+               /*
+                * The original consumed position is returned if the maximum
+                * stream size is larger than the possible size of the snapshot.
+                * We also assume that the session daemon never sends a maximum
+                * stream size lower than the subbuffer size.
+                */
+               consumed_pos = consumer_get_consumed_maxsize(consumed_pos,
+                               produced_pos, max_stream_size);
+
+               while (consumed_pos < produced_pos) {
+                       ssize_t read_len;
+                       unsigned long len, padded_len;
+
+                       DBG("UST consumer taking snapshot at pos %lu", consumed_pos);
+
+                       ret = ustctl_get_subbuf(stream->ustream, &consumed_pos);
+                       if (ret < 0) {
+                               if (ret != -EAGAIN) {
+                                       PERROR("ustctl_get_subbuf snapshot");
+                                       goto error_close_stream;
+                               }
+                               DBG("UST consumer get subbuf failed. Skipping it.");
+                               consumed_pos += stream->max_sb_size;
+                               continue;
+                       }
+
+                       ret = ustctl_get_subbuf_size(stream->ustream, &len);
+                       if (ret < 0) {
+                               ERR("Snapshot ustctl_get_subbuf_size");
+                               goto error_put_subbuf;
+                       }
+
+                       ret = ustctl_get_padded_subbuf_size(stream->ustream, &padded_len);
+                       if (ret < 0) {
+                               ERR("Snapshot ustctl_get_padded_subbuf_size");
+                               goto error_put_subbuf;
+                       }
+
+                       read_len = lttng_consumer_on_read_subbuffer_mmap(ctx, stream, len,
+                                       padded_len - len);
+                       if (use_relayd) {
+                               if (read_len != len) {
+                                       ret = -EPERM;
+                                       goto error_put_subbuf;
+                               }
+                       } else {
+                               if (read_len != padded_len) {
+                                       ret = -EPERM;
+                                       goto error_put_subbuf;
+                               }
+                       }
+
+                       ret = ustctl_put_subbuf(stream->ustream);
+                       if (ret < 0) {
+                               ERR("Snapshot ustctl_put_subbuf");
+                               goto error_close_stream;
+                       }
+                       consumed_pos += stream->max_sb_size;
+               }
+
+               /* Simply close the stream so we can use it on the next snapshot. */
+               consumer_stream_close(stream);
+               pthread_mutex_unlock(&stream->lock);
+       }
+
+       rcu_read_unlock();
+       return 0;
+
+error_put_subbuf:
+       if (ustctl_put_subbuf(stream->ustream) < 0) {
+               ERR("Snapshot ustctl_put_subbuf");
+       }
+error_close_stream:
+       consumer_stream_close(stream);
+error_unlock:
+       pthread_mutex_unlock(&stream->lock);
+error:
+       rcu_read_unlock();
        return ret;
 }
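
The clamping step in the loop above deserves a closer look. Based solely on the comment in snapshot_channel(), consumer_get_consumed_maxsize() plausibly moves the consumed position forward so that at most max_stream_size bytes are dumped; this is a hedged sketch of that computation, not the actual implementation:

#include <stdint.h>

/*
 * If the snapshot would exceed max_size bytes, keep only the newest
 * max_size bytes; otherwise return the original consumed position.
 * Assumes max_size == 0 means "no limit".
 */
static unsigned long clamp_consumed_pos(unsigned long consumed,
                unsigned long produced, uint64_t max_size)
{
        if (max_size != 0 && (uint64_t) (produced - consumed) > max_size) {
                return produced - max_size;
        }
        return consumed;
}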
 
@@ -744,7 +1021,8 @@ error_find:
  * Receive the metadata updates from the sessiond.
  */
 int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
-               uint64_t len, struct lttng_consumer_channel *channel)
+               uint64_t len, struct lttng_consumer_channel *channel,
+               int timer)
 {
        int ret, ret_code = LTTNG_OK;
        char *metadata_str;
@@ -766,17 +1044,6 @@ int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
                goto end_free;
        }
 
-       /*
-        * XXX: The consumer data lock is acquired before calling metadata cache
-        * write which calls push metadata that MUST be protected by the consumer
-        * lock in order to be able to check the validity of the metadata stream of
-        * the channel.
-        *
-        * Note that this will be subject to change to better fine grained locking
-        * and ultimately try to get rid of this global consumer data lock.
-        */
-       pthread_mutex_lock(&consumer_data.lock);
-
        pthread_mutex_lock(&channel->metadata_cache->lock);
        ret = consumer_metadata_cache_write(channel, offset, len, metadata_str);
        if (ret < 0) {
@@ -788,13 +1055,11 @@ int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
                 * waiting for the metadata cache to be flushed.
                 */
                pthread_mutex_unlock(&channel->metadata_cache->lock);
-               pthread_mutex_unlock(&consumer_data.lock);
                goto end_free;
        }
        pthread_mutex_unlock(&channel->metadata_cache->lock);
-       pthread_mutex_unlock(&consumer_data.lock);
 
-       while (consumer_metadata_cache_flushed(channel, offset + len)) {
+       while (consumer_metadata_cache_flushed(channel, offset + len, timer)) {
                DBG("Waiting for metadata to be flushed");
                usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME);
        }
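
With consumer_data.lock gone from this path, the writer still blocks until the cache has been flushed past offset + len, polling at DEFAULT_METADATA_AVAILABILITY_WAIT_TIME intervals; the new timer argument tells the flush check whether it is being invoked from timer context. The poll-until-flushed idiom, sketched with a hypothetical predicate standing in for consumer_metadata_cache_flushed():

#include <unistd.h>

static int still_unflushed(unsigned long target)
{
        (void) target;
        return 0; /* stand-in: pretend the cache is already flushed */
}

/*
 * Sleep a fixed quantum between checks instead of using a condition
 * variable, trading wakeup latency for locking simplicity.
 */
static void wait_metadata_flushed(unsigned long target_offset)
{
        while (still_unflushed(target_offset)) {
                usleep(100000); /* stand-in for the availability wait time */
        }
}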
@@ -822,12 +1087,12 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
        if (ret != sizeof(msg)) {
                DBG("Consumer received unexpected message size %zd (expects %zu)",
                        ret, sizeof(msg));
-               lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
                /*
                 * The ret value might be 0, meaning an orderly shutdown, but this
                 * is ok since the caller handles this.
                 */
                if (ret > 0) {
+                       lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
                        ret = -1;
                }
                return ret;
@@ -933,6 +1198,13 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                        goto end_channel_error;
                }
 
+               /*
+                * Assign UST application UID to the channel. This value is ignored for
+                * per PID buffers. This is specific to UST thus setting this after the
+                * allocation.
+                */
+               channel->ust_app_uid = msg.u.ask_channel.ust_app_uid;
+
                /* Build channel attributes from received message. */
                attr.subbuf_size = msg.u.ask_channel.subbuf_size;
                attr.num_subbuf = msg.u.ask_channel.num_subbuf;
@@ -940,6 +1212,7 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval;
                attr.read_timer_interval = msg.u.ask_channel.read_timer_interval;
                attr.chan_id = msg.u.ask_channel.chan_id;
+               attr.output = msg.u.ask_channel.output;
                memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid));
 
                /* Translate and save channel type. */
@@ -1043,6 +1316,14 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                        goto error_fatal;
                }
 
+               /*
+                * In no monitor mode, the streams' ownership is kept inside
+                * the channel, so don't send them to the data thread.
+                */
+               if (!channel->monitor) {
+                       goto end_msg_sessiond;
+               }
+
                ret = send_streams_to_thread(channel, ctx);
                if (ret < 0) {
                        /*
@@ -1053,7 +1334,6 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                }
                /* List MUST be empty after or else it could be reused. */
                assert(cds_list_empty(&channel->streams.head));
-
                goto end_msg_sessiond;
        }
        case LTTNG_CONSUMER_DESTROY_CHANNEL:
@@ -1121,7 +1401,7 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                }
 
                ret = lttng_ustconsumer_recv_metadata(sock, key, offset,
-                               len, channel);
+                               len, channel, 0);
                if (ret < 0) {
                        /* error receiving from sessiond */
                        goto error_fatal;
@@ -1142,6 +1422,27 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
        }
        case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
        {
+               if (msg.u.snapshot_channel.metadata) {
+                       ret = snapshot_metadata(msg.u.snapshot_channel.key,
+                                       msg.u.snapshot_channel.pathname,
+                                       msg.u.snapshot_channel.relayd_id,
+                                       ctx);
+                       if (ret < 0) {
+                               ERR("Snapshot metadata failed");
+                               ret_code = LTTNG_ERR_UST_META_FAIL;
+                       }
+               } else {
+                       ret = snapshot_channel(msg.u.snapshot_channel.key,
+                                       msg.u.snapshot_channel.pathname,
+                                       msg.u.snapshot_channel.relayd_id,
+                                       msg.u.snapshot_channel.max_stream_size,
+                                       ctx);
+                       if (ret < 0) {
+                               ERR("Snapshot channel failed");
+                               ret_code = LTTNG_ERR_UST_CHAN_FAIL;
+                       }
+               }
+
                ret = consumer_send_status_msg(sock, ret_code);
                if (ret < 0) {
                        /* Somehow, the session daemon is not responding anymore. */
@@ -1249,6 +1550,21 @@ int lttng_ustconsumer_get_produced_snapshot(
        return ustctl_snapshot_get_produced(stream->ustream, pos);
 }
 
+/*
+ * Get the consumed position
+ *
+ * Returns 0 on success, < 0 on error
+ */
+int lttng_ustconsumer_get_consumed_snapshot(
+               struct lttng_consumer_stream *stream, unsigned long *pos)
+{
+       assert(stream);
+       assert(stream->ustream);
+       assert(pos);
+
+       return ustctl_snapshot_get_consumed(stream->ustream, pos);
+}
+
 /*
  * Called when the stream signal the consumer that it has hang up.
  */
@@ -1297,28 +1613,57 @@ int lttng_ustconsumer_read_subbuffer(struct lttng_consumer_stream *stream,
        assert(stream->ustream);
        assert(ctx);
 
-       DBG2("In UST read_subbuffer (wait_fd: %d, name: %s)", stream->wait_fd,
+       DBG("In UST read_subbuffer (wait_fd: %d, name: %s)", stream->wait_fd,
                        stream->name);
 
        /* Ease our life for what's next. */
        ustream = stream->ustream;
 
        /* We can consume the 1 byte written into the wait_fd by UST */
-       if (!stream->hangup_flush_done) {
+       if (stream->monitor && !stream->hangup_flush_done) {
                ssize_t readlen;
 
                do {
                        readlen = read(stream->wait_fd, &dummy, 1);
                } while (readlen == -1 && errno == EINTR);
-               if (readlen == -1) {
+               if (readlen == -1 && errno != EAGAIN && errno != EWOULDBLOCK) {
                        ret = readlen;
                        goto end;
                }
        }
 
+retry:
        /* Get the next subbuffer */
        err = ustctl_get_next_subbuf(ustream);
        if (err != 0) {
+               /*
+                * For a metadata stream, push more data from the metadata
+                * cache into the ring buffer if everything pushed so far has
+                * already been read.
+                */
+               if (stream->metadata_flag) {
+                       ssize_t write_len;
+
+                       if (stream->chan->metadata_cache->contiguous
+                                       == stream->ust_metadata_pushed) {
+                               ret = 0;
+                               goto end;
+                       }
+
+                       write_len = ustctl_write_one_packet_to_channel(stream->chan->uchan,
+                                       &stream->chan->metadata_cache->data[stream->ust_metadata_pushed],
+                                       stream->chan->metadata_cache->contiguous
+                                               - stream->ust_metadata_pushed);
+                       assert(write_len != 0);
+                       if (write_len < 0) {
+                               ERR("Writing one metadata packet");
+                               ret = -1;
+                               goto end;
+                       }
+                       stream->ust_metadata_pushed += write_len;
+                       ustctl_flush_buffer(stream->ustream, 1);
+                       goto retry;
+               }
+
                ret = err;      /* ustctl_get_next_subbuf returns negative, caller expect positive. */
                /*
                 * This is a debug message even for single-threaded consumer,
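
The retry path added in this hunk moves metadata to a pull model: when the ring buffer is empty, one packet is copied from the metadata cache into the channel and the read is retried. A condensed, self-contained sketch of that loop; the types and write_one_packet() are stand-ins for the metadata cache and ustctl_write_one_packet_to_channel(), and the stub simply pretends every byte was written:

#include <stdint.h>

struct meta_cache { const char *data; uint64_t contiguous; };
struct meta_stream { struct meta_cache *cache; uint64_t pushed; };

static int64_t write_one_packet(struct meta_stream *s, const char *buf,
                uint64_t len)
{
        (void) s; (void) buf;
        return (int64_t) len;
}

/*
 * Push cached-but-not-yet-pushed bytes into the ring buffer one packet
 * at a time; return 0 once the cache is fully pushed, -1 on error.
 */
static int push_pending_metadata(struct meta_stream *s)
{
        while (s->pushed < s->cache->contiguous) {
                int64_t written = write_one_packet(s,
                                s->cache->data + s->pushed,
                                s->cache->contiguous - s->pushed);

                if (written <= 0) {
                        return -1;
                }
                s->pushed += (uint64_t) written;
        }
        return 0;
}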
@@ -1379,8 +1724,10 @@ int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
 {
        int ret;
 
+       assert(stream);
+
        /* Don't create anything if this is set for streaming. */
-       if (stream->net_seq_idx == (uint64_t) -1ULL) {
+       if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor) {
                ret = utils_create_stream_file(stream->chan->pathname, stream->name,
                                stream->chan->tracefile_size, stream->tracefile_count_current,
                                stream->uid, stream->gid);
@@ -1413,15 +1760,50 @@ int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream)
 
        DBG("UST consumer checking data pending");
 
-       ret = ustctl_get_next_subbuf(stream->ustream);
-       if (ret == 0) {
-               /* There is still data so let's put back this subbuffer. */
-               ret = ustctl_put_subbuf(stream->ustream);
-               assert(ret == 0);
-               ret = 1;  /* Data is pending */
+       if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
+               ret = 0;
                goto end;
        }
 
+       if (stream->chan->type == CONSUMER_CHANNEL_TYPE_METADATA) {
+               uint64_t contiguous, pushed;
+
+               /* Ease our life a bit. */
+               contiguous = stream->chan->metadata_cache->contiguous;
+               pushed = stream->ust_metadata_pushed;
+
+               /*
+                * We can simply check whether all contiguously available data
+                * has been pushed to the ring buffer, since the push operation
+                * is performed within get_next_subbuf(), and because both
+                * get_next_subbuf() and put_next_subbuf() are issued atomically
+                * thanks to the stream lock within
+                * lttng_ustconsumer_read_subbuffer(). This basically means that
+                * whenever ust_metadata_pushed is incremented, the associated
+                * metadata has been consumed from the metadata stream.
+                */
+               DBG("UST consumer metadata pending check: contiguous %" PRIu64 " vs pushed %" PRIu64,
+                               contiguous, pushed);
+               assert(((int64_t) contiguous - pushed) >= 0);
+               if ((contiguous != pushed) ||
+                               (((int64_t) contiguous - pushed) > 0 || contiguous == 0)) {
+                       ret = 1;        /* Data is pending */
+                       goto end;
+               }
+       } else {
+               ret = ustctl_get_next_subbuf(stream->ustream);
+               if (ret == 0) {
+                       /*
+                        * There is still data so let's put back this
+                        * subbuffer.
+                        */
+                       ret = ustctl_put_subbuf(stream->ustream);
+                       assert(ret == 0);
+                       ret = 1;        /* Data is pending */
+                       goto end;
+               }
+       }
+
        /* Data is NOT pending so ready to be read. */
        ret = 0;
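
For metadata, the pending check above compares two monotonic counters instead of peeking into the ring buffer. Given the assert that contiguous >= pushed, the committed condition reduces to the sketch below, which, like the original, also reports an empty cache as pending:

#include <stdint.h>

/*
 * Data is pending exactly when some contiguously cached metadata has not
 * yet been pushed into the ring buffer, or nothing was ever cached.
 */
static int metadata_pending(uint64_t contiguous, uint64_t pushed)
{
        return contiguous > pushed || contiguous == 0;
}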
 
@@ -1440,7 +1822,6 @@ end:
  */
 void lttng_ustconsumer_close_metadata(struct lttng_ht *metadata_ht)
 {
-       int ret;
        struct lttng_ht_iter iter;
        struct lttng_consumer_stream *stream;
 
@@ -1452,17 +1833,16 @@ void lttng_ustconsumer_close_metadata(struct lttng_ht *metadata_ht)
        rcu_read_lock();
        cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream,
                        node.node) {
-               int fd = stream->wait_fd;
-
+               pthread_mutex_lock(&stream->chan->lock);
                /*
-                * Whatever happens here we have to continue to try to close every
-                * streams. Let's report at least the error on failure.
+                * Whatever the returned value, we must keep trying to close
+                * everything, so ignore it.
                 */
-               ret = ustctl_stream_close_wakeup_fd(stream->ustream);
-               if (ret) {
-                       ERR("Unable to close metadata stream fd %d ret %d", fd, ret);
-               }
-               DBG("Metadata wait fd %d closed", fd);
+               (void) _close_metadata(stream->chan);
+               DBG("Metadata wait fd %d and poll pipe fd %d closed", stream->wait_fd,
+                               stream->ust_metadata_poll_pipe[1]);
+               pthread_mutex_unlock(&stream->chan->lock);
        }
        rcu_read_unlock();
 }
@@ -1477,8 +1857,14 @@ void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream)
        }
 }
 
+/*
+ * Please refer to consumer-timer.c before adding any lock within this
+ * function or any of its callees. Timers have a very strict locking
+ * semantic with respect to teardown. Failure to respect this semantic
+ * introduces deadlocks.
+ */
 int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
-               struct lttng_consumer_channel *channel)
+               struct lttng_consumer_channel *channel, int timer)
 {
        struct lttcomm_metadata_request_msg request;
        struct lttcomm_consumer_msg msg;
@@ -1504,13 +1890,20 @@ int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
 
        request.session_id = channel->session_id;
        request.session_id_per_pid = channel->session_id_per_pid;
-       request.uid = channel->uid;
+       /*
+        * Request the application UID here so the metadata of that application
+        * can be sent back. The channel UID corresponds to the UID of the
+        * session user, used for the permissions on the stream file(s).
+        */
+       request.uid = channel->ust_app_uid;
        request.key = channel->key;
+
        DBG("Sending metadata request to sessiond, session id %" PRIu64
-                       ", per-pid %" PRIu64,
-                       channel->session_id,
-                       channel->session_id_per_pid);
+                       ", per-pid %" PRIu64 ", app UID %u and channek key %" PRIu64,
+                       request.session_id, request.session_id_per_pid, request.uid,
+                       request.key);
 
+       pthread_mutex_lock(&ctx->metadata_socket_lock);
        ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request,
                        sizeof(request));
        if (ret < 0) {
@@ -1565,7 +1958,7 @@ int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
        }
 
        ret_code = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket,
-                       key, offset, len, channel);
+                       key, offset, len, channel, timer);
        if (ret_code >= 0) {
                /*
                 * Only send the status msg if the sessiond is alive meaning a positive
@@ -1576,5 +1969,6 @@ int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
        ret = 0;
 
 end:
+       pthread_mutex_unlock(&ctx->metadata_socket_lock);
        return ret;
 }
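
Finally, note that the whole metadata request/reply exchange now happens under ctx->metadata_socket_lock, so a timer-triggered flush and a sessiond command can never interleave their messages on the shared socket. The shape of that critical section, with hypothetical stand-ins for the lttcomm unix-socket send/receive helpers:

#include <pthread.h>

static pthread_mutex_t sock_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-ins for the actual socket send/receive calls. */
static int send_request(void) { return 0; }
static int recv_reply(void) { return 0; }

/*
 * One mutex spans the full round trip: a request and its matching reply
 * form a single critical section on the shared metadata socket.
 */
static int metadata_round_trip(void)
{
        int ret;

        pthread_mutex_lock(&sock_lock);
        ret = send_request();
        if (ret == 0) {
                ret = recv_reply();
        }
        pthread_mutex_unlock(&sock_lock);
        return ret;
}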