Fix: format string type mismatch
diff --git a/src/common/ust-consumer/ust-consumer.c b/src/common/ust-consumer/ust-consumer.c
index f7ffb0febf5b7bcdc338b30975ae7e3379724935..676aa0d9e2f066bec6b439f61e151579f4758bb5 100644
@@ -37,6 +37,7 @@
 #include <common/relayd/relayd.h>
 #include <common/compat/fcntl.h>
 #include <common/consumer-metadata-cache.h>
+#include <common/consumer-stream.h>
 #include <common/consumer-timer.h>
 #include <common/utils.h>
 
@@ -113,14 +114,16 @@ error:
  */
 static struct lttng_consumer_channel *allocate_channel(uint64_t session_id,
                const char *pathname, const char *name, uid_t uid, gid_t gid,
-               int relayd_id, uint64_t key, enum lttng_event_output output,
-               uint64_t tracefile_size, uint64_t tracefile_count)
+               uint64_t relayd_id, uint64_t key, enum lttng_event_output output,
+               uint64_t tracefile_size, uint64_t tracefile_count,
+               uint64_t session_id_per_pid, unsigned int monitor)
 {
        assert(pathname);
        assert(name);
 
-       return consumer_allocate_channel(key, session_id, pathname, name, uid, gid,
-                       relayd_id, output, tracefile_size, tracefile_count);
+       return consumer_allocate_channel(key, session_id, pathname, name, uid,
+                       gid, relayd_id, output, tracefile_size,
+                       tracefile_count, session_id_per_pid, monitor);
 }
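
The widening of relayd_id to uint64_t is the root of the format string type mismatch named in the commit title: any DBG()/ERR() still printing it with "%d" now passes a 64-bit value where an int is expected. A minimal standalone sketch of that class of bug (hypothetical message, not a line from this patch):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static void print_relayd_id(uint64_t relayd_id)
    {
            /* Mismatch: "%d" consumes an int; passing a uint64_t is
             * undefined behaviour and truncates on 32-bit ABIs. */
            /* printf("relayd id %d\n", relayd_id); */

            /* Fixed: PRIu64 expands to the correct conversion
             * specifier for uint64_t on every platform. */
            printf("relayd id %" PRIu64 "\n", relayd_id);
    }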
 
 /*
@@ -149,7 +152,8 @@ static struct lttng_consumer_stream *allocate_stream(int cpu, int key,
                        channel->session_id,
                        cpu,
                        &alloc_ret,
-                       channel->type);
+                       channel->type,
+                       channel->monitor);
        if (stream == NULL) {
                switch (alloc_ret) {
                case -ENOENT:
@@ -190,53 +194,40 @@ static int send_stream_to_thread(struct lttng_consumer_stream *stream,
 
        /* Get the right pipe where the stream will be sent. */
        if (stream->metadata_flag) {
+               ret = consumer_add_metadata_stream(stream);
+               if (ret) {
+                       ERR("Consumer add metadata stream %" PRIu64 " failed.",
+                                       stream->key);
+                       goto error;
+               }
                stream_pipe = ctx->consumer_metadata_pipe;
        } else {
+               ret = consumer_add_data_stream(stream);
+               if (ret) {
+                       ERR("Consumer add stream %" PRIu64 " failed.",
+                                       stream->key);
+                       goto error;
+               }
                stream_pipe = ctx->consumer_data_pipe;
        }
 
+       /*
+        * From this point on, the stream's ownership is moved away from
+        * the channel and the stream becomes globally visible.
+        */
+       stream->globally_visible = 1;
+
        ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream));
        if (ret < 0) {
                ERR("Consumer write %s stream to pipe %d",
                                stream->metadata_flag ? "metadata" : "data",
                                lttng_pipe_get_writefd(stream_pipe));
-       }
-
-       return ret;
-}
-
-/*
- * Search for a relayd object related to the stream. If found, send the stream
- * to the relayd.
- *
- * On success, returns 0 else a negative value.
- */
-static int send_stream_to_relayd(struct lttng_consumer_stream *stream)
-{
-       int ret = 0;
-       struct consumer_relayd_sock_pair *relayd;
-
-       assert(stream);
-
-       relayd = consumer_find_relayd(stream->net_seq_idx);
-       if (relayd != NULL) {
-               pthread_mutex_lock(&relayd->ctrl_sock_mutex);
-               /* Add stream on the relayd */
-               ret = relayd_add_stream(&relayd->control_sock, stream->name,
-                               stream->chan->pathname, &stream->relayd_stream_id,
-                               stream->chan->tracefile_size,
-                               stream->chan->tracefile_count);
-               pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
-               if (ret < 0) {
-                       goto error;
+               if (stream->metadata_flag) {
+                       consumer_del_stream_for_metadata(stream);
+               } else {
+                       consumer_del_stream_for_data(stream);
                }
-       } else if (stream->net_seq_idx != (uint64_t) -1ULL) {
-               ERR("Network sequence index %" PRIu64 " unknown. Not adding stream.",
-                               stream->net_seq_idx);
-               ret = -1;
-               goto error;
        }
-
 error:
        return ret;
 }
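
The add-then-publish ordering above implies a rollback when the pipe write fails, so that exactly one owner remains. A minimal sketch of the pattern, with hypothetical register/unregister helpers standing in for consumer_add_*_stream() and consumer_del_stream_for_*():

    #include <errno.h>
    #include <unistd.h>

    void register_stream(void *stream);    /* hypothetical */
    void unregister_stream(void *stream);  /* hypothetical */

    static int publish_stream(int pipe_wfd, void *stream)
    {
            ssize_t ret;

            register_stream(stream);  /* stream becomes globally visible */
            do {
                    ret = write(pipe_wfd, &stream, sizeof(stream));
            } while (ret == -1 && errno == EINTR);
            if (ret != (ssize_t) sizeof(stream)) {
                    unregister_stream(stream);  /* roll back ownership */
                    return -1;
            }
            return 0;
    }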
@@ -262,8 +253,18 @@ static int create_ust_streams(struct lttng_consumer_channel *channel,
         */
        while ((ustream = ustctl_create_stream(channel->uchan, cpu))) {
                int wait_fd;
+               int ust_metadata_pipe[2];
 
-               wait_fd = ustctl_stream_get_wait_fd(ustream);
+               if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && channel->monitor) {
+                       ret = utils_create_pipe_cloexec_nonblock(ust_metadata_pipe);
+                       if (ret < 0) {
+                               ERR("Create ust metadata poll pipe");
+                               goto error;
+                       }
+                       wait_fd = ust_metadata_pipe[0];
+               } else {
+                       wait_fd = ustctl_stream_get_wait_fd(ustream);
+               }
 
                /* Allocate consumer stream object. */
                stream = allocate_stream(cpu, wait_fd, channel, ctx, &ret);
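
Metadata streams in monitor mode poll on a local pipe instead of the ustctl wait fd. A sketch of what a helper like utils_create_pipe_cloexec_nonblock() plausibly does on Linux; the real implementation lives in common/utils.c and may differ:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>

    static int create_pipe_cloexec_nonblock(int fds[2])
    {
            /* O_CLOEXEC: fds are not leaked across exec(); O_NONBLOCK:
             * a read on fds[0] returns EAGAIN instead of blocking the
             * metadata thread when nothing is queued. */
            return pipe2(fds, O_CLOEXEC | O_NONBLOCK);
    }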
@@ -282,7 +283,9 @@ static int create_ust_streams(struct lttng_consumer_channel *channel,
                 * Increment channel refcount since the channel reference has now been
                 * assigned in the allocation process above.
                 */
-               uatomic_inc(&stream->chan->refcount);
+               if (stream->chan->monitor) {
+                       uatomic_inc(&stream->chan->refcount);
+               }
 
                /*
                 * Order is important this is why a list is used. On error, the caller
@@ -315,6 +318,8 @@ static int create_ust_streams(struct lttng_consumer_channel *channel,
                /* Keep stream reference when creating metadata. */
                if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) {
                        channel->metadata_stream = stream;
+                       stream->ust_metadata_poll_pipe[0] = ust_metadata_pipe[0];
+                       stream->ust_metadata_poll_pipe[1] = ust_metadata_pipe[1];
                }
        }
 
@@ -373,7 +378,7 @@ static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream)
        assert(stream);
        assert(sock >= 0);
 
-       DBG2("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);
+       DBG("UST consumer sending stream %" PRIu64 " to sessiond", stream->key);
 
        /* Send stream to session daemon. */
        ret = ustctl_send_stream_to_sessiond(sock, stream->ustream);
@@ -394,7 +399,7 @@ static int send_sessiond_channel(int sock,
                struct lttng_consumer_channel *channel,
                struct lttng_consumer_local_data *ctx, int *relayd_error)
 {
-       int ret;
+       int ret, ret_code = LTTNG_OK;
        struct lttng_consumer_stream *stream;
 
        assert(channel);
@@ -403,6 +408,33 @@ static int send_sessiond_channel(int sock,
 
        DBG("UST consumer sending channel %s to sessiond", channel->name);
 
+       if (channel->relayd_id != (uint64_t) -1ULL) {
+               cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
+                       /* Try to send the stream to the relayd if one is available. */
+                       ret = consumer_send_relayd_stream(stream, stream->chan->pathname);
+                       if (ret < 0) {
+                               /*
+                        * Flag that the relayd was the problem here, probably
+                        * due to a communication error on the socket.
+                                */
+                               if (relayd_error) {
+                                       *relayd_error = 1;
+                               }
+                               ret_code = LTTNG_ERR_RELAYD_CONNECT_FAIL;
+                       }
+               }
+       }
+
+       /* Inform sessiond that we are about to send channel and streams. */
+       ret = consumer_send_status_msg(sock, ret_code);
+       if (ret < 0 || ret_code != LTTNG_OK) {
+               /*
+                * Either the session daemon is not responding or the relayd
+                * died, so we stop now.
+                */
+               goto error;
+       }
+
        /* Send channel to sessiond. */
        ret = ustctl_send_channel_to_sessiond(sock, channel->uchan);
        if (ret < 0) {
@@ -416,19 +448,6 @@ static int send_sessiond_channel(int sock,
 
        /* The channel was sent successfully to the sessiond at this point. */
        cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
-               /* Try to send the stream to the relayd if one is available. */
-               ret = send_stream_to_relayd(stream);
-               if (ret < 0) {
-                       /*
-                        * Flag that the relayd was the problem here probably due to a
-                        * communicaton error on the socket.
-                        */
-                       if (relayd_error) {
-                               *relayd_error = 1;
-                       }
-                       goto error;
-               }
-
                /* Send stream to session daemon. */
                ret = send_sessiond_stream(sock, stream);
                if (ret < 0) {
@@ -447,6 +466,9 @@ static int send_sessiond_channel(int sock,
        return 0;
 
 error:
+       if (ret_code != LTTNG_OK) {
+               ret = -1;
+       }
        return ret;
 }
 
@@ -486,18 +508,27 @@ static int ask_channel(struct lttng_consumer_local_data *ctx, int sock,
        /* The reply msg status is handled in the following call. */
        ret = create_ust_channel(attr, &channel->uchan);
        if (ret < 0) {
-               goto error;
+               goto end;
        }
 
        channel->wait_fd = ustctl_channel_get_wait_fd(channel->uchan);
 
+       /*
+        * For the snapshots (no monitor), we create the metadata streams
+        * on demand, not during the channel creation.
+        */
+       if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && !channel->monitor) {
+               ret = 0;
+               goto end;
+       }
+
        /* Open all streams for this channel. */
        ret = create_ust_streams(channel, ctx);
        if (ret < 0) {
-               goto error;
+               goto end;
        }
 
-error:
+end:
        return ret;
 }
 
@@ -525,50 +556,16 @@ static int send_streams_to_thread(struct lttng_consumer_channel *channel,
                         * If we are unable to send the stream to the thread, there is
                         * a big problem so just stop everything.
                         */
+                       /* Remove node from the channel stream list. */
+                       cds_list_del(&stream->send_node);
                        goto error;
                }
 
                /* Remove node from the channel stream list. */
                cds_list_del(&stream->send_node);
-       }
-
-error:
-       return ret;
-}
-
-/*
- * Write metadata to the given channel using ustctl to convert the string to
- * the ringbuffer.
- * Called only from consumer_metadata_cache_write.
- * The metadata cache lock MUST be acquired to write in the cache.
- *
- * Return 0 on success else a negative value.
- */
-int lttng_ustconsumer_push_metadata(struct lttng_consumer_channel *metadata,
-               const char *metadata_str, uint64_t target_offset, uint64_t len)
-{
-       int ret;
-
-       assert(metadata);
-       assert(metadata_str);
-
-       DBG("UST consumer writing metadata to channel %s", metadata->name);
 
-       if (!metadata->metadata_stream) {
-               ret = 0;
-               goto error;
-       }
-
-       assert(target_offset <= metadata->metadata_cache->max_offset);
-       ret = ustctl_write_metadata_to_channel(metadata->uchan,
-                       metadata_str + target_offset, len);
-       if (ret < 0) {
-               ERR("ustctl write metadata fail with ret %d, len %" PRIu64, ret, len);
-               goto error;
        }
 
-       ustctl_flush_buffer(metadata->metadata_stream->ustream, 1);
-
 error:
        return ret;
 }
@@ -602,7 +599,7 @@ static int flush_channel(uint64_t chan_key)
        cds_lfht_for_each_entry_duplicate(ht->ht,
                        ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
                        &channel->key, &iter.iter, stream, node_channel_id.node) {
-                       ustctl_flush_buffer(stream->ustream, 1);
+               ustctl_flush_buffer(stream->ustream, 1);
        }
 error:
        rcu_read_unlock();
@@ -624,12 +621,19 @@ static int close_metadata(uint64_t chan_key)
 
        channel = consumer_find_channel(chan_key);
        if (!channel) {
-               ERR("UST consumer close metadata %" PRIu64 " not found", chan_key);
+               /*
+                * This is possible if the metadata thread has issued a delete
+                * because the endpoint of the stream hung up. There is no way
+                * the session daemon can know about it, thus use a DBG instead
+                * of an actual error.
+                */
+               DBG("UST consumer close metadata %" PRIu64 " not found", chan_key);
                ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
                goto error;
        }
 
        pthread_mutex_lock(&consumer_data.lock);
+       pthread_mutex_lock(&channel->lock);
 
        if (cds_lfht_is_node_deleted(&channel->node.node)) {
                goto error_unlock;
@@ -647,9 +651,17 @@ static int close_metadata(uint64_t chan_key)
                        ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
                        goto error_unlock;
                }
+               if (channel->monitor) {
+                       /* close the read-side in consumer_del_metadata_stream */
+                       ret = close(channel->metadata_stream->ust_metadata_poll_pipe[1]);
+                       if (ret < 0) {
+                               PERROR("Close UST metadata write-side poll pipe");
+                       }
+               }
        }
 
 error_unlock:
+       pthread_mutex_unlock(&channel->lock);
        pthread_mutex_unlock(&consumer_data.lock);
 error:
        return ret;
@@ -671,7 +683,17 @@ static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
        if (!metadata) {
                ERR("UST consumer push metadata %" PRIu64 " not found", key);
                ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
-               goto error;
+               goto end;
+       }
+
+       /*
+        * In no monitor mode, the metadata channel has no stream(s) so skip the
+        * ownership transfer to the metadata thread.
+        */
+       if (!metadata->monitor) {
+               DBG("Metadata channel in no monitor");
+               ret = 0;
+               goto end;
        }
 
        /*
@@ -681,14 +703,17 @@ static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
        if (cds_list_empty(&metadata->streams.head)) {
                ERR("Metadata channel key %" PRIu64 ", no stream available.", key);
                ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
-               goto error;
+               goto error_no_stream;
        }
 
        /* Send metadata stream to relayd if needed. */
-       ret = send_stream_to_relayd(metadata->metadata_stream);
-       if (ret < 0) {
-               ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
-               goto error;
+       if (metadata->metadata_stream->net_seq_idx != (uint64_t) -1ULL) {
+               ret = consumer_send_relayd_stream(metadata->metadata_stream,
+                               metadata->pathname);
+               if (ret < 0) {
+                       ret = LTTCOMM_CONSUMERD_ERROR_METADATA;
+                       goto error;
+               }
        }
 
        ret = send_streams_to_thread(metadata, ctx);
@@ -704,8 +729,271 @@ static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key)
        assert(cds_list_empty(&metadata->streams.head));
 
        ret = 0;
+       goto end;
+
+error:
+       /*
+        * Delete the metadata stream on error. At this point, the metadata
+        * stream can NOT yet be monitored by the metadata thread, which
+        * guarantees that it is still in the channel's local stream list.
+        * This call makes sure that list gets cleaned up.
+        */
+       cds_list_del(&metadata->metadata_stream->send_node);
+       consumer_stream_destroy(metadata->metadata_stream, NULL);
+error_no_stream:
+end:
+       return ret;
+}
+
+/*
+ * Snapshot the whole metadata.
+ *
+ * Returns 0 on success, < 0 on error
+ */
+static int snapshot_metadata(uint64_t key, char *path, uint64_t relayd_id,
+               struct lttng_consumer_local_data *ctx)
+{
+       int ret = 0;
+       struct lttng_consumer_channel *metadata_channel;
+       struct lttng_consumer_stream *metadata_stream;
+
+       assert(path);
+       assert(ctx);
+
+       DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s",
+                       key, path);
+
+       rcu_read_lock();
+
+       metadata_channel = consumer_find_channel(key);
+       if (!metadata_channel) {
+               ERR("UST snapshot metadata channel not found for key %" PRIu64,
+                       key);
+               ret = -1;
+               goto error;
+       }
+       assert(!metadata_channel->monitor);
+
+       /*
+        * Ask the sessiond if we have new metadata waiting and update the
+        * consumer metadata cache.
+        */
+       ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0);
+       if (ret < 0) {
+               goto error;
+       }
+
+       /*
+        * In no monitor mode, the metadata stream is NOT created when the
+        * channel is created by a sessiond ask-channel command.
+        */
+       ret = create_ust_streams(metadata_channel, ctx);
+       if (ret < 0) {
+               goto error;
+       }
+
+       metadata_stream = metadata_channel->metadata_stream;
+       assert(metadata_stream);
+
+       if (relayd_id != (uint64_t) -1ULL) {
+               metadata_stream->net_seq_idx = relayd_id;
+               ret = consumer_send_relayd_stream(metadata_stream, path);
+               if (ret < 0) {
+                       goto error_stream;
+               }
+       } else {
+               ret = utils_create_stream_file(path, metadata_stream->name,
+                               metadata_stream->chan->tracefile_size,
+                               metadata_stream->tracefile_count_current,
+                               metadata_stream->uid, metadata_stream->gid);
+               if (ret < 0) {
+                       goto error_stream;
+               }
+               metadata_stream->out_fd = ret;
+               metadata_stream->tracefile_size_current = 0;
+       }
+
+       pthread_mutex_lock(&metadata_channel->metadata_cache->lock);
+
+       do {
+               ret = lttng_consumer_read_subbuffer(metadata_stream, ctx);
+               if (ret < 0) {
+                       goto error_unlock;
+               }
+       } while (ret > 0);
+
+error_unlock:
+       pthread_mutex_unlock(&metadata_channel->metadata_cache->lock);
+
+error_stream:
+       /*
+        * Clean up the stream completely because the next snapshot will use
+        * a new metadata stream.
+        */
+       cds_list_del(&metadata_stream->send_node);
+       consumer_stream_destroy(metadata_stream, NULL);
+       metadata_channel->metadata_stream = NULL;
+
+error:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Take a snapshot of all the streams of a channel.
+ *
+ * Returns 0 on success, < 0 on error
+ */
+static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
+               uint64_t max_stream_size, struct lttng_consumer_local_data *ctx)
+{
+       int ret;
+       unsigned use_relayd = 0;
+       unsigned long consumed_pos, produced_pos;
+       struct lttng_consumer_channel *channel;
+       struct lttng_consumer_stream *stream;
+
+       assert(path);
+       assert(ctx);
+
+       rcu_read_lock();
+
+       if (relayd_id != (uint64_t) -1ULL) {
+               use_relayd = 1;
+       }
+
+       channel = consumer_find_channel(key);
+       if (!channel) {
+               ERR("UST snapshot channel not found for key %" PRIu64, key);
+               ret = -1;
+               goto error;
+       }
+       assert(!channel->monitor);
+       DBG("UST consumer snapshot channel %" PRIu64, key);
+
+       cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
+               /* Lock stream because we are about to change its state. */
+               pthread_mutex_lock(&stream->lock);
+               stream->net_seq_idx = relayd_id;
+
+               if (use_relayd) {
+                       ret = consumer_send_relayd_stream(stream, path);
+                       if (ret < 0) {
+                               goto error_unlock;
+                       }
+               } else {
+                       ret = utils_create_stream_file(path, stream->name,
+                                       stream->chan->tracefile_size,
+                                       stream->tracefile_count_current,
+                                       stream->uid, stream->gid);
+                       if (ret < 0) {
+                               goto error_unlock;
+                       }
+                       stream->out_fd = ret;
+                       stream->tracefile_size_current = 0;
+
+                       DBG("UST consumer snapshot stream %s/%s (%" PRIu64 ")", path,
+                                       stream->name, stream->key);
+               }
+
+               ustctl_flush_buffer(stream->ustream, 1);
+
+               ret = lttng_ustconsumer_take_snapshot(stream);
+               if (ret < 0) {
+                       ERR("Taking UST snapshot");
+                       goto error_unlock;
+               }
+
+               ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos);
+               if (ret < 0) {
+                       ERR("Produced UST snapshot position");
+                       goto error_unlock;
+               }
 
+               ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos);
+               if (ret < 0) {
+                       ERR("Consumed UST snapshot position");
+                       goto error_unlock;
+               }
+
+               /*
+                * The original value is sent back if the max stream size is
+                * larger than the possible size of the snapshot. Also, we
+                * assume that the session daemon never sends a maximum stream
+                * size lower than the subbuffer size.
+                */
+               consumed_pos = consumer_get_consumed_maxsize(consumed_pos,
+                               produced_pos, max_stream_size);
+
+               while (consumed_pos < produced_pos) {
+                       ssize_t read_len;
+                       unsigned long len, padded_len;
+
+                       DBG("UST consumer taking snapshot at pos %lu", consumed_pos);
+
+                       ret = ustctl_get_subbuf(stream->ustream, &consumed_pos);
+                       if (ret < 0) {
+                               if (ret != -EAGAIN) {
+                                       PERROR("ustctl_get_subbuf snapshot");
+                                       goto error_close_stream;
+                               }
+                               DBG("UST consumer get subbuf failed. Skipping it.");
+                               consumed_pos += stream->max_sb_size;
+                               continue;
+                       }
+
+                       ret = ustctl_get_subbuf_size(stream->ustream, &len);
+                       if (ret < 0) {
+                               ERR("Snapshot ustctl_get_subbuf_size");
+                               goto error_put_subbuf;
+                       }
+
+                       ret = ustctl_get_padded_subbuf_size(stream->ustream, &padded_len);
+                       if (ret < 0) {
+                               ERR("Snapshot ustctl_get_padded_subbuf_size");
+                               goto error_put_subbuf;
+                       }
+
+                       read_len = lttng_consumer_on_read_subbuffer_mmap(ctx, stream, len,
+                                       padded_len - len);
+                       if (use_relayd) {
+                               if (read_len != len) {
+                                       ret = -1;
+                                       goto error_put_subbuf;
+                               }
+                       } else {
+                               if (read_len != padded_len) {
+                                       ret = -1;
+                                       goto error_put_subbuf;
+                               }
+                       }
+
+                       ret = ustctl_put_subbuf(stream->ustream);
+                       if (ret < 0) {
+                               ERR("Snapshot ustctl_put_subbuf");
+                               goto error_close_stream;
+                       }
+                       consumed_pos += stream->max_sb_size;
+               }
+
+               /* Simply close the stream so we can use it on the next snapshot. */
+               consumer_stream_close(stream);
+               pthread_mutex_unlock(&stream->lock);
+       }
+
+       rcu_read_unlock();
+       return 0;
+
+error_put_subbuf:
+       if (ustctl_put_subbuf(stream->ustream) < 0) {
+               ERR("Snapshot ustctl_put_subbuf");
+       }
+error_close_stream:
+       consumer_stream_close(stream);
+error_unlock:
+       pthread_mutex_unlock(&stream->lock);
 error:
+       rcu_read_unlock();
        return ret;
 }
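
The clamping performed by consumer_get_consumed_maxsize() in the loop above can be pictured as follows; this is an assumed contract, not the actual implementation from common/consumer.c:

    #include <stdint.h>

    static unsigned long get_consumed_maxsize(unsigned long consumed,
                    unsigned long produced, uint64_t max_stream_size)
    {
            /* Keep at most max_stream_size bytes between the consumed
             * and produced positions; 0 means "no limit", in which case
             * the original consumed position is returned unchanged. */
            if (max_stream_size && produced - consumed > max_stream_size) {
                    return produced - max_stream_size;
            }
            return consumed;
    }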
 
@@ -713,7 +1001,8 @@ error:
  * Receive the metadata updates from the sessiond.
  */
 int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
-               uint64_t len, struct lttng_consumer_channel *channel)
+               uint64_t len, struct lttng_consumer_channel *channel,
+               int timer)
 {
        int ret, ret_code = LTTNG_OK;
        char *metadata_str;
@@ -735,17 +1024,6 @@ int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
                goto end_free;
        }
 
-       /*
-        * XXX: The consumer data lock is acquired before calling metadata cache
-        * write which calls push metadata that MUST be protected by the consumer
-        * lock in order to be able to check the validity of the metadata stream of
-        * the channel.
-        *
-        * Note that this will be subject to change to better fine grained locking
-        * and ultimately try to get rid of this global consumer data lock.
-        */
-       pthread_mutex_lock(&consumer_data.lock);
-
        pthread_mutex_lock(&channel->metadata_cache->lock);
        ret = consumer_metadata_cache_write(channel, offset, len, metadata_str);
        if (ret < 0) {
@@ -757,13 +1035,11 @@ int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
                 * waiting for the metadata cache to be flushed.
                 */
                pthread_mutex_unlock(&channel->metadata_cache->lock);
-               pthread_mutex_unlock(&consumer_data.lock);
                goto end_free;
        }
        pthread_mutex_unlock(&channel->metadata_cache->lock);
-       pthread_mutex_unlock(&consumer_data.lock);
 
-       while (consumer_metadata_cache_flushed(channel, offset + len)) {
+       while (consumer_metadata_cache_flushed(channel, offset + len, timer)) {
                DBG("Waiting for metadata to be flushed");
                usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME);
        }
@@ -791,11 +1067,14 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
        if (ret != sizeof(msg)) {
                DBG("Consumer received unexpected message size %zd (expects %zu)",
                        ret, sizeof(msg));
-               lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
                /*
                 * The ret value might be 0, meaning an orderly shutdown, but this
                 * is ok since the caller handles this.
                 */
+               if (ret > 0) {
+                       lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD);
+                       ret = -1;
+               }
                return ret;
        }
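
The hunk above distinguishes three outcomes of the command read; a compact restatement of that contract (the helper name is hypothetical, the thresholds are the patch's):

    #include <stddef.h>
    #include <sys/types.h>

    static ssize_t classify_recv(ssize_t ret, size_t expected)
    {
            if (ret == 0) {
                    return 0;   /* orderly shutdown: the caller handles it */
            }
            if (ret > 0 && (size_t) ret != expected) {
                    return -1;  /* short read: report ERROR_RECV_CMD, bail out */
            }
            return ret;         /* full message (> 0) or transport error (< 0) */
    }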
        if (msg.cmd_type == LTTNG_CONSUMER_STOP) {
@@ -871,6 +1150,7 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                                sizeof(is_data_pending));
                if (ret < 0) {
                        DBG("Error when sending the data pending ret code: %d", ret);
+                       goto error_fatal;
                }
 
                /*
@@ -891,7 +1171,9 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                                msg.u.ask_channel.relayd_id, msg.u.ask_channel.key,
                                (enum lttng_event_output) msg.u.ask_channel.output,
                                msg.u.ask_channel.tracefile_size,
-                               msg.u.ask_channel.tracefile_count);
+                               msg.u.ask_channel.tracefile_count,
+                               msg.u.ask_channel.session_id_per_pid,
+                               msg.u.ask_channel.monitor);
                if (!channel) {
                        goto end_channel_error;
                }
@@ -903,18 +1185,9 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval;
                attr.read_timer_interval = msg.u.ask_channel.read_timer_interval;
                attr.chan_id = msg.u.ask_channel.chan_id;
+               attr.output = msg.u.ask_channel.output;
                memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid));
 
-               /* Translate the event output type to UST. */
-               switch (channel->output) {
-               case LTTNG_EVENT_SPLICE:
-                       /* Splice not supported so fallback on mmap(). */
-               case LTTNG_EVENT_MMAP:
-               default:
-                       attr.output = CONSUMER_CHANNEL_MMAP;
-                       break;
-               };
-
                /* Translate and save channel type. */
                switch (msg.u.ask_channel.type) {
                case LTTNG_UST_CHAN_PER_CPU:
@@ -977,10 +1250,9 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                ret = consumer_send_status_channel(sock, channel);
                if (ret < 0) {
                        /*
-                        * There is probably a problem on the socket so the poll will get
-                        * it and clean everything up.
+                        * There is probably a problem on the socket.
                         */
-                       goto end_nosignal;
+                       goto error_fatal;
                }
 
                break;
@@ -998,13 +1270,6 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                        goto end_msg_sessiond;
                }
 
-               /* Inform sessiond that we are about to send channel and streams. */
-               ret = consumer_send_status_msg(sock, LTTNG_OK);
-               if (ret < 0) {
-                       /* Somehow, the session daemon is not responding anymore. */
-                       goto end_nosignal;
-               }
-
                /* Send everything to sessiond. */
                ret = send_sessiond_channel(sock, channel, ctx, &relayd_err);
                if (ret < 0) {
@@ -1012,10 +1277,10 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                                /*
                                 * We were unable to send the stream to the relayd so avoid
                                 * sending back a fatal error to the thread since this is OK
-                                * and the consumer can continue its work.
+                                * and the consumer can continue its work. The above call
+                                * has sent the error status message to the sessiond.
                                 */
-                               ret_code = LTTNG_ERR_RELAYD_CONNECT_FAIL;
-                               goto end_msg_sessiond;
+                               goto end_nosignal;
                        }
                        /*
                         * The communication was broken hence there is a bad state between
@@ -1024,6 +1289,14 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                        goto error_fatal;
                }
 
+               /*
+                * In no monitor mode, stream ownership is kept inside the
+                * channel so don't send the streams to the data thread.
+                */
+               if (!channel->monitor) {
+                       goto end_msg_sessiond;
+               }
+
                ret = send_streams_to_thread(channel, ctx);
                if (ret < 0) {
                        /*
@@ -1034,7 +1307,6 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                }
                /* List MUST be empty after or else it could be reused. */
                assert(cds_list_empty(&channel->streams.head));
-
                goto end_msg_sessiond;
        }
        case LTTNG_CONSUMER_DESTROY_CHANNEL:
@@ -1098,14 +1370,14 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
 
                /* Wait for more data. */
                if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
-                       goto end_nosignal;
+                       goto error_fatal;
                }
 
                ret = lttng_ustconsumer_recv_metadata(sock, key, offset,
-                               len, channel);
+                               len, channel, 0);
                if (ret < 0) {
                        /* error receiving from sessiond */
-                       goto end_nosignal;
+                       goto error_fatal;
                } else {
                        ret_code = ret;
                        goto end_msg_sessiond;
@@ -1121,6 +1393,36 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                }
                goto end_msg_sessiond;
        }
+       case LTTNG_CONSUMER_SNAPSHOT_CHANNEL:
+       {
+               if (msg.u.snapshot_channel.metadata) {
+                       ret = snapshot_metadata(msg.u.snapshot_channel.key,
+                                       msg.u.snapshot_channel.pathname,
+                                       msg.u.snapshot_channel.relayd_id,
+                                       ctx);
+                       if (ret < 0) {
+                               ERR("Snapshot metadata failed");
+                               ret_code = LTTNG_ERR_UST_META_FAIL;
+                       }
+               } else {
+                       ret = snapshot_channel(msg.u.snapshot_channel.key,
+                                       msg.u.snapshot_channel.pathname,
+                                       msg.u.snapshot_channel.relayd_id,
+                                       msg.u.snapshot_channel.max_stream_size,
+                                       ctx);
+                       if (ret < 0) {
+                               ERR("Snapshot channel failed");
+                               ret_code = LTTNG_ERR_UST_CHAN_FAIL;
+                       }
+               }
+
+               ret = consumer_send_status_msg(sock, ret_code);
+               if (ret < 0) {
+                       /* Somehow, the session daemon is not responding anymore. */
+                       goto end_nosignal;
+               }
+               break;
+       }
        default:
                break;
        }
@@ -1140,7 +1442,10 @@ end_msg_sessiond:
         * the caller because the session daemon socket management is done
         * elsewhere. Returning a negative code or 0 will shutdown the consumer.
         */
-       (void) consumer_send_status_msg(sock, ret_code);
+       ret = consumer_send_status_msg(sock, ret_code);
+       if (ret < 0) {
+               goto error_fatal;
+       }
        rcu_read_unlock();
        return 1;
 end_channel_error:
@@ -1218,6 +1523,21 @@ int lttng_ustconsumer_get_produced_snapshot(
        return ustctl_snapshot_get_produced(stream->ustream, pos);
 }
 
+/*
+ * Get the consumed position
+ *
+ * Returns 0 on success, < 0 on error
+ */
+int lttng_ustconsumer_get_consumed_snapshot(
+               struct lttng_consumer_stream *stream, unsigned long *pos)
+{
+       assert(stream);
+       assert(stream->ustream);
+       assert(pos);
+
+       return ustctl_snapshot_get_consumed(stream->ustream, pos);
+}
+
 /*
  * Called when the stream signal the consumer that it has hang up.
  */
@@ -1266,28 +1586,57 @@ int lttng_ustconsumer_read_subbuffer(struct lttng_consumer_stream *stream,
        assert(stream->ustream);
        assert(ctx);
 
-       DBG2("In UST read_subbuffer (wait_fd: %d, name: %s)", stream->wait_fd,
+       DBG("In UST read_subbuffer (wait_fd: %d, name: %s)", stream->wait_fd,
                        stream->name);
 
        /* Ease our life for what's next. */
        ustream = stream->ustream;
 
        /* We can consume the 1 byte written into the wait_fd by UST */
-       if (!stream->hangup_flush_done) {
+       if (stream->monitor && !stream->hangup_flush_done) {
                ssize_t readlen;
 
                do {
                        readlen = read(stream->wait_fd, &dummy, 1);
                } while (readlen == -1 && errno == EINTR);
-               if (readlen == -1) {
+               if (readlen == -1 && errno != EAGAIN && errno != EWOULDBLOCK) {
                        ret = readlen;
                        goto end;
                }
        }
 
+retry:
        /* Get the next subbuffer */
        err = ustctl_get_next_subbuf(ustream);
        if (err != 0) {
+               /*
+                * Populate metadata info if the existing info has
+                * already been read.
+                */
+               if (stream->metadata_flag) {
+                       ssize_t write_len;
+
+                       if (stream->chan->metadata_cache->contiguous
+                                       == stream->ust_metadata_pushed) {
+                               ret = 0;
+                               goto end;
+                       }
+
+                       write_len = ustctl_write_one_packet_to_channel(stream->chan->uchan,
+                                       &stream->chan->metadata_cache->data[stream->ust_metadata_pushed],
+                                       stream->chan->metadata_cache->contiguous
+                                               - stream->ust_metadata_pushed);
+                       assert(write_len != 0);
+                       if (write_len < 0) {
+                               ERR("Writing one metadata packet");
+                               ret = -1;
+                               goto end;
+                       }
+                       stream->ust_metadata_pushed += write_len;
+                       ustctl_flush_buffer(stream->ustream, 1);
+                       goto retry;
+               }
+
                ret = err;      /* ustctl_get_next_subbuf returns negative, caller expect positive. */
                /*
                 * This is a debug message even for single-threaded consumer,
@@ -1348,8 +1697,10 @@ int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
 {
        int ret;
 
+       assert(stream);
+
        /* Don't create anything if this is set for streaming. */
-       if (stream->net_seq_idx == (uint64_t) -1ULL) {
+       if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor) {
                ret = utils_create_stream_file(stream->chan->pathname, stream->name,
                                stream->chan->tracefile_size, stream->tracefile_count_current,
                                stream->uid, stream->gid);
@@ -1382,15 +1733,44 @@ int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream)
 
        DBG("UST consumer checking data pending");
 
-       ret = ustctl_get_next_subbuf(stream->ustream);
-       if (ret == 0) {
-               /* There is still data so let's put back this subbuffer. */
-               ret = ustctl_put_subbuf(stream->ustream);
-               assert(ret == 0);
-               ret = 1;  /* Data is pending */
+       if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) {
+               ret = 0;
                goto end;
        }
 
+       if (stream->chan->type == CONSUMER_CHANNEL_TYPE_METADATA) {
+               /*
+                * We can simply check whether all contiguously available data
+                * has been pushed to the ring buffer, since the push operation
+                * is performed within get_next_subbuf(), and because both
+                * get_next_subbuf() and put_next_subbuf() are issued atomically
+                * thanks to the stream lock within
+                * lttng_ustconsumer_read_subbuffer(). This basically means that
+                * whenever ust_metadata_pushed is incremented, the associated
+                * metadata has been consumed from the metadata stream.
+                */
+               DBG("UST consumer metadata pending check: contiguous %" PRIu64 " vs pushed %" PRIu64,
+                       stream->chan->metadata_cache->contiguous,
+                       stream->ust_metadata_pushed);
+               if (stream->chan->metadata_cache->contiguous
+                               != stream->ust_metadata_pushed) {
+                       ret = 1;        /* Data is pending */
+                       goto end;
+               }
+       } else {
+               ret = ustctl_get_next_subbuf(stream->ustream);
+               if (ret == 0) {
+                       /*
+                        * There is still data so let's put back this
+                        * subbuffer.
+                        */
+                       ret = ustctl_put_subbuf(stream->ustream);
+                       assert(ret == 0);
+                       ret = 1;        /* Data is pending */
+                       goto end;
+               }
+       }
+
        /* Data is NOT pending so ready to be read. */
        ret = 0;
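
The metadata branch above reduces the pending-data check to a counter comparison. A sketch of the invariant, assuming (as the comment states) both counters only grow and are read under the stream lock:

    #include <stdint.h>

    /* Bytes in [0, pushed) have reached the ring buffer; bytes in
     * [pushed, contiguous) still sit in the metadata cache. */
    static int metadata_pending(uint64_t contiguous, uint64_t pushed)
    {
            return contiguous != pushed;  /* 1: pending, 0: caught up */
    }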
 
@@ -1446,8 +1826,14 @@ void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream)
        }
 }
 
+/*
+ * Please refer to consumer-timer.c before adding any lock within this
+ * function or any of its callees. Timers have a very strict locking
+ * semantic with respect to teardown. Failure to respect this semantic
+ * introduces deadlocks.
+ */
 int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
-               struct lttng_consumer_channel *channel)
+               struct lttng_consumer_channel *channel, int timer)
 {
        struct lttcomm_metadata_request_msg request;
        struct lttcomm_consumer_msg msg;
@@ -1472,11 +1858,15 @@ int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
        }
 
        request.session_id = channel->session_id;
+       request.session_id_per_pid = channel->session_id_per_pid;
        request.uid = channel->uid;
        request.key = channel->key;
-       DBG("Sending metadata request to sessiond, session %" PRIu64,
-                       channel->session_id);
+       DBG("Sending metadata request to sessiond, session id %" PRIu64
+                       ", per-pid %" PRIu64,
+                       channel->session_id,
+                       channel->session_id_per_pid);
 
+       pthread_mutex_lock(&ctx->metadata_socket_lock);
        ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request,
                        sizeof(request));
        if (ret < 0) {
@@ -1531,10 +1921,17 @@ int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
        }
 
        ret_code = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket,
-                       key, offset, len, channel);
-       (void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret_code);
+                       key, offset, len, channel, timer);
+       if (ret_code >= 0) {
+               /*
+                * Only send the status msg if the sessiond is alive, meaning a
+                * non-negative ret code.
+                */
+               (void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret_code);
+       }
        ret = 0;
 
 end:
+       pthread_mutex_unlock(&ctx->metadata_socket_lock);
        return ret;
 }
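
The new timer argument threads through to consumer_metadata_cache_flushed() so that it knows whether waiting is allowed. A hedged usage sketch; the call sites below are illustrative only:

    /* From the sessiond command path: waiting for the flush is safe. */
    ret = lttng_ustconsumer_request_metadata(ctx, channel, 0);

    /* From a timer callback: must not wait, per the locking comment
     * above, or teardown can deadlock. */
    ret = lttng_ustconsumer_request_metadata(ctx, channel, 1);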