Fix: ust-consumer: flush empty packets on snapshot channel
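
This diff makes stream flushes idempotent across a tracing stop and fixes
snapshot packet accounting. Each stream gains a "quiescent" flag:
flush_channel(), snapshot_channel() and lttng_ustconsumer_on_stream_hangup()
take the stream lock, perform a "full" buffer flush (ustctl_flush_buffer()
with 0 as second argument) only if the stream is not already quiescent, and
then mark it quiescent; the new LTTNG_CONSUMER_CLEAR_QUIESCENT_CHANNEL
command clears the flag when tracing restarts. Snapshots now count lost
packets, but only those located between successfully extracted packets,
since packets lost before the first or after the last extracted packet do
not appear in the resulting snapshot. The diff also threads a metadata cache
"version" through the push-metadata path so a regenerated cache resets the
metadata stream, moves the recursive rmdir of the shm path from channel
deletion to channel free, adds stream_instance_id and packet_seq_num to the
packet index, and stops the DISCARDED_EVENTS handler from reusing its ret
variable for the returned count.

As a rough sketch only (the helper name below is hypothetical, not part of
the diff), every flush site now follows this pattern:

    static void flush_stream_once(struct lttng_consumer_stream *stream)
    {
        pthread_mutex_lock(&stream->lock);
        if (!stream->quiescent) {
            /*
             * Per the comment added in snapshot_channel(), passing 0
             * requests a "full" buffer flush; the quiescent flag ensures
             * it is done at most once per tracing stop.
             */
            ustctl_flush_buffer(stream->ustream, 0);
            stream->quiescent = true;
        }
        pthread_mutex_unlock(&stream->lock);
    }
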
diff --git a/src/common/ust-consumer/ust-consumer.c b/src/common/ust-consumer/ust-consumer.c
index d0f7ac53ee651371ba4b59444df5a8ae6c391330..89109b919a85231615fdb7f86a3c98518a9d422a 100644
--- a/src/common/ust-consumer/ust-consumer.c
+++ b/src/common/ust-consumer/ust-consumer.c
@@ -767,7 +767,54 @@ static int flush_channel(uint64_t chan_key)
 
                health_code_update();
 
-               ustctl_flush_buffer(stream->ustream, 1);
+               pthread_mutex_lock(&stream->lock);
+               if (!stream->quiescent) {
+                       ustctl_flush_buffer(stream->ustream, 0);
+                       stream->quiescent = true;
+               }
+               pthread_mutex_unlock(&stream->lock);
+       }
+error:
+       rcu_read_unlock();
+       return ret;
+}
+
+/*
+ * Clear quiescent state from channel's streams using the given key to
+ * retrieve the channel.
+ *
+ * Return 0 on success, else an LTTng error code.
+ */
+static int clear_quiescent_channel(uint64_t chan_key)
+{
+       int ret = 0;
+       struct lttng_consumer_channel *channel;
+       struct lttng_consumer_stream *stream;
+       struct lttng_ht *ht;
+       struct lttng_ht_iter iter;
+
+       DBG("UST consumer clear quiescent channel key %" PRIu64, chan_key);
+
+       rcu_read_lock();
+       channel = consumer_find_channel(chan_key);
+       if (!channel) {
+               ERR("UST consumer clear quiescent channel %" PRIu64 " not found", chan_key);
+               ret = LTTNG_ERR_UST_CHAN_NOT_FOUND;
+               goto error;
+       }
+
+       ht = consumer_data.stream_per_chan_id_ht;
+
+       /* For each stream of the channel id, clear quiescent state. */
+       cds_lfht_for_each_entry_duplicate(ht->ht,
+                       ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct,
+                       &channel->key, &iter.iter, stream, node_channel_id.node) {
+
+               health_code_update();
+
+               pthread_mutex_lock(&stream->lock);
+               stream->quiescent = false;
+               pthread_mutex_unlock(&stream->lock);
        }
 error:
        rcu_read_unlock();
@@ -1029,6 +1076,8 @@ static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
        DBG("UST consumer snapshot channel %" PRIu64, key);
 
        cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
+               /* Are we at a position _before_ the first available packet? */
+               bool before_first_packet = true;
 
                health_code_update();
 
@@ -1062,7 +1111,13 @@ static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
                        }
                }
 
-               ustctl_flush_buffer(stream->ustream, 1);
+               /*
+                * If tracing is active, we want to perform a "full" buffer flush.
+                * Else, if quiescent, it has already been done by the prior stop.
+                */
+               if (!stream->quiescent) {
+                       ustctl_flush_buffer(stream->ustream, 0);
+               }
 
                ret = lttng_ustconsumer_take_snapshot(stream);
                if (ret < 0) {
@@ -1095,6 +1150,7 @@ static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
                while (consumed_pos < produced_pos) {
                        ssize_t read_len;
                        unsigned long len, padded_len;
+                       int lost_packet = 0;
 
                        health_code_update();
 
@@ -1108,6 +1164,15 @@ static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
                                }
                                DBG("UST consumer get subbuf failed. Skipping it.");
                                consumed_pos += stream->max_sb_size;
+
+                               /*
+                                * Start accounting lost packets only when we
+                                * already have extracted packets (to match the
+                                * content of the final snapshot).
+                                */
+                               if (!before_first_packet) {
+                                       lost_packet = 1;
+                               }
                                continue;
                        }
 
@@ -1143,6 +1208,16 @@ static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
                                goto error_close_stream;
                        }
                        consumed_pos += stream->max_sb_size;
+
+                       /*
+                        * Only account lost packets located between
+                        * successfully extracted packets (do not account before
+                        * and after since they are not visible in the
+                        * resulting snapshot).
+                        */
+                       stream->chan->lost_packets += lost_packet;
+                       lost_packet = 0;
+                       before_first_packet = false;
                }
 
                /* Simply close the stream so we can use it on the next snapshot. */
@@ -1175,8 +1250,8 @@ error:
  * complete.
  */
 int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
-               uint64_t len, struct lttng_consumer_channel *channel,
-               int timer, int wait)
+               uint64_t len, uint64_t version,
+               struct lttng_consumer_channel *channel, int timer, int wait)
 {
        int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS;
        char *metadata_str;
@@ -1203,7 +1278,8 @@ int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset,
        health_code_update();
 
        pthread_mutex_lock(&channel->metadata_cache->lock);
-       ret = consumer_metadata_cache_write(channel, offset, len, metadata_str);
+       ret = consumer_metadata_cache_write(channel, offset, len, version,
+                       metadata_str);
        if (ret < 0) {
                /* Unable to handle metadata. Notify session daemon. */
                ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA;
@@ -1559,12 +1635,25 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
 
                goto end_msg_sessiond;
        }
+       case LTTNG_CONSUMER_CLEAR_QUIESCENT_CHANNEL:
+       {
+               int ret;
+
+               ret = clear_quiescent_channel(
+                               msg.u.clear_quiescent_channel.key);
+               if (ret != 0) {
+                       ret_code = ret;
+               }
+
+               goto end_msg_sessiond;
+       }
        case LTTNG_CONSUMER_PUSH_METADATA:
        {
                int ret;
                uint64_t len = msg.u.push_metadata.len;
                uint64_t key = msg.u.push_metadata.key;
                uint64_t offset = msg.u.push_metadata.target_offset;
+               uint64_t version = msg.u.push_metadata.version;
                struct lttng_consumer_channel *channel;
 
                DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key,
@@ -1615,7 +1704,7 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                health_code_update();
 
                ret = lttng_ustconsumer_recv_metadata(sock, key, offset,
-                               len, channel, 0, 1);
+                               len, version, channel, 0, 1);
                if (ret < 0) {
                        /* error receiving from sessiond */
                        goto error_fatal;
@@ -1668,7 +1757,8 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
        }
        case LTTNG_CONSUMER_DISCARDED_EVENTS:
        {
-               uint64_t ret;
+               int ret = 0;
+               uint64_t discarded_events;
                struct lttng_ht_iter iter;
                struct lttng_ht *ht;
                struct lttng_consumer_stream *stream;
@@ -1689,13 +1779,13 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                 * found (no events are dropped if the channel is not yet in
                 * use).
                 */
-               ret = 0;
+               discarded_events = 0;
                cds_lfht_for_each_entry_duplicate(ht->ht,
                                ht->hash_fct(&id, lttng_ht_seed),
                                ht->match_fct, &id,
                                &iter.iter, stream, node_session_id.node) {
                        if (stream->chan->key == key) {
-                               ret = stream->chan->discarded_events;
+                               discarded_events = stream->chan->discarded_events;
                                break;
                        }
                }
@@ -1708,7 +1798,7 @@ int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx,
                health_code_update();
 
                /* Send back returned value to session daemon */
-               ret = lttcomm_send_unix_sock(sock, &ret, sizeof(ret));
+               ret = lttcomm_send_unix_sock(sock, &discarded_events, sizeof(discarded_events));
                if (ret < 0) {
                        PERROR("send discarded events");
                        goto error_fatal;
@@ -1920,14 +2010,19 @@ int lttng_ustconsumer_get_sequence_number(
 }
 
 /*
- * Called when the stream signal the consumer that it has hang up.
+ * Called when the stream signals the consumer that it has hung up.
  */
 void lttng_ustconsumer_on_stream_hangup(struct lttng_consumer_stream *stream)
 {
        assert(stream);
        assert(stream->ustream);
 
-       ustctl_flush_buffer(stream->ustream, 0);
+       pthread_mutex_lock(&stream->lock);
+       if (!stream->quiescent) {
+               ustctl_flush_buffer(stream->ustream, 0);
+               stream->quiescent = true;
+       }
+       pthread_mutex_unlock(&stream->lock);
        stream->hangup_flush_done = 1;
 }
 
@@ -1961,11 +2056,6 @@ void lttng_ustconsumer_del_channel(struct lttng_consumer_channel *chan)
                        }
                }
        }
-       /* Try to rmdir all directories under shm_path root. */
-       if (chan->root_shm_path[0]) {
-               (void) run_as_recursive_rmdir(chan->root_shm_path,
-                               chan->uid, chan->gid);
-       }
 }
 
 void lttng_ustconsumer_free_channel(struct lttng_consumer_channel *chan)
@@ -1975,6 +2065,11 @@ void lttng_ustconsumer_free_channel(struct lttng_consumer_channel *chan)
 
        consumer_metadata_cache_destroy(chan);
        ustctl_destroy_channel(chan->uchan);
+       /* Try to rmdir all directories under shm_path root. */
+       if (chan->root_shm_path[0]) {
+               (void) run_as_recursive_rmdir(chan->root_shm_path,
+                               chan->uid, chan->gid);
+       }
        free(chan->stream_fds);
 }
 
@@ -2057,10 +2152,56 @@ static int get_index_values(struct ctf_packet_index *index,
        }
        index->stream_id = htobe64(index->stream_id);
 
+       ret = ustctl_get_instance_id(ustream, &index->stream_instance_id);
+       if (ret < 0) {
+               PERROR("ustctl_get_instance_id");
+               goto error;
+       }
+       index->stream_instance_id = htobe64(index->stream_instance_id);
+
+       ret = ustctl_get_sequence_number(ustream, &index->packet_seq_num);
+       if (ret < 0) {
+               PERROR("ustctl_get_sequence_number");
+               goto error;
+       }
+       index->packet_seq_num = htobe64(index->packet_seq_num);
+
 error:
        return ret;
 }
 
+static
+void metadata_stream_reset_cache(struct lttng_consumer_stream *stream,
+               struct consumer_metadata_cache *cache)
+{
+       DBG("Metadata stream update to version %" PRIu64,
+                       cache->version);
+       stream->ust_metadata_pushed = 0;
+       stream->metadata_version = cache->version;
+       stream->reset_metadata_flag = 1;
+}
+
+/*
+ * Check if the version of the metadata stream and metadata cache match.
+ * If the cache got updated, reset the metadata stream.
+ * The stream lock and metadata cache lock MUST be held.
+ * Return 0 on success, a negative value on error.
+ */
+static
+int metadata_stream_check_version(struct lttng_consumer_stream *stream)
+{
+       int ret = 0;
+       struct consumer_metadata_cache *cache = stream->chan->metadata_cache;
+
+       if (cache->version == stream->metadata_version) {
+               goto end;
+       }
+       metadata_stream_reset_cache(stream, cache);
+
+end:
+       return ret;
+}
+
 /*
  * Write up to one packet from the metadata cache to the channel.
  *
@@ -2074,6 +2215,10 @@ int commit_one_metadata_packet(struct lttng_consumer_stream *stream)
        int ret;
 
        pthread_mutex_lock(&stream->chan->metadata_cache->lock);
+       ret = metadata_stream_check_version(stream);
+       if (ret < 0) {
+               goto end;
+       }
        if (stream->chan->metadata_cache->max_offset
                        == stream->ust_metadata_pushed) {
                ret = 0;
@@ -2657,7 +2802,7 @@ int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
        struct lttcomm_metadata_request_msg request;
        struct lttcomm_consumer_msg msg;
        enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
-       uint64_t len, key, offset;
+       uint64_t len, key, offset, version;
        int ret;
 
        assert(channel);
@@ -2737,6 +2882,7 @@ int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
        len = msg.u.push_metadata.len;
        key = msg.u.push_metadata.key;
        offset = msg.u.push_metadata.target_offset;
+       version = msg.u.push_metadata.version;
 
        assert(key == channel->key);
        if (len == 0) {
@@ -2759,7 +2905,7 @@ int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
        health_code_update();
 
        ret = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket,
-                       key, offset, len, channel, timer, wait);
+                       key, offset, len, version, channel, timer, wait);
        if (ret >= 0) {
                /*
                 * Only send the status msg if the sessiond is alive meaning a positive