consumer: fix: unaligned accesses to index fields
[lttng-tools.git] / src / common / ust-consumer / ust-consumer.c
index faf069c0df6f9319a53751a532cc5e88e769ca26..2be1e372fef3d2934b710244fbcfbca7e2b6bccf 100644 (file)
@@ -768,10 +768,19 @@ static int flush_channel(uint64_t chan_key)
                health_code_update();
 
                pthread_mutex_lock(&stream->lock);
+
+               /*
+                * Protect against concurrent teardown of a stream.
+                */
+               if (cds_lfht_is_node_deleted(&stream->node.node)) {
+                       goto next;
+               }
+
                if (!stream->quiescent) {
                        ustctl_flush_buffer(stream->ustream, 0);
                        stream->quiescent = true;
                }
+next:
                pthread_mutex_unlock(&stream->lock);
        }
 error:
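The guard added above only works because `cds_lfht_is_node_deleted()` is checked *after* the stream lock is taken; checked before, a concurrent teardown could unlink the node between the check and the flush. A minimal sketch of the same pattern, assuming a hypothetical `struct stream` that embeds its liburcu hash-table node directly (the real code wraps it in an `lttng_ht_node`):

    #include <pthread.h>
    #include <urcu.h>
    #include <urcu/rculfhash.h>

    struct stream {
            struct cds_lfht_node node;      /* hash table linkage */
            pthread_mutex_t lock;
    };

    /* Flush every stream that is still live; skip torn-down nodes. */
    static void flush_all(struct cds_lfht *ht)
    {
            struct cds_lfht_iter iter;
            struct stream *stream;

            rcu_read_lock();
            cds_lfht_for_each_entry(ht, &iter, stream, node) {
                    pthread_mutex_lock(&stream->lock);
                    if (cds_lfht_is_node_deleted(&stream->node)) {
                            /* Concurrent teardown won the race. */
                            pthread_mutex_unlock(&stream->lock);
                            continue;
                    }
                    /* ... safe to flush the stream's buffer here ... */
                    pthread_mutex_unlock(&stream->lock);
            }
            rcu_read_unlock();
    }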
@@ -831,6 +840,7 @@ static int close_metadata(uint64_t chan_key)
 {
        int ret = 0;
        struct lttng_consumer_channel *channel;
+       unsigned int channel_monitor;
 
        DBG("UST consumer close metadata key %" PRIu64, chan_key);
 
@@ -849,13 +859,48 @@ static int close_metadata(uint64_t chan_key)
 
        pthread_mutex_lock(&consumer_data.lock);
        pthread_mutex_lock(&channel->lock);
-
+       channel_monitor = channel->monitor;
        if (cds_lfht_is_node_deleted(&channel->node.node)) {
                goto error_unlock;
        }
 
        lttng_ustconsumer_close_metadata(channel);
+       pthread_mutex_unlock(&channel->lock);
+       pthread_mutex_unlock(&consumer_data.lock);
 
+       /*
+        * The ownership of a metadata channel depends on the type of
+        * session to which it belongs. In effect, the monitor flag is checked
+        * to determine if this metadata channel is in "snapshot" mode or not.
+        *
+        * In the non-snapshot case, the metadata channel is created along with
+        * a single stream which will remain present until the metadata channel
+        * is destroyed (on the destruction of its session). In this case, the
+        * metadata stream is "monitored" by the metadata poll thread and holds
+        * the ownership of its channel.
+        *
+        * Closing the metadata will cause the metadata stream's "metadata poll
+        * pipe" to be closed. Closing this pipe will wake-up the metadata poll
+        * thread which will teardown the metadata stream which, in return,
+        * deletes the metadata channel.
+        *
+        * In the snapshot case, the metadata stream is created and destroyed
+        * on every snapshot record. Since the channel doesn't have an owner
+        * other than the session daemon, it is safe to destroy it immediately
+        * on reception of the CLOSE_METADATA command.
+        */
+       if (!channel_monitor) {
+               /*
+                * The channel and consumer_data locks must be
+                * released before this call since consumer_del_channel
+                * re-acquires the channel and consumer_data locks to tear down
+                * the channel and queue its reclamation by the "call_rcu"
+                * worker thread.
+                */
+               consumer_del_channel(channel);
+       }
+
+       return ret;
 error_unlock:
        pthread_mutex_unlock(&channel->lock);
        pthread_mutex_unlock(&consumer_data.lock);
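The lock-ordering comment above is the standard non-recursive mutex constraint: `consumer_del_channel()` re-acquires both locks, so calling it with either lock still held would self-deadlock. A stripped-down sketch of the shape, with hypothetical names standing in for the lttng-tools functions:

    #include <pthread.h>

    static pthread_mutex_t data_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Re-acquires data_lock internally, like consumer_del_channel(). */
    static void destroy_channel(void)
    {
            pthread_mutex_lock(&data_lock);
            /* ... unlink the channel, queue reclamation via call_rcu ... */
            pthread_mutex_unlock(&data_lock);
    }

    static void close_channel(int caller_owns_channel)
    {
            pthread_mutex_lock(&data_lock);
            /* ... mutate channel state under the lock ... */
            pthread_mutex_unlock(&data_lock);       /* drop it first */

            if (caller_owns_channel) {
                    /* Would self-deadlock if data_lock were still held. */
                    destroy_channel();
            }
    }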
@@ -1076,9 +1121,6 @@ static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
        DBG("UST consumer snapshot channel %" PRIu64, key);
 
        cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
-               /* Are we at a position _before_ the first available packet ? */
-               bool before_first_packet = true;
-
                health_code_update();
 
                /* Lock stream because we are about to change its state. */
@@ -1147,10 +1189,9 @@ static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
                                produced_pos, nb_packets_per_stream,
                                stream->max_sb_size);
 
-               while (consumed_pos < produced_pos) {
+               while ((long) (consumed_pos - produced_pos) < 0) {
                        ssize_t read_len;
                        unsigned long len, padded_len;
-                       int lost_packet = 0;
 
                        health_code_update();
 
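The rewritten loop condition orders the ring-buffer positions by their signed difference instead of comparing them directly, in the same spirit as the Linux kernel's `time_before()` macro, so the loop still behaves if the positions ever wrap around `ULONG_MAX`. A self-contained illustration, with values chosen to show the wraparound case:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned long consumed = ULONG_MAX - 2; /* about to wrap */
            unsigned long produced = 1;             /* already wrapped */

            /* Direct comparison misorders the positions after a wrap. */
            printf("direct: %d\n", consumed < produced);              /* 0 */

            /* The signed difference recovers the intended ordering. */
            printf("signed: %d\n", (long) (consumed - produced) < 0); /* 1 */
            return 0;
    }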
@@ -1164,15 +1205,7 @@ static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
                                }
                                DBG("UST consumer get subbuf failed. Skipping it.");
                                consumed_pos += stream->max_sb_size;
-
-                               /*
-                                * Start accounting lost packets only when we
-                                * already have extracted packets (to match the
-                                * content of the final snapshot).
-                                */
-                               if (!before_first_packet) {
-                                       lost_packet = 1;
-                               }
+                               stream->chan->lost_packets++;
                                continue;
                        }
 
@@ -1208,16 +1241,6 @@ static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
                                goto error_close_stream;
                        }
                        consumed_pos += stream->max_sb_size;
-
-                       /*
-                        * Only account lost packets located between
-                        * succesfully extracted packets (do not account before
-                        * and after since they are not visible in the
-                        * resulting snapshot).
-                        */
-                       stream->chan->lost_packets += lost_packet;
-                       lost_packet = 0;
-                       before_first_packet = false;
                }
 
                /* Simply close the stream so we can use it on the next snapshot. */
@@ -2109,62 +2132,69 @@ static int get_index_values(struct ctf_packet_index *index,
                struct ustctl_consumer_stream *ustream)
 {
        int ret;
+       uint64_t packet_size, content_size, timestamp_begin, timestamp_end,
+                       events_discarded, stream_id, stream_instance_id,
+                       packet_seq_num;
 
-       ret = ustctl_get_timestamp_begin(ustream, &index->timestamp_begin);
+       ret = ustctl_get_timestamp_begin(ustream, &timestamp_begin);
        if (ret < 0) {
                PERROR("ustctl_get_timestamp_begin");
                goto error;
        }
-       index->timestamp_begin = htobe64(index->timestamp_begin);
 
-       ret = ustctl_get_timestamp_end(ustream, &index->timestamp_end);
+       ret = ustctl_get_timestamp_end(ustream, &timestamp_end);
        if (ret < 0) {
                PERROR("ustctl_get_timestamp_end");
                goto error;
        }
-       index->timestamp_end = htobe64(index->timestamp_end);
 
-       ret = ustctl_get_events_discarded(ustream, &index->events_discarded);
+       ret = ustctl_get_events_discarded(ustream, &events_discarded);
        if (ret < 0) {
                PERROR("ustctl_get_events_discarded");
                goto error;
        }
-       index->events_discarded = htobe64(index->events_discarded);
 
-       ret = ustctl_get_content_size(ustream, &index->content_size);
+       ret = ustctl_get_content_size(ustream, &content_size);
        if (ret < 0) {
                PERROR("ustctl_get_content_size");
                goto error;
        }
-       index->content_size = htobe64(index->content_size);
 
-       ret = ustctl_get_packet_size(ustream, &index->packet_size);
+       ret = ustctl_get_packet_size(ustream, &packet_size);
        if (ret < 0) {
                PERROR("ustctl_get_packet_size");
                goto error;
        }
-       index->packet_size = htobe64(index->packet_size);
 
-       ret = ustctl_get_stream_id(ustream, &index->stream_id);
+       ret = ustctl_get_stream_id(ustream, &stream_id);
        if (ret < 0) {
                PERROR("ustctl_get_stream_id");
                goto error;
        }
-       index->stream_id = htobe64(index->stream_id);
 
-       ret = ustctl_get_instance_id(ustream, &index->stream_instance_id);
+       ret = ustctl_get_instance_id(ustream, &stream_instance_id);
        if (ret < 0) {
                PERROR("ustctl_get_instance_id");
                goto error;
        }
-       index->stream_instance_id = htobe64(index->stream_instance_id);
 
-       ret = ustctl_get_sequence_number(ustream, &index->packet_seq_num);
+       ret = ustctl_get_sequence_number(ustream, &packet_seq_num);
        if (ret < 0) {
                PERROR("ustctl_get_sequence_number");
                goto error;
        }
-       index->packet_seq_num = htobe64(index->packet_seq_num);
+
+       *index = (typeof(*index)) {
+               .offset = index->offset,
+               .packet_size = htobe64(packet_size),
+               .content_size = htobe64(content_size),
+               .timestamp_begin = htobe64(timestamp_begin),
+               .timestamp_end = htobe64(timestamp_end),
+               .events_discarded = htobe64(events_discarded),
+               .stream_id = htobe64(stream_id),
+               .stream_instance_id = htobe64(stream_instance_id),
+               .packet_seq_num = htobe64(packet_seq_num),
+       };
 
 error:
        return ret;
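This hunk is the fix the commit title refers to. `struct ctf_packet_index` uses a packed on-disk layout, so the old code handed potentially misaligned `uint64_t *` pointers (addresses of packed members) to the `ustctl_get_*()` getters, which store through them with aligned stores; that is undefined behavior and can fault on strict-alignment architectures. Reading into naturally aligned locals and assigning the whole struct afterwards lets the compiler emit safe stores into the packed fields. A minimal sketch of the hazard and the remedy, with a hypothetical getter standing in for the ustctl calls:

    #include <stdint.h>

    struct packed_index {
            uint8_t  magic;         /* knocks the next field off alignment */
            uint64_t packet_size;
    } __attribute__((packed));

    /* Stand-in for ustctl_get_packet_size(): writes through uint64_t *. */
    static int get_packet_size(uint64_t *out)
    {
            *out = 4096;            /* assumes *out is properly aligned */
            return 0;
    }

    static int fill(struct packed_index *idx)
    {
            uint64_t packet_size;
            int ret;

            /* Wrong: &idx->packet_size is misaligned, so the getter's
             * aligned store through it is undefined behavior. */
            /* ret = get_packet_size(&idx->packet_size); */

            /* Right: read into an aligned local, then let the compiler
             * generate a safe (possibly byte-wise) store when copying
             * into the packed field. */
            ret = get_packet_size(&packet_size);
            if (ret < 0) {
                    return ret;
            }
            idx->packet_size = packet_size;
            return 0;
    }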
@@ -2241,6 +2271,13 @@ int commit_one_metadata_packet(struct lttng_consumer_stream *stream)
                        stream->ust_metadata_pushed);
        ret = write_len;
 
+       /*
+        * Switch packet (but don't open the next one) on every commit of
+        * a metadata packet. Since the subbuffer is fully filled (with padding,
+        * if needed), the stream is "quiescent" after this commit.
+        */
+       ustctl_flush_buffer(stream->ustream, 1);
+       stream->quiescent = true;
 end:
        pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
        return ret;
@@ -2285,7 +2322,6 @@ int lttng_ustconsumer_sync_metadata(struct lttng_consumer_local_data *ctx,
                retry = 1;
        }
 
-       ustctl_flush_buffer(metadata->ustream, 1);
        ret = ustctl_snapshot(metadata->ustream);
        if (ret < 0) {
                if (errno != EAGAIN) {
@@ -2475,7 +2511,6 @@ retry:
                        if (ret <= 0) {
                                goto end;
                        }
-                       ustctl_flush_buffer(stream->ustream, 1);
                        goto retry;
                }
 
@@ -2496,6 +2531,8 @@ retry:
                index.offset = htobe64(stream->out_fd_offset);
                ret = get_index_values(&index, ustream);
                if (ret < 0) {
+                       err = ustctl_put_subbuf(ustream);
+                       assert(err == 0);
                        goto end;
                }
 
@@ -2503,6 +2540,8 @@ retry:
                ret = update_stream_stats(stream);
                if (ret < 0) {
                        PERROR("kernctl_get_events_discarded");
+                       err = ustctl_put_subbuf(ustream);
+                       assert(err == 0);
                        goto end;
                }
        } else {
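The two error paths above plug a sub-buffer leak: once a sub-buffer has been acquired, every early exit has to hand it back with `ustctl_put_subbuf()` before jumping to `end`, otherwise the reader position is never released. The general acquire/release shape, with stub helpers in place of the real ustctl API:

    #include <assert.h>
    #include <stdio.h>

    struct stream { int subbuf_held; };

    static int get_subbuf(struct stream *s)  { s->subbuf_held = 1; return 0; }
    static int put_subbuf(struct stream *s)  { s->subbuf_held = 0; return 0; }
    static int build_index(struct stream *s) { (void) s; return -1; /* fails */ }
    static int write_packet(struct stream *s) { (void) s; return 0; }

    static int read_one_packet(struct stream *s)
    {
            int ret, err;

            ret = get_subbuf(s);
            if (ret < 0) {
                    return ret;             /* nothing acquired yet */
            }
            ret = build_index(s);
            if (ret < 0) {
                    goto put;               /* release before bailing out */
            }
            ret = write_packet(s);
    put:
            err = put_subbuf(s);            /* always paired with the get */
            assert(err == 0);
            return ret;
    }

    int main(void)
    {
            struct stream s = { 0 };
            int ret = read_one_packet(&s);

            printf("ret=%d held=%d\n", ret, s.subbuf_held);
            return 0;
    }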
@@ -2837,7 +2876,7 @@ int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
        request.key = channel->key;
 
        DBG("Sending metadata request to sessiond, session id %" PRIu64
-                       ", per-pid %" PRIu64 ", app UID %u and channek key %" PRIu64,
+                       ", per-pid %" PRIu64 ", app UID %u and channel key %" PRIu64,
                        request.session_id, request.session_id_per_pid, request.uid,
                        request.key);
 