diff --git a/src/common/ust-consumer/ust-consumer.c b/src/common/ust-consumer/ust-consumer.c
index a66f305cd..a951788bd 100644
--- a/src/common/ust-consumer/ust-consumer.c
+++ b/src/common/ust-consumer/ust-consumer.c
@@ -768,10 +768,19 @@ static int flush_channel(uint64_t chan_key)
 		health_code_update();
 
 		pthread_mutex_lock(&stream->lock);
+
+		/*
+		 * Protect against concurrent teardown of a stream.
+		 */
+		if (cds_lfht_is_node_deleted(&stream->node.node)) {
+			goto next;
+		}
+
 		if (!stream->quiescent) {
 			ustctl_flush_buffer(stream->ustream, 0);
 			stream->quiescent = true;
 		}
+next:
 		pthread_mutex_unlock(&stream->lock);
 	}
 error:
@@ -831,6 +840,7 @@ static int close_metadata(uint64_t chan_key)
 {
 	int ret = 0;
 	struct lttng_consumer_channel *channel;
+	unsigned int channel_monitor;
 
 	DBG("UST consumer close metadata key %" PRIu64, chan_key);
 
@@ -849,13 +859,48 @@ static int close_metadata(uint64_t chan_key)
 
 	pthread_mutex_lock(&consumer_data.lock);
 	pthread_mutex_lock(&channel->lock);
-
+	channel_monitor = channel->monitor;
 	if (cds_lfht_is_node_deleted(&channel->node.node)) {
 		goto error_unlock;
 	}
 
 	lttng_ustconsumer_close_metadata(channel);
+	pthread_mutex_unlock(&channel->lock);
+	pthread_mutex_unlock(&consumer_data.lock);
 
+	/*
+	 * The ownership of a metadata channel depends on the type of
+	 * session to which it belongs. In effect, the monitor flag is checked
+	 * to determine if this metadata channel is in "snapshot" mode or not.
+	 *
+	 * In the non-snapshot case, the metadata channel is created along with
+	 * a single stream which will remain present until the metadata channel
+	 * is destroyed (on the destruction of its session). In this case, the
+	 * metadata stream is "monitored" by the metadata poll thread and holds
+	 * the ownership of its channel.
+	 *
+	 * Closing the metadata will cause the metadata stream's "metadata poll
+	 * pipe" to be closed. Closing this pipe will wake up the metadata poll
+	 * thread, which will tear down the metadata stream which, in turn,
+	 * deletes the metadata channel.
+	 *
+	 * In the snapshot case, the metadata stream is created and destroyed
+	 * on every snapshot record. Since the channel doesn't have an owner
+	 * other than the session daemon, it is safe to destroy it immediately
+	 * on reception of the CLOSE_METADATA command.
+	 */
+	if (!channel_monitor) {
+		/*
+		 * The channel and consumer_data locks must be
+		 * released before this call since consumer_del_channel
+		 * re-acquires the channel and consumer_data locks to tear
+		 * down the channel and queue its reclamation by the
+		 * "call_rcu" worker thread.
+		 */
+		consumer_del_channel(channel);
+	}
+
+	return ret;
 error_unlock:
 	pthread_mutex_unlock(&channel->lock);
 	pthread_mutex_unlock(&consumer_data.lock);
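The close_metadata() hunk above is careful to sample channel->monitor and then release both the channel and consumer_data locks before calling consumer_del_channel(), because the teardown path re-acquires those same locks. A minimal compilable sketch of that unlock-before-teardown pattern follows; registry_lock, struct object, object_close() and object_delete() are illustrative names, not lttng-tools identifiers:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER;

struct object {
	pthread_mutex_t lock;
	bool monitored;	/* analogous to channel->monitor */
};

/* Teardown re-acquires both locks, like consumer_del_channel(). */
static void object_delete(struct object *obj)
{
	pthread_mutex_lock(&registry_lock);
	pthread_mutex_lock(&obj->lock);
	/* ... unlink from the registry and reclaim resources ... */
	pthread_mutex_unlock(&obj->lock);
	pthread_mutex_unlock(&registry_lock);
	printf("deleted\n");
}

static void object_close(struct object *obj)
{
	bool monitored;

	pthread_mutex_lock(&registry_lock);
	pthread_mutex_lock(&obj->lock);
	/* Sample the ownership flag while the locks are held... */
	monitored = obj->monitored;
	/* ... close the object ... */
	pthread_mutex_unlock(&obj->lock);
	pthread_mutex_unlock(&registry_lock);

	/*
	 * Both locks are dropped before the teardown call: invoking
	 * object_delete() with either one still held would self-deadlock
	 * on these non-recursive mutexes.
	 */
	if (!monitored) {
		object_delete(obj);
	}
}

int main(void)
{
	struct object obj = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.monitored = false,
	};

	object_close(&obj);	/* unowned: closed and deleted immediately */
	return 0;
}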
@@ -1076,9 +1121,6 @@ static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
 	DBG("UST consumer snapshot channel %" PRIu64, key);
 
 	cds_list_for_each_entry(stream, &channel->streams.head, send_node) {
-		/* Are we at a position _before_ the first available packet ? */
-		bool before_first_packet = true;
-
 		health_code_update();
 
 		/* Lock stream because we are about to change its state. */
@@ -1111,7 +1153,13 @@ static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
 			}
 		}
 
-		ustctl_flush_buffer(stream->ustream, 1);
+		/*
+		 * If tracing is active, we want to perform a "full" buffer flush.
+		 * Else, if quiescent, it has already been done by the prior stop.
+		 */
+		if (!stream->quiescent) {
+			ustctl_flush_buffer(stream->ustream, 0);
+		}
 
 		ret = lttng_ustconsumer_take_snapshot(stream);
 		if (ret < 0) {
@@ -1141,10 +1189,9 @@ static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
 				produced_pos, nb_packets_per_stream,
 				stream->max_sb_size);
 
-		while (consumed_pos < produced_pos) {
+		while ((long) (consumed_pos - produced_pos) < 0) {
 			ssize_t read_len;
 			unsigned long len, padded_len;
-			int lost_packet = 0;
 
 			health_code_update();
 
@@ -1158,15 +1205,7 @@ static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
 				}
 				DBG("UST consumer get subbuf failed. Skipping it.");
 				consumed_pos += stream->max_sb_size;
-
-				/*
-				 * Start accounting lost packets only when we
-				 * already have extracted packets (to match the
-				 * content of the final snapshot).
-				 */
-				if (!before_first_packet) {
-					lost_packet = 1;
-				}
+				stream->chan->lost_packets++;
 				continue;
 			}
 
@@ -1202,16 +1241,6 @@ static int snapshot_channel(uint64_t key, char *path, uint64_t relayd_id,
 				goto error_close_stream;
 			}
 			consumed_pos += stream->max_sb_size;
-
-			/*
-			 * Only account lost packets located between
-			 * succesfully extracted packets (do not account before
-			 * and after since they are not visible in the
-			 * resulting snapshot).
-			 */
-			stream->chan->lost_packets += lost_packet;
-			lost_packet = 0;
-			before_first_packet = false;
 		}
 
 		/* Simply close the stream so we can use it on the next snapshot. */
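The new loop condition in the snapshot hunk above, while ((long) (consumed_pos - produced_pos) < 0), replaces a plain '<' comparison so the ordering test remains correct even if the free-running buffer positions wrap around the unsigned range. A small standalone illustration of the subtract-then-sign-test idiom; pos_before() is a hypothetical helper, and, like the patch, it relies on the conventional two's-complement behaviour of the unsigned-to-signed cast:

#include <assert.h>
#include <limits.h>

/*
 * Positions are free-running unsigned counters that may overflow. As long
 * as the two positions are less than half the counter range apart, the
 * difference reinterpreted as a signed value yields their true ordering.
 */
static int pos_before(unsigned long a, unsigned long b)
{
	return (long) (a - b) < 0;
}

int main(void)
{
	unsigned long consumed = ULONG_MAX - 10;
	unsigned long produced = consumed + 42;	/* wraps around to 31 */

	/* Ordinary, non-wrapped case. */
	assert(pos_before(100, 200));

	/*
	 * Wrapped case: a direct '<' claims the producer is behind the
	 * consumer, so the snapshot copy loop would never execute.
	 */
	assert(!(consumed < produced));

	/* The signed difference still orders the positions correctly. */
	assert(pos_before(consumed, produced));
	return 0;
}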
@@ -2235,6 +2264,13 @@ int commit_one_metadata_packet(struct lttng_consumer_stream *stream)
 			stream->ust_metadata_pushed);
 	ret = write_len;
 
+	/*
+	 * Switch packet (but don't open the next one) on every commit of
+	 * a metadata packet. Since the subbuffer is fully filled (with padding,
+	 * if needed), the stream is "quiescent" after this commit.
+	 */
+	ustctl_flush_buffer(stream->ustream, 1);
+	stream->quiescent = true;
 end:
 	pthread_mutex_unlock(&stream->chan->metadata_cache->lock);
 	return ret;
@@ -2267,10 +2303,10 @@ int lttng_ustconsumer_sync_metadata(struct lttng_consumer_local_data *ctx,
 	 * because we locked the metadata thread.
 	 */
 	ret = lttng_ustconsumer_request_metadata(ctx, metadata->chan, 0, 0);
+	pthread_mutex_lock(&metadata->lock);
 	if (ret < 0) {
 		goto end;
 	}
-	pthread_mutex_lock(&metadata->lock);
 
 	ret = commit_one_metadata_packet(metadata);
 	if (ret <= 0) {
@@ -2279,7 +2315,6 @@ int lttng_ustconsumer_sync_metadata(struct lttng_consumer_local_data *ctx,
 		retry = 1;
 	}
 
-	ustctl_flush_buffer(metadata->ustream, 1);
 	ret = ustctl_snapshot(metadata->ustream);
 	if (ret < 0) {
 		if (errno != EAGAIN) {
@@ -2392,8 +2427,8 @@ int update_stream_stats(struct lttng_consumer_stream *stream)
 	}
 	if (discarded < stream->last_discarded_events) {
 		/*
-		 * Overflow has occured. We assume only one wrap-around
-		 * has occured.
+		 * Overflow has occurred. We assume only one wrap-around
+		 * has occurred.
 		 */
 		stream->chan->discarded_events +=
 			(1ULL << (CAA_BITS_PER_LONG - 1)) -
@@ -2469,7 +2504,6 @@ retry:
 		if (ret <= 0) {
 			goto end;
 		}
-		ustctl_flush_buffer(stream->ustream, 1);
 		goto retry;
 	}
 
@@ -2490,6 +2524,8 @@ retry:
 		index.offset = htobe64(stream->out_fd_offset);
 		ret = get_index_values(&index, ustream);
 		if (ret < 0) {
+			err = ustctl_put_subbuf(ustream);
+			assert(err == 0);
 			goto end;
 		}
 
@@ -2497,6 +2533,8 @@ retry:
 		ret = update_stream_stats(stream);
 		if (ret < 0) {
 			PERROR("kernctl_get_events_discarded");
+			err = ustctl_put_subbuf(ustream);
+			assert(err == 0);
 			goto end;
 		}
 	} else {
@@ -2614,14 +2652,17 @@ int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream)
 		stream->tracefile_size_current = 0;
 
 		if (!stream->metadata_flag) {
-			ret = index_create_file(stream->chan->pathname,
+			struct lttng_index_file *index_file;
+
+			index_file = lttng_index_file_create(stream->chan->pathname,
 					stream->name, stream->uid, stream->gid,
 					stream->chan->tracefile_size,
-					stream->tracefile_count_current);
-			if (ret < 0) {
+					stream->tracefile_count_current,
+					CTF_INDEX_MAJOR, CTF_INDEX_MINOR);
+			if (!index_file) {
 				goto error;
 			}
-			stream->index_fd = ret;
+			stream->index_file = index_file;
 		}
 	}
 	ret = 0;
@@ -2828,7 +2869,7 @@ int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx,
 	request.key = channel->key;
 	DBG("Sending metadata request to sessiond, session id %" PRIu64
-			", per-pid %" PRIu64 ", app UID %u and channek key %" PRIu64,
+			", per-pid %" PRIu64 ", app UID %u and channel key %" PRIu64,
 			request.session_id, request.session_id_per_pid, request.uid,
 			request.key);
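The two ustctl_put_subbuf() calls added in the error paths above keep the get/put pairing balanced: once ustctl_get_next_subbuf() has handed out a sub-buffer, every exit path must hand it back or the ring-buffer slot stays reserved. A reduced sketch of that discipline, with a hypothetical ring_get()/ring_put() pair standing in for the ustctl calls:

#include <assert.h>
#include <stdio.h>

/*
 * Hypothetical stand-ins for ustctl_get_next_subbuf()/ustctl_put_subbuf():
 * a reservation counter that must return to zero.
 */
static int reserved;

static int ring_get(void)  { reserved++; return 0; }
static int ring_put(void)  { reserved--; return 0; }
static int get_index(void) { return -1; }	/* simulate a failing step */

static int read_one_subbuf(void)
{
	int ret, err;

	ret = ring_get();
	if (ret < 0) {
		return ret;	/* nothing reserved yet, plain return is fine */
	}

	ret = get_index();
	if (ret < 0) {
		/*
		 * Early exit after a successful get: the sub-buffer must
		 * be released first, as the patch now does.
		 */
		err = ring_put();
		assert(err == 0);
		return ret;
	}

	/* ... consume the sub-buffer's contents ... */

	err = ring_put();
	assert(err == 0);
	return ret;
}

int main(void)
{
	read_one_subbuf();
	/* Balanced even though get_index() failed: prints 0. */
	printf("outstanding reservations: %d\n", reserved);
	return 0;
}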