From: Simon Marchi Date: Fri, 3 Sep 2021 21:31:29 +0000 (-0400) Subject: common: compile libconsumer, libust-consumer, libkernel-consumer as C++ X-Git-Url: https://git.lttng.org/?p=lttng-tools.git;a=commitdiff_plain;h=97535efaa975ca52bf02c2d5e76351bfd2e3defa common: compile libconsumer, libust-consumer, libkernel-consumer as C++ Change-Id: I6d51a069b360121152286a674d551fd5e80bfe2f Signed-off-by: Simon Marchi Signed-off-by: Jérémie Galarneau --- diff --git a/src/common/consumer/Makefile.am b/src/common/consumer/Makefile.am index 55e47b210..ba3ecf1da 100644 --- a/src/common/consumer/Makefile.am +++ b/src/common/consumer/Makefile.am @@ -5,9 +5,15 @@ noinst_LTLIBRARIES = libconsumer.la noinst_HEADERS = consumer-metadata-cache.h consumer-timer.h \ consumer-testpoint.h -libconsumer_la_SOURCES = consumer.c consumer.h consumer-metadata-cache.c \ - consumer-timer.c consumer-stream.c consumer-stream.h \ - metadata-bucket.c metadata-bucket.h +libconsumer_la_SOURCES = \ + consumer.cpp \ + consumer.h \ + consumer-metadata-cache.cpp \ + consumer-stream.cpp \ + consumer-stream.h \ + consumer-timer.cpp \ + metadata-bucket.cpp \ + metadata-bucket.h libconsumer_la_LIBADD = \ $(top_builddir)/src/common/sessiond-comm/libsessiond-comm.la \ diff --git a/src/common/consumer/consumer-metadata-cache.c b/src/common/consumer/consumer-metadata-cache.c deleted file mode 100644 index 8ef284840..000000000 --- a/src/common/consumer/consumer-metadata-cache.c +++ /dev/null @@ -1,245 +0,0 @@ -/* - * Copyright (C) 2013 Julien Desfossez - * Copyright (C) 2013 David Goulet - * - * SPDX-License-Identifier: GPL-2.0-only - * - */ - -#define _LGPL_SOURCE -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "consumer-metadata-cache.h" - -enum metadata_cache_update_version_status { - METADATA_CACHE_UPDATE_STATUS_VERSION_UPDATED, - METADATA_CACHE_UPDATE_STATUS_VERSION_NOT_UPDATED, -}; - -extern struct lttng_consumer_global_data 
the_consumer_data; - -/* - * Reset the metadata cache. - */ -static -void metadata_cache_reset(struct consumer_metadata_cache *cache) -{ - const int ret = lttng_dynamic_buffer_set_size(&cache->contents, 0); - - LTTNG_ASSERT(ret == 0); -} - -/* - * Check if the metadata cache version changed. - * If it did, reset the metadata cache. - * The metadata cache lock MUST be held. - */ -static enum metadata_cache_update_version_status metadata_cache_update_version( - struct consumer_metadata_cache *cache, uint64_t version) -{ - enum metadata_cache_update_version_status status; - - if (cache->version == version) { - status = METADATA_CACHE_UPDATE_STATUS_VERSION_NOT_UPDATED; - goto end; - } - - DBG("Metadata cache version update to %" PRIu64, version); - cache->version = version; - status = METADATA_CACHE_UPDATE_STATUS_VERSION_UPDATED; - -end: - return status; -} - -/* - * Write metadata to the cache, extend the cache if necessary. We support - * overlapping updates, but they need to be contiguous. Send the - * contiguous metadata in cache to the ring buffer. The metadata cache - * lock MUST be acquired to write in the cache. - * - * See `enum consumer_metadata_cache_write_status` for the meaning of the - * various returned status codes. 
- */ -enum consumer_metadata_cache_write_status -consumer_metadata_cache_write(struct consumer_metadata_cache *cache, - unsigned int offset, unsigned int len, uint64_t version, - const char *data) -{ - int ret = 0; - enum consumer_metadata_cache_write_status status; - bool cache_is_invalidated = false; - uint64_t original_size; - - LTTNG_ASSERT(cache); - ASSERT_LOCKED(cache->lock); - original_size = cache->contents.size; - - if (metadata_cache_update_version(cache, version) == - METADATA_CACHE_UPDATE_STATUS_VERSION_UPDATED) { - metadata_cache_reset(cache); - cache_is_invalidated = true; - } - - DBG("Writing %u bytes from offset %u in metadata cache", len, offset); - if (offset + len > cache->contents.size) { - ret = lttng_dynamic_buffer_set_size( - &cache->contents, offset + len); - if (ret) { - ERR("Extending metadata cache"); - status = CONSUMER_METADATA_CACHE_WRITE_STATUS_ERROR; - goto end; - } - } - - memcpy(cache->contents.data + offset, data, len); - - if (cache_is_invalidated) { - status = CONSUMER_METADATA_CACHE_WRITE_STATUS_INVALIDATED; - } else if (cache->contents.size > original_size) { - status = CONSUMER_METADATA_CACHE_WRITE_STATUS_APPENDED_CONTENT; - } else { - status = CONSUMER_METADATA_CACHE_WRITE_STATUS_NO_CHANGE; - LTTNG_ASSERT(cache->contents.size == original_size); - } - -end: - return status; -} - -/* - * Create the metadata cache, original allocated size: max_sb_size - * - * Return 0 on success, a negative value on error. 
- */ -int consumer_metadata_cache_allocate(struct lttng_consumer_channel *channel) -{ - int ret; - - LTTNG_ASSERT(channel); - - channel->metadata_cache = zmalloc( - sizeof(struct consumer_metadata_cache)); - if (!channel->metadata_cache) { - PERROR("zmalloc metadata cache struct"); - ret = -1; - goto end; - } - ret = pthread_mutex_init(&channel->metadata_cache->lock, NULL); - if (ret != 0) { - PERROR("mutex init"); - goto end_free_cache; - } - - lttng_dynamic_buffer_init(&channel->metadata_cache->contents); - ret = lttng_dynamic_buffer_set_capacity( - &channel->metadata_cache->contents, - DEFAULT_METADATA_CACHE_SIZE); - if (ret) { - PERROR("Failed to pre-allocate metadata cache storage of %d bytes on creation", - DEFAULT_METADATA_CACHE_SIZE); - ret = -1; - goto end_free_mutex; - } - - DBG("Allocated metadata cache: current capacity = %zu", - lttng_dynamic_buffer_get_capacity_left( - &channel->metadata_cache->contents)); - - ret = 0; - goto end; - -end_free_mutex: - pthread_mutex_destroy(&channel->metadata_cache->lock); -end_free_cache: - free(channel->metadata_cache); -end: - return ret; -} - -/* - * Destroy and free the metadata cache - */ -void consumer_metadata_cache_destroy(struct lttng_consumer_channel *channel) -{ - if (!channel || !channel->metadata_cache) { - return; - } - - DBG("Destroying metadata cache"); - - pthread_mutex_destroy(&channel->metadata_cache->lock); - lttng_dynamic_buffer_reset(&channel->metadata_cache->contents); - free(channel->metadata_cache); -} - -/* - * Check if the cache is flushed up to the offset passed in parameter. - * - * Return 0 if everything has been flushed, 1 if there is data not flushed. 
- */ -int consumer_metadata_cache_flushed(struct lttng_consumer_channel *channel, - uint64_t offset, int timer) -{ - int ret = 0; - struct lttng_consumer_stream *metadata_stream; - - LTTNG_ASSERT(channel); - LTTNG_ASSERT(channel->metadata_cache); - - /* - * If not called from a timer handler, we have to take the - * channel lock to be mutually exclusive with channel teardown. - * Timer handler does not need to take this lock because it is - * already synchronized by timer stop (and, more importantly, - * taking this lock in a timer handler would cause a deadlock). - */ - if (!timer) { - pthread_mutex_lock(&channel->lock); - } - pthread_mutex_lock(&channel->timer_lock); - metadata_stream = channel->metadata_stream; - if (!metadata_stream) { - /* - * Having no metadata stream means the channel is being destroyed so there - * is no cache to flush anymore. - */ - ret = 0; - goto end_unlock_channel; - } - - pthread_mutex_lock(&metadata_stream->lock); - pthread_mutex_lock(&channel->metadata_cache->lock); - - if (metadata_stream->ust_metadata_pushed >= offset) { - ret = 0; - } else if (channel->metadata_stream->endpoint_status != - CONSUMER_ENDPOINT_ACTIVE) { - /* An inactive endpoint means we don't have to flush anymore. */ - ret = 0; - } else { - /* Still not completely flushed. 
*/ - ret = 1; - } - - pthread_mutex_unlock(&channel->metadata_cache->lock); - pthread_mutex_unlock(&metadata_stream->lock); -end_unlock_channel: - pthread_mutex_unlock(&channel->timer_lock); - if (!timer) { - pthread_mutex_unlock(&channel->lock); - } - - return ret; -} diff --git a/src/common/consumer/consumer-metadata-cache.cpp b/src/common/consumer/consumer-metadata-cache.cpp new file mode 100644 index 000000000..fdda6be8b --- /dev/null +++ b/src/common/consumer/consumer-metadata-cache.cpp @@ -0,0 +1,245 @@ +/* + * Copyright (C) 2013 Julien Desfossez + * Copyright (C) 2013 David Goulet + * + * SPDX-License-Identifier: GPL-2.0-only + * + */ + +#define _LGPL_SOURCE +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "consumer-metadata-cache.h" + +enum metadata_cache_update_version_status { + METADATA_CACHE_UPDATE_STATUS_VERSION_UPDATED, + METADATA_CACHE_UPDATE_STATUS_VERSION_NOT_UPDATED, +}; + +extern struct lttng_consumer_global_data the_consumer_data; + +/* + * Reset the metadata cache. + */ +static +void metadata_cache_reset(struct consumer_metadata_cache *cache) +{ + const int ret = lttng_dynamic_buffer_set_size(&cache->contents, 0); + + LTTNG_ASSERT(ret == 0); +} + +/* + * Check if the metadata cache version changed. + * If it did, reset the metadata cache. + * The metadata cache lock MUST be held. + */ +static enum metadata_cache_update_version_status metadata_cache_update_version( + struct consumer_metadata_cache *cache, uint64_t version) +{ + enum metadata_cache_update_version_status status; + + if (cache->version == version) { + status = METADATA_CACHE_UPDATE_STATUS_VERSION_NOT_UPDATED; + goto end; + } + + DBG("Metadata cache version update to %" PRIu64, version); + cache->version = version; + status = METADATA_CACHE_UPDATE_STATUS_VERSION_UPDATED; + +end: + return status; +} + +/* + * Write metadata to the cache, extend the cache if necessary. 
We support + * overlapping updates, but they need to be contiguous. Send the + * contiguous metadata in cache to the ring buffer. The metadata cache + * lock MUST be acquired to write in the cache. + * + * See `enum consumer_metadata_cache_write_status` for the meaning of the + * various returned status codes. + */ +enum consumer_metadata_cache_write_status +consumer_metadata_cache_write(struct consumer_metadata_cache *cache, + unsigned int offset, unsigned int len, uint64_t version, + const char *data) +{ + int ret = 0; + enum consumer_metadata_cache_write_status status; + bool cache_is_invalidated = false; + uint64_t original_size; + + LTTNG_ASSERT(cache); + ASSERT_LOCKED(cache->lock); + original_size = cache->contents.size; + + if (metadata_cache_update_version(cache, version) == + METADATA_CACHE_UPDATE_STATUS_VERSION_UPDATED) { + metadata_cache_reset(cache); + cache_is_invalidated = true; + } + + DBG("Writing %u bytes from offset %u in metadata cache", len, offset); + if (offset + len > cache->contents.size) { + ret = lttng_dynamic_buffer_set_size( + &cache->contents, offset + len); + if (ret) { + ERR("Extending metadata cache"); + status = CONSUMER_METADATA_CACHE_WRITE_STATUS_ERROR; + goto end; + } + } + + memcpy(cache->contents.data + offset, data, len); + + if (cache_is_invalidated) { + status = CONSUMER_METADATA_CACHE_WRITE_STATUS_INVALIDATED; + } else if (cache->contents.size > original_size) { + status = CONSUMER_METADATA_CACHE_WRITE_STATUS_APPENDED_CONTENT; + } else { + status = CONSUMER_METADATA_CACHE_WRITE_STATUS_NO_CHANGE; + LTTNG_ASSERT(cache->contents.size == original_size); + } + +end: + return status; +} + +/* + * Create the metadata cache, original allocated size: max_sb_size + * + * Return 0 on success, a negative value on error. 
+ */ +int consumer_metadata_cache_allocate(struct lttng_consumer_channel *channel) +{ + int ret; + + LTTNG_ASSERT(channel); + + channel->metadata_cache = (consumer_metadata_cache *) zmalloc( + sizeof(struct consumer_metadata_cache)); + if (!channel->metadata_cache) { + PERROR("zmalloc metadata cache struct"); + ret = -1; + goto end; + } + ret = pthread_mutex_init(&channel->metadata_cache->lock, NULL); + if (ret != 0) { + PERROR("mutex init"); + goto end_free_cache; + } + + lttng_dynamic_buffer_init(&channel->metadata_cache->contents); + ret = lttng_dynamic_buffer_set_capacity( + &channel->metadata_cache->contents, + DEFAULT_METADATA_CACHE_SIZE); + if (ret) { + PERROR("Failed to pre-allocate metadata cache storage of %d bytes on creation", + DEFAULT_METADATA_CACHE_SIZE); + ret = -1; + goto end_free_mutex; + } + + DBG("Allocated metadata cache: current capacity = %zu", + lttng_dynamic_buffer_get_capacity_left( + &channel->metadata_cache->contents)); + + ret = 0; + goto end; + +end_free_mutex: + pthread_mutex_destroy(&channel->metadata_cache->lock); +end_free_cache: + free(channel->metadata_cache); +end: + return ret; +} + +/* + * Destroy and free the metadata cache + */ +void consumer_metadata_cache_destroy(struct lttng_consumer_channel *channel) +{ + if (!channel || !channel->metadata_cache) { + return; + } + + DBG("Destroying metadata cache"); + + pthread_mutex_destroy(&channel->metadata_cache->lock); + lttng_dynamic_buffer_reset(&channel->metadata_cache->contents); + free(channel->metadata_cache); +} + +/* + * Check if the cache is flushed up to the offset passed in parameter. + * + * Return 0 if everything has been flushed, 1 if there is data not flushed. 
+ */ +int consumer_metadata_cache_flushed(struct lttng_consumer_channel *channel, + uint64_t offset, int timer) +{ + int ret = 0; + struct lttng_consumer_stream *metadata_stream; + + LTTNG_ASSERT(channel); + LTTNG_ASSERT(channel->metadata_cache); + + /* + * If not called from a timer handler, we have to take the + * channel lock to be mutually exclusive with channel teardown. + * Timer handler does not need to take this lock because it is + * already synchronized by timer stop (and, more importantly, + * taking this lock in a timer handler would cause a deadlock). + */ + if (!timer) { + pthread_mutex_lock(&channel->lock); + } + pthread_mutex_lock(&channel->timer_lock); + metadata_stream = channel->metadata_stream; + if (!metadata_stream) { + /* + * Having no metadata stream means the channel is being destroyed so there + * is no cache to flush anymore. + */ + ret = 0; + goto end_unlock_channel; + } + + pthread_mutex_lock(&metadata_stream->lock); + pthread_mutex_lock(&channel->metadata_cache->lock); + + if (metadata_stream->ust_metadata_pushed >= offset) { + ret = 0; + } else if (channel->metadata_stream->endpoint_status != + CONSUMER_ENDPOINT_ACTIVE) { + /* An inactive endpoint means we don't have to flush anymore. */ + ret = 0; + } else { + /* Still not completely flushed. 
*/ + ret = 1; + } + + pthread_mutex_unlock(&channel->metadata_cache->lock); + pthread_mutex_unlock(&metadata_stream->lock); +end_unlock_channel: + pthread_mutex_unlock(&channel->timer_lock); + if (!timer) { + pthread_mutex_unlock(&channel->lock); + } + + return ret; +} diff --git a/src/common/consumer/consumer-metadata-cache.h b/src/common/consumer/consumer-metadata-cache.h index b8f4efadc..4f8bb763f 100644 --- a/src/common/consumer/consumer-metadata-cache.h +++ b/src/common/consumer/consumer-metadata-cache.h @@ -12,6 +12,10 @@ #include #include +#ifdef __cplusplus +extern "C" { +#endif + enum consumer_metadata_cache_write_status { CONSUMER_METADATA_CACHE_WRITE_STATUS_ERROR = -1, /* @@ -59,4 +63,8 @@ void consumer_metadata_cache_destroy(struct lttng_consumer_channel *channel); int consumer_metadata_cache_flushed(struct lttng_consumer_channel *channel, uint64_t offset, int timer); +#ifdef __cplusplus +} +#endif + #endif /* CONSUMER_METADATA_CACHE_H */ diff --git a/src/common/consumer/consumer-stream.c b/src/common/consumer/consumer-stream.c deleted file mode 100644 index be19c1bfd..000000000 --- a/src/common/consumer/consumer-stream.c +++ /dev/null @@ -1,1329 +0,0 @@ -/* - * Copyright (C) 2011 Julien Desfossez - * Copyright (C) 2011 Mathieu Desnoyers - * Copyright (C) 2013 David Goulet - * - * SPDX-License-Identifier: GPL-2.0-only - * - */ - -#define _LGPL_SOURCE -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "consumer-stream.h" - -/* - * RCU call to free stream. MUST only be used with call_rcu(). 
- */ -static void free_stream_rcu(struct rcu_head *head) -{ - struct lttng_ht_node_u64 *node = - caa_container_of(head, struct lttng_ht_node_u64, head); - struct lttng_consumer_stream *stream = - caa_container_of(node, struct lttng_consumer_stream, node); - - pthread_mutex_destroy(&stream->lock); - free(stream); -} - -static void consumer_stream_data_lock_all(struct lttng_consumer_stream *stream) -{ - pthread_mutex_lock(&stream->chan->lock); - pthread_mutex_lock(&stream->lock); -} - -static void consumer_stream_data_unlock_all(struct lttng_consumer_stream *stream) -{ - pthread_mutex_unlock(&stream->lock); - pthread_mutex_unlock(&stream->chan->lock); -} - -static void consumer_stream_metadata_lock_all(struct lttng_consumer_stream *stream) -{ - consumer_stream_data_lock_all(stream); - pthread_mutex_lock(&stream->metadata_rdv_lock); -} - -static void consumer_stream_metadata_unlock_all(struct lttng_consumer_stream *stream) -{ - pthread_mutex_unlock(&stream->metadata_rdv_lock); - consumer_stream_data_unlock_all(stream); -} - -/* Only used for data streams. */ -static int consumer_stream_update_stats(struct lttng_consumer_stream *stream, - const struct stream_subbuffer *subbuf) -{ - int ret = 0; - uint64_t sequence_number; - const uint64_t discarded_events = subbuf->info.data.events_discarded; - - if (!subbuf->info.data.sequence_number.is_set) { - /* Command not supported by the tracer. */ - sequence_number = -1ULL; - stream->sequence_number_unavailable = true; - } else { - sequence_number = subbuf->info.data.sequence_number.value; - } - - /* - * Start the sequence when we extract the first packet in case we don't - * start at 0 (for example if a consumer is not connected to the - * session immediately after the beginning). 
- */ - if (stream->last_sequence_number == -1ULL) { - stream->last_sequence_number = sequence_number; - } else if (sequence_number > stream->last_sequence_number) { - stream->chan->lost_packets += sequence_number - - stream->last_sequence_number - 1; - } else { - /* seq <= last_sequence_number */ - ERR("Sequence number inconsistent : prev = %" PRIu64 - ", current = %" PRIu64, - stream->last_sequence_number, sequence_number); - ret = -1; - goto end; - } - stream->last_sequence_number = sequence_number; - - if (discarded_events < stream->last_discarded_events) { - /* - * Overflow has occurred. We assume only one wrap-around - * has occurred. - */ - stream->chan->discarded_events += - (1ULL << (CAA_BITS_PER_LONG - 1)) - - stream->last_discarded_events + - discarded_events; - } else { - stream->chan->discarded_events += discarded_events - - stream->last_discarded_events; - } - stream->last_discarded_events = discarded_events; - ret = 0; - -end: - return ret; -} - -static -void ctf_packet_index_populate(struct ctf_packet_index *index, - off_t offset, const struct stream_subbuffer *subbuffer) -{ - *index = (typeof(*index)){ - .offset = htobe64(offset), - .packet_size = htobe64(subbuffer->info.data.packet_size), - .content_size = htobe64(subbuffer->info.data.content_size), - .timestamp_begin = htobe64( - subbuffer->info.data.timestamp_begin), - .timestamp_end = htobe64( - subbuffer->info.data.timestamp_end), - .events_discarded = htobe64( - subbuffer->info.data.events_discarded), - .stream_id = htobe64(subbuffer->info.data.stream_id), - .stream_instance_id = htobe64( - subbuffer->info.data.stream_instance_id.is_set ? - subbuffer->info.data.stream_instance_id.value : -1ULL), - .packet_seq_num = htobe64( - subbuffer->info.data.sequence_number.is_set ? 
- subbuffer->info.data.sequence_number.value : -1ULL), - }; -} - -static ssize_t consumer_stream_consume_mmap( - struct lttng_consumer_local_data *ctx, - struct lttng_consumer_stream *stream, - const struct stream_subbuffer *subbuffer) -{ - const unsigned long padding_size = - subbuffer->info.data.padded_subbuf_size - - subbuffer->info.data.subbuf_size; - const ssize_t written_bytes = lttng_consumer_on_read_subbuffer_mmap( - stream, &subbuffer->buffer.buffer, padding_size); - - if (stream->net_seq_idx == -1ULL) { - /* - * When writing on disk, check that only the subbuffer (no - * padding) was written to disk. - */ - if (written_bytes != subbuffer->info.data.padded_subbuf_size) { - DBG("Failed to write the entire padded subbuffer on disk (written_bytes: %zd, padded subbuffer size %lu)", - written_bytes, - subbuffer->info.data.padded_subbuf_size); - } - } else { - /* - * When streaming over the network, check that the entire - * subbuffer including padding was successfully written. - */ - if (written_bytes != subbuffer->info.data.subbuf_size) { - DBG("Failed to write only the subbuffer over the network (written_bytes: %zd, subbuffer size %lu)", - written_bytes, - subbuffer->info.data.subbuf_size); - } - } - - /* - * If `lttng_consumer_on_read_subbuffer_mmap()` returned an error, pass - * it along to the caller, else return zero. 
- */ - if (written_bytes < 0) { - ERR("Error reading mmap subbuffer: %zd", written_bytes); - } - - return written_bytes; -} - -static ssize_t consumer_stream_consume_splice( - struct lttng_consumer_local_data *ctx, - struct lttng_consumer_stream *stream, - const struct stream_subbuffer *subbuffer) -{ - const ssize_t written_bytes = lttng_consumer_on_read_subbuffer_splice( - ctx, stream, subbuffer->info.data.padded_subbuf_size, 0); - - if (written_bytes != subbuffer->info.data.padded_subbuf_size) { - DBG("Failed to write the entire padded subbuffer (written_bytes: %zd, padded subbuffer size %lu)", - written_bytes, - subbuffer->info.data.padded_subbuf_size); - } - - /* - * If `lttng_consumer_on_read_subbuffer_splice()` returned an error, - * pass it along to the caller, else return zero. - */ - if (written_bytes < 0) { - ERR("Error reading splice subbuffer: %zd", written_bytes); - } - - return written_bytes; -} - -static int consumer_stream_send_index( - struct lttng_consumer_stream *stream, - const struct stream_subbuffer *subbuffer, - struct lttng_consumer_local_data *ctx) -{ - off_t packet_offset = 0; - struct ctf_packet_index index = {}; - - /* - * This is called after consuming the sub-buffer; substract the - * effect this sub-buffer from the offset. - */ - if (stream->net_seq_idx == (uint64_t) -1ULL) { - packet_offset = stream->out_fd_offset - - subbuffer->info.data.padded_subbuf_size; - } - - ctf_packet_index_populate(&index, packet_offset, subbuffer); - return consumer_stream_write_index(stream, &index); -} - -/* - * Actually do the metadata sync using the given metadata stream. - * - * Return 0 on success else a negative value. ENODATA can be returned also - * indicating that there is no metadata available for that stream. 
- */ -static int do_sync_metadata(struct lttng_consumer_stream *metadata, - struct lttng_consumer_local_data *ctx) -{ - int ret; - enum sync_metadata_status status; - - LTTNG_ASSERT(metadata); - LTTNG_ASSERT(metadata->metadata_flag); - LTTNG_ASSERT(ctx); - - /* - * In UST, since we have to write the metadata from the cache packet - * by packet, we might need to start this procedure multiple times - * until all the metadata from the cache has been extracted. - */ - do { - /* - * Steps : - * - Lock the metadata stream - * - Check if metadata stream node was deleted before locking. - * - if yes, release and return success - * - Check if new metadata is ready (flush + snapshot pos) - * - If nothing : release and return. - * - Lock the metadata_rdv_lock - * - Unlock the metadata stream - * - cond_wait on metadata_rdv to wait the wakeup from the - * metadata thread - * - Unlock the metadata_rdv_lock - */ - pthread_mutex_lock(&metadata->lock); - - /* - * There is a possibility that we were able to acquire a reference on the - * stream from the RCU hash table but between then and now, the node might - * have been deleted just before the lock is acquired. Thus, after locking, - * we make sure the metadata node has not been deleted which means that the - * buffers are closed. - * - * In that case, there is no need to sync the metadata hence returning a - * success return code. - */ - ret = cds_lfht_is_node_deleted(&metadata->node.node); - if (ret) { - ret = 0; - goto end_unlock_mutex; - } - - switch (ctx->type) { - case LTTNG_CONSUMER_KERNEL: - /* - * Empty the metadata cache and flush the current stream. - */ - status = lttng_kconsumer_sync_metadata(metadata); - break; - case LTTNG_CONSUMER32_UST: - case LTTNG_CONSUMER64_UST: - /* - * Ask the sessiond if we have new metadata waiting and update the - * consumer metadata cache. 
- */ - status = lttng_ustconsumer_sync_metadata(ctx, metadata); - break; - default: - abort(); - } - - switch (status) { - case SYNC_METADATA_STATUS_NEW_DATA: - break; - case SYNC_METADATA_STATUS_NO_DATA: - ret = 0; - goto end_unlock_mutex; - case SYNC_METADATA_STATUS_ERROR: - ret = -1; - goto end_unlock_mutex; - default: - abort(); - } - - /* - * At this point, new metadata have been flushed, so we wait on the - * rendez-vous point for the metadata thread to wake us up when it - * finishes consuming the metadata and continue execution. - */ - - pthread_mutex_lock(&metadata->metadata_rdv_lock); - - /* - * Release metadata stream lock so the metadata thread can process it. - */ - pthread_mutex_unlock(&metadata->lock); - - /* - * Wait on the rendez-vous point. Once woken up, it means the metadata was - * consumed and thus synchronization is achieved. - */ - pthread_cond_wait(&metadata->metadata_rdv, &metadata->metadata_rdv_lock); - pthread_mutex_unlock(&metadata->metadata_rdv_lock); - } while (status == SYNC_METADATA_STATUS_NEW_DATA); - - /* Success */ - return 0; - -end_unlock_mutex: - pthread_mutex_unlock(&metadata->lock); - return ret; -} - -/* - * Synchronize the metadata using a given session ID. A successful acquisition - * of a metadata stream will trigger a request to the session daemon and a - * snapshot so the metadata thread can consume it. - * - * This function call is a rendez-vous point between the metadata thread and - * the data thread. - * - * Return 0 on success or else a negative value. - */ -int consumer_stream_sync_metadata(struct lttng_consumer_local_data *ctx, - uint64_t session_id) -{ - int ret; - struct lttng_consumer_stream *stream = NULL; - struct lttng_ht_iter iter; - struct lttng_ht *ht; - - LTTNG_ASSERT(ctx); - - /* Ease our life a bit. */ - ht = the_consumer_data.stream_list_ht; - - rcu_read_lock(); - - /* Search the metadata associated with the session id of the given stream. 
*/ - - cds_lfht_for_each_entry_duplicate(ht->ht, - ht->hash_fct(&session_id, lttng_ht_seed), ht->match_fct, - &session_id, &iter.iter, stream, node_session_id.node) { - if (!stream->metadata_flag) { - continue; - } - - ret = do_sync_metadata(stream, ctx); - if (ret < 0) { - goto end; - } - } - - /* - * Force return code to 0 (success) since ret might be ENODATA for instance - * which is not an error but rather that we should come back. - */ - ret = 0; - -end: - rcu_read_unlock(); - return ret; -} - -static int consumer_stream_sync_metadata_index( - struct lttng_consumer_stream *stream, - const struct stream_subbuffer *subbuffer, - struct lttng_consumer_local_data *ctx) -{ - int ret; - - /* Block until all the metadata is sent. */ - pthread_mutex_lock(&stream->metadata_timer_lock); - LTTNG_ASSERT(!stream->missed_metadata_flush); - stream->waiting_on_metadata = true; - pthread_mutex_unlock(&stream->metadata_timer_lock); - - ret = consumer_stream_sync_metadata(ctx, stream->session_id); - - pthread_mutex_lock(&stream->metadata_timer_lock); - stream->waiting_on_metadata = false; - if (stream->missed_metadata_flush) { - stream->missed_metadata_flush = false; - pthread_mutex_unlock(&stream->metadata_timer_lock); - (void) stream->read_subbuffer_ops.send_live_beacon(stream); - } else { - pthread_mutex_unlock(&stream->metadata_timer_lock); - } - if (ret < 0) { - goto end; - } - - ret = consumer_stream_send_index(stream, subbuffer, ctx); -end: - return ret; -} - -/* - * Check if the local version of the metadata stream matches with the version - * of the metadata stream in the kernel. If it was updated, set the reset flag - * on the stream. 
- */ -static -int metadata_stream_check_version(struct lttng_consumer_stream *stream, - const struct stream_subbuffer *subbuffer) -{ - if (stream->metadata_version == subbuffer->info.metadata.version) { - goto end; - } - - DBG("New metadata version detected"); - consumer_stream_metadata_set_version(stream, - subbuffer->info.metadata.version); - - if (stream->read_subbuffer_ops.reset_metadata) { - stream->read_subbuffer_ops.reset_metadata(stream); - } - -end: - return 0; -} - -static -bool stream_is_rotating_to_null_chunk( - const struct lttng_consumer_stream *stream) -{ - bool rotating_to_null_chunk = false; - - if (stream->rotate_position == -1ULL) { - /* No rotation ongoing. */ - goto end; - } - - if (stream->trace_chunk == stream->chan->trace_chunk || - !stream->chan->trace_chunk) { - rotating_to_null_chunk = true; - } -end: - return rotating_to_null_chunk; -} - -enum consumer_stream_open_packet_status consumer_stream_open_packet( - struct lttng_consumer_stream *stream) -{ - int ret; - enum consumer_stream_open_packet_status status; - unsigned long produced_pos_before, produced_pos_after; - - ret = lttng_consumer_sample_snapshot_positions(stream); - if (ret < 0) { - ERR("Failed to snapshot positions before post-rotation empty packet flush: stream id = %" PRIu64 - ", channel name = %s, session id = %" PRIu64, - stream->key, stream->chan->name, - stream->chan->session_id); - status = CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR; - goto end; - } - - ret = lttng_consumer_get_produced_snapshot( - stream, &produced_pos_before); - if (ret < 0) { - ERR("Failed to read produced position before post-rotation empty packet flush: stream id = %" PRIu64 - ", channel name = %s, session id = %" PRIu64, - stream->key, stream->chan->name, - stream->chan->session_id); - status = CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR; - goto end; - } - - ret = consumer_stream_flush_buffer(stream, 0); - if (ret) { - ERR("Failed to flush an empty packet at rotation point: stream id = %" PRIu64 - ", 
channel name = %s, session id = %" PRIu64, - stream->key, stream->chan->name, - stream->chan->session_id); - status = CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR; - goto end; - } - - ret = lttng_consumer_sample_snapshot_positions(stream); - if (ret < 0) { - ERR("Failed to snapshot positions after post-rotation empty packet flush: stream id = %" PRIu64 - ", channel name = %s, session id = %" PRIu64, - stream->key, stream->chan->name, - stream->chan->session_id); - status = CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR; - goto end; - } - - ret = lttng_consumer_get_produced_snapshot(stream, &produced_pos_after); - if (ret < 0) { - ERR("Failed to read produced position after post-rotation empty packet flush: stream id = %" PRIu64 - ", channel name = %s, session id = %" PRIu64, - stream->key, stream->chan->name, - stream->chan->session_id); - status = CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR; - goto end; - } - - /* - * Determine if the flush had an effect by comparing the produced - * positons before and after the flush. - */ - status = produced_pos_before != produced_pos_after ? - CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED : - CONSUMER_STREAM_OPEN_PACKET_STATUS_NO_SPACE; - if (status == CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED) { - stream->opened_packet_in_current_trace_chunk = true; - } - -end: - return status; -} - -/* - * An attempt to open a new packet is performed after a rotation completes to - * get a begin timestamp as close as possible to the rotation point. - * - * However, that initial attempt at opening a packet can fail due to a full - * ring-buffer. In that case, a second attempt is performed after consuming - * a packet since that will have freed enough space in the ring-buffer. 
- */ -static -int post_consume_open_new_packet(struct lttng_consumer_stream *stream, - const struct stream_subbuffer *subbuffer, - struct lttng_consumer_local_data *ctx) -{ - int ret = 0; - - if (!stream->opened_packet_in_current_trace_chunk && - stream->trace_chunk && - !stream_is_rotating_to_null_chunk(stream)) { - const enum consumer_stream_open_packet_status status = - consumer_stream_open_packet(stream); - - switch (status) { - case CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED: - DBG("Opened a packet after consuming a packet rotation: stream id = %" PRIu64 - ", channel name = %s, session id = %" PRIu64, - stream->key, stream->chan->name, - stream->chan->session_id); - stream->opened_packet_in_current_trace_chunk = true; - break; - case CONSUMER_STREAM_OPEN_PACKET_STATUS_NO_SPACE: - /* - * Can't open a packet as there is no space left. - * This means that new events were produced, resulting - * in a packet being opened, which is what we want - * anyhow. - */ - DBG("No space left to open a packet after consuming a packet: stream id = %" PRIu64 - ", channel name = %s, session id = %" PRIu64, - stream->key, stream->chan->name, - stream->chan->session_id); - stream->opened_packet_in_current_trace_chunk = true; - break; - case CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR: - /* Logged by callee. 
*/ - ret = -1; - goto end; - default: - abort(); - } - - stream->opened_packet_in_current_trace_chunk = true; - } - -end: - return ret; -} - -struct lttng_consumer_stream *consumer_stream_create( - struct lttng_consumer_channel *channel, - uint64_t channel_key, - uint64_t stream_key, - const char *channel_name, - uint64_t relayd_id, - uint64_t session_id, - struct lttng_trace_chunk *trace_chunk, - int cpu, - int *alloc_ret, - enum consumer_channel_type type, - unsigned int monitor) -{ - int ret; - struct lttng_consumer_stream *stream; - - stream = zmalloc(sizeof(*stream)); - if (stream == NULL) { - PERROR("malloc struct lttng_consumer_stream"); - ret = -ENOMEM; - goto end; - } - - rcu_read_lock(); - - if (trace_chunk && !lttng_trace_chunk_get(trace_chunk)) { - ERR("Failed to acquire trace chunk reference during the creation of a stream"); - ret = -1; - goto error; - } - - stream->chan = channel; - stream->key = stream_key; - stream->trace_chunk = trace_chunk; - stream->out_fd = -1; - stream->out_fd_offset = 0; - stream->output_written = 0; - stream->net_seq_idx = relayd_id; - stream->session_id = session_id; - stream->monitor = monitor; - stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE; - stream->index_file = NULL; - stream->last_sequence_number = -1ULL; - stream->rotate_position = -1ULL; - /* Buffer is created with an open packet. */ - stream->opened_packet_in_current_trace_chunk = true; - pthread_mutex_init(&stream->lock, NULL); - pthread_mutex_init(&stream->metadata_timer_lock, NULL); - - /* If channel is the metadata, flag this stream as metadata. */ - if (type == CONSUMER_CHANNEL_TYPE_METADATA) { - stream->metadata_flag = 1; - /* Metadata is flat out. */ - strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name)); - /* Live rendez-vous point. 
*/ - pthread_cond_init(&stream->metadata_rdv, NULL); - pthread_mutex_init(&stream->metadata_rdv_lock, NULL); - } else { - /* Format stream name to _ */ - ret = snprintf(stream->name, sizeof(stream->name), "%s_%d", - channel_name, cpu); - if (ret < 0) { - PERROR("snprintf stream name"); - goto error; - } - } - - switch (channel->output) { - case CONSUMER_CHANNEL_SPLICE: - stream->output = LTTNG_EVENT_SPLICE; - ret = utils_create_pipe(stream->splice_pipe); - if (ret < 0) { - goto error; - } - break; - case CONSUMER_CHANNEL_MMAP: - stream->output = LTTNG_EVENT_MMAP; - break; - default: - abort(); - } - - /* Key is always the wait_fd for streams. */ - lttng_ht_node_init_u64(&stream->node, stream->key); - - /* Init node per channel id key */ - lttng_ht_node_init_u64(&stream->node_channel_id, channel_key); - - /* Init session id node with the stream session id */ - lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id); - - DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64 - " relayd_id %" PRIu64 ", session_id %" PRIu64, - stream->name, stream->key, channel_key, - stream->net_seq_idx, stream->session_id); - - rcu_read_unlock(); - - lttng_dynamic_array_init(&stream->read_subbuffer_ops.post_consume_cbs, - sizeof(post_consume_cb), NULL); - - if (type == CONSUMER_CHANNEL_TYPE_METADATA) { - stream->read_subbuffer_ops.lock = - consumer_stream_metadata_lock_all; - stream->read_subbuffer_ops.unlock = - consumer_stream_metadata_unlock_all; - stream->read_subbuffer_ops.pre_consume_subbuffer = - metadata_stream_check_version; - } else { - const post_consume_cb post_consume_index_op = channel->is_live ? 
- consumer_stream_sync_metadata_index : - consumer_stream_send_index; - - ret = lttng_dynamic_array_add_element( - &stream->read_subbuffer_ops.post_consume_cbs, - &post_consume_index_op); - if (ret) { - PERROR("Failed to add `send index` callback to stream's post consumption callbacks"); - goto error; - } - - ret = lttng_dynamic_array_add_element( - &stream->read_subbuffer_ops.post_consume_cbs, - &(post_consume_cb) { post_consume_open_new_packet }); - if (ret) { - PERROR("Failed to add `open new packet` callback to stream's post consumption callbacks"); - goto error; - } - - stream->read_subbuffer_ops.lock = consumer_stream_data_lock_all; - stream->read_subbuffer_ops.unlock = - consumer_stream_data_unlock_all; - stream->read_subbuffer_ops.pre_consume_subbuffer = - consumer_stream_update_stats; - } - - if (channel->output == CONSUMER_CHANNEL_MMAP) { - stream->read_subbuffer_ops.consume_subbuffer = - consumer_stream_consume_mmap; - } else { - stream->read_subbuffer_ops.consume_subbuffer = - consumer_stream_consume_splice; - } - - return stream; - -error: - rcu_read_unlock(); - lttng_trace_chunk_put(stream->trace_chunk); - lttng_dynamic_array_reset(&stream->read_subbuffer_ops.post_consume_cbs); - free(stream); -end: - if (alloc_ret) { - *alloc_ret = ret; - } - return NULL; -} - -/* - * Close stream on the relayd side. This call can destroy a relayd if the - * conditions are met. - * - * A RCU read side lock MUST be acquired if the relayd object was looked up in - * a hash table before calling this. - */ -void consumer_stream_relayd_close(struct lttng_consumer_stream *stream, - struct consumer_relayd_sock_pair *relayd) -{ - int ret; - - LTTNG_ASSERT(stream); - LTTNG_ASSERT(relayd); - - if (stream->sent_to_relayd) { - uatomic_dec(&relayd->refcount); - LTTNG_ASSERT(uatomic_read(&relayd->refcount) >= 0); - } - - /* Closing streams requires to lock the control socket. 
*/ - pthread_mutex_lock(&relayd->ctrl_sock_mutex); - ret = relayd_send_close_stream(&relayd->control_sock, - stream->relayd_stream_id, - stream->next_net_seq_num - 1); - pthread_mutex_unlock(&relayd->ctrl_sock_mutex); - if (ret < 0) { - ERR("Relayd send close stream failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx); - lttng_consumer_cleanup_relayd(relayd); - } - - /* Both conditions are met, we destroy the relayd. */ - if (uatomic_read(&relayd->refcount) == 0 && - uatomic_read(&relayd->destroy_flag)) { - consumer_destroy_relayd(relayd); - } - stream->net_seq_idx = (uint64_t) -1ULL; - stream->sent_to_relayd = 0; -} - -/* - * Close stream's file descriptors and, if needed, close stream also on the - * relayd side. - * - * The consumer data lock MUST be acquired. - * The stream lock MUST be acquired. - */ -void consumer_stream_close(struct lttng_consumer_stream *stream) -{ - int ret; - struct consumer_relayd_sock_pair *relayd; - - LTTNG_ASSERT(stream); - - switch (the_consumer_data.type) { - case LTTNG_CONSUMER_KERNEL: - if (stream->mmap_base != NULL) { - ret = munmap(stream->mmap_base, stream->mmap_len); - if (ret != 0) { - PERROR("munmap"); - } - } - - if (stream->wait_fd >= 0) { - ret = close(stream->wait_fd); - if (ret) { - PERROR("close"); - } - stream->wait_fd = -1; - } - if (stream->chan->output == CONSUMER_CHANNEL_SPLICE) { - utils_close_pipe(stream->splice_pipe); - } - break; - case LTTNG_CONSUMER32_UST: - case LTTNG_CONSUMER64_UST: - { - /* - * Special case for the metadata since the wait fd is an internal pipe - * polled in the metadata thread. - */ - if (stream->metadata_flag && stream->chan->monitor) { - int rpipe = stream->ust_metadata_poll_pipe[0]; - - /* - * This will stop the channel timer if one and close the write side - * of the metadata poll pipe. 
- */ - lttng_ustconsumer_close_metadata(stream->chan); - if (rpipe >= 0) { - ret = close(rpipe); - if (ret < 0) { - PERROR("closing metadata pipe read side"); - } - stream->ust_metadata_poll_pipe[0] = -1; - } - } - break; - } - default: - ERR("Unknown consumer_data type"); - abort(); - } - - /* Close output fd. Could be a socket or local file at this point. */ - if (stream->out_fd >= 0) { - ret = close(stream->out_fd); - if (ret) { - PERROR("close"); - } - stream->out_fd = -1; - } - - if (stream->index_file) { - lttng_index_file_put(stream->index_file); - stream->index_file = NULL; - } - - lttng_trace_chunk_put(stream->trace_chunk); - stream->trace_chunk = NULL; - - /* Check and cleanup relayd if needed. */ - rcu_read_lock(); - relayd = consumer_find_relayd(stream->net_seq_idx); - if (relayd != NULL) { - consumer_stream_relayd_close(stream, relayd); - } - rcu_read_unlock(); -} - -/* - * Delete the stream from all possible hash tables. - * - * The consumer data lock MUST be acquired. - * The stream lock MUST be acquired. - */ -void consumer_stream_delete(struct lttng_consumer_stream *stream, - struct lttng_ht *ht) -{ - int ret; - struct lttng_ht_iter iter; - - LTTNG_ASSERT(stream); - /* Should NEVER be called not in monitor mode. */ - LTTNG_ASSERT(stream->chan->monitor); - - rcu_read_lock(); - - if (ht) { - iter.iter.node = &stream->node.node; - ret = lttng_ht_del(ht, &iter); - LTTNG_ASSERT(!ret); - } - - /* Delete from stream per channel ID hash table. */ - iter.iter.node = &stream->node_channel_id.node; - /* - * The returned value is of no importance. Even if the node is NOT in the - * hash table, we continue since we may have been called by a code path - * that did not add the stream to a (all) hash table. Same goes for the - * next call ht del call. - */ - (void) lttng_ht_del(the_consumer_data.stream_per_chan_id_ht, &iter); - - /* Delete from the global stream list. 
*/ - iter.iter.node = &stream->node_session_id.node; - /* See the previous ht del on why we ignore the returned value. */ - (void) lttng_ht_del(the_consumer_data.stream_list_ht, &iter); - - rcu_read_unlock(); - - if (!stream->metadata_flag) { - /* Decrement the stream count of the global consumer data. */ - LTTNG_ASSERT(the_consumer_data.stream_count > 0); - the_consumer_data.stream_count--; - } -} - -/* - * Free the given stream within a RCU call. - */ -void consumer_stream_free(struct lttng_consumer_stream *stream) -{ - LTTNG_ASSERT(stream); - - metadata_bucket_destroy(stream->metadata_bucket); - call_rcu(&stream->node.head, free_stream_rcu); -} - -/* - * Destroy the stream's buffers of the tracer. - */ -void consumer_stream_destroy_buffers(struct lttng_consumer_stream *stream) -{ - LTTNG_ASSERT(stream); - - switch (the_consumer_data.type) { - case LTTNG_CONSUMER_KERNEL: - break; - case LTTNG_CONSUMER32_UST: - case LTTNG_CONSUMER64_UST: - lttng_ustconsumer_del_stream(stream); - break; - default: - ERR("Unknown consumer_data type"); - abort(); - } -} - -/* - * Destroy and close a already created stream. - */ -static void destroy_close_stream(struct lttng_consumer_stream *stream) -{ - LTTNG_ASSERT(stream); - - DBG("Consumer stream destroy monitored key: %" PRIu64, stream->key); - - /* Destroy tracer buffers of the stream. */ - consumer_stream_destroy_buffers(stream); - /* Close down everything including the relayd if one. */ - consumer_stream_close(stream); -} - -/* - * Decrement the stream's channel refcount and if down to 0, return the channel - * pointer so it can be destroyed by the caller or NULL if not. - */ -static struct lttng_consumer_channel *unref_channel( - struct lttng_consumer_stream *stream) -{ - struct lttng_consumer_channel *free_chan = NULL; - - LTTNG_ASSERT(stream); - LTTNG_ASSERT(stream->chan); - - /* Update refcount of channel and see if we need to destroy it. 
*/ - if (!uatomic_sub_return(&stream->chan->refcount, 1) - && !uatomic_read(&stream->chan->nb_init_stream_left)) { - free_chan = stream->chan; - } - - return free_chan; -} - -/* - * Destroy a stream completely. This will delete, close and free the stream. - * Once return, the stream is NO longer usable. Its channel may get destroyed - * if conditions are met for a monitored stream. - * - * This MUST be called WITHOUT the consumer data and stream lock acquired if - * the stream is in _monitor_ mode else it does not matter. - */ -void consumer_stream_destroy(struct lttng_consumer_stream *stream, - struct lttng_ht *ht) -{ - LTTNG_ASSERT(stream); - - /* Stream is in monitor mode. */ - if (stream->monitor) { - struct lttng_consumer_channel *free_chan = NULL; - - /* - * This means that the stream was successfully removed from the streams - * list of the channel and sent to the right thread managing this - * stream thus being globally visible. - */ - if (stream->globally_visible) { - pthread_mutex_lock(&the_consumer_data.lock); - pthread_mutex_lock(&stream->chan->lock); - pthread_mutex_lock(&stream->lock); - /* Remove every reference of the stream in the consumer. */ - consumer_stream_delete(stream, ht); - - destroy_close_stream(stream); - - /* Update channel's refcount of the stream. */ - free_chan = unref_channel(stream); - - /* Indicates that the consumer data state MUST be updated after this. */ - the_consumer_data.need_update = 1; - - pthread_mutex_unlock(&stream->lock); - pthread_mutex_unlock(&stream->chan->lock); - pthread_mutex_unlock(&the_consumer_data.lock); - } else { - /* - * If the stream is not visible globally, this needs to be done - * outside of the consumer data lock section. - */ - free_chan = unref_channel(stream); - } - - if (free_chan) { - consumer_del_channel(free_chan); - } - } else { - destroy_close_stream(stream); - } - - /* Free stream within a RCU call. 
*/ - lttng_trace_chunk_put(stream->trace_chunk); - stream->trace_chunk = NULL; - lttng_dynamic_array_reset(&stream->read_subbuffer_ops.post_consume_cbs); - consumer_stream_free(stream); -} - -/* - * Write index of a specific stream either on the relayd or local disk. - * - * Return 0 on success or else a negative value. - */ -int consumer_stream_write_index(struct lttng_consumer_stream *stream, - struct ctf_packet_index *element) -{ - int ret; - - LTTNG_ASSERT(stream); - LTTNG_ASSERT(element); - - rcu_read_lock(); - if (stream->net_seq_idx != (uint64_t) -1ULL) { - struct consumer_relayd_sock_pair *relayd; - relayd = consumer_find_relayd(stream->net_seq_idx); - if (relayd) { - pthread_mutex_lock(&relayd->ctrl_sock_mutex); - ret = relayd_send_index(&relayd->control_sock, element, - stream->relayd_stream_id, stream->next_net_seq_num - 1); - if (ret < 0) { - /* - * Communication error with lttng-relayd, - * perform cleanup now - */ - ERR("Relayd send index failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx); - lttng_consumer_cleanup_relayd(relayd); - ret = -1; - } - pthread_mutex_unlock(&relayd->ctrl_sock_mutex); - } else { - ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. 
Can't write index.", - stream->key, stream->net_seq_idx); - ret = -1; - } - } else { - if (lttng_index_file_write(stream->index_file, element)) { - ret = -1; - } else { - ret = 0; - } - } - if (ret < 0) { - goto error; - } - -error: - rcu_read_unlock(); - return ret; -} - -int consumer_stream_create_output_files(struct lttng_consumer_stream *stream, - bool create_index) -{ - int ret; - enum lttng_trace_chunk_status chunk_status; - const int flags = O_WRONLY | O_CREAT | O_TRUNC; - const mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP; - char stream_path[LTTNG_PATH_MAX]; - - ASSERT_LOCKED(stream->lock); - LTTNG_ASSERT(stream->trace_chunk); - - ret = utils_stream_file_path(stream->chan->pathname, stream->name, - stream->chan->tracefile_size, - stream->tracefile_count_current, NULL, - stream_path, sizeof(stream_path)); - if (ret < 0) { - goto end; - } - - if (stream->out_fd >= 0) { - ret = close(stream->out_fd); - if (ret < 0) { - PERROR("Failed to close stream file \"%s\"", - stream->name); - goto end; - } - stream->out_fd = -1; - } - - DBG("Opening stream output file \"%s\"", stream_path); - chunk_status = lttng_trace_chunk_open_file(stream->trace_chunk, stream_path, - flags, mode, &stream->out_fd, false); - if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { - ERR("Failed to open stream file \"%s\"", stream->name); - ret = -1; - goto end; - } - - if (!stream->metadata_flag && (create_index || stream->index_file)) { - if (stream->index_file) { - lttng_index_file_put(stream->index_file); - } - chunk_status = lttng_index_file_create_from_trace_chunk( - stream->trace_chunk, - stream->chan->pathname, - stream->name, - stream->chan->tracefile_size, - stream->tracefile_count_current, - CTF_INDEX_MAJOR, CTF_INDEX_MINOR, - false, &stream->index_file); - if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { - ret = -1; - goto end; - } - } - - /* Reset current size because we just perform a rotation. 
*/ - stream->tracefile_size_current = 0; - stream->out_fd_offset = 0; -end: - return ret; -} - -int consumer_stream_rotate_output_files(struct lttng_consumer_stream *stream) -{ - int ret; - - stream->tracefile_count_current++; - if (stream->chan->tracefile_count > 0) { - stream->tracefile_count_current %= - stream->chan->tracefile_count; - } - - DBG("Rotating output files of stream \"%s\"", stream->name); - ret = consumer_stream_create_output_files(stream, true); - if (ret) { - goto end; - } - -end: - return ret; -} - -bool consumer_stream_is_deleted(struct lttng_consumer_stream *stream) -{ - /* - * This function does not take a const stream since - * cds_lfht_is_node_deleted was not const before liburcu 0.12. - */ - LTTNG_ASSERT(stream); - return cds_lfht_is_node_deleted(&stream->node.node); -} - -static ssize_t metadata_bucket_flush( - const struct stream_subbuffer *buffer, void *data) -{ - ssize_t ret; - struct lttng_consumer_stream *stream = data; - - ret = consumer_stream_consume_mmap(NULL, stream, buffer); - if (ret < 0) { - goto end; - } -end: - return ret; -} - -static ssize_t metadata_bucket_consume( - struct lttng_consumer_local_data *unused, - struct lttng_consumer_stream *stream, - const struct stream_subbuffer *subbuffer) -{ - ssize_t ret; - enum metadata_bucket_status status; - - status = metadata_bucket_fill(stream->metadata_bucket, subbuffer); - switch (status) { - case METADATA_BUCKET_STATUS_OK: - /* Return consumed size. 
*/ - ret = subbuffer->buffer.buffer.size; - break; - default: - ret = -1; - } - - return ret; -} - -int consumer_stream_enable_metadata_bucketization( - struct lttng_consumer_stream *stream) -{ - int ret = 0; - - LTTNG_ASSERT(stream->metadata_flag); - LTTNG_ASSERT(!stream->metadata_bucket); - LTTNG_ASSERT(stream->chan->output == CONSUMER_CHANNEL_MMAP); - - stream->metadata_bucket = metadata_bucket_create( - metadata_bucket_flush, stream); - if (!stream->metadata_bucket) { - ret = -1; - goto end; - } - - stream->read_subbuffer_ops.consume_subbuffer = metadata_bucket_consume; -end: - return ret; -} - -void consumer_stream_metadata_set_version( - struct lttng_consumer_stream *stream, uint64_t new_version) -{ - LTTNG_ASSERT(new_version > stream->metadata_version); - stream->metadata_version = new_version; - stream->reset_metadata_flag = 1; - - if (stream->metadata_bucket) { - metadata_bucket_reset(stream->metadata_bucket); - } -} - -int consumer_stream_flush_buffer(struct lttng_consumer_stream *stream, - bool producer_active) -{ - int ret = 0; - - switch (the_consumer_data.type) { - case LTTNG_CONSUMER_KERNEL: - if (producer_active) { - ret = kernctl_buffer_flush(stream->wait_fd); - if (ret < 0) { - ERR("Failed to flush kernel stream"); - goto end; - } - } else { - ret = kernctl_buffer_flush_empty(stream->wait_fd); - if (ret < 0) { - /* - * Doing a buffer flush which does not take into - * account empty packets. This is not perfect, - * but required as a fall-back when - * "flush_empty" is not implemented by - * lttng-modules. 
- */ - ret = kernctl_buffer_flush(stream->wait_fd); - if (ret < 0) { - ERR("Failed to flush kernel stream"); - goto end; - } - } - } - break; - case LTTNG_CONSUMER32_UST: - case LTTNG_CONSUMER64_UST: - ret = lttng_ustconsumer_flush_buffer(stream, (int) producer_active); - break; - default: - ERR("Unknown consumer_data type"); - abort(); - } - -end: - return ret; -} diff --git a/src/common/consumer/consumer-stream.cpp b/src/common/consumer/consumer-stream.cpp new file mode 100644 index 000000000..fbaf4aef5 --- /dev/null +++ b/src/common/consumer/consumer-stream.cpp @@ -0,0 +1,1331 @@ +/* + * Copyright (C) 2011 Julien Desfossez + * Copyright (C) 2011 Mathieu Desnoyers + * Copyright (C) 2013 David Goulet + * + * SPDX-License-Identifier: GPL-2.0-only + * + */ + +#define _LGPL_SOURCE +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "consumer-stream.h" + +/* + * RCU call to free stream. MUST only be used with call_rcu(). 
+ */ +static void free_stream_rcu(struct rcu_head *head) +{ + struct lttng_ht_node_u64 *node = + caa_container_of(head, struct lttng_ht_node_u64, head); + struct lttng_consumer_stream *stream = + caa_container_of(node, struct lttng_consumer_stream, node); + + pthread_mutex_destroy(&stream->lock); + free(stream); +} + +static void consumer_stream_data_lock_all(struct lttng_consumer_stream *stream) +{ + pthread_mutex_lock(&stream->chan->lock); + pthread_mutex_lock(&stream->lock); +} + +static void consumer_stream_data_unlock_all(struct lttng_consumer_stream *stream) +{ + pthread_mutex_unlock(&stream->lock); + pthread_mutex_unlock(&stream->chan->lock); +} + +static void consumer_stream_metadata_lock_all(struct lttng_consumer_stream *stream) +{ + consumer_stream_data_lock_all(stream); + pthread_mutex_lock(&stream->metadata_rdv_lock); +} + +static void consumer_stream_metadata_unlock_all(struct lttng_consumer_stream *stream) +{ + pthread_mutex_unlock(&stream->metadata_rdv_lock); + consumer_stream_data_unlock_all(stream); +} + +/* Only used for data streams. */ +static int consumer_stream_update_stats(struct lttng_consumer_stream *stream, + const struct stream_subbuffer *subbuf) +{ + int ret = 0; + uint64_t sequence_number; + const uint64_t discarded_events = subbuf->info.data.events_discarded; + + if (!subbuf->info.data.sequence_number.is_set) { + /* Command not supported by the tracer. */ + sequence_number = -1ULL; + stream->sequence_number_unavailable = true; + } else { + sequence_number = subbuf->info.data.sequence_number.value; + } + + /* + * Start the sequence when we extract the first packet in case we don't + * start at 0 (for example if a consumer is not connected to the + * session immediately after the beginning). 
+ */ + if (stream->last_sequence_number == -1ULL) { + stream->last_sequence_number = sequence_number; + } else if (sequence_number > stream->last_sequence_number) { + stream->chan->lost_packets += sequence_number - + stream->last_sequence_number - 1; + } else { + /* seq <= last_sequence_number */ + ERR("Sequence number inconsistent : prev = %" PRIu64 + ", current = %" PRIu64, + stream->last_sequence_number, sequence_number); + ret = -1; + goto end; + } + stream->last_sequence_number = sequence_number; + + if (discarded_events < stream->last_discarded_events) { + /* + * Overflow has occurred. We assume only one wrap-around + * has occurred. + */ + stream->chan->discarded_events += + (1ULL << (CAA_BITS_PER_LONG - 1)) - + stream->last_discarded_events + + discarded_events; + } else { + stream->chan->discarded_events += discarded_events - + stream->last_discarded_events; + } + stream->last_discarded_events = discarded_events; + ret = 0; + +end: + return ret; +} + +static +void ctf_packet_index_populate(struct ctf_packet_index *index, + off_t offset, const struct stream_subbuffer *subbuffer) +{ + *index = (typeof(*index)){ + .offset = htobe64(offset), + .packet_size = htobe64(subbuffer->info.data.packet_size), + .content_size = htobe64(subbuffer->info.data.content_size), + .timestamp_begin = htobe64( + subbuffer->info.data.timestamp_begin), + .timestamp_end = htobe64( + subbuffer->info.data.timestamp_end), + .events_discarded = htobe64( + subbuffer->info.data.events_discarded), + .stream_id = htobe64(subbuffer->info.data.stream_id), + .stream_instance_id = htobe64( + subbuffer->info.data.stream_instance_id.is_set ? + subbuffer->info.data.stream_instance_id.value : -1ULL), + .packet_seq_num = htobe64( + subbuffer->info.data.sequence_number.is_set ? 
+ subbuffer->info.data.sequence_number.value : -1ULL), + }; +} + +static ssize_t consumer_stream_consume_mmap( + struct lttng_consumer_local_data *ctx, + struct lttng_consumer_stream *stream, + const struct stream_subbuffer *subbuffer) +{ + const unsigned long padding_size = + subbuffer->info.data.padded_subbuf_size - + subbuffer->info.data.subbuf_size; + const ssize_t written_bytes = lttng_consumer_on_read_subbuffer_mmap( + stream, &subbuffer->buffer.buffer, padding_size); + + if (stream->net_seq_idx == -1ULL) { + /* + * When writing on disk, check that only the subbuffer (no + * padding) was written to disk. + */ + if (written_bytes != subbuffer->info.data.padded_subbuf_size) { + DBG("Failed to write the entire padded subbuffer on disk (written_bytes: %zd, padded subbuffer size %lu)", + written_bytes, + subbuffer->info.data.padded_subbuf_size); + } + } else { + /* + * When streaming over the network, check that the entire + * subbuffer including padding was successfully written. + */ + if (written_bytes != subbuffer->info.data.subbuf_size) { + DBG("Failed to write only the subbuffer over the network (written_bytes: %zd, subbuffer size %lu)", + written_bytes, + subbuffer->info.data.subbuf_size); + } + } + + /* + * If `lttng_consumer_on_read_subbuffer_mmap()` returned an error, pass + * it along to the caller, else return zero. 
+ */ + if (written_bytes < 0) { + ERR("Error reading mmap subbuffer: %zd", written_bytes); + } + + return written_bytes; +} + +static ssize_t consumer_stream_consume_splice( + struct lttng_consumer_local_data *ctx, + struct lttng_consumer_stream *stream, + const struct stream_subbuffer *subbuffer) +{ + const ssize_t written_bytes = lttng_consumer_on_read_subbuffer_splice( + ctx, stream, subbuffer->info.data.padded_subbuf_size, 0); + + if (written_bytes != subbuffer->info.data.padded_subbuf_size) { + DBG("Failed to write the entire padded subbuffer (written_bytes: %zd, padded subbuffer size %lu)", + written_bytes, + subbuffer->info.data.padded_subbuf_size); + } + + /* + * If `lttng_consumer_on_read_subbuffer_splice()` returned an error, + * pass it along to the caller, else return zero. + */ + if (written_bytes < 0) { + ERR("Error reading splice subbuffer: %zd", written_bytes); + } + + return written_bytes; +} + +static int consumer_stream_send_index( + struct lttng_consumer_stream *stream, + const struct stream_subbuffer *subbuffer, + struct lttng_consumer_local_data *ctx) +{ + off_t packet_offset = 0; + struct ctf_packet_index index = {}; + + /* + * This is called after consuming the sub-buffer; substract the + * effect this sub-buffer from the offset. + */ + if (stream->net_seq_idx == (uint64_t) -1ULL) { + packet_offset = stream->out_fd_offset - + subbuffer->info.data.padded_subbuf_size; + } + + ctf_packet_index_populate(&index, packet_offset, subbuffer); + return consumer_stream_write_index(stream, &index); +} + +/* + * Actually do the metadata sync using the given metadata stream. + * + * Return 0 on success else a negative value. ENODATA can be returned also + * indicating that there is no metadata available for that stream. 
+ */ +static int do_sync_metadata(struct lttng_consumer_stream *metadata, + struct lttng_consumer_local_data *ctx) +{ + int ret; + enum sync_metadata_status status; + + LTTNG_ASSERT(metadata); + LTTNG_ASSERT(metadata->metadata_flag); + LTTNG_ASSERT(ctx); + + /* + * In UST, since we have to write the metadata from the cache packet + * by packet, we might need to start this procedure multiple times + * until all the metadata from the cache has been extracted. + */ + do { + /* + * Steps : + * - Lock the metadata stream + * - Check if metadata stream node was deleted before locking. + * - if yes, release and return success + * - Check if new metadata is ready (flush + snapshot pos) + * - If nothing : release and return. + * - Lock the metadata_rdv_lock + * - Unlock the metadata stream + * - cond_wait on metadata_rdv to wait the wakeup from the + * metadata thread + * - Unlock the metadata_rdv_lock + */ + pthread_mutex_lock(&metadata->lock); + + /* + * There is a possibility that we were able to acquire a reference on the + * stream from the RCU hash table but between then and now, the node might + * have been deleted just before the lock is acquired. Thus, after locking, + * we make sure the metadata node has not been deleted which means that the + * buffers are closed. + * + * In that case, there is no need to sync the metadata hence returning a + * success return code. + */ + ret = cds_lfht_is_node_deleted(&metadata->node.node); + if (ret) { + ret = 0; + goto end_unlock_mutex; + } + + switch (ctx->type) { + case LTTNG_CONSUMER_KERNEL: + /* + * Empty the metadata cache and flush the current stream. + */ + status = lttng_kconsumer_sync_metadata(metadata); + break; + case LTTNG_CONSUMER32_UST: + case LTTNG_CONSUMER64_UST: + /* + * Ask the sessiond if we have new metadata waiting and update the + * consumer metadata cache. 
+ */ + status = lttng_ustconsumer_sync_metadata(ctx, metadata); + break; + default: + abort(); + } + + switch (status) { + case SYNC_METADATA_STATUS_NEW_DATA: + break; + case SYNC_METADATA_STATUS_NO_DATA: + ret = 0; + goto end_unlock_mutex; + case SYNC_METADATA_STATUS_ERROR: + ret = -1; + goto end_unlock_mutex; + default: + abort(); + } + + /* + * At this point, new metadata have been flushed, so we wait on the + * rendez-vous point for the metadata thread to wake us up when it + * finishes consuming the metadata and continue execution. + */ + + pthread_mutex_lock(&metadata->metadata_rdv_lock); + + /* + * Release metadata stream lock so the metadata thread can process it. + */ + pthread_mutex_unlock(&metadata->lock); + + /* + * Wait on the rendez-vous point. Once woken up, it means the metadata was + * consumed and thus synchronization is achieved. + */ + pthread_cond_wait(&metadata->metadata_rdv, &metadata->metadata_rdv_lock); + pthread_mutex_unlock(&metadata->metadata_rdv_lock); + } while (status == SYNC_METADATA_STATUS_NEW_DATA); + + /* Success */ + return 0; + +end_unlock_mutex: + pthread_mutex_unlock(&metadata->lock); + return ret; +} + +/* + * Synchronize the metadata using a given session ID. A successful acquisition + * of a metadata stream will trigger a request to the session daemon and a + * snapshot so the metadata thread can consume it. + * + * This function call is a rendez-vous point between the metadata thread and + * the data thread. + * + * Return 0 on success or else a negative value. + */ +int consumer_stream_sync_metadata(struct lttng_consumer_local_data *ctx, + uint64_t session_id) +{ + int ret; + struct lttng_consumer_stream *stream = NULL; + struct lttng_ht_iter iter; + struct lttng_ht *ht; + + LTTNG_ASSERT(ctx); + + /* Ease our life a bit. */ + ht = the_consumer_data.stream_list_ht; + + rcu_read_lock(); + + /* Search the metadata associated with the session id of the given stream. 
*/ + + cds_lfht_for_each_entry_duplicate(ht->ht, + ht->hash_fct(&session_id, lttng_ht_seed), ht->match_fct, + &session_id, &iter.iter, stream, node_session_id.node) { + if (!stream->metadata_flag) { + continue; + } + + ret = do_sync_metadata(stream, ctx); + if (ret < 0) { + goto end; + } + } + + /* + * Force return code to 0 (success) since ret might be ENODATA for instance + * which is not an error but rather that we should come back. + */ + ret = 0; + +end: + rcu_read_unlock(); + return ret; +} + +static int consumer_stream_sync_metadata_index( + struct lttng_consumer_stream *stream, + const struct stream_subbuffer *subbuffer, + struct lttng_consumer_local_data *ctx) +{ + int ret; + + /* Block until all the metadata is sent. */ + pthread_mutex_lock(&stream->metadata_timer_lock); + LTTNG_ASSERT(!stream->missed_metadata_flush); + stream->waiting_on_metadata = true; + pthread_mutex_unlock(&stream->metadata_timer_lock); + + ret = consumer_stream_sync_metadata(ctx, stream->session_id); + + pthread_mutex_lock(&stream->metadata_timer_lock); + stream->waiting_on_metadata = false; + if (stream->missed_metadata_flush) { + stream->missed_metadata_flush = false; + pthread_mutex_unlock(&stream->metadata_timer_lock); + (void) stream->read_subbuffer_ops.send_live_beacon(stream); + } else { + pthread_mutex_unlock(&stream->metadata_timer_lock); + } + if (ret < 0) { + goto end; + } + + ret = consumer_stream_send_index(stream, subbuffer, ctx); +end: + return ret; +} + +/* + * Check if the local version of the metadata stream matches with the version + * of the metadata stream in the kernel. If it was updated, set the reset flag + * on the stream. 
 */
static
int metadata_stream_check_version(struct lttng_consumer_stream *stream,
	const struct stream_subbuffer *subbuffer)
{
	if (stream->metadata_version == subbuffer->info.metadata.version) {
		goto end;
	}

	DBG("New metadata version detected");
	consumer_stream_metadata_set_version(stream,
			subbuffer->info.metadata.version);

	if (stream->read_subbuffer_ops.reset_metadata) {
		stream->read_subbuffer_ops.reset_metadata(stream);
	}

end:
	/* A version change is not an error; always report success. */
	return 0;
}

/*
 * Return true when an ongoing rotation is moving this stream to a "null"
 * trace chunk, i.e. the stream's chunk was already swapped to the channel's
 * (or the channel no longer has a chunk).
 */
static
bool stream_is_rotating_to_null_chunk(
		const struct lttng_consumer_stream *stream)
{
	bool rotating_to_null_chunk = false;

	if (stream->rotate_position == -1ULL) {
		/* No rotation ongoing. */
		goto end;
	}

	if (stream->trace_chunk == stream->chan->trace_chunk ||
			!stream->chan->trace_chunk) {
		rotating_to_null_chunk = true;
	}
end:
	return rotating_to_null_chunk;
}

enum consumer_stream_open_packet_status consumer_stream_open_packet(
		struct lttng_consumer_stream *stream)
{
	int ret;
	enum consumer_stream_open_packet_status status;
	unsigned long produced_pos_before, produced_pos_after;

	/* Sample positions before the flush to detect whether it had an effect. */
	ret = lttng_consumer_sample_snapshot_positions(stream);
	if (ret < 0) {
		ERR("Failed to snapshot positions before post-rotation empty packet flush: stream id = %" PRIu64
				", channel name = %s, session id = %" PRIu64,
				stream->key, stream->chan->name,
				stream->chan->session_id);
		status = CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR;
		goto end;
	}

	ret = lttng_consumer_get_produced_snapshot(
			stream, &produced_pos_before);
	if (ret < 0) {
		ERR("Failed to read produced position before post-rotation empty packet flush: stream id = %" PRIu64
				", channel name = %s, session id = %" PRIu64,
				stream->key, stream->chan->name,
				stream->chan->session_id);
		status = CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR;
		goto end;
	}

	/* Non-producer-active flush: only opens a packet if space allows. */
	ret = consumer_stream_flush_buffer(stream, 0);
	if (ret) {
		ERR("Failed to flush an empty packet at rotation point: stream id = %" PRIu64
				", channel name = %s, session id = %" PRIu64,
				stream->key, stream->chan->name,
				stream->chan->session_id);
		status = CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR;
		goto end;
	}

	ret = lttng_consumer_sample_snapshot_positions(stream);
	if (ret < 0) {
		ERR("Failed to snapshot positions after post-rotation empty packet flush: stream id = %" PRIu64
				", channel name = %s, session id = %" PRIu64,
				stream->key, stream->chan->name,
				stream->chan->session_id);
		status = CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR;
		goto end;
	}

	ret = lttng_consumer_get_produced_snapshot(stream, &produced_pos_after);
	if (ret < 0) {
		ERR("Failed to read produced position after post-rotation empty packet flush: stream id = %" PRIu64
				", channel name = %s, session id = %" PRIu64,
				stream->key, stream->chan->name,
				stream->chan->session_id);
		status = CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR;
		goto end;
	}

	/*
	 * Determine if the flush had an effect by comparing the produced
	 * positions before and after the flush.
	 */
	status = produced_pos_before != produced_pos_after ?
			CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED :
			CONSUMER_STREAM_OPEN_PACKET_STATUS_NO_SPACE;
	if (status == CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED) {
		stream->opened_packet_in_current_trace_chunk = true;
	}

end:
	return status;
}

/*
 * An attempt to open a new packet is performed after a rotation completes to
 * get a begin timestamp as close as possible to the rotation point.
 *
 * However, that initial attempt at opening a packet can fail due to a full
 * ring-buffer. In that case, a second attempt is performed after consuming
 * a packet since that will have freed enough space in the ring-buffer.
 */
static
int post_consume_open_new_packet(struct lttng_consumer_stream *stream,
		const struct stream_subbuffer *subbuffer,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;

	/*
	 * Only retry when the post-rotation open-packet attempt has not
	 * succeeded yet, a trace chunk exists, and the stream is not in the
	 * middle of a rotation to a "null" chunk.
	 */
	if (!stream->opened_packet_in_current_trace_chunk &&
			stream->trace_chunk &&
			!stream_is_rotating_to_null_chunk(stream)) {
		const enum consumer_stream_open_packet_status status =
				consumer_stream_open_packet(stream);

		switch (status) {
		case CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED:
			DBG("Opened a packet after consuming a packet rotation: stream id = %" PRIu64
					", channel name = %s, session id = %" PRIu64,
					stream->key, stream->chan->name,
					stream->chan->session_id);
			stream->opened_packet_in_current_trace_chunk = true;
			break;
		case CONSUMER_STREAM_OPEN_PACKET_STATUS_NO_SPACE:
			/*
			 * Can't open a packet as there is no space left.
			 * This means that new events were produced, resulting
			 * in a packet being opened, which is what we want
			 * anyhow.
			 */
			DBG("No space left to open a packet after consuming a packet: stream id = %" PRIu64
					", channel name = %s, session id = %" PRIu64,
					stream->key, stream->chan->name,
					stream->chan->session_id);
			stream->opened_packet_in_current_trace_chunk = true;
			break;
		case CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR:
			/* Logged by callee.
*/ + ret = -1; + goto end; + default: + abort(); + } + + stream->opened_packet_in_current_trace_chunk = true; + } + +end: + return ret; +} + +struct lttng_consumer_stream *consumer_stream_create( + struct lttng_consumer_channel *channel, + uint64_t channel_key, + uint64_t stream_key, + const char *channel_name, + uint64_t relayd_id, + uint64_t session_id, + struct lttng_trace_chunk *trace_chunk, + int cpu, + int *alloc_ret, + enum consumer_channel_type type, + unsigned int monitor) +{ + int ret; + struct lttng_consumer_stream *stream; + + stream = (lttng_consumer_stream *) zmalloc(sizeof(*stream)); + if (stream == NULL) { + PERROR("malloc struct lttng_consumer_stream"); + ret = -ENOMEM; + goto end; + } + + rcu_read_lock(); + + if (trace_chunk && !lttng_trace_chunk_get(trace_chunk)) { + ERR("Failed to acquire trace chunk reference during the creation of a stream"); + ret = -1; + goto error; + } + + stream->chan = channel; + stream->key = stream_key; + stream->trace_chunk = trace_chunk; + stream->out_fd = -1; + stream->out_fd_offset = 0; + stream->output_written = 0; + stream->net_seq_idx = relayd_id; + stream->session_id = session_id; + stream->monitor = monitor; + stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE; + stream->index_file = NULL; + stream->last_sequence_number = -1ULL; + stream->rotate_position = -1ULL; + /* Buffer is created with an open packet. */ + stream->opened_packet_in_current_trace_chunk = true; + pthread_mutex_init(&stream->lock, NULL); + pthread_mutex_init(&stream->metadata_timer_lock, NULL); + + /* If channel is the metadata, flag this stream as metadata. */ + if (type == CONSUMER_CHANNEL_TYPE_METADATA) { + stream->metadata_flag = 1; + /* Metadata is flat out. */ + strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name)); + /* Live rendez-vous point. 
*/ + pthread_cond_init(&stream->metadata_rdv, NULL); + pthread_mutex_init(&stream->metadata_rdv_lock, NULL); + } else { + /* Format stream name to _ */ + ret = snprintf(stream->name, sizeof(stream->name), "%s_%d", + channel_name, cpu); + if (ret < 0) { + PERROR("snprintf stream name"); + goto error; + } + } + + switch (channel->output) { + case CONSUMER_CHANNEL_SPLICE: + stream->output = LTTNG_EVENT_SPLICE; + ret = utils_create_pipe(stream->splice_pipe); + if (ret < 0) { + goto error; + } + break; + case CONSUMER_CHANNEL_MMAP: + stream->output = LTTNG_EVENT_MMAP; + break; + default: + abort(); + } + + /* Key is always the wait_fd for streams. */ + lttng_ht_node_init_u64(&stream->node, stream->key); + + /* Init node per channel id key */ + lttng_ht_node_init_u64(&stream->node_channel_id, channel_key); + + /* Init session id node with the stream session id */ + lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id); + + DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64 + " relayd_id %" PRIu64 ", session_id %" PRIu64, + stream->name, stream->key, channel_key, + stream->net_seq_idx, stream->session_id); + + rcu_read_unlock(); + + lttng_dynamic_array_init(&stream->read_subbuffer_ops.post_consume_cbs, + sizeof(post_consume_cb), NULL); + + if (type == CONSUMER_CHANNEL_TYPE_METADATA) { + stream->read_subbuffer_ops.lock = + consumer_stream_metadata_lock_all; + stream->read_subbuffer_ops.unlock = + consumer_stream_metadata_unlock_all; + stream->read_subbuffer_ops.pre_consume_subbuffer = + metadata_stream_check_version; + } else { + const post_consume_cb post_consume_index_op = channel->is_live ? 
+ consumer_stream_sync_metadata_index : + consumer_stream_send_index; + const post_consume_cb post_consume_open_new_packet_ = + post_consume_open_new_packet; + + ret = lttng_dynamic_array_add_element( + &stream->read_subbuffer_ops.post_consume_cbs, + &post_consume_index_op); + if (ret) { + PERROR("Failed to add `send index` callback to stream's post consumption callbacks"); + goto error; + } + + ret = lttng_dynamic_array_add_element( + &stream->read_subbuffer_ops.post_consume_cbs, + &post_consume_open_new_packet_); + if (ret) { + PERROR("Failed to add `open new packet` callback to stream's post consumption callbacks"); + goto error; + } + + stream->read_subbuffer_ops.lock = consumer_stream_data_lock_all; + stream->read_subbuffer_ops.unlock = + consumer_stream_data_unlock_all; + stream->read_subbuffer_ops.pre_consume_subbuffer = + consumer_stream_update_stats; + } + + if (channel->output == CONSUMER_CHANNEL_MMAP) { + stream->read_subbuffer_ops.consume_subbuffer = + consumer_stream_consume_mmap; + } else { + stream->read_subbuffer_ops.consume_subbuffer = + consumer_stream_consume_splice; + } + + return stream; + +error: + rcu_read_unlock(); + lttng_trace_chunk_put(stream->trace_chunk); + lttng_dynamic_array_reset(&stream->read_subbuffer_ops.post_consume_cbs); + free(stream); +end: + if (alloc_ret) { + *alloc_ret = ret; + } + return NULL; +} + +/* + * Close stream on the relayd side. This call can destroy a relayd if the + * conditions are met. + * + * A RCU read side lock MUST be acquired if the relayd object was looked up in + * a hash table before calling this. + */ +void consumer_stream_relayd_close(struct lttng_consumer_stream *stream, + struct consumer_relayd_sock_pair *relayd) +{ + int ret; + + LTTNG_ASSERT(stream); + LTTNG_ASSERT(relayd); + + if (stream->sent_to_relayd) { + uatomic_dec(&relayd->refcount); + LTTNG_ASSERT(uatomic_read(&relayd->refcount) >= 0); + } + + /* Closing streams requires to lock the control socket. 
*/ + pthread_mutex_lock(&relayd->ctrl_sock_mutex); + ret = relayd_send_close_stream(&relayd->control_sock, + stream->relayd_stream_id, + stream->next_net_seq_num - 1); + pthread_mutex_unlock(&relayd->ctrl_sock_mutex); + if (ret < 0) { + ERR("Relayd send close stream failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx); + lttng_consumer_cleanup_relayd(relayd); + } + + /* Both conditions are met, we destroy the relayd. */ + if (uatomic_read(&relayd->refcount) == 0 && + uatomic_read(&relayd->destroy_flag)) { + consumer_destroy_relayd(relayd); + } + stream->net_seq_idx = (uint64_t) -1ULL; + stream->sent_to_relayd = 0; +} + +/* + * Close stream's file descriptors and, if needed, close stream also on the + * relayd side. + * + * The consumer data lock MUST be acquired. + * The stream lock MUST be acquired. + */ +void consumer_stream_close(struct lttng_consumer_stream *stream) +{ + int ret; + struct consumer_relayd_sock_pair *relayd; + + LTTNG_ASSERT(stream); + + switch (the_consumer_data.type) { + case LTTNG_CONSUMER_KERNEL: + if (stream->mmap_base != NULL) { + ret = munmap(stream->mmap_base, stream->mmap_len); + if (ret != 0) { + PERROR("munmap"); + } + } + + if (stream->wait_fd >= 0) { + ret = close(stream->wait_fd); + if (ret) { + PERROR("close"); + } + stream->wait_fd = -1; + } + if (stream->chan->output == CONSUMER_CHANNEL_SPLICE) { + utils_close_pipe(stream->splice_pipe); + } + break; + case LTTNG_CONSUMER32_UST: + case LTTNG_CONSUMER64_UST: + { + /* + * Special case for the metadata since the wait fd is an internal pipe + * polled in the metadata thread. + */ + if (stream->metadata_flag && stream->chan->monitor) { + int rpipe = stream->ust_metadata_poll_pipe[0]; + + /* + * This will stop the channel timer if one and close the write side + * of the metadata poll pipe. 
+ */ + lttng_ustconsumer_close_metadata(stream->chan); + if (rpipe >= 0) { + ret = close(rpipe); + if (ret < 0) { + PERROR("closing metadata pipe read side"); + } + stream->ust_metadata_poll_pipe[0] = -1; + } + } + break; + } + default: + ERR("Unknown consumer_data type"); + abort(); + } + + /* Close output fd. Could be a socket or local file at this point. */ + if (stream->out_fd >= 0) { + ret = close(stream->out_fd); + if (ret) { + PERROR("close"); + } + stream->out_fd = -1; + } + + if (stream->index_file) { + lttng_index_file_put(stream->index_file); + stream->index_file = NULL; + } + + lttng_trace_chunk_put(stream->trace_chunk); + stream->trace_chunk = NULL; + + /* Check and cleanup relayd if needed. */ + rcu_read_lock(); + relayd = consumer_find_relayd(stream->net_seq_idx); + if (relayd != NULL) { + consumer_stream_relayd_close(stream, relayd); + } + rcu_read_unlock(); +} + +/* + * Delete the stream from all possible hash tables. + * + * The consumer data lock MUST be acquired. + * The stream lock MUST be acquired. + */ +void consumer_stream_delete(struct lttng_consumer_stream *stream, + struct lttng_ht *ht) +{ + int ret; + struct lttng_ht_iter iter; + + LTTNG_ASSERT(stream); + /* Should NEVER be called not in monitor mode. */ + LTTNG_ASSERT(stream->chan->monitor); + + rcu_read_lock(); + + if (ht) { + iter.iter.node = &stream->node.node; + ret = lttng_ht_del(ht, &iter); + LTTNG_ASSERT(!ret); + } + + /* Delete from stream per channel ID hash table. */ + iter.iter.node = &stream->node_channel_id.node; + /* + * The returned value is of no importance. Even if the node is NOT in the + * hash table, we continue since we may have been called by a code path + * that did not add the stream to a (all) hash table. Same goes for the + * next call ht del call. + */ + (void) lttng_ht_del(the_consumer_data.stream_per_chan_id_ht, &iter); + + /* Delete from the global stream list. 
*/ + iter.iter.node = &stream->node_session_id.node; + /* See the previous ht del on why we ignore the returned value. */ + (void) lttng_ht_del(the_consumer_data.stream_list_ht, &iter); + + rcu_read_unlock(); + + if (!stream->metadata_flag) { + /* Decrement the stream count of the global consumer data. */ + LTTNG_ASSERT(the_consumer_data.stream_count > 0); + the_consumer_data.stream_count--; + } +} + +/* + * Free the given stream within a RCU call. + */ +void consumer_stream_free(struct lttng_consumer_stream *stream) +{ + LTTNG_ASSERT(stream); + + metadata_bucket_destroy(stream->metadata_bucket); + call_rcu(&stream->node.head, free_stream_rcu); +} + +/* + * Destroy the stream's buffers of the tracer. + */ +void consumer_stream_destroy_buffers(struct lttng_consumer_stream *stream) +{ + LTTNG_ASSERT(stream); + + switch (the_consumer_data.type) { + case LTTNG_CONSUMER_KERNEL: + break; + case LTTNG_CONSUMER32_UST: + case LTTNG_CONSUMER64_UST: + lttng_ustconsumer_del_stream(stream); + break; + default: + ERR("Unknown consumer_data type"); + abort(); + } +} + +/* + * Destroy and close a already created stream. + */ +static void destroy_close_stream(struct lttng_consumer_stream *stream) +{ + LTTNG_ASSERT(stream); + + DBG("Consumer stream destroy monitored key: %" PRIu64, stream->key); + + /* Destroy tracer buffers of the stream. */ + consumer_stream_destroy_buffers(stream); + /* Close down everything including the relayd if one. */ + consumer_stream_close(stream); +} + +/* + * Decrement the stream's channel refcount and if down to 0, return the channel + * pointer so it can be destroyed by the caller or NULL if not. + */ +static struct lttng_consumer_channel *unref_channel( + struct lttng_consumer_stream *stream) +{ + struct lttng_consumer_channel *free_chan = NULL; + + LTTNG_ASSERT(stream); + LTTNG_ASSERT(stream->chan); + + /* Update refcount of channel and see if we need to destroy it. 
*/ + if (!uatomic_sub_return(&stream->chan->refcount, 1) + && !uatomic_read(&stream->chan->nb_init_stream_left)) { + free_chan = stream->chan; + } + + return free_chan; +} + +/* + * Destroy a stream completely. This will delete, close and free the stream. + * Once return, the stream is NO longer usable. Its channel may get destroyed + * if conditions are met for a monitored stream. + * + * This MUST be called WITHOUT the consumer data and stream lock acquired if + * the stream is in _monitor_ mode else it does not matter. + */ +void consumer_stream_destroy(struct lttng_consumer_stream *stream, + struct lttng_ht *ht) +{ + LTTNG_ASSERT(stream); + + /* Stream is in monitor mode. */ + if (stream->monitor) { + struct lttng_consumer_channel *free_chan = NULL; + + /* + * This means that the stream was successfully removed from the streams + * list of the channel and sent to the right thread managing this + * stream thus being globally visible. + */ + if (stream->globally_visible) { + pthread_mutex_lock(&the_consumer_data.lock); + pthread_mutex_lock(&stream->chan->lock); + pthread_mutex_lock(&stream->lock); + /* Remove every reference of the stream in the consumer. */ + consumer_stream_delete(stream, ht); + + destroy_close_stream(stream); + + /* Update channel's refcount of the stream. */ + free_chan = unref_channel(stream); + + /* Indicates that the consumer data state MUST be updated after this. */ + the_consumer_data.need_update = 1; + + pthread_mutex_unlock(&stream->lock); + pthread_mutex_unlock(&stream->chan->lock); + pthread_mutex_unlock(&the_consumer_data.lock); + } else { + /* + * If the stream is not visible globally, this needs to be done + * outside of the consumer data lock section. + */ + free_chan = unref_channel(stream); + } + + if (free_chan) { + consumer_del_channel(free_chan); + } + } else { + destroy_close_stream(stream); + } + + /* Free stream within a RCU call. 
*/ + lttng_trace_chunk_put(stream->trace_chunk); + stream->trace_chunk = NULL; + lttng_dynamic_array_reset(&stream->read_subbuffer_ops.post_consume_cbs); + consumer_stream_free(stream); +} + +/* + * Write index of a specific stream either on the relayd or local disk. + * + * Return 0 on success or else a negative value. + */ +int consumer_stream_write_index(struct lttng_consumer_stream *stream, + struct ctf_packet_index *element) +{ + int ret; + + LTTNG_ASSERT(stream); + LTTNG_ASSERT(element); + + rcu_read_lock(); + if (stream->net_seq_idx != (uint64_t) -1ULL) { + struct consumer_relayd_sock_pair *relayd; + relayd = consumer_find_relayd(stream->net_seq_idx); + if (relayd) { + pthread_mutex_lock(&relayd->ctrl_sock_mutex); + ret = relayd_send_index(&relayd->control_sock, element, + stream->relayd_stream_id, stream->next_net_seq_num - 1); + if (ret < 0) { + /* + * Communication error with lttng-relayd, + * perform cleanup now + */ + ERR("Relayd send index failed. Cleaning up relayd %" PRIu64 ".", relayd->net_seq_idx); + lttng_consumer_cleanup_relayd(relayd); + ret = -1; + } + pthread_mutex_unlock(&relayd->ctrl_sock_mutex); + } else { + ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. 
Can't write index.", + stream->key, stream->net_seq_idx); + ret = -1; + } + } else { + if (lttng_index_file_write(stream->index_file, element)) { + ret = -1; + } else { + ret = 0; + } + } + if (ret < 0) { + goto error; + } + +error: + rcu_read_unlock(); + return ret; +} + +int consumer_stream_create_output_files(struct lttng_consumer_stream *stream, + bool create_index) +{ + int ret; + enum lttng_trace_chunk_status chunk_status; + const int flags = O_WRONLY | O_CREAT | O_TRUNC; + const mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP; + char stream_path[LTTNG_PATH_MAX]; + + ASSERT_LOCKED(stream->lock); + LTTNG_ASSERT(stream->trace_chunk); + + ret = utils_stream_file_path(stream->chan->pathname, stream->name, + stream->chan->tracefile_size, + stream->tracefile_count_current, NULL, + stream_path, sizeof(stream_path)); + if (ret < 0) { + goto end; + } + + if (stream->out_fd >= 0) { + ret = close(stream->out_fd); + if (ret < 0) { + PERROR("Failed to close stream file \"%s\"", + stream->name); + goto end; + } + stream->out_fd = -1; + } + + DBG("Opening stream output file \"%s\"", stream_path); + chunk_status = lttng_trace_chunk_open_file(stream->trace_chunk, stream_path, + flags, mode, &stream->out_fd, false); + if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { + ERR("Failed to open stream file \"%s\"", stream->name); + ret = -1; + goto end; + } + + if (!stream->metadata_flag && (create_index || stream->index_file)) { + if (stream->index_file) { + lttng_index_file_put(stream->index_file); + } + chunk_status = lttng_index_file_create_from_trace_chunk( + stream->trace_chunk, + stream->chan->pathname, + stream->name, + stream->chan->tracefile_size, + stream->tracefile_count_current, + CTF_INDEX_MAJOR, CTF_INDEX_MINOR, + false, &stream->index_file); + if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { + ret = -1; + goto end; + } + } + + /* Reset current size because we just perform a rotation. 
*/ + stream->tracefile_size_current = 0; + stream->out_fd_offset = 0; +end: + return ret; +} + +int consumer_stream_rotate_output_files(struct lttng_consumer_stream *stream) +{ + int ret; + + stream->tracefile_count_current++; + if (stream->chan->tracefile_count > 0) { + stream->tracefile_count_current %= + stream->chan->tracefile_count; + } + + DBG("Rotating output files of stream \"%s\"", stream->name); + ret = consumer_stream_create_output_files(stream, true); + if (ret) { + goto end; + } + +end: + return ret; +} + +bool consumer_stream_is_deleted(struct lttng_consumer_stream *stream) +{ + /* + * This function does not take a const stream since + * cds_lfht_is_node_deleted was not const before liburcu 0.12. + */ + LTTNG_ASSERT(stream); + return cds_lfht_is_node_deleted(&stream->node.node); +} + +static ssize_t metadata_bucket_flush( + const struct stream_subbuffer *buffer, void *data) +{ + ssize_t ret; + struct lttng_consumer_stream *stream = (lttng_consumer_stream *) data; + + ret = consumer_stream_consume_mmap(NULL, stream, buffer); + if (ret < 0) { + goto end; + } +end: + return ret; +} + +static ssize_t metadata_bucket_consume( + struct lttng_consumer_local_data *unused, + struct lttng_consumer_stream *stream, + const struct stream_subbuffer *subbuffer) +{ + ssize_t ret; + enum metadata_bucket_status status; + + status = metadata_bucket_fill(stream->metadata_bucket, subbuffer); + switch (status) { + case METADATA_BUCKET_STATUS_OK: + /* Return consumed size. 
*/ + ret = subbuffer->buffer.buffer.size; + break; + default: + ret = -1; + } + + return ret; +} + +int consumer_stream_enable_metadata_bucketization( + struct lttng_consumer_stream *stream) +{ + int ret = 0; + + LTTNG_ASSERT(stream->metadata_flag); + LTTNG_ASSERT(!stream->metadata_bucket); + LTTNG_ASSERT(stream->chan->output == CONSUMER_CHANNEL_MMAP); + + stream->metadata_bucket = metadata_bucket_create( + metadata_bucket_flush, stream); + if (!stream->metadata_bucket) { + ret = -1; + goto end; + } + + stream->read_subbuffer_ops.consume_subbuffer = metadata_bucket_consume; +end: + return ret; +} + +void consumer_stream_metadata_set_version( + struct lttng_consumer_stream *stream, uint64_t new_version) +{ + LTTNG_ASSERT(new_version > stream->metadata_version); + stream->metadata_version = new_version; + stream->reset_metadata_flag = 1; + + if (stream->metadata_bucket) { + metadata_bucket_reset(stream->metadata_bucket); + } +} + +int consumer_stream_flush_buffer(struct lttng_consumer_stream *stream, + bool producer_active) +{ + int ret = 0; + + switch (the_consumer_data.type) { + case LTTNG_CONSUMER_KERNEL: + if (producer_active) { + ret = kernctl_buffer_flush(stream->wait_fd); + if (ret < 0) { + ERR("Failed to flush kernel stream"); + goto end; + } + } else { + ret = kernctl_buffer_flush_empty(stream->wait_fd); + if (ret < 0) { + /* + * Doing a buffer flush which does not take into + * account empty packets. This is not perfect, + * but required as a fall-back when + * "flush_empty" is not implemented by + * lttng-modules. 
+ */ + ret = kernctl_buffer_flush(stream->wait_fd); + if (ret < 0) { + ERR("Failed to flush kernel stream"); + goto end; + } + } + } + break; + case LTTNG_CONSUMER32_UST: + case LTTNG_CONSUMER64_UST: + ret = lttng_ustconsumer_flush_buffer(stream, (int) producer_active); + break; + default: + ERR("Unknown consumer_data type"); + abort(); + } + +end: + return ret; +} diff --git a/src/common/consumer/consumer-stream.h b/src/common/consumer/consumer-stream.h index c9af63cd8..af0690a25 100644 --- a/src/common/consumer/consumer-stream.h +++ b/src/common/consumer/consumer-stream.h @@ -10,6 +10,10 @@ #include "consumer.h" +#ifdef __cplusplus +extern "C" { +#endif + enum consumer_stream_open_packet_status { CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED, CONSUMER_STREAM_OPEN_PACKET_STATUS_NO_SPACE, @@ -166,4 +170,8 @@ enum consumer_stream_open_packet_status consumer_stream_open_packet( int consumer_stream_flush_buffer(struct lttng_consumer_stream *stream, bool producer_active); +#ifdef __cplusplus +} +#endif + #endif /* LTTNG_CONSUMER_STREAM_H */ diff --git a/src/common/consumer/consumer-timer.c b/src/common/consumer/consumer-timer.c deleted file mode 100644 index 6d2e6b2b2..000000000 --- a/src/common/consumer/consumer-timer.c +++ /dev/null @@ -1,803 +0,0 @@ -/* - * Copyright (C) 2012 Julien Desfossez - * Copyright (C) 2012 David Goulet - * - * SPDX-License-Identifier: GPL-2.0-only - * - */ - -#define _LGPL_SOURCE -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -typedef int (*sample_positions_cb)(struct lttng_consumer_stream *stream); -typedef int (*get_consumed_cb)(struct lttng_consumer_stream *stream, - unsigned long *consumed); -typedef int (*get_produced_cb)(struct lttng_consumer_stream *stream, - unsigned long *produced); -typedef int (*flush_index_cb)(struct lttng_consumer_stream *stream); - -static struct timer_signal_data timer_signal = { - .tid = 0, - .setup_done = 0, - .qs_done = 0, - .lock = 
PTHREAD_MUTEX_INITIALIZER, -}; - -/* - * Set custom signal mask to current thread. - */ -static void setmask(sigset_t *mask) -{ - int ret; - - ret = sigemptyset(mask); - if (ret) { - PERROR("sigemptyset"); - } - ret = sigaddset(mask, LTTNG_CONSUMER_SIG_SWITCH); - if (ret) { - PERROR("sigaddset switch"); - } - ret = sigaddset(mask, LTTNG_CONSUMER_SIG_TEARDOWN); - if (ret) { - PERROR("sigaddset teardown"); - } - ret = sigaddset(mask, LTTNG_CONSUMER_SIG_LIVE); - if (ret) { - PERROR("sigaddset live"); - } - ret = sigaddset(mask, LTTNG_CONSUMER_SIG_MONITOR); - if (ret) { - PERROR("sigaddset monitor"); - } - ret = sigaddset(mask, LTTNG_CONSUMER_SIG_EXIT); - if (ret) { - PERROR("sigaddset exit"); - } -} - -static int the_channel_monitor_pipe = -1; - -/* - * Execute action on a timer switch. - * - * Beware: metadata_switch_timer() should *never* take a mutex also held - * while consumer_timer_switch_stop() is called. It would result in - * deadlocks. - */ -static void metadata_switch_timer(struct lttng_consumer_local_data *ctx, - siginfo_t *si) -{ - int ret; - struct lttng_consumer_channel *channel; - - channel = si->si_value.sival_ptr; - LTTNG_ASSERT(channel); - - if (channel->switch_timer_error) { - return; - } - - DBG("Switch timer for channel %" PRIu64, channel->key); - switch (ctx->type) { - case LTTNG_CONSUMER32_UST: - case LTTNG_CONSUMER64_UST: - /* - * Locks taken by lttng_ustconsumer_request_metadata(): - * - metadata_socket_lock - * - Calling lttng_ustconsumer_recv_metadata(): - * - channel->metadata_cache->lock - * - Calling consumer_metadata_cache_flushed(): - * - channel->timer_lock - * - channel->metadata_cache->lock - * - * Ensure that neither consumer_data.lock nor - * channel->lock are taken within this function, since - * they are held while consumer_timer_switch_stop() is - * called. 
- */ - ret = lttng_ustconsumer_request_metadata(ctx, channel, 1, 1); - if (ret < 0) { - channel->switch_timer_error = 1; - } - break; - case LTTNG_CONSUMER_KERNEL: - case LTTNG_CONSUMER_UNKNOWN: - abort(); - break; - } -} - -static int send_empty_index(struct lttng_consumer_stream *stream, uint64_t ts, - uint64_t stream_id) -{ - int ret; - struct ctf_packet_index index; - - memset(&index, 0, sizeof(index)); - index.stream_id = htobe64(stream_id); - index.timestamp_end = htobe64(ts); - ret = consumer_stream_write_index(stream, &index); - if (ret < 0) { - goto error; - } - -error: - return ret; -} - -int consumer_flush_kernel_index(struct lttng_consumer_stream *stream) -{ - uint64_t ts, stream_id; - int ret; - - ret = kernctl_get_current_timestamp(stream->wait_fd, &ts); - if (ret < 0) { - ERR("Failed to get the current timestamp"); - goto end; - } - ret = kernctl_buffer_flush(stream->wait_fd); - if (ret < 0) { - ERR("Failed to flush kernel stream"); - goto end; - } - ret = kernctl_snapshot(stream->wait_fd); - if (ret < 0) { - if (ret != -EAGAIN && ret != -ENODATA) { - PERROR("live timer kernel snapshot"); - ret = -1; - goto end; - } - ret = kernctl_get_stream_id(stream->wait_fd, &stream_id); - if (ret < 0) { - PERROR("kernctl_get_stream_id"); - goto end; - } - DBG("Stream %" PRIu64 " empty, sending beacon", stream->key); - ret = send_empty_index(stream, ts, stream_id); - if (ret < 0) { - goto end; - } - } - ret = 0; -end: - return ret; -} - -static int check_stream(struct lttng_consumer_stream *stream, - flush_index_cb flush_index) -{ - int ret; - - /* - * While holding the stream mutex, try to take a snapshot, if it - * succeeds, it means that data is ready to be sent, just let the data - * thread handle that. Otherwise, if the snapshot returns EAGAIN, it - * means that there is no data to read after the flush, so we can - * safely send the empty index. - * - * Doing a trylock and checking if waiting on metadata if - * trylock fails. 
Bail out of the stream is indeed waiting for - * metadata to be pushed. Busy wait on trylock otherwise. - */ - for (;;) { - ret = pthread_mutex_trylock(&stream->lock); - switch (ret) { - case 0: - break; /* We have the lock. */ - case EBUSY: - pthread_mutex_lock(&stream->metadata_timer_lock); - if (stream->waiting_on_metadata) { - ret = 0; - stream->missed_metadata_flush = true; - pthread_mutex_unlock(&stream->metadata_timer_lock); - goto end; /* Bail out. */ - } - pthread_mutex_unlock(&stream->metadata_timer_lock); - /* Try again. */ - caa_cpu_relax(); - continue; - default: - ERR("Unexpected pthread_mutex_trylock error %d", ret); - ret = -1; - goto end; - } - break; - } - ret = flush_index(stream); - pthread_mutex_unlock(&stream->lock); -end: - return ret; -} - -int consumer_flush_ust_index(struct lttng_consumer_stream *stream) -{ - uint64_t ts, stream_id; - int ret; - - ret = cds_lfht_is_node_deleted(&stream->node.node); - if (ret) { - goto end; - } - - ret = lttng_ustconsumer_get_current_timestamp(stream, &ts); - if (ret < 0) { - ERR("Failed to get the current timestamp"); - goto end; - } - ret = lttng_ustconsumer_flush_buffer(stream, 1); - if (ret < 0) { - ERR("Failed to flush buffer while flushing index"); - goto end; - } - ret = lttng_ustconsumer_take_snapshot(stream); - if (ret < 0) { - if (ret != -EAGAIN) { - ERR("Taking UST snapshot"); - ret = -1; - goto end; - } - ret = lttng_ustconsumer_get_stream_id(stream, &stream_id); - if (ret < 0) { - PERROR("lttng_ust_ctl_get_stream_id"); - goto end; - } - DBG("Stream %" PRIu64 " empty, sending beacon", stream->key); - ret = send_empty_index(stream, ts, stream_id); - if (ret < 0) { - goto end; - } - } - ret = 0; -end: - return ret; -} - -/* - * Execute action on a live timer - */ -static void live_timer(struct lttng_consumer_local_data *ctx, - siginfo_t *si) -{ - int ret; - struct lttng_consumer_channel *channel; - struct lttng_consumer_stream *stream; - struct lttng_ht_iter iter; - const struct lttng_ht *ht = 
the_consumer_data.stream_per_chan_id_ht; - const flush_index_cb flush_index = - ctx->type == LTTNG_CONSUMER_KERNEL ? - consumer_flush_kernel_index : - consumer_flush_ust_index; - - channel = si->si_value.sival_ptr; - LTTNG_ASSERT(channel); - - if (channel->switch_timer_error) { - goto error; - } - - DBG("Live timer for channel %" PRIu64, channel->key); - - rcu_read_lock(); - cds_lfht_for_each_entry_duplicate(ht->ht, - ht->hash_fct(&channel->key, lttng_ht_seed), - ht->match_fct, &channel->key, &iter.iter, - stream, node_channel_id.node) { - ret = check_stream(stream, flush_index); - if (ret < 0) { - goto error_unlock; - } - } - -error_unlock: - rcu_read_unlock(); - -error: - return; -} - -static -void consumer_timer_signal_thread_qs(unsigned int signr) -{ - sigset_t pending_set; - int ret; - - /* - * We need to be the only thread interacting with the thread - * that manages signals for teardown synchronization. - */ - pthread_mutex_lock(&timer_signal.lock); - - /* Ensure we don't have any signal queued for this channel. */ - for (;;) { - ret = sigemptyset(&pending_set); - if (ret == -1) { - PERROR("sigemptyset"); - } - ret = sigpending(&pending_set); - if (ret == -1) { - PERROR("sigpending"); - } - if (!sigismember(&pending_set, signr)) { - break; - } - caa_cpu_relax(); - } - - /* - * From this point, no new signal handler will be fired that would try to - * access "chan". However, we still need to wait for any currently - * executing handler to complete. - */ - cmm_smp_mb(); - CMM_STORE_SHARED(timer_signal.qs_done, 0); - cmm_smp_mb(); - - /* - * Kill with LTTNG_CONSUMER_SIG_TEARDOWN, so signal management thread wakes - * up. - */ - kill(getpid(), LTTNG_CONSUMER_SIG_TEARDOWN); - - while (!CMM_LOAD_SHARED(timer_signal.qs_done)) { - caa_cpu_relax(); - } - cmm_smp_mb(); - - pthread_mutex_unlock(&timer_signal.lock); -} - -/* - * Start a timer channel timer which will fire at a given interval - * (timer_interval_us)and fire a given signal (signal). 
- * - * Returns a negative value on error, 0 if a timer was created, and - * a positive value if no timer was created (not an error). - */ -static -int consumer_channel_timer_start(timer_t *timer_id, - struct lttng_consumer_channel *channel, - unsigned int timer_interval_us, int signal) -{ - int ret = 0, delete_ret; - struct sigevent sev = {}; - struct itimerspec its; - - LTTNG_ASSERT(channel); - LTTNG_ASSERT(channel->key); - - if (timer_interval_us == 0) { - /* No creation needed; not an error. */ - ret = 1; - goto end; - } - - sev.sigev_notify = SIGEV_SIGNAL; - sev.sigev_signo = signal; - sev.sigev_value.sival_ptr = channel; - ret = timer_create(CLOCKID, &sev, timer_id); - if (ret == -1) { - PERROR("timer_create"); - goto end; - } - - its.it_value.tv_sec = timer_interval_us / 1000000; - its.it_value.tv_nsec = (timer_interval_us % 1000000) * 1000; - its.it_interval.tv_sec = its.it_value.tv_sec; - its.it_interval.tv_nsec = its.it_value.tv_nsec; - - ret = timer_settime(*timer_id, 0, &its, NULL); - if (ret == -1) { - PERROR("timer_settime"); - goto error_destroy_timer; - } -end: - return ret; -error_destroy_timer: - delete_ret = timer_delete(*timer_id); - if (delete_ret == -1) { - PERROR("timer_delete"); - } - goto end; -} - -static -int consumer_channel_timer_stop(timer_t *timer_id, int signal) -{ - int ret = 0; - - ret = timer_delete(*timer_id); - if (ret == -1) { - PERROR("timer_delete"); - goto end; - } - - consumer_timer_signal_thread_qs(signal); - *timer_id = 0; -end: - return ret; -} - -/* - * Set the channel's switch timer. - */ -void consumer_timer_switch_start(struct lttng_consumer_channel *channel, - unsigned int switch_timer_interval_us) -{ - int ret; - - LTTNG_ASSERT(channel); - LTTNG_ASSERT(channel->key); - - ret = consumer_channel_timer_start(&channel->switch_timer, channel, - switch_timer_interval_us, LTTNG_CONSUMER_SIG_SWITCH); - - channel->switch_timer_enabled = !!(ret == 0); -} - -/* - * Stop and delete the channel's switch timer. 
- */ -void consumer_timer_switch_stop(struct lttng_consumer_channel *channel) -{ - int ret; - - LTTNG_ASSERT(channel); - - ret = consumer_channel_timer_stop(&channel->switch_timer, - LTTNG_CONSUMER_SIG_SWITCH); - if (ret == -1) { - ERR("Failed to stop switch timer"); - } - - channel->switch_timer_enabled = 0; -} - -/* - * Set the channel's live timer. - */ -void consumer_timer_live_start(struct lttng_consumer_channel *channel, - unsigned int live_timer_interval_us) -{ - int ret; - - LTTNG_ASSERT(channel); - LTTNG_ASSERT(channel->key); - - ret = consumer_channel_timer_start(&channel->live_timer, channel, - live_timer_interval_us, LTTNG_CONSUMER_SIG_LIVE); - - channel->live_timer_enabled = !!(ret == 0); -} - -/* - * Stop and delete the channel's live timer. - */ -void consumer_timer_live_stop(struct lttng_consumer_channel *channel) -{ - int ret; - - LTTNG_ASSERT(channel); - - ret = consumer_channel_timer_stop(&channel->live_timer, - LTTNG_CONSUMER_SIG_LIVE); - if (ret == -1) { - ERR("Failed to stop live timer"); - } - - channel->live_timer_enabled = 0; -} - -/* - * Set the channel's monitoring timer. - * - * Returns a negative value on error, 0 if a timer was created, and - * a positive value if no timer was created (not an error). - */ -int consumer_timer_monitor_start(struct lttng_consumer_channel *channel, - unsigned int monitor_timer_interval_us) -{ - int ret; - - LTTNG_ASSERT(channel); - LTTNG_ASSERT(channel->key); - LTTNG_ASSERT(!channel->monitor_timer_enabled); - - ret = consumer_channel_timer_start(&channel->monitor_timer, channel, - monitor_timer_interval_us, LTTNG_CONSUMER_SIG_MONITOR); - channel->monitor_timer_enabled = !!(ret == 0); - return ret; -} - -/* - * Stop and delete the channel's monitoring timer. 
- */ -int consumer_timer_monitor_stop(struct lttng_consumer_channel *channel) -{ - int ret; - - LTTNG_ASSERT(channel); - LTTNG_ASSERT(channel->monitor_timer_enabled); - - ret = consumer_channel_timer_stop(&channel->monitor_timer, - LTTNG_CONSUMER_SIG_MONITOR); - if (ret == -1) { - ERR("Failed to stop live timer"); - goto end; - } - - channel->monitor_timer_enabled = 0; -end: - return ret; -} - -/* - * Block the RT signals for the entire process. It must be called from the - * consumer main before creating the threads - */ -int consumer_signal_init(void) -{ - int ret; - sigset_t mask; - - /* Block signal for entire process, so only our thread processes it. */ - setmask(&mask); - ret = pthread_sigmask(SIG_BLOCK, &mask, NULL); - if (ret) { - errno = ret; - PERROR("pthread_sigmask"); - return -1; - } - return 0; -} - -static -int sample_channel_positions(struct lttng_consumer_channel *channel, - uint64_t *_highest_use, uint64_t *_lowest_use, uint64_t *_total_consumed, - sample_positions_cb sample, get_consumed_cb get_consumed, - get_produced_cb get_produced) -{ - int ret = 0; - struct lttng_ht_iter iter; - struct lttng_consumer_stream *stream; - bool empty_channel = true; - uint64_t high = 0, low = UINT64_MAX; - struct lttng_ht *ht = the_consumer_data.stream_per_chan_id_ht; - - *_total_consumed = 0; - - rcu_read_lock(); - - cds_lfht_for_each_entry_duplicate(ht->ht, - ht->hash_fct(&channel->key, lttng_ht_seed), - ht->match_fct, &channel->key, - &iter.iter, stream, node_channel_id.node) { - unsigned long produced, consumed, usage; - - empty_channel = false; - - pthread_mutex_lock(&stream->lock); - if (cds_lfht_is_node_deleted(&stream->node.node)) { - goto next; - } - - ret = sample(stream); - if (ret) { - ERR("Failed to take buffer position snapshot in monitor timer (ret = %d)", ret); - pthread_mutex_unlock(&stream->lock); - goto end; - } - ret = get_consumed(stream, &consumed); - if (ret) { - ERR("Failed to get buffer consumed position in monitor timer"); - 
pthread_mutex_unlock(&stream->lock); - goto end; - } - ret = get_produced(stream, &produced); - if (ret) { - ERR("Failed to get buffer produced position in monitor timer"); - pthread_mutex_unlock(&stream->lock); - goto end; - } - - usage = produced - consumed; - high = (usage > high) ? usage : high; - low = (usage < low) ? usage : low; - - /* - * We don't use consumed here for 2 reasons: - * - output_written takes into account the padding written in the - * tracefiles when we stop the session; - * - the consumed position is not the accurate representation of what - * was extracted from a buffer in overwrite mode. - */ - *_total_consumed += stream->output_written; - next: - pthread_mutex_unlock(&stream->lock); - } - - *_highest_use = high; - *_lowest_use = low; -end: - rcu_read_unlock(); - if (empty_channel) { - ret = -1; - } - return ret; -} - -/* - * Execute action on a monitor timer. - */ -static -void monitor_timer(struct lttng_consumer_channel *channel) -{ - int ret; - int channel_monitor_pipe = - consumer_timer_thread_get_channel_monitor_pipe(); - struct lttcomm_consumer_channel_monitor_msg msg = { - .key = channel->key, - }; - sample_positions_cb sample; - get_consumed_cb get_consumed; - get_produced_cb get_produced; - uint64_t lowest = 0, highest = 0, total_consumed = 0; - - LTTNG_ASSERT(channel); - - if (channel_monitor_pipe < 0) { - return; - } - - switch (the_consumer_data.type) { - case LTTNG_CONSUMER_KERNEL: - sample = lttng_kconsumer_sample_snapshot_positions; - get_consumed = lttng_kconsumer_get_consumed_snapshot; - get_produced = lttng_kconsumer_get_produced_snapshot; - break; - case LTTNG_CONSUMER32_UST: - case LTTNG_CONSUMER64_UST: - sample = lttng_ustconsumer_sample_snapshot_positions; - get_consumed = lttng_ustconsumer_get_consumed_snapshot; - get_produced = lttng_ustconsumer_get_produced_snapshot; - break; - default: - abort(); - } - - ret = sample_channel_positions(channel, &highest, &lowest, - &total_consumed, sample, get_consumed, 
get_produced); - if (ret) { - return; - } - msg.highest = highest; - msg.lowest = lowest; - msg.total_consumed = total_consumed; - - /* - * Writes performed here are assumed to be atomic which is only - * guaranteed for sizes < than PIPE_BUF. - */ - LTTNG_ASSERT(sizeof(msg) <= PIPE_BUF); - - do { - ret = write(channel_monitor_pipe, &msg, sizeof(msg)); - } while (ret == -1 && errno == EINTR); - if (ret == -1) { - if (errno == EAGAIN) { - /* Not an error, the sample is merely dropped. */ - DBG("Channel monitor pipe is full; dropping sample for channel key = %"PRIu64, - channel->key); - } else { - PERROR("write to the channel monitor pipe"); - } - } else { - DBG("Sent channel monitoring sample for channel key %" PRIu64 - ", (highest = %" PRIu64 ", lowest = %"PRIu64")", - channel->key, msg.highest, msg.lowest); - } -} - -int consumer_timer_thread_get_channel_monitor_pipe(void) -{ - return uatomic_read(&the_channel_monitor_pipe); -} - -int consumer_timer_thread_set_channel_monitor_pipe(int fd) -{ - int ret; - - ret = uatomic_cmpxchg(&the_channel_monitor_pipe, -1, fd); - if (ret != -1) { - ret = -1; - goto end; - } - ret = 0; -end: - return ret; -} - -/* - * This thread is the sighandler for signals LTTNG_CONSUMER_SIG_SWITCH, - * LTTNG_CONSUMER_SIG_TEARDOWN, LTTNG_CONSUMER_SIG_LIVE, and - * LTTNG_CONSUMER_SIG_MONITOR, LTTNG_CONSUMER_SIG_EXIT. - */ -void *consumer_timer_thread(void *data) -{ - int signr; - sigset_t mask; - siginfo_t info; - struct lttng_consumer_local_data *ctx = data; - - rcu_register_thread(); - - health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_METADATA_TIMER); - - if (testpoint(consumerd_thread_metadata_timer)) { - goto error_testpoint; - } - - health_code_update(); - - /* Only self thread will receive signal mask. 
*/ - setmask(&mask); - CMM_STORE_SHARED(timer_signal.tid, pthread_self()); - - while (1) { - health_code_update(); - - health_poll_entry(); - signr = sigwaitinfo(&mask, &info); - health_poll_exit(); - - /* - * NOTE: cascading conditions are used instead of a switch case - * since the use of SIGRTMIN in the definition of the signals' - * values prevents the reduction to an integer constant. - */ - if (signr == -1) { - if (errno != EINTR) { - PERROR("sigwaitinfo"); - } - continue; - } else if (signr == LTTNG_CONSUMER_SIG_SWITCH) { - metadata_switch_timer(ctx, &info); - } else if (signr == LTTNG_CONSUMER_SIG_TEARDOWN) { - cmm_smp_mb(); - CMM_STORE_SHARED(timer_signal.qs_done, 1); - cmm_smp_mb(); - DBG("Signal timer metadata thread teardown"); - } else if (signr == LTTNG_CONSUMER_SIG_LIVE) { - live_timer(ctx, &info); - } else if (signr == LTTNG_CONSUMER_SIG_MONITOR) { - struct lttng_consumer_channel *channel; - - channel = info.si_value.sival_ptr; - monitor_timer(channel); - } else if (signr == LTTNG_CONSUMER_SIG_EXIT) { - LTTNG_ASSERT(CMM_LOAD_SHARED(consumer_quit)); - goto end; - } else { - ERR("Unexpected signal %d\n", info.si_signo); - } - } - -error_testpoint: - /* Only reached in testpoint error */ - health_error(); -end: - health_unregister(health_consumerd); - rcu_unregister_thread(); - return NULL; -} diff --git a/src/common/consumer/consumer-timer.cpp b/src/common/consumer/consumer-timer.cpp new file mode 100644 index 000000000..f6cb60c3b --- /dev/null +++ b/src/common/consumer/consumer-timer.cpp @@ -0,0 +1,803 @@ +/* + * Copyright (C) 2012 Julien Desfossez + * Copyright (C) 2012 David Goulet + * + * SPDX-License-Identifier: GPL-2.0-only + * + */ + +#define _LGPL_SOURCE +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +typedef int (*sample_positions_cb)(struct lttng_consumer_stream *stream); +typedef int (*get_consumed_cb)(struct lttng_consumer_stream *stream, + unsigned long *consumed); 
+typedef int (*get_produced_cb)(struct lttng_consumer_stream *stream, + unsigned long *produced); +typedef int (*flush_index_cb)(struct lttng_consumer_stream *stream); + +static struct timer_signal_data timer_signal = { + .tid = 0, + .setup_done = 0, + .qs_done = 0, + .lock = PTHREAD_MUTEX_INITIALIZER, +}; + +/* + * Set custom signal mask to current thread. + */ +static void setmask(sigset_t *mask) +{ + int ret; + + ret = sigemptyset(mask); + if (ret) { + PERROR("sigemptyset"); + } + ret = sigaddset(mask, LTTNG_CONSUMER_SIG_SWITCH); + if (ret) { + PERROR("sigaddset switch"); + } + ret = sigaddset(mask, LTTNG_CONSUMER_SIG_TEARDOWN); + if (ret) { + PERROR("sigaddset teardown"); + } + ret = sigaddset(mask, LTTNG_CONSUMER_SIG_LIVE); + if (ret) { + PERROR("sigaddset live"); + } + ret = sigaddset(mask, LTTNG_CONSUMER_SIG_MONITOR); + if (ret) { + PERROR("sigaddset monitor"); + } + ret = sigaddset(mask, LTTNG_CONSUMER_SIG_EXIT); + if (ret) { + PERROR("sigaddset exit"); + } +} + +static int the_channel_monitor_pipe = -1; + +/* + * Execute action on a timer switch. + * + * Beware: metadata_switch_timer() should *never* take a mutex also held + * while consumer_timer_switch_stop() is called. It would result in + * deadlocks. 
+ */ +static void metadata_switch_timer(struct lttng_consumer_local_data *ctx, + siginfo_t *si) +{ + int ret; + struct lttng_consumer_channel *channel; + + channel = (lttng_consumer_channel *) si->si_value.sival_ptr; + LTTNG_ASSERT(channel); + + if (channel->switch_timer_error) { + return; + } + + DBG("Switch timer for channel %" PRIu64, channel->key); + switch (ctx->type) { + case LTTNG_CONSUMER32_UST: + case LTTNG_CONSUMER64_UST: + /* + * Locks taken by lttng_ustconsumer_request_metadata(): + * - metadata_socket_lock + * - Calling lttng_ustconsumer_recv_metadata(): + * - channel->metadata_cache->lock + * - Calling consumer_metadata_cache_flushed(): + * - channel->timer_lock + * - channel->metadata_cache->lock + * + * Ensure that neither consumer_data.lock nor + * channel->lock are taken within this function, since + * they are held while consumer_timer_switch_stop() is + * called. + */ + ret = lttng_ustconsumer_request_metadata(ctx, channel, 1, 1); + if (ret < 0) { + channel->switch_timer_error = 1; + } + break; + case LTTNG_CONSUMER_KERNEL: + case LTTNG_CONSUMER_UNKNOWN: + abort(); + break; + } +} + +static int send_empty_index(struct lttng_consumer_stream *stream, uint64_t ts, + uint64_t stream_id) +{ + int ret; + struct ctf_packet_index index; + + memset(&index, 0, sizeof(index)); + index.stream_id = htobe64(stream_id); + index.timestamp_end = htobe64(ts); + ret = consumer_stream_write_index(stream, &index); + if (ret < 0) { + goto error; + } + +error: + return ret; +} + +int consumer_flush_kernel_index(struct lttng_consumer_stream *stream) +{ + uint64_t ts, stream_id; + int ret; + + ret = kernctl_get_current_timestamp(stream->wait_fd, &ts); + if (ret < 0) { + ERR("Failed to get the current timestamp"); + goto end; + } + ret = kernctl_buffer_flush(stream->wait_fd); + if (ret < 0) { + ERR("Failed to flush kernel stream"); + goto end; + } + ret = kernctl_snapshot(stream->wait_fd); + if (ret < 0) { + if (ret != -EAGAIN && ret != -ENODATA) { + PERROR("live timer 
kernel snapshot"); + ret = -1; + goto end; + } + ret = kernctl_get_stream_id(stream->wait_fd, &stream_id); + if (ret < 0) { + PERROR("kernctl_get_stream_id"); + goto end; + } + DBG("Stream %" PRIu64 " empty, sending beacon", stream->key); + ret = send_empty_index(stream, ts, stream_id); + if (ret < 0) { + goto end; + } + } + ret = 0; +end: + return ret; +} + +static int check_stream(struct lttng_consumer_stream *stream, + flush_index_cb flush_index) +{ + int ret; + + /* + * While holding the stream mutex, try to take a snapshot, if it + * succeeds, it means that data is ready to be sent, just let the data + * thread handle that. Otherwise, if the snapshot returns EAGAIN, it + * means that there is no data to read after the flush, so we can + * safely send the empty index. + * + * Doing a trylock and checking if waiting on metadata if + * trylock fails. Bail out of the stream is indeed waiting for + * metadata to be pushed. Busy wait on trylock otherwise. + */ + for (;;) { + ret = pthread_mutex_trylock(&stream->lock); + switch (ret) { + case 0: + break; /* We have the lock. */ + case EBUSY: + pthread_mutex_lock(&stream->metadata_timer_lock); + if (stream->waiting_on_metadata) { + ret = 0; + stream->missed_metadata_flush = true; + pthread_mutex_unlock(&stream->metadata_timer_lock); + goto end; /* Bail out. */ + } + pthread_mutex_unlock(&stream->metadata_timer_lock); + /* Try again. 
*/ + caa_cpu_relax(); + continue; + default: + ERR("Unexpected pthread_mutex_trylock error %d", ret); + ret = -1; + goto end; + } + break; + } + ret = flush_index(stream); + pthread_mutex_unlock(&stream->lock); +end: + return ret; +} + +int consumer_flush_ust_index(struct lttng_consumer_stream *stream) +{ + uint64_t ts, stream_id; + int ret; + + ret = cds_lfht_is_node_deleted(&stream->node.node); + if (ret) { + goto end; + } + + ret = lttng_ustconsumer_get_current_timestamp(stream, &ts); + if (ret < 0) { + ERR("Failed to get the current timestamp"); + goto end; + } + ret = lttng_ustconsumer_flush_buffer(stream, 1); + if (ret < 0) { + ERR("Failed to flush buffer while flushing index"); + goto end; + } + ret = lttng_ustconsumer_take_snapshot(stream); + if (ret < 0) { + if (ret != -EAGAIN) { + ERR("Taking UST snapshot"); + ret = -1; + goto end; + } + ret = lttng_ustconsumer_get_stream_id(stream, &stream_id); + if (ret < 0) { + PERROR("lttng_ust_ctl_get_stream_id"); + goto end; + } + DBG("Stream %" PRIu64 " empty, sending beacon", stream->key); + ret = send_empty_index(stream, ts, stream_id); + if (ret < 0) { + goto end; + } + } + ret = 0; +end: + return ret; +} + +/* + * Execute action on a live timer + */ +static void live_timer(struct lttng_consumer_local_data *ctx, + siginfo_t *si) +{ + int ret; + struct lttng_consumer_channel *channel; + struct lttng_consumer_stream *stream; + struct lttng_ht_iter iter; + const struct lttng_ht *ht = the_consumer_data.stream_per_chan_id_ht; + const flush_index_cb flush_index = + ctx->type == LTTNG_CONSUMER_KERNEL ? 
+ consumer_flush_kernel_index : + consumer_flush_ust_index; + + channel = (lttng_consumer_channel *) si->si_value.sival_ptr; + LTTNG_ASSERT(channel); + + if (channel->switch_timer_error) { + goto error; + } + + DBG("Live timer for channel %" PRIu64, channel->key); + + rcu_read_lock(); + cds_lfht_for_each_entry_duplicate(ht->ht, + ht->hash_fct(&channel->key, lttng_ht_seed), + ht->match_fct, &channel->key, &iter.iter, + stream, node_channel_id.node) { + ret = check_stream(stream, flush_index); + if (ret < 0) { + goto error_unlock; + } + } + +error_unlock: + rcu_read_unlock(); + +error: + return; +} + +static +void consumer_timer_signal_thread_qs(unsigned int signr) +{ + sigset_t pending_set; + int ret; + + /* + * We need to be the only thread interacting with the thread + * that manages signals for teardown synchronization. + */ + pthread_mutex_lock(&timer_signal.lock); + + /* Ensure we don't have any signal queued for this channel. */ + for (;;) { + ret = sigemptyset(&pending_set); + if (ret == -1) { + PERROR("sigemptyset"); + } + ret = sigpending(&pending_set); + if (ret == -1) { + PERROR("sigpending"); + } + if (!sigismember(&pending_set, signr)) { + break; + } + caa_cpu_relax(); + } + + /* + * From this point, no new signal handler will be fired that would try to + * access "chan". However, we still need to wait for any currently + * executing handler to complete. + */ + cmm_smp_mb(); + CMM_STORE_SHARED(timer_signal.qs_done, 0); + cmm_smp_mb(); + + /* + * Kill with LTTNG_CONSUMER_SIG_TEARDOWN, so signal management thread wakes + * up. + */ + kill(getpid(), LTTNG_CONSUMER_SIG_TEARDOWN); + + while (!CMM_LOAD_SHARED(timer_signal.qs_done)) { + caa_cpu_relax(); + } + cmm_smp_mb(); + + pthread_mutex_unlock(&timer_signal.lock); +} + +/* + * Start a timer channel timer which will fire at a given interval + * (timer_interval_us)and fire a given signal (signal). 
+ * + * Returns a negative value on error, 0 if a timer was created, and + * a positive value if no timer was created (not an error). + */ +static +int consumer_channel_timer_start(timer_t *timer_id, + struct lttng_consumer_channel *channel, + unsigned int timer_interval_us, int signal) +{ + int ret = 0, delete_ret; + struct sigevent sev = {}; + struct itimerspec its; + + LTTNG_ASSERT(channel); + LTTNG_ASSERT(channel->key); + + if (timer_interval_us == 0) { + /* No creation needed; not an error. */ + ret = 1; + goto end; + } + + sev.sigev_notify = SIGEV_SIGNAL; + sev.sigev_signo = signal; + sev.sigev_value.sival_ptr = channel; + ret = timer_create(CLOCKID, &sev, timer_id); + if (ret == -1) { + PERROR("timer_create"); + goto end; + } + + its.it_value.tv_sec = timer_interval_us / 1000000; + its.it_value.tv_nsec = (timer_interval_us % 1000000) * 1000; + its.it_interval.tv_sec = its.it_value.tv_sec; + its.it_interval.tv_nsec = its.it_value.tv_nsec; + + ret = timer_settime(*timer_id, 0, &its, NULL); + if (ret == -1) { + PERROR("timer_settime"); + goto error_destroy_timer; + } +end: + return ret; +error_destroy_timer: + delete_ret = timer_delete(*timer_id); + if (delete_ret == -1) { + PERROR("timer_delete"); + } + goto end; +} + +static +int consumer_channel_timer_stop(timer_t *timer_id, int signal) +{ + int ret = 0; + + ret = timer_delete(*timer_id); + if (ret == -1) { + PERROR("timer_delete"); + goto end; + } + + consumer_timer_signal_thread_qs(signal); + *timer_id = 0; +end: + return ret; +} + +/* + * Set the channel's switch timer. + */ +void consumer_timer_switch_start(struct lttng_consumer_channel *channel, + unsigned int switch_timer_interval_us) +{ + int ret; + + LTTNG_ASSERT(channel); + LTTNG_ASSERT(channel->key); + + ret = consumer_channel_timer_start(&channel->switch_timer, channel, + switch_timer_interval_us, LTTNG_CONSUMER_SIG_SWITCH); + + channel->switch_timer_enabled = !!(ret == 0); +} + +/* + * Stop and delete the channel's switch timer. 
+ */ +void consumer_timer_switch_stop(struct lttng_consumer_channel *channel) +{ + int ret; + + LTTNG_ASSERT(channel); + + ret = consumer_channel_timer_stop(&channel->switch_timer, + LTTNG_CONSUMER_SIG_SWITCH); + if (ret == -1) { + ERR("Failed to stop switch timer"); + } + + channel->switch_timer_enabled = 0; +} + +/* + * Set the channel's live timer. + */ +void consumer_timer_live_start(struct lttng_consumer_channel *channel, + unsigned int live_timer_interval_us) +{ + int ret; + + LTTNG_ASSERT(channel); + LTTNG_ASSERT(channel->key); + + ret = consumer_channel_timer_start(&channel->live_timer, channel, + live_timer_interval_us, LTTNG_CONSUMER_SIG_LIVE); + + channel->live_timer_enabled = !!(ret == 0); +} + +/* + * Stop and delete the channel's live timer. + */ +void consumer_timer_live_stop(struct lttng_consumer_channel *channel) +{ + int ret; + + LTTNG_ASSERT(channel); + + ret = consumer_channel_timer_stop(&channel->live_timer, + LTTNG_CONSUMER_SIG_LIVE); + if (ret == -1) { + ERR("Failed to stop live timer"); + } + + channel->live_timer_enabled = 0; +} + +/* + * Set the channel's monitoring timer. + * + * Returns a negative value on error, 0 if a timer was created, and + * a positive value if no timer was created (not an error). + */ +int consumer_timer_monitor_start(struct lttng_consumer_channel *channel, + unsigned int monitor_timer_interval_us) +{ + int ret; + + LTTNG_ASSERT(channel); + LTTNG_ASSERT(channel->key); + LTTNG_ASSERT(!channel->monitor_timer_enabled); + + ret = consumer_channel_timer_start(&channel->monitor_timer, channel, + monitor_timer_interval_us, LTTNG_CONSUMER_SIG_MONITOR); + channel->monitor_timer_enabled = !!(ret == 0); + return ret; +} + +/* + * Stop and delete the channel's monitoring timer. 
+ */ +int consumer_timer_monitor_stop(struct lttng_consumer_channel *channel) +{ + int ret; + + LTTNG_ASSERT(channel); + LTTNG_ASSERT(channel->monitor_timer_enabled); + + ret = consumer_channel_timer_stop(&channel->monitor_timer, + LTTNG_CONSUMER_SIG_MONITOR); + if (ret == -1) { + ERR("Failed to stop live timer"); + goto end; + } + + channel->monitor_timer_enabled = 0; +end: + return ret; +} + +/* + * Block the RT signals for the entire process. It must be called from the + * consumer main before creating the threads + */ +int consumer_signal_init(void) +{ + int ret; + sigset_t mask; + + /* Block signal for entire process, so only our thread processes it. */ + setmask(&mask); + ret = pthread_sigmask(SIG_BLOCK, &mask, NULL); + if (ret) { + errno = ret; + PERROR("pthread_sigmask"); + return -1; + } + return 0; +} + +static +int sample_channel_positions(struct lttng_consumer_channel *channel, + uint64_t *_highest_use, uint64_t *_lowest_use, uint64_t *_total_consumed, + sample_positions_cb sample, get_consumed_cb get_consumed, + get_produced_cb get_produced) +{ + int ret = 0; + struct lttng_ht_iter iter; + struct lttng_consumer_stream *stream; + bool empty_channel = true; + uint64_t high = 0, low = UINT64_MAX; + struct lttng_ht *ht = the_consumer_data.stream_per_chan_id_ht; + + *_total_consumed = 0; + + rcu_read_lock(); + + cds_lfht_for_each_entry_duplicate(ht->ht, + ht->hash_fct(&channel->key, lttng_ht_seed), + ht->match_fct, &channel->key, + &iter.iter, stream, node_channel_id.node) { + unsigned long produced, consumed, usage; + + empty_channel = false; + + pthread_mutex_lock(&stream->lock); + if (cds_lfht_is_node_deleted(&stream->node.node)) { + goto next; + } + + ret = sample(stream); + if (ret) { + ERR("Failed to take buffer position snapshot in monitor timer (ret = %d)", ret); + pthread_mutex_unlock(&stream->lock); + goto end; + } + ret = get_consumed(stream, &consumed); + if (ret) { + ERR("Failed to get buffer consumed position in monitor timer"); + 
pthread_mutex_unlock(&stream->lock); + goto end; + } + ret = get_produced(stream, &produced); + if (ret) { + ERR("Failed to get buffer produced position in monitor timer"); + pthread_mutex_unlock(&stream->lock); + goto end; + } + + usage = produced - consumed; + high = (usage > high) ? usage : high; + low = (usage < low) ? usage : low; + + /* + * We don't use consumed here for 2 reasons: + * - output_written takes into account the padding written in the + * tracefiles when we stop the session; + * - the consumed position is not the accurate representation of what + * was extracted from a buffer in overwrite mode. + */ + *_total_consumed += stream->output_written; + next: + pthread_mutex_unlock(&stream->lock); + } + + *_highest_use = high; + *_lowest_use = low; +end: + rcu_read_unlock(); + if (empty_channel) { + ret = -1; + } + return ret; +} + +/* + * Execute action on a monitor timer. + */ +static +void monitor_timer(struct lttng_consumer_channel *channel) +{ + int ret; + int channel_monitor_pipe = + consumer_timer_thread_get_channel_monitor_pipe(); + struct lttcomm_consumer_channel_monitor_msg msg = { + .key = channel->key, + }; + sample_positions_cb sample; + get_consumed_cb get_consumed; + get_produced_cb get_produced; + uint64_t lowest = 0, highest = 0, total_consumed = 0; + + LTTNG_ASSERT(channel); + + if (channel_monitor_pipe < 0) { + return; + } + + switch (the_consumer_data.type) { + case LTTNG_CONSUMER_KERNEL: + sample = lttng_kconsumer_sample_snapshot_positions; + get_consumed = lttng_kconsumer_get_consumed_snapshot; + get_produced = lttng_kconsumer_get_produced_snapshot; + break; + case LTTNG_CONSUMER32_UST: + case LTTNG_CONSUMER64_UST: + sample = lttng_ustconsumer_sample_snapshot_positions; + get_consumed = lttng_ustconsumer_get_consumed_snapshot; + get_produced = lttng_ustconsumer_get_produced_snapshot; + break; + default: + abort(); + } + + ret = sample_channel_positions(channel, &highest, &lowest, + &total_consumed, sample, get_consumed, 
get_produced); + if (ret) { + return; + } + msg.highest = highest; + msg.lowest = lowest; + msg.total_consumed = total_consumed; + + /* + * Writes performed here are assumed to be atomic which is only + * guaranteed for sizes < than PIPE_BUF. + */ + LTTNG_ASSERT(sizeof(msg) <= PIPE_BUF); + + do { + ret = write(channel_monitor_pipe, &msg, sizeof(msg)); + } while (ret == -1 && errno == EINTR); + if (ret == -1) { + if (errno == EAGAIN) { + /* Not an error, the sample is merely dropped. */ + DBG("Channel monitor pipe is full; dropping sample for channel key = %" PRIu64, + channel->key); + } else { + PERROR("write to the channel monitor pipe"); + } + } else { + DBG("Sent channel monitoring sample for channel key %" PRIu64 + ", (highest = %" PRIu64 ", lowest = %" PRIu64 ")", + channel->key, msg.highest, msg.lowest); + } +} + +int consumer_timer_thread_get_channel_monitor_pipe(void) +{ + return uatomic_read(&the_channel_monitor_pipe); +} + +int consumer_timer_thread_set_channel_monitor_pipe(int fd) +{ + int ret; + + ret = uatomic_cmpxchg(&the_channel_monitor_pipe, -1, fd); + if (ret != -1) { + ret = -1; + goto end; + } + ret = 0; +end: + return ret; +} + +/* + * This thread is the sighandler for signals LTTNG_CONSUMER_SIG_SWITCH, + * LTTNG_CONSUMER_SIG_TEARDOWN, LTTNG_CONSUMER_SIG_LIVE, and + * LTTNG_CONSUMER_SIG_MONITOR, LTTNG_CONSUMER_SIG_EXIT. + */ +void *consumer_timer_thread(void *data) +{ + int signr; + sigset_t mask; + siginfo_t info; + struct lttng_consumer_local_data *ctx = (lttng_consumer_local_data *) data; + + rcu_register_thread(); + + health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_METADATA_TIMER); + + if (testpoint(consumerd_thread_metadata_timer)) { + goto error_testpoint; + } + + health_code_update(); + + /* Only self thread will receive signal mask. 
*/ + setmask(&mask); + CMM_STORE_SHARED(timer_signal.tid, pthread_self()); + + while (1) { + health_code_update(); + + health_poll_entry(); + signr = sigwaitinfo(&mask, &info); + health_poll_exit(); + + /* + * NOTE: cascading conditions are used instead of a switch case + * since the use of SIGRTMIN in the definition of the signals' + * values prevents the reduction to an integer constant. + */ + if (signr == -1) { + if (errno != EINTR) { + PERROR("sigwaitinfo"); + } + continue; + } else if (signr == LTTNG_CONSUMER_SIG_SWITCH) { + metadata_switch_timer(ctx, &info); + } else if (signr == LTTNG_CONSUMER_SIG_TEARDOWN) { + cmm_smp_mb(); + CMM_STORE_SHARED(timer_signal.qs_done, 1); + cmm_smp_mb(); + DBG("Signal timer metadata thread teardown"); + } else if (signr == LTTNG_CONSUMER_SIG_LIVE) { + live_timer(ctx, &info); + } else if (signr == LTTNG_CONSUMER_SIG_MONITOR) { + struct lttng_consumer_channel *channel; + + channel = (lttng_consumer_channel *) info.si_value.sival_ptr; + monitor_timer(channel); + } else if (signr == LTTNG_CONSUMER_SIG_EXIT) { + LTTNG_ASSERT(CMM_LOAD_SHARED(consumer_quit)); + goto end; + } else { + ERR("Unexpected signal %d\n", info.si_signo); + } + } + +error_testpoint: + /* Only reached in testpoint error */ + health_error(); +end: + health_unregister(health_consumerd); + rcu_unregister_thread(); + return NULL; +} diff --git a/src/common/consumer/consumer.c b/src/common/consumer/consumer.c deleted file mode 100644 index 84fdbcaaa..000000000 --- a/src/common/consumer/consumer.c +++ /dev/null @@ -1,5259 +0,0 @@ -/* - * Copyright (C) 2011 Julien Desfossez - * Copyright (C) 2011 Mathieu Desnoyers - * Copyright (C) 2012 David Goulet - * - * SPDX-License-Identifier: GPL-2.0-only - * - */ - -#include "common/index/ctf-index.h" -#define _LGPL_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include 
-#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -struct lttng_consumer_global_data the_consumer_data = { - .stream_count = 0, - .need_update = 1, - .type = LTTNG_CONSUMER_UNKNOWN, -}; - -enum consumer_channel_action { - CONSUMER_CHANNEL_ADD, - CONSUMER_CHANNEL_DEL, - CONSUMER_CHANNEL_QUIT, -}; - -struct consumer_channel_msg { - enum consumer_channel_action action; - struct lttng_consumer_channel *chan; /* add */ - uint64_t key; /* del */ -}; - -/* Flag used to temporarily pause data consumption from testpoints. */ -int data_consumption_paused; - -/* - * Flag to inform the polling thread to quit when all fd hung up. Updated by - * the consumer_thread_receive_fds when it notices that all fds has hung up. - * Also updated by the signal handler (consumer_should_exit()). Read by the - * polling threads. - */ -int consumer_quit; - -/* - * Global hash table containing respectively metadata and data streams. The - * stream element in this ht should only be updated by the metadata poll thread - * for the metadata and the data poll thread for the data. - */ -static struct lttng_ht *metadata_ht; -static struct lttng_ht *data_ht; - -static const char *get_consumer_domain(void) -{ - switch (the_consumer_data.type) { - case LTTNG_CONSUMER_KERNEL: - return DEFAULT_KERNEL_TRACE_DIR; - case LTTNG_CONSUMER64_UST: - /* Fall-through. */ - case LTTNG_CONSUMER32_UST: - return DEFAULT_UST_TRACE_DIR; - default: - abort(); - } -} - -/* - * Notify a thread lttng pipe to poll back again. This usually means that some - * global state has changed so we just send back the thread in a poll wait - * call. 
- */ -static void notify_thread_lttng_pipe(struct lttng_pipe *pipe) -{ - struct lttng_consumer_stream *null_stream = NULL; - - LTTNG_ASSERT(pipe); - - (void) lttng_pipe_write(pipe, &null_stream, sizeof(null_stream)); -} - -static void notify_health_quit_pipe(int *pipe) -{ - ssize_t ret; - - ret = lttng_write(pipe[1], "4", 1); - if (ret < 1) { - PERROR("write consumer health quit"); - } -} - -static void notify_channel_pipe(struct lttng_consumer_local_data *ctx, - struct lttng_consumer_channel *chan, - uint64_t key, - enum consumer_channel_action action) -{ - struct consumer_channel_msg msg; - ssize_t ret; - - memset(&msg, 0, sizeof(msg)); - - msg.action = action; - msg.chan = chan; - msg.key = key; - ret = lttng_write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg)); - if (ret < sizeof(msg)) { - PERROR("notify_channel_pipe write error"); - } -} - -void notify_thread_del_channel(struct lttng_consumer_local_data *ctx, - uint64_t key) -{ - notify_channel_pipe(ctx, NULL, key, CONSUMER_CHANNEL_DEL); -} - -static int read_channel_pipe(struct lttng_consumer_local_data *ctx, - struct lttng_consumer_channel **chan, - uint64_t *key, - enum consumer_channel_action *action) -{ - struct consumer_channel_msg msg; - ssize_t ret; - - ret = lttng_read(ctx->consumer_channel_pipe[0], &msg, sizeof(msg)); - if (ret < sizeof(msg)) { - ret = -1; - goto error; - } - *action = msg.action; - *chan = msg.chan; - *key = msg.key; -error: - return (int) ret; -} - -/* - * Cleanup the stream list of a channel. Those streams are not yet globally - * visible - */ -static void clean_channel_stream_list(struct lttng_consumer_channel *channel) -{ - struct lttng_consumer_stream *stream, *stmp; - - LTTNG_ASSERT(channel); - - /* Delete streams that might have been left in the stream list. 
*/ - cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head, - send_node) { - cds_list_del(&stream->send_node); - /* - * Once a stream is added to this list, the buffers were created so we - * have a guarantee that this call will succeed. Setting the monitor - * mode to 0 so we don't lock nor try to delete the stream from the - * global hash table. - */ - stream->monitor = 0; - consumer_stream_destroy(stream, NULL); - } -} - -/* - * Find a stream. The consumer_data.lock must be locked during this - * call. - */ -static struct lttng_consumer_stream *find_stream(uint64_t key, - struct lttng_ht *ht) -{ - struct lttng_ht_iter iter; - struct lttng_ht_node_u64 *node; - struct lttng_consumer_stream *stream = NULL; - - LTTNG_ASSERT(ht); - - /* -1ULL keys are lookup failures */ - if (key == (uint64_t) -1ULL) { - return NULL; - } - - rcu_read_lock(); - - lttng_ht_lookup(ht, &key, &iter); - node = lttng_ht_iter_get_node_u64(&iter); - if (node != NULL) { - stream = caa_container_of(node, struct lttng_consumer_stream, node); - } - - rcu_read_unlock(); - - return stream; -} - -static void steal_stream_key(uint64_t key, struct lttng_ht *ht) -{ - struct lttng_consumer_stream *stream; - - rcu_read_lock(); - stream = find_stream(key, ht); - if (stream) { - stream->key = (uint64_t) -1ULL; - /* - * We don't want the lookup to match, but we still need - * to iterate on this stream when iterating over the hash table. Just - * change the node key. - */ - stream->node.key = (uint64_t) -1ULL; - } - rcu_read_unlock(); -} - -/* - * Return a channel object for the given key. - * - * RCU read side lock MUST be acquired before calling this function and - * protects the channel ptr. 
- */ -struct lttng_consumer_channel *consumer_find_channel(uint64_t key) -{ - struct lttng_ht_iter iter; - struct lttng_ht_node_u64 *node; - struct lttng_consumer_channel *channel = NULL; - - /* -1ULL keys are lookup failures */ - if (key == (uint64_t) -1ULL) { - return NULL; - } - - lttng_ht_lookup(the_consumer_data.channel_ht, &key, &iter); - node = lttng_ht_iter_get_node_u64(&iter); - if (node != NULL) { - channel = caa_container_of(node, struct lttng_consumer_channel, node); - } - - return channel; -} - -/* - * There is a possibility that the consumer does not have enough time between - * the close of the channel on the session daemon and the cleanup in here thus - * once we have a channel add with an existing key, we know for sure that this - * channel will eventually get cleaned up by all streams being closed. - * - * This function just nullifies the already existing channel key. - */ -static void steal_channel_key(uint64_t key) -{ - struct lttng_consumer_channel *channel; - - rcu_read_lock(); - channel = consumer_find_channel(key); - if (channel) { - channel->key = (uint64_t) -1ULL; - /* - * We don't want the lookup to match, but we still need to iterate on - * this channel when iterating over the hash table. Just change the - * node key. - */ - channel->node.key = (uint64_t) -1ULL; - } - rcu_read_unlock(); -} - -static void free_channel_rcu(struct rcu_head *head) -{ - struct lttng_ht_node_u64 *node = - caa_container_of(head, struct lttng_ht_node_u64, head); - struct lttng_consumer_channel *channel = - caa_container_of(node, struct lttng_consumer_channel, node); - - switch (the_consumer_data.type) { - case LTTNG_CONSUMER_KERNEL: - break; - case LTTNG_CONSUMER32_UST: - case LTTNG_CONSUMER64_UST: - lttng_ustconsumer_free_channel(channel); - break; - default: - ERR("Unknown consumer_data type"); - abort(); - } - free(channel); -} - -/* - * RCU protected relayd socket pair free. 
- */ -static void free_relayd_rcu(struct rcu_head *head) -{ - struct lttng_ht_node_u64 *node = - caa_container_of(head, struct lttng_ht_node_u64, head); - struct consumer_relayd_sock_pair *relayd = - caa_container_of(node, struct consumer_relayd_sock_pair, node); - - /* - * Close all sockets. This is done in the call RCU since we don't want the - * socket fds to be reassigned thus potentially creating bad state of the - * relayd object. - * - * We do not have to lock the control socket mutex here since at this stage - * there is no one referencing to this relayd object. - */ - (void) relayd_close(&relayd->control_sock); - (void) relayd_close(&relayd->data_sock); - - pthread_mutex_destroy(&relayd->ctrl_sock_mutex); - free(relayd); -} - -/* - * Destroy and free relayd socket pair object. - */ -void consumer_destroy_relayd(struct consumer_relayd_sock_pair *relayd) -{ - int ret; - struct lttng_ht_iter iter; - - if (relayd == NULL) { - return; - } - - DBG("Consumer destroy and close relayd socket pair"); - - iter.iter.node = &relayd->node.node; - ret = lttng_ht_del(the_consumer_data.relayd_ht, &iter); - if (ret != 0) { - /* We assume the relayd is being or is destroyed */ - return; - } - - /* RCU free() call */ - call_rcu(&relayd->node.head, free_relayd_rcu); -} - -/* - * Remove a channel from the global list protected by a mutex. This function is - * also responsible for freeing its data structures. - */ -void consumer_del_channel(struct lttng_consumer_channel *channel) -{ - struct lttng_ht_iter iter; - - DBG("Consumer delete channel key %" PRIu64, channel->key); - - pthread_mutex_lock(&the_consumer_data.lock); - pthread_mutex_lock(&channel->lock); - - /* Destroy streams that might have been left in the stream list. 
*/ - clean_channel_stream_list(channel); - - if (channel->live_timer_enabled == 1) { - consumer_timer_live_stop(channel); - } - if (channel->monitor_timer_enabled == 1) { - consumer_timer_monitor_stop(channel); - } - - switch (the_consumer_data.type) { - case LTTNG_CONSUMER_KERNEL: - break; - case LTTNG_CONSUMER32_UST: - case LTTNG_CONSUMER64_UST: - lttng_ustconsumer_del_channel(channel); - break; - default: - ERR("Unknown consumer_data type"); - abort(); - goto end; - } - - lttng_trace_chunk_put(channel->trace_chunk); - channel->trace_chunk = NULL; - - if (channel->is_published) { - int ret; - - rcu_read_lock(); - iter.iter.node = &channel->node.node; - ret = lttng_ht_del(the_consumer_data.channel_ht, &iter); - LTTNG_ASSERT(!ret); - - iter.iter.node = &channel->channels_by_session_id_ht_node.node; - ret = lttng_ht_del(the_consumer_data.channels_by_session_id_ht, - &iter); - LTTNG_ASSERT(!ret); - rcu_read_unlock(); - } - - channel->is_deleted = true; - call_rcu(&channel->node.head, free_channel_rcu); -end: - pthread_mutex_unlock(&channel->lock); - pthread_mutex_unlock(&the_consumer_data.lock); -} - -/* - * Iterate over the relayd hash table and destroy each element. Finally, - * destroy the whole hash table. - */ -static void cleanup_relayd_ht(void) -{ - struct lttng_ht_iter iter; - struct consumer_relayd_sock_pair *relayd; - - rcu_read_lock(); - - cds_lfht_for_each_entry(the_consumer_data.relayd_ht->ht, &iter.iter, - relayd, node.node) { - consumer_destroy_relayd(relayd); - } - - rcu_read_unlock(); - - lttng_ht_destroy(the_consumer_data.relayd_ht); -} - -/* - * Update the end point status of all streams having the given network sequence - * index (relayd index). - * - * It's atomically set without having the stream mutex locked which is fine - * because we handle the write/read race with a pipe wakeup for each thread. 
- */ -static void update_endpoint_status_by_netidx(uint64_t net_seq_idx, - enum consumer_endpoint_status status) -{ - struct lttng_ht_iter iter; - struct lttng_consumer_stream *stream; - - DBG("Consumer set delete flag on stream by idx %" PRIu64, net_seq_idx); - - rcu_read_lock(); - - /* Let's begin with metadata */ - cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) { - if (stream->net_seq_idx == net_seq_idx) { - uatomic_set(&stream->endpoint_status, status); - DBG("Delete flag set to metadata stream %d", stream->wait_fd); - } - } - - /* Follow up by the data streams */ - cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) { - if (stream->net_seq_idx == net_seq_idx) { - uatomic_set(&stream->endpoint_status, status); - DBG("Delete flag set to data stream %d", stream->wait_fd); - } - } - rcu_read_unlock(); -} - -/* - * Cleanup a relayd object by flagging every associated streams for deletion, - * destroying the object meaning removing it from the relayd hash table, - * closing the sockets and freeing the memory in a RCU call. - * - * If a local data context is available, notify the threads that the streams' - * state have changed. - */ -void lttng_consumer_cleanup_relayd(struct consumer_relayd_sock_pair *relayd) -{ - uint64_t netidx; - - LTTNG_ASSERT(relayd); - - DBG("Cleaning up relayd object ID %"PRIu64, relayd->net_seq_idx); - - /* Save the net sequence index before destroying the object */ - netidx = relayd->net_seq_idx; - - /* - * Delete the relayd from the relayd hash table, close the sockets and free - * the object in a RCU call. - */ - consumer_destroy_relayd(relayd); - - /* Set inactive endpoint to all streams */ - update_endpoint_status_by_netidx(netidx, CONSUMER_ENDPOINT_INACTIVE); - - /* - * With a local data context, notify the threads that the streams' state - * have changed. 
The write() action on the pipe acts as an "implicit" - * memory barrier ordering the updates of the end point status from the - * read of this status which happens AFTER receiving this notify. - */ - notify_thread_lttng_pipe(relayd->ctx->consumer_data_pipe); - notify_thread_lttng_pipe(relayd->ctx->consumer_metadata_pipe); -} - -/* - * Flag a relayd socket pair for destruction. Destroy it if the refcount - * reaches zero. - * - * RCU read side lock MUST be aquired before calling this function. - */ -void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd) -{ - LTTNG_ASSERT(relayd); - - /* Set destroy flag for this object */ - uatomic_set(&relayd->destroy_flag, 1); - - /* Destroy the relayd if refcount is 0 */ - if (uatomic_read(&relayd->refcount) == 0) { - consumer_destroy_relayd(relayd); - } -} - -/* - * Completly destroy stream from every visiable data structure and the given - * hash table if one. - * - * One this call returns, the stream object is not longer usable nor visible. - */ -void consumer_del_stream(struct lttng_consumer_stream *stream, - struct lttng_ht *ht) -{ - consumer_stream_destroy(stream, ht); -} - -/* - * XXX naming of del vs destroy is all mixed up. - */ -void consumer_del_stream_for_data(struct lttng_consumer_stream *stream) -{ - consumer_stream_destroy(stream, data_ht); -} - -void consumer_del_stream_for_metadata(struct lttng_consumer_stream *stream) -{ - consumer_stream_destroy(stream, metadata_ht); -} - -void consumer_stream_update_channel_attributes( - struct lttng_consumer_stream *stream, - struct lttng_consumer_channel *channel) -{ - stream->channel_read_only_attributes.tracefile_size = - channel->tracefile_size; -} - -/* - * Add a stream to the global list protected by a mutex. 
- */ -void consumer_add_data_stream(struct lttng_consumer_stream *stream) -{ - struct lttng_ht *ht = data_ht; - - LTTNG_ASSERT(stream); - LTTNG_ASSERT(ht); - - DBG3("Adding consumer stream %" PRIu64, stream->key); - - pthread_mutex_lock(&the_consumer_data.lock); - pthread_mutex_lock(&stream->chan->lock); - pthread_mutex_lock(&stream->chan->timer_lock); - pthread_mutex_lock(&stream->lock); - rcu_read_lock(); - - /* Steal stream identifier to avoid having streams with the same key */ - steal_stream_key(stream->key, ht); - - lttng_ht_add_unique_u64(ht, &stream->node); - - lttng_ht_add_u64(the_consumer_data.stream_per_chan_id_ht, - &stream->node_channel_id); - - /* - * Add stream to the stream_list_ht of the consumer data. No need to steal - * the key since the HT does not use it and we allow to add redundant keys - * into this table. - */ - lttng_ht_add_u64(the_consumer_data.stream_list_ht, - &stream->node_session_id); - - /* - * When nb_init_stream_left reaches 0, we don't need to trigger any action - * in terms of destroying the associated channel, because the action that - * causes the count to become 0 also causes a stream to be added. The - * channel deletion will thus be triggered by the following removal of this - * stream. - */ - if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) { - /* Increment refcount before decrementing nb_init_stream_left */ - cmm_smp_wmb(); - uatomic_dec(&stream->chan->nb_init_stream_left); - } - - /* Update consumer data once the node is inserted. */ - the_consumer_data.stream_count++; - the_consumer_data.need_update = 1; - - rcu_read_unlock(); - pthread_mutex_unlock(&stream->lock); - pthread_mutex_unlock(&stream->chan->timer_lock); - pthread_mutex_unlock(&stream->chan->lock); - pthread_mutex_unlock(&the_consumer_data.lock); -} - -/* - * Add relayd socket to global consumer data hashtable. RCU read side lock MUST - * be acquired before calling this. 
- */ -static int add_relayd(struct consumer_relayd_sock_pair *relayd) -{ - int ret = 0; - struct lttng_ht_node_u64 *node; - struct lttng_ht_iter iter; - - LTTNG_ASSERT(relayd); - - lttng_ht_lookup(the_consumer_data.relayd_ht, &relayd->net_seq_idx, - &iter); - node = lttng_ht_iter_get_node_u64(&iter); - if (node != NULL) { - goto end; - } - lttng_ht_add_unique_u64(the_consumer_data.relayd_ht, &relayd->node); - -end: - return ret; -} - -/* - * Allocate and return a consumer relayd socket. - */ -static struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair( - uint64_t net_seq_idx) -{ - struct consumer_relayd_sock_pair *obj = NULL; - - /* net sequence index of -1 is a failure */ - if (net_seq_idx == (uint64_t) -1ULL) { - goto error; - } - - obj = zmalloc(sizeof(struct consumer_relayd_sock_pair)); - if (obj == NULL) { - PERROR("zmalloc relayd sock"); - goto error; - } - - obj->net_seq_idx = net_seq_idx; - obj->refcount = 0; - obj->destroy_flag = 0; - obj->control_sock.sock.fd = -1; - obj->data_sock.sock.fd = -1; - lttng_ht_node_init_u64(&obj->node, obj->net_seq_idx); - pthread_mutex_init(&obj->ctrl_sock_mutex, NULL); - -error: - return obj; -} - -/* - * Find a relayd socket pair in the global consumer data. - * - * Return the object if found else NULL. - * RCU read-side lock must be held across this call and while using the - * returned object. 
- */ -struct consumer_relayd_sock_pair *consumer_find_relayd(uint64_t key) -{ - struct lttng_ht_iter iter; - struct lttng_ht_node_u64 *node; - struct consumer_relayd_sock_pair *relayd = NULL; - - /* Negative keys are lookup failures */ - if (key == (uint64_t) -1ULL) { - goto error; - } - - lttng_ht_lookup(the_consumer_data.relayd_ht, &key, &iter); - node = lttng_ht_iter_get_node_u64(&iter); - if (node != NULL) { - relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node); - } - -error: - return relayd; -} - -/* - * Find a relayd and send the stream - * - * Returns 0 on success, < 0 on error - */ -int consumer_send_relayd_stream(struct lttng_consumer_stream *stream, - char *path) -{ - int ret = 0; - struct consumer_relayd_sock_pair *relayd; - - LTTNG_ASSERT(stream); - LTTNG_ASSERT(stream->net_seq_idx != -1ULL); - LTTNG_ASSERT(path); - - /* The stream is not metadata. Get relayd reference if exists. */ - rcu_read_lock(); - relayd = consumer_find_relayd(stream->net_seq_idx); - if (relayd != NULL) { - /* Add stream on the relayd */ - pthread_mutex_lock(&relayd->ctrl_sock_mutex); - ret = relayd_add_stream(&relayd->control_sock, stream->name, - get_consumer_domain(), path, &stream->relayd_stream_id, - stream->chan->tracefile_size, - stream->chan->tracefile_count, - stream->trace_chunk); - pthread_mutex_unlock(&relayd->ctrl_sock_mutex); - if (ret < 0) { - ERR("Relayd add stream failed. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx); - lttng_consumer_cleanup_relayd(relayd); - goto end; - } - - uatomic_inc(&relayd->refcount); - stream->sent_to_relayd = 1; - } else { - ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. 
Can't send it.", - stream->key, stream->net_seq_idx); - ret = -1; - goto end; - } - - DBG("Stream %s with key %" PRIu64 " sent to relayd id %" PRIu64, - stream->name, stream->key, stream->net_seq_idx); - -end: - rcu_read_unlock(); - return ret; -} - -/* - * Find a relayd and send the streams sent message - * - * Returns 0 on success, < 0 on error - */ -int consumer_send_relayd_streams_sent(uint64_t net_seq_idx) -{ - int ret = 0; - struct consumer_relayd_sock_pair *relayd; - - LTTNG_ASSERT(net_seq_idx != -1ULL); - - /* The stream is not metadata. Get relayd reference if exists. */ - rcu_read_lock(); - relayd = consumer_find_relayd(net_seq_idx); - if (relayd != NULL) { - /* Add stream on the relayd */ - pthread_mutex_lock(&relayd->ctrl_sock_mutex); - ret = relayd_streams_sent(&relayd->control_sock); - pthread_mutex_unlock(&relayd->ctrl_sock_mutex); - if (ret < 0) { - ERR("Relayd streams sent failed. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx); - lttng_consumer_cleanup_relayd(relayd); - goto end; - } - } else { - ERR("Relayd ID %" PRIu64 " unknown. Can't send streams_sent.", - net_seq_idx); - ret = -1; - goto end; - } - - ret = 0; - DBG("All streams sent relayd id %" PRIu64, net_seq_idx); - -end: - rcu_read_unlock(); - return ret; -} - -/* - * Find a relayd and close the stream - */ -void close_relayd_stream(struct lttng_consumer_stream *stream) -{ - struct consumer_relayd_sock_pair *relayd; - - /* The stream is not metadata. Get relayd reference if exists. */ - rcu_read_lock(); - relayd = consumer_find_relayd(stream->net_seq_idx); - if (relayd) { - consumer_stream_relayd_close(stream, relayd); - } - rcu_read_unlock(); -} - -/* - * Handle stream for relayd transmission if the stream applies for network - * streaming where the net sequence index is set. - * - * Return destination file descriptor or negative value on error. 
- */ -static int write_relayd_stream_header(struct lttng_consumer_stream *stream, - size_t data_size, unsigned long padding, - struct consumer_relayd_sock_pair *relayd) -{ - int outfd = -1, ret; - struct lttcomm_relayd_data_hdr data_hdr; - - /* Safety net */ - LTTNG_ASSERT(stream); - LTTNG_ASSERT(relayd); - - /* Reset data header */ - memset(&data_hdr, 0, sizeof(data_hdr)); - - if (stream->metadata_flag) { - /* Caller MUST acquire the relayd control socket lock */ - ret = relayd_send_metadata(&relayd->control_sock, data_size); - if (ret < 0) { - goto error; - } - - /* Metadata are always sent on the control socket. */ - outfd = relayd->control_sock.sock.fd; - } else { - /* Set header with stream information */ - data_hdr.stream_id = htobe64(stream->relayd_stream_id); - data_hdr.data_size = htobe32(data_size); - data_hdr.padding_size = htobe32(padding); - - /* - * Note that net_seq_num below is assigned with the *current* value of - * next_net_seq_num and only after that the next_net_seq_num will be - * increment. This is why when issuing a command on the relayd using - * this next value, 1 should always be substracted in order to compare - * the last seen sequence number on the relayd side to the last sent. - */ - data_hdr.net_seq_num = htobe64(stream->next_net_seq_num); - /* Other fields are zeroed previously */ - - ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr, - sizeof(data_hdr)); - if (ret < 0) { - goto error; - } - - ++stream->next_net_seq_num; - - /* Set to go on data socket */ - outfd = relayd->data_sock.sock.fd; - } - -error: - return outfd; -} - -/* - * Write a character on the metadata poll pipe to wake the metadata thread. - * Returns 0 on success, -1 on error. 
- */ -int consumer_metadata_wakeup_pipe(const struct lttng_consumer_channel *channel) -{ - int ret = 0; - - DBG("Waking up metadata poll thread (writing to pipe): channel name = '%s'", - channel->name); - if (channel->monitor && channel->metadata_stream) { - const char dummy = 'c'; - const ssize_t write_ret = lttng_write( - channel->metadata_stream->ust_metadata_poll_pipe[1], - &dummy, 1); - - if (write_ret < 1) { - if (errno == EWOULDBLOCK) { - /* - * This is fine, the metadata poll thread - * is having a hard time keeping-up, but - * it will eventually wake-up and consume - * the available data. - */ - ret = 0; - } else { - PERROR("Failed to write to UST metadata pipe while attempting to wake-up the metadata poll thread"); - ret = -1; - goto end; - } - } - } - -end: - return ret; -} - -/* - * Trigger a dump of the metadata content. Following/during the succesful - * completion of this call, the metadata poll thread will start receiving - * metadata packets to consume. - * - * The caller must hold the channel and stream locks. - */ -static -int consumer_metadata_stream_dump(struct lttng_consumer_stream *stream) -{ - int ret; - - ASSERT_LOCKED(stream->chan->lock); - ASSERT_LOCKED(stream->lock); - LTTNG_ASSERT(stream->metadata_flag); - LTTNG_ASSERT(stream->chan->trace_chunk); - - switch (the_consumer_data.type) { - case LTTNG_CONSUMER_KERNEL: - /* - * Reset the position of what has been read from the - * metadata cache to 0 so we can dump it again. - */ - ret = kernctl_metadata_cache_dump(stream->wait_fd); - break; - case LTTNG_CONSUMER32_UST: - case LTTNG_CONSUMER64_UST: - /* - * Reset the position pushed from the metadata cache so it - * will write from the beginning on the next push. 
- */ - stream->ust_metadata_pushed = 0; - ret = consumer_metadata_wakeup_pipe(stream->chan); - break; - default: - ERR("Unknown consumer_data type"); - abort(); - } - if (ret < 0) { - ERR("Failed to dump the metadata cache"); - } - return ret; -} - -static -int lttng_consumer_channel_set_trace_chunk( - struct lttng_consumer_channel *channel, - struct lttng_trace_chunk *new_trace_chunk) -{ - pthread_mutex_lock(&channel->lock); - if (channel->is_deleted) { - /* - * The channel has been logically deleted and should no longer - * be used. It has released its reference to its current trace - * chunk and should not acquire a new one. - * - * Return success as there is nothing for the caller to do. - */ - goto end; - } - - /* - * The acquisition of the reference cannot fail (barring - * a severe internal error) since a reference to the published - * chunk is already held by the caller. - */ - if (new_trace_chunk) { - const bool acquired_reference = lttng_trace_chunk_get( - new_trace_chunk); - - LTTNG_ASSERT(acquired_reference); - } - - lttng_trace_chunk_put(channel->trace_chunk); - channel->trace_chunk = new_trace_chunk; -end: - pthread_mutex_unlock(&channel->lock); - return 0; -} - -/* - * Allocate and return a new lttng_consumer_channel object using the given key - * to initialize the hash table node. - * - * On error, return NULL. 
- */ -struct lttng_consumer_channel *consumer_allocate_channel(uint64_t key, - uint64_t session_id, - const uint64_t *chunk_id, - const char *pathname, - const char *name, - uint64_t relayd_id, - enum lttng_event_output output, - uint64_t tracefile_size, - uint64_t tracefile_count, - uint64_t session_id_per_pid, - unsigned int monitor, - unsigned int live_timer_interval, - bool is_in_live_session, - const char *root_shm_path, - const char *shm_path) -{ - struct lttng_consumer_channel *channel = NULL; - struct lttng_trace_chunk *trace_chunk = NULL; - - if (chunk_id) { - trace_chunk = lttng_trace_chunk_registry_find_chunk( - the_consumer_data.chunk_registry, session_id, - *chunk_id); - if (!trace_chunk) { - ERR("Failed to find trace chunk reference during creation of channel"); - goto end; - } - } - - channel = zmalloc(sizeof(*channel)); - if (channel == NULL) { - PERROR("malloc struct lttng_consumer_channel"); - goto end; - } - - channel->key = key; - channel->refcount = 0; - channel->session_id = session_id; - channel->session_id_per_pid = session_id_per_pid; - channel->relayd_id = relayd_id; - channel->tracefile_size = tracefile_size; - channel->tracefile_count = tracefile_count; - channel->monitor = monitor; - channel->live_timer_interval = live_timer_interval; - channel->is_live = is_in_live_session; - pthread_mutex_init(&channel->lock, NULL); - pthread_mutex_init(&channel->timer_lock, NULL); - - switch (output) { - case LTTNG_EVENT_SPLICE: - channel->output = CONSUMER_CHANNEL_SPLICE; - break; - case LTTNG_EVENT_MMAP: - channel->output = CONSUMER_CHANNEL_MMAP; - break; - default: - abort(); - free(channel); - channel = NULL; - goto end; - } - - /* - * In monitor mode, the streams associated with the channel will be put in - * a special list ONLY owned by this channel. So, the refcount is set to 1 - * here meaning that the channel itself has streams that are referenced. 
- * - * On a channel deletion, once the channel is no longer visible, the - * refcount is decremented and checked for a zero value to delete it. With - * streams in no monitor mode, it will now be safe to destroy the channel. - */ - if (!channel->monitor) { - channel->refcount = 1; - } - - strncpy(channel->pathname, pathname, sizeof(channel->pathname)); - channel->pathname[sizeof(channel->pathname) - 1] = '\0'; - - strncpy(channel->name, name, sizeof(channel->name)); - channel->name[sizeof(channel->name) - 1] = '\0'; - - if (root_shm_path) { - strncpy(channel->root_shm_path, root_shm_path, sizeof(channel->root_shm_path)); - channel->root_shm_path[sizeof(channel->root_shm_path) - 1] = '\0'; - } - if (shm_path) { - strncpy(channel->shm_path, shm_path, sizeof(channel->shm_path)); - channel->shm_path[sizeof(channel->shm_path) - 1] = '\0'; - } - - lttng_ht_node_init_u64(&channel->node, channel->key); - lttng_ht_node_init_u64(&channel->channels_by_session_id_ht_node, - channel->session_id); - - channel->wait_fd = -1; - CDS_INIT_LIST_HEAD(&channel->streams.head); - - if (trace_chunk) { - int ret = lttng_consumer_channel_set_trace_chunk(channel, - trace_chunk); - if (ret) { - goto error; - } - } - - DBG("Allocated channel (key %" PRIu64 ")", channel->key); - -end: - lttng_trace_chunk_put(trace_chunk); - return channel; -error: - consumer_del_channel(channel); - channel = NULL; - goto end; -} - -/* - * Add a channel to the global list protected by a mutex. - * - * Always return 0 indicating success. - */ -int consumer_add_channel(struct lttng_consumer_channel *channel, - struct lttng_consumer_local_data *ctx) -{ - pthread_mutex_lock(&the_consumer_data.lock); - pthread_mutex_lock(&channel->lock); - pthread_mutex_lock(&channel->timer_lock); - - /* - * This gives us a guarantee that the channel we are about to add to the - * channel hash table will be unique. See this function comment on the why - * we need to steel the channel key at this stage. 
- */ - steal_channel_key(channel->key); - - rcu_read_lock(); - lttng_ht_add_unique_u64(the_consumer_data.channel_ht, &channel->node); - lttng_ht_add_u64(the_consumer_data.channels_by_session_id_ht, - &channel->channels_by_session_id_ht_node); - rcu_read_unlock(); - channel->is_published = true; - - pthread_mutex_unlock(&channel->timer_lock); - pthread_mutex_unlock(&channel->lock); - pthread_mutex_unlock(&the_consumer_data.lock); - - if (channel->wait_fd != -1 && channel->type == CONSUMER_CHANNEL_TYPE_DATA) { - notify_channel_pipe(ctx, channel, -1, CONSUMER_CHANNEL_ADD); - } - - return 0; -} - -/* - * Allocate the pollfd structure and the local view of the out fds to avoid - * doing a lookup in the linked list and concurrency issues when writing is - * needed. Called with consumer_data.lock held. - * - * Returns the number of fds in the structures. - */ -static int update_poll_array(struct lttng_consumer_local_data *ctx, - struct pollfd **pollfd, struct lttng_consumer_stream **local_stream, - struct lttng_ht *ht, int *nb_inactive_fd) -{ - int i = 0; - struct lttng_ht_iter iter; - struct lttng_consumer_stream *stream; - - LTTNG_ASSERT(ctx); - LTTNG_ASSERT(ht); - LTTNG_ASSERT(pollfd); - LTTNG_ASSERT(local_stream); - - DBG("Updating poll fd array"); - *nb_inactive_fd = 0; - rcu_read_lock(); - cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) { - /* - * Only active streams with an active end point can be added to the - * poll set and local stream storage of the thread. - * - * There is a potential race here for endpoint_status to be updated - * just after the check. However, this is OK since the stream(s) will - * be deleted once the thread is notified that the end point state has - * changed where this function will be called back again. - * - * We track the number of inactive FDs because they still need to be - * closed by the polling thread after a wakeup on the data_pipe or - * metadata_pipe. 
- */ - if (stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) { - (*nb_inactive_fd)++; - continue; - } - /* - * This clobbers way too much the debug output. Uncomment that if you - * need it for debugging purposes. - */ - (*pollfd)[i].fd = stream->wait_fd; - (*pollfd)[i].events = POLLIN | POLLPRI; - local_stream[i] = stream; - i++; - } - rcu_read_unlock(); - - /* - * Insert the consumer_data_pipe at the end of the array and don't - * increment i so nb_fd is the number of real FD. - */ - (*pollfd)[i].fd = lttng_pipe_get_readfd(ctx->consumer_data_pipe); - (*pollfd)[i].events = POLLIN | POLLPRI; - - (*pollfd)[i + 1].fd = lttng_pipe_get_readfd(ctx->consumer_wakeup_pipe); - (*pollfd)[i + 1].events = POLLIN | POLLPRI; - return i; -} - -/* - * Poll on the should_quit pipe and the command socket return -1 on - * error, 1 if should exit, 0 if data is available on the command socket - */ -int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll) -{ - int num_rdy; - -restart: - num_rdy = poll(consumer_sockpoll, 2, -1); - if (num_rdy == -1) { - /* - * Restart interrupted system call. - */ - if (errno == EINTR) { - goto restart; - } - PERROR("Poll error"); - return -1; - } - if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) { - DBG("consumer_should_quit wake up"); - return 1; - } - return 0; -} - -/* - * Set the error socket. - */ -void lttng_consumer_set_error_sock(struct lttng_consumer_local_data *ctx, - int sock) -{ - ctx->consumer_error_socket = sock; -} - -/* - * Set the command socket path. - */ -void lttng_consumer_set_command_sock_path( - struct lttng_consumer_local_data *ctx, char *sock) -{ - ctx->consumer_command_sock_path = sock; -} - -/* - * Send return code to the session daemon. 
- * If the socket is not defined, we return 0, it is not a fatal error - */ -int lttng_consumer_send_error(struct lttng_consumer_local_data *ctx, int cmd) -{ - if (ctx->consumer_error_socket > 0) { - return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd, - sizeof(enum lttcomm_sessiond_command)); - } - - return 0; -} - -/* - * Close all the tracefiles and stream fds and MUST be called when all - * instances are destroyed i.e. when all threads were joined and are ended. - */ -void lttng_consumer_cleanup(void) -{ - struct lttng_ht_iter iter; - struct lttng_consumer_channel *channel; - unsigned int trace_chunks_left; - - rcu_read_lock(); - - cds_lfht_for_each_entry(the_consumer_data.channel_ht->ht, &iter.iter, - channel, node.node) { - consumer_del_channel(channel); - } - - rcu_read_unlock(); - - lttng_ht_destroy(the_consumer_data.channel_ht); - lttng_ht_destroy(the_consumer_data.channels_by_session_id_ht); - - cleanup_relayd_ht(); - - lttng_ht_destroy(the_consumer_data.stream_per_chan_id_ht); - - /* - * This HT contains streams that are freed by either the metadata thread or - * the data thread so we do *nothing* on the hash table and simply destroy - * it. - */ - lttng_ht_destroy(the_consumer_data.stream_list_ht); - - /* - * Trace chunks in the registry may still exist if the session - * daemon has encountered an internal error and could not - * tear down its sessions and/or trace chunks properly. - * - * Release the session daemon's implicit reference to any remaining - * trace chunk and print an error if any trace chunk was found. Note - * that there are _no_ legitimate cases for trace chunks to be left, - * it is a leak. However, it can happen following a crash of the - * session daemon and not emptying the registry would cause an assertion - * to hit. - */ - trace_chunks_left = lttng_trace_chunk_registry_put_each_chunk( - the_consumer_data.chunk_registry); - if (trace_chunks_left) { - ERR("%u trace chunks are leaked by lttng-consumerd. 
" - "This can be caused by an internal error of the session daemon.", - trace_chunks_left); - } - /* Run all callbacks freeing each chunk. */ - rcu_barrier(); - lttng_trace_chunk_registry_destroy(the_consumer_data.chunk_registry); -} - -/* - * Called from signal handler. - */ -void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx) -{ - ssize_t ret; - - CMM_STORE_SHARED(consumer_quit, 1); - ret = lttng_write(ctx->consumer_should_quit[1], "4", 1); - if (ret < 1) { - PERROR("write consumer quit"); - } - - DBG("Consumer flag that it should quit"); -} - - -/* - * Flush pending writes to trace output disk file. - */ -static -void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream, - off_t orig_offset) -{ - int ret; - int outfd = stream->out_fd; - - /* - * This does a blocking write-and-wait on any page that belongs to the - * subbuffer prior to the one we just wrote. - * Don't care about error values, as these are just hints and ways to - * limit the amount of page cache used. - */ - if (orig_offset < stream->max_sb_size) { - return; - } - lttng_sync_file_range(outfd, orig_offset - stream->max_sb_size, - stream->max_sb_size, - SYNC_FILE_RANGE_WAIT_BEFORE - | SYNC_FILE_RANGE_WRITE - | SYNC_FILE_RANGE_WAIT_AFTER); - /* - * Give hints to the kernel about how we access the file: - * POSIX_FADV_DONTNEED : we won't re-access data in a near future after - * we write it. - * - * We need to call fadvise again after the file grows because the - * kernel does not seem to apply fadvise to non-existing parts of the - * file. - * - * Call fadvise _after_ having waited for the page writeback to - * complete because the dirty page writeback semantic is not well - * defined. So it can be expected to lead to lower throughput in - * streaming. 
- */ - ret = posix_fadvise(outfd, orig_offset - stream->max_sb_size, - stream->max_sb_size, POSIX_FADV_DONTNEED); - if (ret && ret != -ENOSYS) { - errno = ret; - PERROR("posix_fadvise on fd %i", outfd); - } -} - -/* - * Initialise the necessary environnement : - * - create a new context - * - create the poll_pipe - * - create the should_quit pipe (for signal handler) - * - create the thread pipe (for splice) - * - * Takes a function pointer as argument, this function is called when data is - * available on a buffer. This function is responsible to do the - * kernctl_get_next_subbuf, read the data with mmap or splice depending on the - * buffer configuration and then kernctl_put_next_subbuf at the end. - * - * Returns a pointer to the new context or NULL on error. - */ -struct lttng_consumer_local_data *lttng_consumer_create( - enum lttng_consumer_type type, - ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream, - struct lttng_consumer_local_data *ctx, bool locked_by_caller), - int (*recv_channel)(struct lttng_consumer_channel *channel), - int (*recv_stream)(struct lttng_consumer_stream *stream), - int (*update_stream)(uint64_t stream_key, uint32_t state)) -{ - int ret; - struct lttng_consumer_local_data *ctx; - - LTTNG_ASSERT(the_consumer_data.type == LTTNG_CONSUMER_UNKNOWN || - the_consumer_data.type == type); - the_consumer_data.type = type; - - ctx = zmalloc(sizeof(struct lttng_consumer_local_data)); - if (ctx == NULL) { - PERROR("allocating context"); - goto error; - } - - ctx->consumer_error_socket = -1; - ctx->consumer_metadata_socket = -1; - pthread_mutex_init(&ctx->metadata_socket_lock, NULL); - /* assign the callbacks */ - ctx->on_buffer_ready = buffer_ready; - ctx->on_recv_channel = recv_channel; - ctx->on_recv_stream = recv_stream; - ctx->on_update_stream = update_stream; - - ctx->consumer_data_pipe = lttng_pipe_open(0); - if (!ctx->consumer_data_pipe) { - goto error_poll_pipe; - } - - ctx->consumer_wakeup_pipe = lttng_pipe_open(0); - if 
(!ctx->consumer_wakeup_pipe) { - goto error_wakeup_pipe; - } - - ret = pipe(ctx->consumer_should_quit); - if (ret < 0) { - PERROR("Error creating recv pipe"); - goto error_quit_pipe; - } - - ret = pipe(ctx->consumer_channel_pipe); - if (ret < 0) { - PERROR("Error creating channel pipe"); - goto error_channel_pipe; - } - - ctx->consumer_metadata_pipe = lttng_pipe_open(0); - if (!ctx->consumer_metadata_pipe) { - goto error_metadata_pipe; - } - - ctx->channel_monitor_pipe = -1; - - return ctx; - -error_metadata_pipe: - utils_close_pipe(ctx->consumer_channel_pipe); -error_channel_pipe: - utils_close_pipe(ctx->consumer_should_quit); -error_quit_pipe: - lttng_pipe_destroy(ctx->consumer_wakeup_pipe); -error_wakeup_pipe: - lttng_pipe_destroy(ctx->consumer_data_pipe); -error_poll_pipe: - free(ctx); -error: - return NULL; -} - -/* - * Iterate over all streams of the hashtable and free them properly. - */ -static void destroy_data_stream_ht(struct lttng_ht *ht) -{ - struct lttng_ht_iter iter; - struct lttng_consumer_stream *stream; - - if (ht == NULL) { - return; - } - - rcu_read_lock(); - cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) { - /* - * Ignore return value since we are currently cleaning up so any error - * can't be handled. - */ - (void) consumer_del_stream(stream, ht); - } - rcu_read_unlock(); - - lttng_ht_destroy(ht); -} - -/* - * Iterate over all streams of the metadata hashtable and free them - * properly. - */ -static void destroy_metadata_stream_ht(struct lttng_ht *ht) -{ - struct lttng_ht_iter iter; - struct lttng_consumer_stream *stream; - - if (ht == NULL) { - return; - } - - rcu_read_lock(); - cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) { - /* - * Ignore return value since we are currently cleaning up so any error - * can't be handled. - */ - (void) consumer_del_metadata_stream(stream, ht); - } - rcu_read_unlock(); - - lttng_ht_destroy(ht); -} - -/* - * Close all fds associated with the instance and free the context. 
- */ -void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx) -{ - int ret; - - DBG("Consumer destroying it. Closing everything."); - - if (!ctx) { - return; - } - - destroy_data_stream_ht(data_ht); - destroy_metadata_stream_ht(metadata_ht); - - ret = close(ctx->consumer_error_socket); - if (ret) { - PERROR("close"); - } - ret = close(ctx->consumer_metadata_socket); - if (ret) { - PERROR("close"); - } - utils_close_pipe(ctx->consumer_channel_pipe); - lttng_pipe_destroy(ctx->consumer_data_pipe); - lttng_pipe_destroy(ctx->consumer_metadata_pipe); - lttng_pipe_destroy(ctx->consumer_wakeup_pipe); - utils_close_pipe(ctx->consumer_should_quit); - - unlink(ctx->consumer_command_sock_path); - free(ctx); -} - -/* - * Write the metadata stream id on the specified file descriptor. - */ -static int write_relayd_metadata_id(int fd, - struct lttng_consumer_stream *stream, - unsigned long padding) -{ - ssize_t ret; - struct lttcomm_relayd_metadata_payload hdr; - - hdr.stream_id = htobe64(stream->relayd_stream_id); - hdr.padding_size = htobe32(padding); - ret = lttng_write(fd, (void *) &hdr, sizeof(hdr)); - if (ret < sizeof(hdr)) { - /* - * This error means that the fd's end is closed so ignore the PERROR - * not to clubber the error output since this can happen in a normal - * code path. - */ - if (errno != EPIPE) { - PERROR("write metadata stream id"); - } - DBG3("Consumer failed to write relayd metadata id (errno: %d)", errno); - /* - * Set ret to a negative value because if ret != sizeof(hdr), we don't - * handle writting the missing part so report that as an error and - * don't lie to the caller. - */ - ret = -1; - goto end; - } - DBG("Metadata stream id %" PRIu64 " with padding %lu written before data", - stream->relayd_stream_id, padding); - -end: - return (int) ret; -} - -/* - * Mmap the ring buffer, read it and write the data to the tracefile. This is a - * core function for writing trace buffers to either the local filesystem or - * the network. 
- * - * It must be called with the stream and the channel lock held. - * - * Careful review MUST be put if any changes occur! - * - * Returns the number of bytes written - */ -ssize_t lttng_consumer_on_read_subbuffer_mmap( - struct lttng_consumer_stream *stream, - const struct lttng_buffer_view *buffer, - unsigned long padding) -{ - ssize_t ret = 0; - off_t orig_offset = stream->out_fd_offset; - /* Default is on the disk */ - int outfd = stream->out_fd; - struct consumer_relayd_sock_pair *relayd = NULL; - unsigned int relayd_hang_up = 0; - const size_t subbuf_content_size = buffer->size - padding; - size_t write_len; - - /* RCU lock for the relayd pointer */ - rcu_read_lock(); - LTTNG_ASSERT(stream->net_seq_idx != (uint64_t) -1ULL || - stream->trace_chunk); - - /* Flag that the current stream if set for network streaming. */ - if (stream->net_seq_idx != (uint64_t) -1ULL) { - relayd = consumer_find_relayd(stream->net_seq_idx); - if (relayd == NULL) { - ret = -EPIPE; - goto end; - } - } - - /* Handle stream on the relayd if the output is on the network */ - if (relayd) { - unsigned long netlen = subbuf_content_size; - - /* - * Lock the control socket for the complete duration of the function - * since from this point on we will use the socket. - */ - if (stream->metadata_flag) { - /* Metadata requires the control socket. */ - pthread_mutex_lock(&relayd->ctrl_sock_mutex); - if (stream->reset_metadata_flag) { - ret = relayd_reset_metadata(&relayd->control_sock, - stream->relayd_stream_id, - stream->metadata_version); - if (ret < 0) { - relayd_hang_up = 1; - goto write_error; - } - stream->reset_metadata_flag = 0; - } - netlen += sizeof(struct lttcomm_relayd_metadata_payload); - } - - ret = write_relayd_stream_header(stream, netlen, padding, relayd); - if (ret < 0) { - relayd_hang_up = 1; - goto write_error; - } - /* Use the returned socket. 
*/ - outfd = ret; - - /* Write metadata stream id before payload */ - if (stream->metadata_flag) { - ret = write_relayd_metadata_id(outfd, stream, padding); - if (ret < 0) { - relayd_hang_up = 1; - goto write_error; - } - } - - write_len = subbuf_content_size; - } else { - /* No streaming; we have to write the full padding. */ - if (stream->metadata_flag && stream->reset_metadata_flag) { - ret = utils_truncate_stream_file(stream->out_fd, 0); - if (ret < 0) { - ERR("Reset metadata file"); - goto end; - } - stream->reset_metadata_flag = 0; - } - - /* - * Check if we need to change the tracefile before writing the packet. - */ - if (stream->chan->tracefile_size > 0 && - (stream->tracefile_size_current + buffer->size) > - stream->chan->tracefile_size) { - ret = consumer_stream_rotate_output_files(stream); - if (ret) { - goto end; - } - outfd = stream->out_fd; - orig_offset = 0; - } - stream->tracefile_size_current += buffer->size; - write_len = buffer->size; - } - - /* - * This call guarantee that len or less is returned. It's impossible to - * receive a ret value that is bigger than len. - */ - ret = lttng_write(outfd, buffer->data, write_len); - DBG("Consumer mmap write() ret %zd (len %zu)", ret, write_len); - if (ret < 0 || ((size_t) ret != write_len)) { - /* - * Report error to caller if nothing was written else at least send the - * amount written. - */ - if (ret < 0) { - ret = -errno; - } - relayd_hang_up = 1; - - /* Socket operation failed. We consider the relayd dead */ - if (errno == EPIPE) { - /* - * This is possible if the fd is closed on the other side - * (outfd) or any write problem. It can be verbose a bit for a - * normal execution if for instance the relayd is stopped - * abruptly. This can happen so set this to a DBG statement. - */ - DBG("Consumer mmap write detected relayd hang up"); - } else { - /* Unhandled error, print it and stop function right now. 
*/ - PERROR("Error in write mmap (ret %zd != write_len %zu)", ret, - write_len); - } - goto write_error; - } - stream->output_written += ret; - - /* This call is useless on a socket so better save a syscall. */ - if (!relayd) { - /* This won't block, but will start writeout asynchronously */ - lttng_sync_file_range(outfd, stream->out_fd_offset, write_len, - SYNC_FILE_RANGE_WRITE); - stream->out_fd_offset += write_len; - lttng_consumer_sync_trace_file(stream, orig_offset); - } - -write_error: - /* - * This is a special case that the relayd has closed its socket. Let's - * cleanup the relayd object and all associated streams. - */ - if (relayd && relayd_hang_up) { - ERR("Relayd hangup. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx); - lttng_consumer_cleanup_relayd(relayd); - } - -end: - /* Unlock only if ctrl socket used */ - if (relayd && stream->metadata_flag) { - pthread_mutex_unlock(&relayd->ctrl_sock_mutex); - } - - rcu_read_unlock(); - return ret; -} - -/* - * Splice the data from the ring buffer to the tracefile. - * - * It must be called with the stream lock held. - * - * Returns the number of bytes spliced. 
- */ -ssize_t lttng_consumer_on_read_subbuffer_splice( - struct lttng_consumer_local_data *ctx, - struct lttng_consumer_stream *stream, unsigned long len, - unsigned long padding) -{ - ssize_t ret = 0, written = 0, ret_splice = 0; - loff_t offset = 0; - off_t orig_offset = stream->out_fd_offset; - int fd = stream->wait_fd; - /* Default is on the disk */ - int outfd = stream->out_fd; - struct consumer_relayd_sock_pair *relayd = NULL; - int *splice_pipe; - unsigned int relayd_hang_up = 0; - - switch (the_consumer_data.type) { - case LTTNG_CONSUMER_KERNEL: - break; - case LTTNG_CONSUMER32_UST: - case LTTNG_CONSUMER64_UST: - /* Not supported for user space tracing */ - return -ENOSYS; - default: - ERR("Unknown consumer_data type"); - abort(); - } - - /* RCU lock for the relayd pointer */ - rcu_read_lock(); - - /* Flag that the current stream if set for network streaming. */ - if (stream->net_seq_idx != (uint64_t) -1ULL) { - relayd = consumer_find_relayd(stream->net_seq_idx); - if (relayd == NULL) { - written = -ret; - goto end; - } - } - splice_pipe = stream->splice_pipe; - - /* Write metadata stream id before payload */ - if (relayd) { - unsigned long total_len = len; - - if (stream->metadata_flag) { - /* - * Lock the control socket for the complete duration of the function - * since from this point on we will use the socket. 
- */ - pthread_mutex_lock(&relayd->ctrl_sock_mutex); - - if (stream->reset_metadata_flag) { - ret = relayd_reset_metadata(&relayd->control_sock, - stream->relayd_stream_id, - stream->metadata_version); - if (ret < 0) { - relayd_hang_up = 1; - goto write_error; - } - stream->reset_metadata_flag = 0; - } - ret = write_relayd_metadata_id(splice_pipe[1], stream, - padding); - if (ret < 0) { - written = ret; - relayd_hang_up = 1; - goto write_error; - } - - total_len += sizeof(struct lttcomm_relayd_metadata_payload); - } - - ret = write_relayd_stream_header(stream, total_len, padding, relayd); - if (ret < 0) { - written = ret; - relayd_hang_up = 1; - goto write_error; - } - /* Use the returned socket. */ - outfd = ret; - } else { - /* No streaming, we have to set the len with the full padding */ - len += padding; - - if (stream->metadata_flag && stream->reset_metadata_flag) { - ret = utils_truncate_stream_file(stream->out_fd, 0); - if (ret < 0) { - ERR("Reset metadata file"); - goto end; - } - stream->reset_metadata_flag = 0; - } - /* - * Check if we need to change the tracefile before writing the packet. 
- */ - if (stream->chan->tracefile_size > 0 && - (stream->tracefile_size_current + len) > - stream->chan->tracefile_size) { - ret = consumer_stream_rotate_output_files(stream); - if (ret < 0) { - written = ret; - goto end; - } - outfd = stream->out_fd; - orig_offset = 0; - } - stream->tracefile_size_current += len; - } - - while (len > 0) { - DBG("splice chan to pipe offset %lu of len %lu (fd : %d, pipe: %d)", - (unsigned long)offset, len, fd, splice_pipe[1]); - ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len, - SPLICE_F_MOVE | SPLICE_F_MORE); - DBG("splice chan to pipe, ret %zd", ret_splice); - if (ret_splice < 0) { - ret = errno; - written = -ret; - PERROR("Error in relay splice"); - goto splice_error; - } - - /* Handle stream on the relayd if the output is on the network */ - if (relayd && stream->metadata_flag) { - size_t metadata_payload_size = - sizeof(struct lttcomm_relayd_metadata_payload); - - /* Update counter to fit the spliced data */ - ret_splice += metadata_payload_size; - len += metadata_payload_size; - /* - * We do this so the return value can match the len passed as - * argument to this function. - */ - written -= metadata_payload_size; - } - - /* Splice data out */ - ret_splice = splice(splice_pipe[0], NULL, outfd, NULL, - ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE); - DBG("Consumer splice pipe to file (out_fd: %d), ret %zd", - outfd, ret_splice); - if (ret_splice < 0) { - ret = errno; - written = -ret; - relayd_hang_up = 1; - goto write_error; - } else if (ret_splice > len) { - /* - * We don't expect this code path to be executed but you never know - * so this is an extra protection agains a buggy splice(). - */ - ret = errno; - written += ret_splice; - PERROR("Wrote more data than requested %zd (len: %lu)", ret_splice, - len); - goto splice_error; - } else { - /* All good, update current len and continue. */ - len -= ret_splice; - } - - /* This call is useless on a socket so better save a syscall. 
*/ - if (!relayd) { - /* This won't block, but will start writeout asynchronously */ - lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice, - SYNC_FILE_RANGE_WRITE); - stream->out_fd_offset += ret_splice; - } - stream->output_written += ret_splice; - written += ret_splice; - } - if (!relayd) { - lttng_consumer_sync_trace_file(stream, orig_offset); - } - goto end; - -write_error: - /* - * This is a special case that the relayd has closed its socket. Let's - * cleanup the relayd object and all associated streams. - */ - if (relayd && relayd_hang_up) { - ERR("Relayd hangup. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx); - lttng_consumer_cleanup_relayd(relayd); - /* Skip splice error so the consumer does not fail */ - goto end; - } - -splice_error: - /* send the appropriate error description to sessiond */ - switch (ret) { - case EINVAL: - lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL); - break; - case ENOMEM: - lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM); - break; - case ESPIPE: - lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE); - break; - } - -end: - if (relayd && stream->metadata_flag) { - pthread_mutex_unlock(&relayd->ctrl_sock_mutex); - } - - rcu_read_unlock(); - return written; -} - -/* - * Sample the snapshot positions for a specific fd - * - * Returns 0 on success, < 0 on error - */ -int lttng_consumer_sample_snapshot_positions(struct lttng_consumer_stream *stream) -{ - switch (the_consumer_data.type) { - case LTTNG_CONSUMER_KERNEL: - return lttng_kconsumer_sample_snapshot_positions(stream); - case LTTNG_CONSUMER32_UST: - case LTTNG_CONSUMER64_UST: - return lttng_ustconsumer_sample_snapshot_positions(stream); - default: - ERR("Unknown consumer_data type"); - abort(); - return -ENOSYS; - } -} -/* - * Take a snapshot for a specific fd - * - * Returns 0 on success, < 0 on error - */ -int lttng_consumer_take_snapshot(struct lttng_consumer_stream *stream) -{ - switch (the_consumer_data.type) { - 
case LTTNG_CONSUMER_KERNEL: - return lttng_kconsumer_take_snapshot(stream); - case LTTNG_CONSUMER32_UST: - case LTTNG_CONSUMER64_UST: - return lttng_ustconsumer_take_snapshot(stream); - default: - ERR("Unknown consumer_data type"); - abort(); - return -ENOSYS; - } -} - -/* - * Get the produced position - * - * Returns 0 on success, < 0 on error - */ -int lttng_consumer_get_produced_snapshot(struct lttng_consumer_stream *stream, - unsigned long *pos) -{ - switch (the_consumer_data.type) { - case LTTNG_CONSUMER_KERNEL: - return lttng_kconsumer_get_produced_snapshot(stream, pos); - case LTTNG_CONSUMER32_UST: - case LTTNG_CONSUMER64_UST: - return lttng_ustconsumer_get_produced_snapshot(stream, pos); - default: - ERR("Unknown consumer_data type"); - abort(); - return -ENOSYS; - } -} - -/* - * Get the consumed position (free-running counter position in bytes). - * - * Returns 0 on success, < 0 on error - */ -int lttng_consumer_get_consumed_snapshot(struct lttng_consumer_stream *stream, - unsigned long *pos) -{ - switch (the_consumer_data.type) { - case LTTNG_CONSUMER_KERNEL: - return lttng_kconsumer_get_consumed_snapshot(stream, pos); - case LTTNG_CONSUMER32_UST: - case LTTNG_CONSUMER64_UST: - return lttng_ustconsumer_get_consumed_snapshot(stream, pos); - default: - ERR("Unknown consumer_data type"); - abort(); - return -ENOSYS; - } -} - -int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx, - int sock, struct pollfd *consumer_sockpoll) -{ - switch (the_consumer_data.type) { - case LTTNG_CONSUMER_KERNEL: - return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll); - case LTTNG_CONSUMER32_UST: - case LTTNG_CONSUMER64_UST: - return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll); - default: - ERR("Unknown consumer_data type"); - abort(); - return -ENOSYS; - } -} - -static -void lttng_consumer_close_all_metadata(void) -{ - switch (the_consumer_data.type) { - case LTTNG_CONSUMER_KERNEL: - /* - * The Kernel consumer has a different metadata scheme so 
we don't - * close anything because the stream will be closed by the session - * daemon. - */ - break; - case LTTNG_CONSUMER32_UST: - case LTTNG_CONSUMER64_UST: - /* - * Close all metadata streams. The metadata hash table is passed and - * this call iterates over it by closing all wakeup fd. This is safe - * because at this point we are sure that the metadata producer is - * either dead or blocked. - */ - lttng_ustconsumer_close_all_metadata(metadata_ht); - break; - default: - ERR("Unknown consumer_data type"); - abort(); - } -} - -/* - * Clean up a metadata stream and free its memory. - */ -void consumer_del_metadata_stream(struct lttng_consumer_stream *stream, - struct lttng_ht *ht) -{ - struct lttng_consumer_channel *channel = NULL; - bool free_channel = false; - - LTTNG_ASSERT(stream); - /* - * This call should NEVER receive regular stream. It must always be - * metadata stream and this is crucial for data structure synchronization. - */ - LTTNG_ASSERT(stream->metadata_flag); - - DBG3("Consumer delete metadata stream %d", stream->wait_fd); - - pthread_mutex_lock(&the_consumer_data.lock); - /* - * Note that this assumes that a stream's channel is never changed and - * that the stream's lock doesn't need to be taken to sample its - * channel. - */ - channel = stream->chan; - pthread_mutex_lock(&channel->lock); - pthread_mutex_lock(&stream->lock); - if (channel->metadata_cache) { - /* Only applicable to userspace consumers. */ - pthread_mutex_lock(&channel->metadata_cache->lock); - } - - /* Remove any reference to that stream. */ - consumer_stream_delete(stream, ht); - - /* Close down everything including the relayd if one. */ - consumer_stream_close(stream); - /* Destroy tracer buffers of the stream. */ - consumer_stream_destroy_buffers(stream); - - /* Atomically decrement channel refcount since other threads can use it. */ - if (!uatomic_sub_return(&channel->refcount, 1) - && !uatomic_read(&channel->nb_init_stream_left)) { - /* Go for channel deletion! 
*/ - free_channel = true; - } - stream->chan = NULL; - - /* - * Nullify the stream reference so it is not used after deletion. The - * channel lock MUST be acquired before being able to check for a NULL - * pointer value. - */ - channel->metadata_stream = NULL; - - if (channel->metadata_cache) { - pthread_mutex_unlock(&channel->metadata_cache->lock); - } - pthread_mutex_unlock(&stream->lock); - pthread_mutex_unlock(&channel->lock); - pthread_mutex_unlock(&the_consumer_data.lock); - - if (free_channel) { - consumer_del_channel(channel); - } - - lttng_trace_chunk_put(stream->trace_chunk); - stream->trace_chunk = NULL; - consumer_stream_free(stream); -} - -/* - * Action done with the metadata stream when adding it to the consumer internal - * data structures to handle it. - */ -void consumer_add_metadata_stream(struct lttng_consumer_stream *stream) -{ - struct lttng_ht *ht = metadata_ht; - struct lttng_ht_iter iter; - struct lttng_ht_node_u64 *node; - - LTTNG_ASSERT(stream); - LTTNG_ASSERT(ht); - - DBG3("Adding metadata stream %" PRIu64 " to hash table", stream->key); - - pthread_mutex_lock(&the_consumer_data.lock); - pthread_mutex_lock(&stream->chan->lock); - pthread_mutex_lock(&stream->chan->timer_lock); - pthread_mutex_lock(&stream->lock); - - /* - * From here, refcounts are updated so be _careful_ when returning an error - * after this point. - */ - - rcu_read_lock(); - - /* - * Lookup the stream just to make sure it does not exist in our internal - * state. This should NEVER happen. - */ - lttng_ht_lookup(ht, &stream->key, &iter); - node = lttng_ht_iter_get_node_u64(&iter); - LTTNG_ASSERT(!node); - - /* - * When nb_init_stream_left reaches 0, we don't need to trigger any action - * in terms of destroying the associated channel, because the action that - * causes the count to become 0 also causes a stream to be added. The - * channel deletion will thus be triggered by the following removal of this - * stream. 
- */ - if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) { - /* Increment refcount before decrementing nb_init_stream_left */ - cmm_smp_wmb(); - uatomic_dec(&stream->chan->nb_init_stream_left); - } - - lttng_ht_add_unique_u64(ht, &stream->node); - - lttng_ht_add_u64(the_consumer_data.stream_per_chan_id_ht, - &stream->node_channel_id); - - /* - * Add stream to the stream_list_ht of the consumer data. No need to steal - * the key since the HT does not use it and we allow to add redundant keys - * into this table. - */ - lttng_ht_add_u64(the_consumer_data.stream_list_ht, - &stream->node_session_id); - - rcu_read_unlock(); - - pthread_mutex_unlock(&stream->lock); - pthread_mutex_unlock(&stream->chan->lock); - pthread_mutex_unlock(&stream->chan->timer_lock); - pthread_mutex_unlock(&the_consumer_data.lock); -} - -/* - * Delete data stream that are flagged for deletion (endpoint_status). - */ -static void validate_endpoint_status_data_stream(void) -{ - struct lttng_ht_iter iter; - struct lttng_consumer_stream *stream; - - DBG("Consumer delete flagged data stream"); - - rcu_read_lock(); - cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) { - /* Validate delete flag of the stream */ - if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) { - continue; - } - /* Delete it right now */ - consumer_del_stream(stream, data_ht); - } - rcu_read_unlock(); -} - -/* - * Delete metadata stream that are flagged for deletion (endpoint_status). 
- */ -static void validate_endpoint_status_metadata_stream( - struct lttng_poll_event *pollset) -{ - struct lttng_ht_iter iter; - struct lttng_consumer_stream *stream; - - DBG("Consumer delete flagged metadata stream"); - - LTTNG_ASSERT(pollset); - - rcu_read_lock(); - cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) { - /* Validate delete flag of the stream */ - if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) { - continue; - } - /* - * Remove from pollset so the metadata thread can continue without - * blocking on a deleted stream. - */ - lttng_poll_del(pollset, stream->wait_fd); - - /* Delete it right now */ - consumer_del_metadata_stream(stream, metadata_ht); - } - rcu_read_unlock(); -} - -/* - * Thread polls on metadata file descriptor and write them on disk or on the - * network. - */ -void *consumer_thread_metadata_poll(void *data) -{ - int ret, i, pollfd, err = -1; - uint32_t revents, nb_fd; - struct lttng_consumer_stream *stream = NULL; - struct lttng_ht_iter iter; - struct lttng_ht_node_u64 *node; - struct lttng_poll_event events; - struct lttng_consumer_local_data *ctx = data; - ssize_t len; - - rcu_register_thread(); - - health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_METADATA); - - if (testpoint(consumerd_thread_metadata)) { - goto error_testpoint; - } - - health_code_update(); - - DBG("Thread metadata poll started"); - - /* Size is set to 1 for the consumer_metadata pipe */ - ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC); - if (ret < 0) { - ERR("Poll set creation failed"); - goto end_poll; - } - - ret = lttng_poll_add(&events, - lttng_pipe_get_readfd(ctx->consumer_metadata_pipe), LPOLLIN); - if (ret < 0) { - goto end; - } - - /* Main loop */ - DBG("Metadata main loop started"); - - while (1) { -restart: - health_code_update(); - health_poll_entry(); - DBG("Metadata poll wait"); - ret = lttng_poll_wait(&events, -1); - DBG("Metadata poll return from wait with %d fd(s)", - LTTNG_POLL_GETNB(&events)); - 
health_poll_exit(); - DBG("Metadata event caught in thread"); - if (ret < 0) { - if (errno == EINTR) { - ERR("Poll EINTR caught"); - goto restart; - } - if (LTTNG_POLL_GETNB(&events) == 0) { - err = 0; /* All is OK */ - } - goto end; - } - - nb_fd = ret; - - /* From here, the event is a metadata wait fd */ - for (i = 0; i < nb_fd; i++) { - health_code_update(); - - revents = LTTNG_POLL_GETEV(&events, i); - pollfd = LTTNG_POLL_GETFD(&events, i); - - if (pollfd == lttng_pipe_get_readfd(ctx->consumer_metadata_pipe)) { - if (revents & LPOLLIN) { - ssize_t pipe_len; - - pipe_len = lttng_pipe_read(ctx->consumer_metadata_pipe, - &stream, sizeof(stream)); - if (pipe_len < sizeof(stream)) { - if (pipe_len < 0) { - PERROR("read metadata stream"); - } - /* - * Remove the pipe from the poll set and continue the loop - * since their might be data to consume. - */ - lttng_poll_del(&events, - lttng_pipe_get_readfd(ctx->consumer_metadata_pipe)); - lttng_pipe_read_close(ctx->consumer_metadata_pipe); - continue; - } - - /* A NULL stream means that the state has changed. */ - if (stream == NULL) { - /* Check for deleted streams. */ - validate_endpoint_status_metadata_stream(&events); - goto restart; - } - - DBG("Adding metadata stream %d to poll set", - stream->wait_fd); - - /* Add metadata stream to the global poll events list */ - lttng_poll_add(&events, stream->wait_fd, - LPOLLIN | LPOLLPRI | LPOLLHUP); - } else if (revents & (LPOLLERR | LPOLLHUP)) { - DBG("Metadata thread pipe hung up"); - /* - * Remove the pipe from the poll set and continue the loop - * since their might be data to consume. 
- */ - lttng_poll_del(&events, - lttng_pipe_get_readfd(ctx->consumer_metadata_pipe)); - lttng_pipe_read_close(ctx->consumer_metadata_pipe); - continue; - } else { - ERR("Unexpected poll events %u for sock %d", revents, pollfd); - goto end; - } - - /* Handle other stream */ - continue; - } - - rcu_read_lock(); - { - uint64_t tmp_id = (uint64_t) pollfd; - - lttng_ht_lookup(metadata_ht, &tmp_id, &iter); - } - node = lttng_ht_iter_get_node_u64(&iter); - LTTNG_ASSERT(node); - - stream = caa_container_of(node, struct lttng_consumer_stream, - node); - - if (revents & (LPOLLIN | LPOLLPRI)) { - /* Get the data out of the metadata file descriptor */ - DBG("Metadata available on fd %d", pollfd); - LTTNG_ASSERT(stream->wait_fd == pollfd); - - do { - health_code_update(); - - len = ctx->on_buffer_ready(stream, ctx, false); - /* - * We don't check the return value here since if we get - * a negative len, it means an error occurred thus we - * simply remove it from the poll set and free the - * stream. - */ - } while (len > 0); - - /* It's ok to have an unavailable sub-buffer */ - if (len < 0 && len != -EAGAIN && len != -ENODATA) { - /* Clean up stream from consumer and free it. */ - lttng_poll_del(&events, stream->wait_fd); - consumer_del_metadata_stream(stream, metadata_ht); - } - } else if (revents & (LPOLLERR | LPOLLHUP)) { - DBG("Metadata fd %d is hup|err.", pollfd); - if (!stream->hangup_flush_done && - (the_consumer_data.type == LTTNG_CONSUMER32_UST || - the_consumer_data.type == - LTTNG_CONSUMER64_UST)) { - DBG("Attempting to flush and consume the UST buffers"); - lttng_ustconsumer_on_stream_hangup(stream); - - /* We just flushed the stream now read it. */ - do { - health_code_update(); - - len = ctx->on_buffer_ready(stream, ctx, false); - /* - * We don't check the return value here since if we get - * a negative len, it means an error occurred thus we - * simply remove it from the poll set and free the - * stream. 
- */ - } while (len > 0); - } - - lttng_poll_del(&events, stream->wait_fd); - /* - * This call update the channel states, closes file descriptors - * and securely free the stream. - */ - consumer_del_metadata_stream(stream, metadata_ht); - } else { - ERR("Unexpected poll events %u for sock %d", revents, pollfd); - rcu_read_unlock(); - goto end; - } - /* Release RCU lock for the stream looked up */ - rcu_read_unlock(); - } - } - - /* All is OK */ - err = 0; -end: - DBG("Metadata poll thread exiting"); - - lttng_poll_clean(&events); -end_poll: -error_testpoint: - if (err) { - health_error(); - ERR("Health error occurred in %s", __func__); - } - health_unregister(health_consumerd); - rcu_unregister_thread(); - return NULL; -} - -/* - * This thread polls the fds in the set to consume the data and write - * it to tracefile if necessary. - */ -void *consumer_thread_data_poll(void *data) -{ - int num_rdy, num_hup, high_prio, ret, i, err = -1; - struct pollfd *pollfd = NULL; - /* local view of the streams */ - struct lttng_consumer_stream **local_stream = NULL, *new_stream = NULL; - /* local view of consumer_data.fds_count */ - int nb_fd = 0; - /* 2 for the consumer_data_pipe and wake up pipe */ - const int nb_pipes_fd = 2; - /* Number of FDs with CONSUMER_ENDPOINT_INACTIVE but still open. 
*/ - int nb_inactive_fd = 0; - struct lttng_consumer_local_data *ctx = data; - ssize_t len; - - rcu_register_thread(); - - health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_DATA); - - if (testpoint(consumerd_thread_data)) { - goto error_testpoint; - } - - health_code_update(); - - local_stream = zmalloc(sizeof(struct lttng_consumer_stream *)); - if (local_stream == NULL) { - PERROR("local_stream malloc"); - goto end; - } - - while (1) { - health_code_update(); - - high_prio = 0; - num_hup = 0; - - /* - * the fds set has been updated, we need to update our - * local array as well - */ - pthread_mutex_lock(&the_consumer_data.lock); - if (the_consumer_data.need_update) { - free(pollfd); - pollfd = NULL; - - free(local_stream); - local_stream = NULL; - - /* Allocate for all fds */ - pollfd = zmalloc((the_consumer_data.stream_count + - nb_pipes_fd) * - sizeof(struct pollfd)); - if (pollfd == NULL) { - PERROR("pollfd malloc"); - pthread_mutex_unlock(&the_consumer_data.lock); - goto end; - } - - local_stream = zmalloc((the_consumer_data.stream_count + - nb_pipes_fd) * - sizeof(struct lttng_consumer_stream *)); - if (local_stream == NULL) { - PERROR("local_stream malloc"); - pthread_mutex_unlock(&the_consumer_data.lock); - goto end; - } - ret = update_poll_array(ctx, &pollfd, local_stream, - data_ht, &nb_inactive_fd); - if (ret < 0) { - ERR("Error in allocating pollfd or local_outfds"); - lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR); - pthread_mutex_unlock(&the_consumer_data.lock); - goto end; - } - nb_fd = ret; - the_consumer_data.need_update = 0; - } - pthread_mutex_unlock(&the_consumer_data.lock); - - /* No FDs and consumer_quit, consumer_cleanup the thread */ - if (nb_fd == 0 && nb_inactive_fd == 0 && - CMM_LOAD_SHARED(consumer_quit) == 1) { - err = 0; /* All is OK */ - goto end; - } - /* poll on the array of fds */ - restart: - DBG("polling on %d fd", nb_fd + nb_pipes_fd); - if (testpoint(consumerd_thread_data_poll)) { - goto end; - } - 
health_poll_entry(); - num_rdy = poll(pollfd, nb_fd + nb_pipes_fd, -1); - health_poll_exit(); - DBG("poll num_rdy : %d", num_rdy); - if (num_rdy == -1) { - /* - * Restart interrupted system call. - */ - if (errno == EINTR) { - goto restart; - } - PERROR("Poll error"); - lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR); - goto end; - } else if (num_rdy == 0) { - DBG("Polling thread timed out"); - goto end; - } - - if (caa_unlikely(data_consumption_paused)) { - DBG("Data consumption paused, sleeping..."); - sleep(1); - goto restart; - } - - /* - * If the consumer_data_pipe triggered poll go directly to the - * beginning of the loop to update the array. We want to prioritize - * array update over low-priority reads. - */ - if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) { - ssize_t pipe_readlen; - - DBG("consumer_data_pipe wake up"); - pipe_readlen = lttng_pipe_read(ctx->consumer_data_pipe, - &new_stream, sizeof(new_stream)); - if (pipe_readlen < sizeof(new_stream)) { - PERROR("Consumer data pipe"); - /* Continue so we can at least handle the current stream(s). */ - continue; - } - - /* - * If the stream is NULL, just ignore it. It's also possible that - * the sessiond poll thread changed the consumer_quit state and is - * waking us up to test it. - */ - if (new_stream == NULL) { - validate_endpoint_status_data_stream(); - continue; - } - - /* Continue to update the local streams and handle prio ones */ - continue; - } - - /* Handle wakeup pipe. */ - if (pollfd[nb_fd + 1].revents & (POLLIN | POLLPRI)) { - char dummy; - ssize_t pipe_readlen; - - pipe_readlen = lttng_pipe_read(ctx->consumer_wakeup_pipe, &dummy, - sizeof(dummy)); - if (pipe_readlen < 0) { - PERROR("Consumer data wakeup pipe"); - } - /* We've been awakened to handle stream(s). */ - ctx->has_wakeup = 0; - } - - /* Take care of high priority channels first. 
*/ - for (i = 0; i < nb_fd; i++) { - health_code_update(); - - if (local_stream[i] == NULL) { - continue; - } - if (pollfd[i].revents & POLLPRI) { - DBG("Urgent read on fd %d", pollfd[i].fd); - high_prio = 1; - len = ctx->on_buffer_ready(local_stream[i], ctx, false); - /* it's ok to have an unavailable sub-buffer */ - if (len < 0 && len != -EAGAIN && len != -ENODATA) { - /* Clean the stream and free it. */ - consumer_del_stream(local_stream[i], data_ht); - local_stream[i] = NULL; - } else if (len > 0) { - local_stream[i]->data_read = 1; - } - } - } - - /* - * If we read high prio channel in this loop, try again - * for more high prio data. - */ - if (high_prio) { - continue; - } - - /* Take care of low priority channels. */ - for (i = 0; i < nb_fd; i++) { - health_code_update(); - - if (local_stream[i] == NULL) { - continue; - } - if ((pollfd[i].revents & POLLIN) || - local_stream[i]->hangup_flush_done || - local_stream[i]->has_data) { - DBG("Normal read on fd %d", pollfd[i].fd); - len = ctx->on_buffer_ready(local_stream[i], ctx, false); - /* it's ok to have an unavailable sub-buffer */ - if (len < 0 && len != -EAGAIN && len != -ENODATA) { - /* Clean the stream and free it. */ - consumer_del_stream(local_stream[i], data_ht); - local_stream[i] = NULL; - } else if (len > 0) { - local_stream[i]->data_read = 1; - } - } - } - - /* Handle hangup and errors */ - for (i = 0; i < nb_fd; i++) { - health_code_update(); - - if (local_stream[i] == NULL) { - continue; - } - if (!local_stream[i]->hangup_flush_done - && (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL)) - && (the_consumer_data.type == LTTNG_CONSUMER32_UST - || the_consumer_data.type == LTTNG_CONSUMER64_UST)) { - DBG("fd %d is hup|err|nval. Attempting flush and read.", - pollfd[i].fd); - lttng_ustconsumer_on_stream_hangup(local_stream[i]); - /* Attempt read again, for the data we just flushed. 
*/ - local_stream[i]->data_read = 1; - } - /* - * If the poll flag is HUP/ERR/NVAL and we have - * read no data in this pass, we can remove the - * stream from its hash table. - */ - if ((pollfd[i].revents & POLLHUP)) { - DBG("Polling fd %d tells it has hung up.", pollfd[i].fd); - if (!local_stream[i]->data_read) { - consumer_del_stream(local_stream[i], data_ht); - local_stream[i] = NULL; - num_hup++; - } - } else if (pollfd[i].revents & POLLERR) { - ERR("Error returned in polling fd %d.", pollfd[i].fd); - if (!local_stream[i]->data_read) { - consumer_del_stream(local_stream[i], data_ht); - local_stream[i] = NULL; - num_hup++; - } - } else if (pollfd[i].revents & POLLNVAL) { - ERR("Polling fd %d tells fd is not open.", pollfd[i].fd); - if (!local_stream[i]->data_read) { - consumer_del_stream(local_stream[i], data_ht); - local_stream[i] = NULL; - num_hup++; - } - } - if (local_stream[i] != NULL) { - local_stream[i]->data_read = 0; - } - } - } - /* All is OK */ - err = 0; -end: - DBG("polling thread exiting"); - free(pollfd); - free(local_stream); - - /* - * Close the write side of the pipe so epoll_wait() in - * consumer_thread_metadata_poll can catch it. The thread is monitoring the - * read side of the pipe. If we close them both, epoll_wait strangely does - * not return and could create a endless wait period if the pipe is the - * only tracked fd in the poll set. The thread will take care of closing - * the read side. - */ - (void) lttng_pipe_write_close(ctx->consumer_metadata_pipe); - -error_testpoint: - if (err) { - health_error(); - ERR("Health error occurred in %s", __func__); - } - health_unregister(health_consumerd); - - rcu_unregister_thread(); - return NULL; -} - -/* - * Close wake-up end of each stream belonging to the channel. This will - * allow the poll() on the stream read-side to detect when the - * write-side (application) finally closes them. 
- */ -static -void consumer_close_channel_streams(struct lttng_consumer_channel *channel) -{ - struct lttng_ht *ht; - struct lttng_consumer_stream *stream; - struct lttng_ht_iter iter; - - ht = the_consumer_data.stream_per_chan_id_ht; - - rcu_read_lock(); - cds_lfht_for_each_entry_duplicate(ht->ht, - ht->hash_fct(&channel->key, lttng_ht_seed), - ht->match_fct, &channel->key, - &iter.iter, stream, node_channel_id.node) { - /* - * Protect against teardown with mutex. - */ - pthread_mutex_lock(&stream->lock); - if (cds_lfht_is_node_deleted(&stream->node.node)) { - goto next; - } - switch (the_consumer_data.type) { - case LTTNG_CONSUMER_KERNEL: - break; - case LTTNG_CONSUMER32_UST: - case LTTNG_CONSUMER64_UST: - if (stream->metadata_flag) { - /* Safe and protected by the stream lock. */ - lttng_ustconsumer_close_metadata(stream->chan); - } else { - /* - * Note: a mutex is taken internally within - * liblttng-ust-ctl to protect timer wakeup_fd - * use from concurrent close. - */ - lttng_ustconsumer_close_stream_wakeup(stream); - } - break; - default: - ERR("Unknown consumer_data type"); - abort(); - } - next: - pthread_mutex_unlock(&stream->lock); - } - rcu_read_unlock(); -} - -static void destroy_channel_ht(struct lttng_ht *ht) -{ - struct lttng_ht_iter iter; - struct lttng_consumer_channel *channel; - int ret; - - if (ht == NULL) { - return; - } - - rcu_read_lock(); - cds_lfht_for_each_entry(ht->ht, &iter.iter, channel, wait_fd_node.node) { - ret = lttng_ht_del(ht, &iter); - LTTNG_ASSERT(ret != 0); - } - rcu_read_unlock(); - - lttng_ht_destroy(ht); -} - -/* - * This thread polls the channel fds to detect when they are being - * closed. It closes all related streams if the channel is detected as - * closed. It is currently only used as a shim layer for UST because the - * consumerd needs to keep the per-stream wakeup end of pipes open for - * periodical flush. 
- */ -void *consumer_thread_channel_poll(void *data) -{ - int ret, i, pollfd, err = -1; - uint32_t revents, nb_fd; - struct lttng_consumer_channel *chan = NULL; - struct lttng_ht_iter iter; - struct lttng_ht_node_u64 *node; - struct lttng_poll_event events; - struct lttng_consumer_local_data *ctx = data; - struct lttng_ht *channel_ht; - - rcu_register_thread(); - - health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_CHANNEL); - - if (testpoint(consumerd_thread_channel)) { - goto error_testpoint; - } - - health_code_update(); - - channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64); - if (!channel_ht) { - /* ENOMEM at this point. Better to bail out. */ - goto end_ht; - } - - DBG("Thread channel poll started"); - - /* Size is set to 1 for the consumer_channel pipe */ - ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC); - if (ret < 0) { - ERR("Poll set creation failed"); - goto end_poll; - } - - ret = lttng_poll_add(&events, ctx->consumer_channel_pipe[0], LPOLLIN); - if (ret < 0) { - goto end; - } - - /* Main loop */ - DBG("Channel main loop started"); - - while (1) { -restart: - health_code_update(); - DBG("Channel poll wait"); - health_poll_entry(); - ret = lttng_poll_wait(&events, -1); - DBG("Channel poll return from wait with %d fd(s)", - LTTNG_POLL_GETNB(&events)); - health_poll_exit(); - DBG("Channel event caught in thread"); - if (ret < 0) { - if (errno == EINTR) { - ERR("Poll EINTR caught"); - goto restart; - } - if (LTTNG_POLL_GETNB(&events) == 0) { - err = 0; /* All is OK */ - } - goto end; - } - - nb_fd = ret; - - /* From here, the event is a channel wait fd */ - for (i = 0; i < nb_fd; i++) { - health_code_update(); - - revents = LTTNG_POLL_GETEV(&events, i); - pollfd = LTTNG_POLL_GETFD(&events, i); - - if (pollfd == ctx->consumer_channel_pipe[0]) { - if (revents & LPOLLIN) { - enum consumer_channel_action action; - uint64_t key; - - ret = read_channel_pipe(ctx, &chan, &key, &action); - if (ret <= 0) { - if (ret < 0) { - ERR("Error reading channel pipe"); - 
} - lttng_poll_del(&events, ctx->consumer_channel_pipe[0]); - continue; - } - - switch (action) { - case CONSUMER_CHANNEL_ADD: - DBG("Adding channel %d to poll set", - chan->wait_fd); - - lttng_ht_node_init_u64(&chan->wait_fd_node, - chan->wait_fd); - rcu_read_lock(); - lttng_ht_add_unique_u64(channel_ht, - &chan->wait_fd_node); - rcu_read_unlock(); - /* Add channel to the global poll events list */ - lttng_poll_add(&events, chan->wait_fd, - LPOLLERR | LPOLLHUP); - break; - case CONSUMER_CHANNEL_DEL: - { - /* - * This command should never be called if the channel - * has streams monitored by either the data or metadata - * thread. The consumer only notify this thread with a - * channel del. command if it receives a destroy - * channel command from the session daemon that send it - * if a command prior to the GET_CHANNEL failed. - */ - - rcu_read_lock(); - chan = consumer_find_channel(key); - if (!chan) { - rcu_read_unlock(); - ERR("UST consumer get channel key %" PRIu64 " not found for del channel", key); - break; - } - lttng_poll_del(&events, chan->wait_fd); - iter.iter.node = &chan->wait_fd_node.node; - ret = lttng_ht_del(channel_ht, &iter); - LTTNG_ASSERT(ret == 0); - - switch (the_consumer_data.type) { - case LTTNG_CONSUMER_KERNEL: - break; - case LTTNG_CONSUMER32_UST: - case LTTNG_CONSUMER64_UST: - health_code_update(); - /* Destroy streams that might have been left in the stream list. */ - clean_channel_stream_list(chan); - break; - default: - ERR("Unknown consumer_data type"); - abort(); - } - - /* - * Release our own refcount. Force channel deletion even if - * streams were not initialized. - */ - if (!uatomic_sub_return(&chan->refcount, 1)) { - consumer_del_channel(chan); - } - rcu_read_unlock(); - goto restart; - } - case CONSUMER_CHANNEL_QUIT: - /* - * Remove the pipe from the poll set and continue the loop - * since their might be data to consume. 
- */ - lttng_poll_del(&events, ctx->consumer_channel_pipe[0]); - continue; - default: - ERR("Unknown action"); - break; - } - } else if (revents & (LPOLLERR | LPOLLHUP)) { - DBG("Channel thread pipe hung up"); - /* - * Remove the pipe from the poll set and continue the loop - * since their might be data to consume. - */ - lttng_poll_del(&events, ctx->consumer_channel_pipe[0]); - continue; - } else { - ERR("Unexpected poll events %u for sock %d", revents, pollfd); - goto end; - } - - /* Handle other stream */ - continue; - } - - rcu_read_lock(); - { - uint64_t tmp_id = (uint64_t) pollfd; - - lttng_ht_lookup(channel_ht, &tmp_id, &iter); - } - node = lttng_ht_iter_get_node_u64(&iter); - LTTNG_ASSERT(node); - - chan = caa_container_of(node, struct lttng_consumer_channel, - wait_fd_node); - - /* Check for error event */ - if (revents & (LPOLLERR | LPOLLHUP)) { - DBG("Channel fd %d is hup|err.", pollfd); - - lttng_poll_del(&events, chan->wait_fd); - ret = lttng_ht_del(channel_ht, &iter); - LTTNG_ASSERT(ret == 0); - - /* - * This will close the wait fd for each stream associated to - * this channel AND monitored by the data/metadata thread thus - * will be clean by the right thread. 
- */ - consumer_close_channel_streams(chan); - - /* Release our own refcount */ - if (!uatomic_sub_return(&chan->refcount, 1) - && !uatomic_read(&chan->nb_init_stream_left)) { - consumer_del_channel(chan); - } - } else { - ERR("Unexpected poll events %u for sock %d", revents, pollfd); - rcu_read_unlock(); - goto end; - } - - /* Release RCU lock for the channel looked up */ - rcu_read_unlock(); - } - } - - /* All is OK */ - err = 0; -end: - lttng_poll_clean(&events); -end_poll: - destroy_channel_ht(channel_ht); -end_ht: -error_testpoint: - DBG("Channel poll thread exiting"); - if (err) { - health_error(); - ERR("Health error occurred in %s", __func__); - } - health_unregister(health_consumerd); - rcu_unregister_thread(); - return NULL; -} - -static int set_metadata_socket(struct lttng_consumer_local_data *ctx, - struct pollfd *sockpoll, int client_socket) -{ - int ret; - - LTTNG_ASSERT(ctx); - LTTNG_ASSERT(sockpoll); - - ret = lttng_consumer_poll_socket(sockpoll); - if (ret) { - goto error; - } - DBG("Metadata connection on client_socket"); - - /* Blocking call, waiting for transmission */ - ctx->consumer_metadata_socket = lttcomm_accept_unix_sock(client_socket); - if (ctx->consumer_metadata_socket < 0) { - WARN("On accept metadata"); - ret = -1; - goto error; - } - ret = 0; - -error: - return ret; -} - -/* - * This thread listens on the consumerd socket and receives the file - * descriptors from the session daemon. - */ -void *consumer_thread_sessiond_poll(void *data) -{ - int sock = -1, client_socket, ret, err = -1; - /* - * structure to poll for incoming data on communication socket avoids - * making blocking sockets. 
- */ - struct pollfd consumer_sockpoll[2]; - struct lttng_consumer_local_data *ctx = data; - - rcu_register_thread(); - - health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_SESSIOND); - - if (testpoint(consumerd_thread_sessiond)) { - goto error_testpoint; - } - - health_code_update(); - - DBG("Creating command socket %s", ctx->consumer_command_sock_path); - unlink(ctx->consumer_command_sock_path); - client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path); - if (client_socket < 0) { - ERR("Cannot create command socket"); - goto end; - } - - ret = lttcomm_listen_unix_sock(client_socket); - if (ret < 0) { - goto end; - } - - DBG("Sending ready command to lttng-sessiond"); - ret = lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_COMMAND_SOCK_READY); - /* return < 0 on error, but == 0 is not fatal */ - if (ret < 0) { - ERR("Error sending ready command to lttng-sessiond"); - goto end; - } - - /* prepare the FDs to poll : to client socket and the should_quit pipe */ - consumer_sockpoll[0].fd = ctx->consumer_should_quit[0]; - consumer_sockpoll[0].events = POLLIN | POLLPRI; - consumer_sockpoll[1].fd = client_socket; - consumer_sockpoll[1].events = POLLIN | POLLPRI; - - ret = lttng_consumer_poll_socket(consumer_sockpoll); - if (ret) { - if (ret > 0) { - /* should exit */ - err = 0; - } - goto end; - } - DBG("Connection on client_socket"); - - /* Blocking call, waiting for transmission */ - sock = lttcomm_accept_unix_sock(client_socket); - if (sock < 0) { - WARN("On accept"); - goto end; - } - - /* - * Setup metadata socket which is the second socket connection on the - * command unix socket. - */ - ret = set_metadata_socket(ctx, consumer_sockpoll, client_socket); - if (ret) { - if (ret > 0) { - /* should exit */ - err = 0; - } - goto end; - } - - /* This socket is not useful anymore. 
*/ - ret = close(client_socket); - if (ret < 0) { - PERROR("close client_socket"); - } - client_socket = -1; - - /* update the polling structure to poll on the established socket */ - consumer_sockpoll[1].fd = sock; - consumer_sockpoll[1].events = POLLIN | POLLPRI; - - while (1) { - health_code_update(); - - health_poll_entry(); - ret = lttng_consumer_poll_socket(consumer_sockpoll); - health_poll_exit(); - if (ret) { - if (ret > 0) { - /* should exit */ - err = 0; - } - goto end; - } - DBG("Incoming command on sock"); - ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll); - if (ret <= 0) { - /* - * This could simply be a session daemon quitting. Don't output - * ERR() here. - */ - DBG("Communication interrupted on command socket"); - err = 0; - goto end; - } - if (CMM_LOAD_SHARED(consumer_quit)) { - DBG("consumer_thread_receive_fds received quit from signal"); - err = 0; /* All is OK */ - goto end; - } - DBG("Received command on sock"); - } - /* All is OK */ - err = 0; - -end: - DBG("Consumer thread sessiond poll exiting"); - - /* - * Close metadata streams since the producer is the session daemon which - * just died. - * - * NOTE: for now, this only applies to the UST tracer. - */ - lttng_consumer_close_all_metadata(); - - /* - * when all fds have hung up, the polling thread - * can exit cleanly - */ - CMM_STORE_SHARED(consumer_quit, 1); - - /* - * Notify the data poll thread to poll back again and test the - * consumer_quit state that we just set so to quit gracefully. - */ - notify_thread_lttng_pipe(ctx->consumer_data_pipe); - - notify_channel_pipe(ctx, NULL, -1, CONSUMER_CHANNEL_QUIT); - - notify_health_quit_pipe(health_quit_pipe); - - /* Cleaning up possibly open sockets. 
*/ - if (sock >= 0) { - ret = close(sock); - if (ret < 0) { - PERROR("close sock sessiond poll"); - } - } - if (client_socket >= 0) { - ret = close(client_socket); - if (ret < 0) { - PERROR("close client_socket sessiond poll"); - } - } - -error_testpoint: - if (err) { - health_error(); - ERR("Health error occurred in %s", __func__); - } - health_unregister(health_consumerd); - - rcu_unregister_thread(); - return NULL; -} - -static int post_consume(struct lttng_consumer_stream *stream, - const struct stream_subbuffer *subbuffer, - struct lttng_consumer_local_data *ctx) -{ - size_t i; - int ret = 0; - const size_t count = lttng_dynamic_array_get_count( - &stream->read_subbuffer_ops.post_consume_cbs); - - for (i = 0; i < count; i++) { - const post_consume_cb op = *(post_consume_cb *) lttng_dynamic_array_get_element( - &stream->read_subbuffer_ops.post_consume_cbs, - i); - - ret = op(stream, subbuffer, ctx); - if (ret) { - goto end; - } - } -end: - return ret; -} - -ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream, - struct lttng_consumer_local_data *ctx, - bool locked_by_caller) -{ - ssize_t ret, written_bytes = 0; - int rotation_ret; - struct stream_subbuffer subbuffer = {}; - enum get_next_subbuffer_status get_next_status; - - if (!locked_by_caller) { - stream->read_subbuffer_ops.lock(stream); - } - - if (stream->read_subbuffer_ops.on_wake_up) { - ret = stream->read_subbuffer_ops.on_wake_up(stream); - if (ret) { - goto end; - } - } - - /* - * If the stream was flagged to be ready for rotation before we extract - * the next packet, rotate it now. 
- */ - if (stream->rotate_ready) { - DBG("Rotate stream before consuming data"); - ret = lttng_consumer_rotate_stream(ctx, stream); - if (ret < 0) { - ERR("Stream rotation error before consuming data"); - goto end; - } - } - - get_next_status = stream->read_subbuffer_ops.get_next_subbuffer( - stream, &subbuffer); - switch (get_next_status) { - case GET_NEXT_SUBBUFFER_STATUS_OK: - break; - case GET_NEXT_SUBBUFFER_STATUS_NO_DATA: - /* Not an error. */ - ret = 0; - goto sleep_stream; - case GET_NEXT_SUBBUFFER_STATUS_ERROR: - ret = -1; - goto end; - default: - abort(); - } - - ret = stream->read_subbuffer_ops.pre_consume_subbuffer( - stream, &subbuffer); - if (ret) { - goto error_put_subbuf; - } - - written_bytes = stream->read_subbuffer_ops.consume_subbuffer( - ctx, stream, &subbuffer); - if (written_bytes <= 0) { - ERR("Error consuming subbuffer: (%zd)", written_bytes); - ret = (int) written_bytes; - goto error_put_subbuf; - } - - ret = stream->read_subbuffer_ops.put_next_subbuffer(stream, &subbuffer); - if (ret) { - goto end; - } - - ret = post_consume(stream, &subbuffer, ctx); - if (ret) { - goto end; - } - - /* - * After extracting the packet, we check if the stream is now ready to - * be rotated and perform the action immediately. - * - * Don't overwrite `ret` as callers expect the number of bytes - * consumed to be returned on success. 
- */ - rotation_ret = lttng_consumer_stream_is_rotate_ready(stream); - if (rotation_ret == 1) { - rotation_ret = lttng_consumer_rotate_stream(ctx, stream); - if (rotation_ret < 0) { - ret = rotation_ret; - ERR("Stream rotation error after consuming data"); - goto end; - } - - } else if (rotation_ret < 0) { - ret = rotation_ret; - ERR("Failed to check if stream was ready to rotate after consuming data"); - goto end; - } - -sleep_stream: - if (stream->read_subbuffer_ops.on_sleep) { - stream->read_subbuffer_ops.on_sleep(stream, ctx); - } - - ret = written_bytes; -end: - if (!locked_by_caller) { - stream->read_subbuffer_ops.unlock(stream); - } - - return ret; -error_put_subbuf: - (void) stream->read_subbuffer_ops.put_next_subbuffer(stream, &subbuffer); - goto end; -} - -int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream) -{ - switch (the_consumer_data.type) { - case LTTNG_CONSUMER_KERNEL: - return lttng_kconsumer_on_recv_stream(stream); - case LTTNG_CONSUMER32_UST: - case LTTNG_CONSUMER64_UST: - return lttng_ustconsumer_on_recv_stream(stream); - default: - ERR("Unknown consumer_data type"); - abort(); - return -ENOSYS; - } -} - -/* - * Allocate and set consumer data hash tables. 
- */ -int lttng_consumer_init(void) -{ - the_consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64); - if (!the_consumer_data.channel_ht) { - goto error; - } - - the_consumer_data.channels_by_session_id_ht = - lttng_ht_new(0, LTTNG_HT_TYPE_U64); - if (!the_consumer_data.channels_by_session_id_ht) { - goto error; - } - - the_consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64); - if (!the_consumer_data.relayd_ht) { - goto error; - } - - the_consumer_data.stream_list_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64); - if (!the_consumer_data.stream_list_ht) { - goto error; - } - - the_consumer_data.stream_per_chan_id_ht = - lttng_ht_new(0, LTTNG_HT_TYPE_U64); - if (!the_consumer_data.stream_per_chan_id_ht) { - goto error; - } - - data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64); - if (!data_ht) { - goto error; - } - - metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64); - if (!metadata_ht) { - goto error; - } - - the_consumer_data.chunk_registry = lttng_trace_chunk_registry_create(); - if (!the_consumer_data.chunk_registry) { - goto error; - } - - return 0; - -error: - return -1; -} - -/* - * Process the ADD_RELAYD command receive by a consumer. - * - * This will create a relayd socket pair and add it to the relayd hash table. - * The caller MUST acquire a RCU read side lock before calling it. - */ - void consumer_add_relayd_socket(uint64_t net_seq_idx, int sock_type, - struct lttng_consumer_local_data *ctx, int sock, - struct pollfd *consumer_sockpoll, - struct lttcomm_relayd_sock *relayd_sock, uint64_t sessiond_id, - uint64_t relayd_session_id) -{ - int fd = -1, ret = -1, relayd_created = 0; - enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS; - struct consumer_relayd_sock_pair *relayd = NULL; - - LTTNG_ASSERT(ctx); - LTTNG_ASSERT(relayd_sock); - - DBG("Consumer adding relayd socket (idx: %" PRIu64 ")", net_seq_idx); - - /* Get relayd reference if exists. 
*/ - relayd = consumer_find_relayd(net_seq_idx); - if (relayd == NULL) { - LTTNG_ASSERT(sock_type == LTTNG_STREAM_CONTROL); - /* Not found. Allocate one. */ - relayd = consumer_allocate_relayd_sock_pair(net_seq_idx); - if (relayd == NULL) { - ret_code = LTTCOMM_CONSUMERD_ENOMEM; - goto error; - } else { - relayd->sessiond_session_id = sessiond_id; - relayd_created = 1; - } - - /* - * This code path MUST continue to the consumer send status message to - * we can notify the session daemon and continue our work without - * killing everything. - */ - } else { - /* - * relayd key should never be found for control socket. - */ - LTTNG_ASSERT(sock_type != LTTNG_STREAM_CONTROL); - } - - /* First send a status message before receiving the fds. */ - ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS); - if (ret < 0) { - /* Somehow, the session daemon is not responding anymore. */ - lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL); - goto error_nosignal; - } - - /* Poll on consumer socket. */ - ret = lttng_consumer_poll_socket(consumer_sockpoll); - if (ret) { - /* Needing to exit in the middle of a command: error. */ - lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR); - goto error_nosignal; - } - - /* Get relayd socket from session daemon */ - ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1); - if (ret != sizeof(fd)) { - fd = -1; /* Just in case it gets set with an invalid value. */ - - /* - * Failing to receive FDs might indicate a major problem such as - * reaching a fd limit during the receive where the kernel returns a - * MSG_CTRUNC and fails to cleanup the fd in the queue. Any case, we - * don't take any chances and stop everything. - * - * XXX: Feature request #558 will fix that and avoid this possible - * issue when reaching the fd limit. 
- */ - lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD); - ret_code = LTTCOMM_CONSUMERD_ERROR_RECV_FD; - goto error; - } - - /* Copy socket information and received FD */ - switch (sock_type) { - case LTTNG_STREAM_CONTROL: - /* Copy received lttcomm socket */ - lttcomm_copy_sock(&relayd->control_sock.sock, &relayd_sock->sock); - ret = lttcomm_create_sock(&relayd->control_sock.sock); - /* Handle create_sock error. */ - if (ret < 0) { - ret_code = LTTCOMM_CONSUMERD_ENOMEM; - goto error; - } - /* - * Close the socket created internally by - * lttcomm_create_sock, so we can replace it by the one - * received from sessiond. - */ - if (close(relayd->control_sock.sock.fd)) { - PERROR("close"); - } - - /* Assign new file descriptor */ - relayd->control_sock.sock.fd = fd; - /* Assign version values. */ - relayd->control_sock.major = relayd_sock->major; - relayd->control_sock.minor = relayd_sock->minor; - - relayd->relayd_session_id = relayd_session_id; - - break; - case LTTNG_STREAM_DATA: - /* Copy received lttcomm socket */ - lttcomm_copy_sock(&relayd->data_sock.sock, &relayd_sock->sock); - ret = lttcomm_create_sock(&relayd->data_sock.sock); - /* Handle create_sock error. */ - if (ret < 0) { - ret_code = LTTCOMM_CONSUMERD_ENOMEM; - goto error; - } - /* - * Close the socket created internally by - * lttcomm_create_sock, so we can replace it by the one - * received from sessiond. - */ - if (close(relayd->data_sock.sock.fd)) { - PERROR("close"); - } - - /* Assign new file descriptor */ - relayd->data_sock.sock.fd = fd; - /* Assign version values. */ - relayd->data_sock.major = relayd_sock->major; - relayd->data_sock.minor = relayd_sock->minor; - break; - default: - ERR("Unknown relayd socket type (%d)", sock_type); - ret_code = LTTCOMM_CONSUMERD_FATAL; - goto error; - } - - DBG("Consumer %s socket created successfully with net idx %" PRIu64 " (fd: %d)", - sock_type == LTTNG_STREAM_CONTROL ? 
"control" : "data", - relayd->net_seq_idx, fd); - /* - * We gave the ownership of the fd to the relayd structure. Set the - * fd to -1 so we don't call close() on it in the error path below. - */ - fd = -1; - - /* We successfully added the socket. Send status back. */ - ret = consumer_send_status_msg(sock, ret_code); - if (ret < 0) { - /* Somehow, the session daemon is not responding anymore. */ - lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL); - goto error_nosignal; - } - - /* - * Add relayd socket pair to consumer data hashtable. If object already - * exists or on error, the function gracefully returns. - */ - relayd->ctx = ctx; - add_relayd(relayd); - - /* All good! */ - return; - -error: - if (consumer_send_status_msg(sock, ret_code) < 0) { - lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL); - } - -error_nosignal: - /* Close received socket if valid. */ - if (fd >= 0) { - if (close(fd)) { - PERROR("close received socket"); - } - } - - if (relayd_created) { - free(relayd); - } -} - -/* - * Search for a relayd associated to the session id and return the reference. - * - * A rcu read side lock MUST be acquire before calling this function and locked - * until the relayd object is no longer necessary. - */ -static struct consumer_relayd_sock_pair *find_relayd_by_session_id(uint64_t id) -{ - struct lttng_ht_iter iter; - struct consumer_relayd_sock_pair *relayd = NULL; - - /* Iterate over all relayd since they are indexed by net_seq_idx. */ - cds_lfht_for_each_entry(the_consumer_data.relayd_ht->ht, &iter.iter, - relayd, node.node) { - /* - * Check by sessiond id which is unique here where the relayd session - * id might not be when having multiple relayd. - */ - if (relayd->sessiond_session_id == id) { - /* Found the relayd. There can be only one per id. */ - goto found; - } - } - - return NULL; - -found: - return relayd; -} - -/* - * Check if for a given session id there is still data needed to be extract - * from the buffers. 
- * - * Return 1 if data is pending or else 0 meaning ready to be read. - */ -int consumer_data_pending(uint64_t id) -{ - int ret; - struct lttng_ht_iter iter; - struct lttng_ht *ht; - struct lttng_consumer_stream *stream; - struct consumer_relayd_sock_pair *relayd = NULL; - int (*data_pending)(struct lttng_consumer_stream *); - - DBG("Consumer data pending command on session id %" PRIu64, id); - - rcu_read_lock(); - pthread_mutex_lock(&the_consumer_data.lock); - - switch (the_consumer_data.type) { - case LTTNG_CONSUMER_KERNEL: - data_pending = lttng_kconsumer_data_pending; - break; - case LTTNG_CONSUMER32_UST: - case LTTNG_CONSUMER64_UST: - data_pending = lttng_ustconsumer_data_pending; - break; - default: - ERR("Unknown consumer data type"); - abort(); - } - - /* Ease our life a bit */ - ht = the_consumer_data.stream_list_ht; - - cds_lfht_for_each_entry_duplicate(ht->ht, - ht->hash_fct(&id, lttng_ht_seed), - ht->match_fct, &id, - &iter.iter, stream, node_session_id.node) { - pthread_mutex_lock(&stream->lock); - - /* - * A removed node from the hash table indicates that the stream has - * been deleted thus having a guarantee that the buffers are closed - * on the consumer side. However, data can still be transmitted - * over the network so don't skip the relayd check. - */ - ret = cds_lfht_is_node_deleted(&stream->node.node); - if (!ret) { - /* Check the stream if there is data in the buffers. */ - ret = data_pending(stream); - if (ret == 1) { - pthread_mutex_unlock(&stream->lock); - goto data_pending; - } - } - - pthread_mutex_unlock(&stream->lock); - } - - relayd = find_relayd_by_session_id(id); - if (relayd) { - unsigned int is_data_inflight = 0; - - /* Send init command for data pending. */ - pthread_mutex_lock(&relayd->ctrl_sock_mutex); - ret = relayd_begin_data_pending(&relayd->control_sock, - relayd->relayd_session_id); - if (ret < 0) { - pthread_mutex_unlock(&relayd->ctrl_sock_mutex); - /* Communication error thus the relayd so no data pending. 
*/ - goto data_not_pending; - } - - cds_lfht_for_each_entry_duplicate(ht->ht, - ht->hash_fct(&id, lttng_ht_seed), - ht->match_fct, &id, - &iter.iter, stream, node_session_id.node) { - if (stream->metadata_flag) { - ret = relayd_quiescent_control(&relayd->control_sock, - stream->relayd_stream_id); - } else { - ret = relayd_data_pending(&relayd->control_sock, - stream->relayd_stream_id, - stream->next_net_seq_num - 1); - } - - if (ret == 1) { - pthread_mutex_unlock(&relayd->ctrl_sock_mutex); - goto data_pending; - } else if (ret < 0) { - ERR("Relayd data pending failed. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx); - lttng_consumer_cleanup_relayd(relayd); - pthread_mutex_unlock(&relayd->ctrl_sock_mutex); - goto data_not_pending; - } - } - - /* Send end command for data pending. */ - ret = relayd_end_data_pending(&relayd->control_sock, - relayd->relayd_session_id, &is_data_inflight); - pthread_mutex_unlock(&relayd->ctrl_sock_mutex); - if (ret < 0) { - ERR("Relayd end data pending failed. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx); - lttng_consumer_cleanup_relayd(relayd); - goto data_not_pending; - } - if (is_data_inflight) { - goto data_pending; - } - } - - /* - * Finding _no_ node in the hash table and no inflight data means that the - * stream(s) have been removed thus data is guaranteed to be available for - * analysis from the trace files. - */ - -data_not_pending: - /* Data is available to be read by a viewer. */ - pthread_mutex_unlock(&the_consumer_data.lock); - rcu_read_unlock(); - return 0; - -data_pending: - /* Data is still being extracted from buffers. */ - pthread_mutex_unlock(&the_consumer_data.lock); - rcu_read_unlock(); - return 1; -} - -/* - * Send a ret code status message to the sessiond daemon. - * - * Return the sendmsg() return value. 
- */ -int consumer_send_status_msg(int sock, int ret_code) -{ - struct lttcomm_consumer_status_msg msg; - - memset(&msg, 0, sizeof(msg)); - msg.ret_code = ret_code; - - return lttcomm_send_unix_sock(sock, &msg, sizeof(msg)); -} - -/* - * Send a channel status message to the sessiond daemon. - * - * Return the sendmsg() return value. - */ -int consumer_send_status_channel(int sock, - struct lttng_consumer_channel *channel) -{ - struct lttcomm_consumer_status_channel msg; - - LTTNG_ASSERT(sock >= 0); - - memset(&msg, 0, sizeof(msg)); - if (!channel) { - msg.ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL; - } else { - msg.ret_code = LTTCOMM_CONSUMERD_SUCCESS; - msg.key = channel->key; - msg.stream_count = channel->streams.count; - } - - return lttcomm_send_unix_sock(sock, &msg, sizeof(msg)); -} - -unsigned long consumer_get_consume_start_pos(unsigned long consumed_pos, - unsigned long produced_pos, uint64_t nb_packets_per_stream, - uint64_t max_sb_size) -{ - unsigned long start_pos; - - if (!nb_packets_per_stream) { - return consumed_pos; /* Grab everything */ - } - start_pos = produced_pos - lttng_offset_align_floor(produced_pos, max_sb_size); - start_pos -= max_sb_size * nb_packets_per_stream; - if ((long) (start_pos - consumed_pos) < 0) { - return consumed_pos; /* Grab everything */ - } - return start_pos; -} - -/* Stream lock must be held by the caller. 
*/ -static int sample_stream_positions(struct lttng_consumer_stream *stream, - unsigned long *produced, unsigned long *consumed) -{ - int ret; - - ASSERT_LOCKED(stream->lock); - - ret = lttng_consumer_sample_snapshot_positions(stream); - if (ret < 0) { - ERR("Failed to sample snapshot positions"); - goto end; - } - - ret = lttng_consumer_get_produced_snapshot(stream, produced); - if (ret < 0) { - ERR("Failed to sample produced position"); - goto end; - } - - ret = lttng_consumer_get_consumed_snapshot(stream, consumed); - if (ret < 0) { - ERR("Failed to sample consumed position"); - goto end; - } - -end: - return ret; -} - -/* - * Sample the rotate position for all the streams of a channel. If a stream - * is already at the rotate position (produced == consumed), we flag it as - * ready for rotation. The rotation of ready streams occurs after we have - * replied to the session daemon that we have finished sampling the positions. - * Must be called with RCU read-side lock held to ensure existence of channel. 
- * - * Returns 0 on success, < 0 on error - */ -int lttng_consumer_rotate_channel(struct lttng_consumer_channel *channel, - uint64_t key, uint64_t relayd_id, uint32_t metadata, - struct lttng_consumer_local_data *ctx) -{ - int ret; - struct lttng_consumer_stream *stream; - struct lttng_ht_iter iter; - struct lttng_ht *ht = the_consumer_data.stream_per_chan_id_ht; - struct lttng_dynamic_array stream_rotation_positions; - uint64_t next_chunk_id, stream_count = 0; - enum lttng_trace_chunk_status chunk_status; - const bool is_local_trace = relayd_id == -1ULL; - struct consumer_relayd_sock_pair *relayd = NULL; - bool rotating_to_new_chunk = true; - /* Array of `struct lttng_consumer_stream *` */ - struct lttng_dynamic_pointer_array streams_packet_to_open; - size_t stream_idx; - - DBG("Consumer sample rotate position for channel %" PRIu64, key); - - lttng_dynamic_array_init(&stream_rotation_positions, - sizeof(struct relayd_stream_rotation_position), NULL); - lttng_dynamic_pointer_array_init(&streams_packet_to_open, NULL); - - rcu_read_lock(); - - pthread_mutex_lock(&channel->lock); - LTTNG_ASSERT(channel->trace_chunk); - chunk_status = lttng_trace_chunk_get_id(channel->trace_chunk, - &next_chunk_id); - if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { - ret = -1; - goto end_unlock_channel; - } - - cds_lfht_for_each_entry_duplicate(ht->ht, - ht->hash_fct(&channel->key, lttng_ht_seed), - ht->match_fct, &channel->key, &iter.iter, - stream, node_channel_id.node) { - unsigned long produced_pos = 0, consumed_pos = 0; - - health_code_update(); - - /* - * Lock stream because we are about to change its state. - */ - pthread_mutex_lock(&stream->lock); - - if (stream->trace_chunk == stream->chan->trace_chunk) { - rotating_to_new_chunk = false; - } - - /* - * Do not flush a packet when rotating from a NULL trace - * chunk. The stream has no means to output data, and the prior - * rotation which rotated to NULL performed that side-effect - * already. 
No new data can be produced when a stream has no - * associated trace chunk (e.g. a stop followed by a rotate). - */ - if (stream->trace_chunk) { - bool flush_active; - - if (stream->metadata_flag) { - /* - * Don't produce an empty metadata packet, - * simply close the current one. - * - * Metadata is regenerated on every trace chunk - * switch; there is no concern that no data was - * produced. - */ - flush_active = true; - } else { - /* - * Only flush an empty packet if the "packet - * open" could not be performed on transition - * to a new trace chunk and no packets were - * consumed within the chunk's lifetime. - */ - if (stream->opened_packet_in_current_trace_chunk) { - flush_active = true; - } else { - /* - * Stream could have been full at the - * time of rotation, but then have had - * no activity at all. - * - * It is important to flush a packet - * to prevent 0-length files from being - * produced as most viewers choke on - * them. - * - * Unfortunately viewers will not be - * able to know that tracing was active - * for this stream during this trace - * chunk's lifetime. - */ - ret = sample_stream_positions(stream, &produced_pos, &consumed_pos); - if (ret) { - goto end_unlock_stream; - } - - /* - * Don't flush an empty packet if data - * was produced; it will be consumed - * before the rotation completes. - */ - flush_active = produced_pos != consumed_pos; - if (!flush_active) { - const char *trace_chunk_name; - uint64_t trace_chunk_id; - - chunk_status = lttng_trace_chunk_get_name( - stream->trace_chunk, - &trace_chunk_name, - NULL); - if (chunk_status == LTTNG_TRACE_CHUNK_STATUS_NONE) { - trace_chunk_name = "none"; - } - - /* - * Consumer trace chunks are - * never anonymous. - */ - chunk_status = lttng_trace_chunk_get_id( - stream->trace_chunk, - &trace_chunk_id); - LTTNG_ASSERT(chunk_status == - LTTNG_TRACE_CHUNK_STATUS_OK); - - DBG("Unable to open packet for stream during trace chunk's lifetime. 
" - "Flushing an empty packet to prevent an empty file from being created: " - "stream id = %" PRIu64 ", trace chunk name = `%s`, trace chunk id = %" PRIu64, - stream->key, trace_chunk_name, trace_chunk_id); - } - } - } - - /* - * Close the current packet before sampling the - * ring buffer positions. - */ - ret = consumer_stream_flush_buffer(stream, flush_active); - if (ret < 0) { - ERR("Failed to flush stream %" PRIu64 " during channel rotation", - stream->key); - goto end_unlock_stream; - } - } - - ret = lttng_consumer_take_snapshot(stream); - if (ret < 0 && ret != -ENODATA && ret != -EAGAIN) { - ERR("Failed to sample snapshot position during channel rotation"); - goto end_unlock_stream; - } - if (!ret) { - ret = lttng_consumer_get_produced_snapshot(stream, - &produced_pos); - if (ret < 0) { - ERR("Failed to sample produced position during channel rotation"); - goto end_unlock_stream; - } - - ret = lttng_consumer_get_consumed_snapshot(stream, - &consumed_pos); - if (ret < 0) { - ERR("Failed to sample consumed position during channel rotation"); - goto end_unlock_stream; - } - } - /* - * Align produced position on the start-of-packet boundary of the first - * packet going into the next trace chunk. - */ - produced_pos = lttng_align_floor(produced_pos, stream->max_sb_size); - if (consumed_pos == produced_pos) { - DBG("Set rotate ready for stream %" PRIu64 " produced = %lu consumed = %lu", - stream->key, produced_pos, consumed_pos); - stream->rotate_ready = true; - } else { - DBG("Different consumed and produced positions " - "for stream %" PRIu64 " produced = %lu consumed = %lu", - stream->key, produced_pos, consumed_pos); - } - /* - * The rotation position is based on the packet_seq_num of the - * packet following the last packet that was consumed for this - * stream, incremented by the offset between produced and - * consumed positions. This rotation position is a lower bound - * (inclusive) at which the next trace chunk starts. 
Since it - * is a lower bound, it is OK if the packet_seq_num does not - * correspond exactly to the same packet identified by the - * consumed_pos, which can happen in overwrite mode. - */ - if (stream->sequence_number_unavailable) { - /* - * Rotation should never be performed on a session which - * interacts with a pre-2.8 lttng-modules, which does - * not implement packet sequence number. - */ - ERR("Failure to rotate stream %" PRIu64 ": sequence number unavailable", - stream->key); - ret = -1; - goto end_unlock_stream; - } - stream->rotate_position = stream->last_sequence_number + 1 + - ((produced_pos - consumed_pos) / stream->max_sb_size); - DBG("Set rotation position for stream %" PRIu64 " at position %" PRIu64, - stream->key, stream->rotate_position); - - if (!is_local_trace) { - /* - * The relay daemon control protocol expects a rotation - * position as "the sequence number of the first packet - * _after_ the current trace chunk". - */ - const struct relayd_stream_rotation_position position = { - .stream_id = stream->relayd_stream_id, - .rotate_at_seq_num = stream->rotate_position, - }; - - ret = lttng_dynamic_array_add_element( - &stream_rotation_positions, - &position); - if (ret) { - ERR("Failed to allocate stream rotation position"); - goto end_unlock_stream; - } - stream_count++; - } - - stream->opened_packet_in_current_trace_chunk = false; - - if (rotating_to_new_chunk && !stream->metadata_flag) { - /* - * Attempt to flush an empty packet as close to the - * rotation point as possible. In the event where a - * stream remains inactive after the rotation point, - * this ensures that the new trace chunk has a - * beginning timestamp set at the begining of the - * trace chunk instead of only creating an empty - * packet when the trace chunk is stopped. - * - * This indicates to the viewers that the stream - * was being recorded, but more importantly it - * allows viewers to determine a useable trace - * intersection. 
- * - * This presents a problem in the case where the - * ring-buffer is completely full. - * - * Consider the following scenario: - * - The consumption of data is slow (slow network, - * for instance), - * - The ring buffer is full, - * - A rotation is initiated, - * - The flush below does nothing (no space left to - * open a new packet), - * - The other streams rotate very soon, and new - * data is produced in the new chunk, - * - This stream completes its rotation long after the - * rotation was initiated - * - The session is stopped before any event can be - * produced in this stream's buffers. - * - * The resulting trace chunk will have a single packet - * temporaly at the end of the trace chunk for this - * stream making the stream intersection more narrow - * than it should be. - * - * To work-around this, an empty flush is performed - * after the first consumption of a packet during a - * rotation if open_packet fails. The idea is that - * consuming a packet frees enough space to switch - * packets in this scenario and allows the tracer to - * "stamp" the beginning of the new trace chunk at the - * earliest possible point. - * - * The packet open is performed after the channel - * rotation to ensure that no attempt to open a packet - * is performed in a stream that has no active trace - * chunk. - */ - ret = lttng_dynamic_pointer_array_add_pointer( - &streams_packet_to_open, stream); - if (ret) { - PERROR("Failed to add a stream pointer to array of streams in which to open a packet"); - ret = -1; - goto end_unlock_stream; - } - } - - pthread_mutex_unlock(&stream->lock); - } - stream = NULL; - - if (!is_local_trace) { - relayd = consumer_find_relayd(relayd_id); - if (!relayd) { - ERR("Failed to find relayd %" PRIu64, relayd_id); - ret = -1; - goto end_unlock_channel; - } - - pthread_mutex_lock(&relayd->ctrl_sock_mutex); - ret = relayd_rotate_streams(&relayd->control_sock, stream_count, - rotating_to_new_chunk ? 
&next_chunk_id : NULL, - (const struct relayd_stream_rotation_position *) - stream_rotation_positions.buffer - .data); - pthread_mutex_unlock(&relayd->ctrl_sock_mutex); - if (ret < 0) { - ERR("Relayd rotate stream failed. Cleaning up relayd %" PRIu64, - relayd->net_seq_idx); - lttng_consumer_cleanup_relayd(relayd); - goto end_unlock_channel; - } - } - - for (stream_idx = 0; - stream_idx < lttng_dynamic_pointer_array_get_count( - &streams_packet_to_open); - stream_idx++) { - enum consumer_stream_open_packet_status status; - - stream = lttng_dynamic_pointer_array_get_pointer( - &streams_packet_to_open, stream_idx); - - pthread_mutex_lock(&stream->lock); - status = consumer_stream_open_packet(stream); - pthread_mutex_unlock(&stream->lock); - switch (status) { - case CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED: - DBG("Opened a packet after a rotation: stream id = %" PRIu64 - ", channel name = %s, session id = %" PRIu64, - stream->key, stream->chan->name, - stream->chan->session_id); - break; - case CONSUMER_STREAM_OPEN_PACKET_STATUS_NO_SPACE: - /* - * Can't open a packet as there is no space left - * in the buffer. A new packet will be opened - * once one has been consumed. - */ - DBG("No space left to open a packet after a rotation: stream id = %" PRIu64 - ", channel name = %s, session id = %" PRIu64, - stream->key, stream->chan->name, - stream->chan->session_id); - break; - case CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR: - /* Logged by callee. 
*/ - ret = -1; - goto end_unlock_channel; - default: - abort(); - } - } - - pthread_mutex_unlock(&channel->lock); - ret = 0; - goto end; - -end_unlock_stream: - pthread_mutex_unlock(&stream->lock); -end_unlock_channel: - pthread_mutex_unlock(&channel->lock); -end: - rcu_read_unlock(); - lttng_dynamic_array_reset(&stream_rotation_positions); - lttng_dynamic_pointer_array_reset(&streams_packet_to_open); - return ret; -} - -static -int consumer_clear_buffer(struct lttng_consumer_stream *stream) -{ - int ret = 0; - unsigned long consumed_pos_before, consumed_pos_after; - - ret = lttng_consumer_sample_snapshot_positions(stream); - if (ret < 0) { - ERR("Taking snapshot positions"); - goto end; - } - - ret = lttng_consumer_get_consumed_snapshot(stream, &consumed_pos_before); - if (ret < 0) { - ERR("Consumed snapshot position"); - goto end; - } - - switch (the_consumer_data.type) { - case LTTNG_CONSUMER_KERNEL: - ret = kernctl_buffer_clear(stream->wait_fd); - if (ret < 0) { - ERR("Failed to clear kernel stream (ret = %d)", ret); - goto end; - } - break; - case LTTNG_CONSUMER32_UST: - case LTTNG_CONSUMER64_UST: - ret = lttng_ustconsumer_clear_buffer(stream); - if (ret < 0) { - ERR("Failed to clear ust stream (ret = %d)", ret); - goto end; - } - break; - default: - ERR("Unknown consumer_data type"); - abort(); - } - - ret = lttng_consumer_sample_snapshot_positions(stream); - if (ret < 0) { - ERR("Taking snapshot positions"); - goto end; - } - ret = lttng_consumer_get_consumed_snapshot(stream, &consumed_pos_after); - if (ret < 0) { - ERR("Consumed snapshot position"); - goto end; - } - DBG("clear: before: %lu after: %lu", consumed_pos_before, consumed_pos_after); -end: - return ret; -} - -static -int consumer_clear_stream(struct lttng_consumer_stream *stream) -{ - int ret; - - ret = consumer_stream_flush_buffer(stream, 1); - if (ret < 0) { - ERR("Failed to flush stream %" PRIu64 " during channel clear", - stream->key); - ret = LTTCOMM_CONSUMERD_FATAL; - goto error; - } - - 
ret = consumer_clear_buffer(stream); - if (ret < 0) { - ERR("Failed to clear stream %" PRIu64 " during channel clear", - stream->key); - ret = LTTCOMM_CONSUMERD_FATAL; - goto error; - } - - ret = LTTCOMM_CONSUMERD_SUCCESS; -error: - return ret; -} - -static -int consumer_clear_unmonitored_channel(struct lttng_consumer_channel *channel) -{ - int ret; - struct lttng_consumer_stream *stream; - - rcu_read_lock(); - pthread_mutex_lock(&channel->lock); - cds_list_for_each_entry(stream, &channel->streams.head, send_node) { - health_code_update(); - pthread_mutex_lock(&stream->lock); - ret = consumer_clear_stream(stream); - if (ret) { - goto error_unlock; - } - pthread_mutex_unlock(&stream->lock); - } - pthread_mutex_unlock(&channel->lock); - rcu_read_unlock(); - return 0; - -error_unlock: - pthread_mutex_unlock(&stream->lock); - pthread_mutex_unlock(&channel->lock); - rcu_read_unlock(); - return ret; -} - -/* - * Check if a stream is ready to be rotated after extracting it. - * - * Return 1 if it is ready for rotation, 0 if it is not, a negative value on - * error. Stream lock must be held. - */ -int lttng_consumer_stream_is_rotate_ready(struct lttng_consumer_stream *stream) -{ - DBG("Check is rotate ready for stream %" PRIu64 - " ready %u rotate_position %" PRIu64 - " last_sequence_number %" PRIu64, - stream->key, stream->rotate_ready, - stream->rotate_position, stream->last_sequence_number); - if (stream->rotate_ready) { - return 1; - } - - /* - * If packet seq num is unavailable, it means we are interacting - * with a pre-2.8 lttng-modules which does not implement the - * sequence number. Rotation should never be used by sessiond in this - * scenario. 
- */ - if (stream->sequence_number_unavailable) { - ERR("Internal error: rotation used on stream %" PRIu64 - " with unavailable sequence number", - stream->key); - return -1; - } - - if (stream->rotate_position == -1ULL || - stream->last_sequence_number == -1ULL) { - return 0; - } - - /* - * Rotate position not reached yet. The stream rotate position is - * the position of the next packet belonging to the next trace chunk, - * but consumerd considers rotation ready when reaching the last - * packet of the current chunk, hence the "rotate_position - 1". - */ - - DBG("Check is rotate ready for stream %" PRIu64 - " last_sequence_number %" PRIu64 - " rotate_position %" PRIu64, - stream->key, stream->last_sequence_number, - stream->rotate_position); - if (stream->last_sequence_number >= stream->rotate_position - 1) { - return 1; - } - - return 0; -} - -/* - * Reset the state for a stream after a rotation occurred. - */ -void lttng_consumer_reset_stream_rotate_state(struct lttng_consumer_stream *stream) -{ - DBG("lttng_consumer_reset_stream_rotate_state for stream %" PRIu64, - stream->key); - stream->rotate_position = -1ULL; - stream->rotate_ready = false; -} - -/* - * Perform the rotation a local stream file. 
- */ -static -int rotate_local_stream(struct lttng_consumer_local_data *ctx, - struct lttng_consumer_stream *stream) -{ - int ret = 0; - - DBG("Rotate local stream: stream key %" PRIu64 ", channel key %" PRIu64, - stream->key, - stream->chan->key); - stream->tracefile_size_current = 0; - stream->tracefile_count_current = 0; - - if (stream->out_fd >= 0) { - ret = close(stream->out_fd); - if (ret) { - PERROR("Failed to close stream out_fd of channel \"%s\"", - stream->chan->name); - } - stream->out_fd = -1; - } - - if (stream->index_file) { - lttng_index_file_put(stream->index_file); - stream->index_file = NULL; - } - - if (!stream->trace_chunk) { - goto end; - } - - ret = consumer_stream_create_output_files(stream, true); -end: - return ret; -} - -/* - * Performs the stream rotation for the rotate session feature if needed. - * It must be called with the channel and stream locks held. - * - * Return 0 on success, a negative number of error. - */ -int lttng_consumer_rotate_stream(struct lttng_consumer_local_data *ctx, - struct lttng_consumer_stream *stream) -{ - int ret; - - DBG("Consumer rotate stream %" PRIu64, stream->key); - - /* - * Update the stream's 'current' chunk to the session's (channel) - * now-current chunk. - */ - lttng_trace_chunk_put(stream->trace_chunk); - if (stream->chan->trace_chunk == stream->trace_chunk) { - /* - * A channel can be rotated and not have a "next" chunk - * to transition to. In that case, the channel's "current chunk" - * has not been closed yet, but it has not been updated to - * a "next" trace chunk either. Hence, the stream, like its - * parent channel, becomes part of no chunk and can't output - * anything until a new trace chunk is created. 
- */ - stream->trace_chunk = NULL; - } else if (stream->chan->trace_chunk && - !lttng_trace_chunk_get(stream->chan->trace_chunk)) { - ERR("Failed to acquire a reference to channel's trace chunk during stream rotation"); - ret = -1; - goto error; - } else { - /* - * Update the stream's trace chunk to its parent channel's - * current trace chunk. - */ - stream->trace_chunk = stream->chan->trace_chunk; - } - - if (stream->net_seq_idx == (uint64_t) -1ULL) { - ret = rotate_local_stream(ctx, stream); - if (ret < 0) { - ERR("Failed to rotate stream, ret = %i", ret); - goto error; - } - } - - if (stream->metadata_flag && stream->trace_chunk) { - /* - * If the stream has transitioned to a new trace - * chunk, the metadata should be re-dumped to the - * newest chunk. - * - * However, it is possible for a stream to transition to - * a "no-chunk" state. This can happen if a rotation - * occurs on an inactive session. In such cases, the metadata - * regeneration will happen when the next trace chunk is - * created. - */ - ret = consumer_metadata_stream_dump(stream); - if (ret) { - goto error; - } - } - lttng_consumer_reset_stream_rotate_state(stream); - - ret = 0; - -error: - return ret; -} - -/* - * Rotate all the ready streams now. - * - * This is especially important for low throughput streams that have already - * been consumed, we cannot wait for their next packet to perform the - * rotation. - * Need to be called with RCU read-side lock held to ensure existence of - * channel. 
- * - * Returns 0 on success, < 0 on error - */ -int lttng_consumer_rotate_ready_streams(struct lttng_consumer_channel *channel, - uint64_t key, struct lttng_consumer_local_data *ctx) -{ - int ret; - struct lttng_consumer_stream *stream; - struct lttng_ht_iter iter; - struct lttng_ht *ht = the_consumer_data.stream_per_chan_id_ht; - - rcu_read_lock(); - - DBG("Consumer rotate ready streams in channel %" PRIu64, key); - - cds_lfht_for_each_entry_duplicate(ht->ht, - ht->hash_fct(&channel->key, lttng_ht_seed), - ht->match_fct, &channel->key, &iter.iter, - stream, node_channel_id.node) { - health_code_update(); - - pthread_mutex_lock(&stream->chan->lock); - pthread_mutex_lock(&stream->lock); - - if (!stream->rotate_ready) { - pthread_mutex_unlock(&stream->lock); - pthread_mutex_unlock(&stream->chan->lock); - continue; - } - DBG("Consumer rotate ready stream %" PRIu64, stream->key); - - ret = lttng_consumer_rotate_stream(ctx, stream); - pthread_mutex_unlock(&stream->lock); - pthread_mutex_unlock(&stream->chan->lock); - if (ret) { - goto end; - } - } - - ret = 0; - -end: - rcu_read_unlock(); - return ret; -} - -enum lttcomm_return_code lttng_consumer_init_command( - struct lttng_consumer_local_data *ctx, - const lttng_uuid sessiond_uuid) -{ - enum lttcomm_return_code ret; - char uuid_str[LTTNG_UUID_STR_LEN]; - - if (ctx->sessiond_uuid.is_set) { - ret = LTTCOMM_CONSUMERD_ALREADY_SET; - goto end; - } - - ctx->sessiond_uuid.is_set = true; - memcpy(ctx->sessiond_uuid.value, sessiond_uuid, sizeof(lttng_uuid)); - ret = LTTCOMM_CONSUMERD_SUCCESS; - lttng_uuid_to_str(sessiond_uuid, uuid_str); - DBG("Received session daemon UUID: %s", uuid_str); -end: - return ret; -} - -enum lttcomm_return_code lttng_consumer_create_trace_chunk( - const uint64_t *relayd_id, uint64_t session_id, - uint64_t chunk_id, - time_t chunk_creation_timestamp, - const char *chunk_override_name, - const struct lttng_credentials *credentials, - struct lttng_directory_handle *chunk_directory_handle) -{ - int 
ret; - enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS; - struct lttng_trace_chunk *created_chunk = NULL, *published_chunk = NULL; - enum lttng_trace_chunk_status chunk_status; - char relayd_id_buffer[MAX_INT_DEC_LEN(*relayd_id)]; - char creation_timestamp_buffer[ISO8601_STR_LEN]; - const char *relayd_id_str = "(none)"; - const char *creation_timestamp_str; - struct lttng_ht_iter iter; - struct lttng_consumer_channel *channel; - - if (relayd_id) { - /* Only used for logging purposes. */ - ret = snprintf(relayd_id_buffer, sizeof(relayd_id_buffer), - "%" PRIu64, *relayd_id); - if (ret > 0 && ret < sizeof(relayd_id_buffer)) { - relayd_id_str = relayd_id_buffer; - } else { - relayd_id_str = "(formatting error)"; - } - } - - /* Local protocol error. */ - LTTNG_ASSERT(chunk_creation_timestamp); - ret = time_to_iso8601_str(chunk_creation_timestamp, - creation_timestamp_buffer, - sizeof(creation_timestamp_buffer)); - creation_timestamp_str = !ret ? creation_timestamp_buffer : - "(formatting error)"; - - DBG("Consumer create trace chunk command: relay_id = %s" - ", session_id = %" PRIu64 ", chunk_id = %" PRIu64 - ", chunk_override_name = %s" - ", chunk_creation_timestamp = %s", - relayd_id_str, session_id, chunk_id, - chunk_override_name ? : "(none)", - creation_timestamp_str); - - /* - * The trace chunk registry, as used by the consumer daemon, implicitly - * owns the trace chunks. This is only needed in the consumer since - * the consumer has no notion of a session beyond session IDs being - * used to identify other objects. - * - * The lttng_trace_chunk_registry_publish() call below provides a - * reference which is not released; it implicitly becomes the session - * daemon's reference to the chunk in the consumer daemon. - * - * The lifetime of trace chunks in the consumer daemon is managed by - * the session daemon through the LTTNG_CONSUMER_CREATE_TRACE_CHUNK - * and LTTNG_CONSUMER_DESTROY_TRACE_CHUNK commands. 
- */ - created_chunk = lttng_trace_chunk_create(chunk_id, - chunk_creation_timestamp, NULL); - if (!created_chunk) { - ERR("Failed to create trace chunk"); - ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED; - goto error; - } - - if (chunk_override_name) { - chunk_status = lttng_trace_chunk_override_name(created_chunk, - chunk_override_name); - if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { - ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED; - goto error; - } - } - - if (chunk_directory_handle) { - chunk_status = lttng_trace_chunk_set_credentials(created_chunk, - credentials); - if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { - ERR("Failed to set trace chunk credentials"); - ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED; - goto error; - } - /* - * The consumer daemon has no ownership of the chunk output - * directory. - */ - chunk_status = lttng_trace_chunk_set_as_user(created_chunk, - chunk_directory_handle); - chunk_directory_handle = NULL; - if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { - ERR("Failed to set trace chunk's directory handle"); - ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED; - goto error; - } - } - - published_chunk = lttng_trace_chunk_registry_publish_chunk( - the_consumer_data.chunk_registry, session_id, - created_chunk); - lttng_trace_chunk_put(created_chunk); - created_chunk = NULL; - if (!published_chunk) { - ERR("Failed to publish trace chunk"); - ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED; - goto error; - } - - rcu_read_lock(); - cds_lfht_for_each_entry_duplicate( - the_consumer_data.channels_by_session_id_ht->ht, - the_consumer_data.channels_by_session_id_ht->hash_fct( - &session_id, lttng_ht_seed), - the_consumer_data.channels_by_session_id_ht->match_fct, - &session_id, &iter.iter, channel, - channels_by_session_id_ht_node.node) { - ret = lttng_consumer_channel_set_trace_chunk(channel, - published_chunk); - if (ret) { - /* - * Roll-back the creation of this chunk. 
- * - * This is important since the session daemon will - * assume that the creation of this chunk failed and - * will never ask for it to be closed, resulting - * in a leak and an inconsistent state for some - * channels. - */ - enum lttcomm_return_code close_ret; - char path[LTTNG_PATH_MAX]; - - DBG("Failed to set new trace chunk on existing channels, rolling back"); - close_ret = lttng_consumer_close_trace_chunk(relayd_id, - session_id, chunk_id, - chunk_creation_timestamp, NULL, - path); - if (close_ret != LTTCOMM_CONSUMERD_SUCCESS) { - ERR("Failed to roll-back the creation of new chunk: session_id = %" PRIu64 ", chunk_id = %" PRIu64, - session_id, chunk_id); - } - - ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED; - break; - } - } - - if (relayd_id) { - struct consumer_relayd_sock_pair *relayd; - - relayd = consumer_find_relayd(*relayd_id); - if (relayd) { - pthread_mutex_lock(&relayd->ctrl_sock_mutex); - ret = relayd_create_trace_chunk( - &relayd->control_sock, published_chunk); - pthread_mutex_unlock(&relayd->ctrl_sock_mutex); - } else { - ERR("Failed to find relay daemon socket: relayd_id = %" PRIu64, *relayd_id); - } - - if (!relayd || ret) { - enum lttcomm_return_code close_ret; - char path[LTTNG_PATH_MAX]; - - close_ret = lttng_consumer_close_trace_chunk(relayd_id, - session_id, - chunk_id, - chunk_creation_timestamp, - NULL, path); - if (close_ret != LTTCOMM_CONSUMERD_SUCCESS) { - ERR("Failed to roll-back the creation of new chunk: session_id = %" PRIu64 ", chunk_id = %" PRIu64, - session_id, - chunk_id); - } - - ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED; - goto error_unlock; - } - } -error_unlock: - rcu_read_unlock(); -error: - /* Release the reference returned by the "publish" operation. 
*/ - lttng_trace_chunk_put(published_chunk); - lttng_trace_chunk_put(created_chunk); - return ret_code; -} - -enum lttcomm_return_code lttng_consumer_close_trace_chunk( - const uint64_t *relayd_id, uint64_t session_id, - uint64_t chunk_id, time_t chunk_close_timestamp, - const enum lttng_trace_chunk_command_type *close_command, - char *path) -{ - enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS; - struct lttng_trace_chunk *chunk; - char relayd_id_buffer[MAX_INT_DEC_LEN(*relayd_id)]; - const char *relayd_id_str = "(none)"; - const char *close_command_name = "none"; - struct lttng_ht_iter iter; - struct lttng_consumer_channel *channel; - enum lttng_trace_chunk_status chunk_status; - - if (relayd_id) { - int ret; - - /* Only used for logging purposes. */ - ret = snprintf(relayd_id_buffer, sizeof(relayd_id_buffer), - "%" PRIu64, *relayd_id); - if (ret > 0 && ret < sizeof(relayd_id_buffer)) { - relayd_id_str = relayd_id_buffer; - } else { - relayd_id_str = "(formatting error)"; - } - } - if (close_command) { - close_command_name = lttng_trace_chunk_command_type_get_name( - *close_command); - } - - DBG("Consumer close trace chunk command: relayd_id = %s" - ", session_id = %" PRIu64 ", chunk_id = %" PRIu64 - ", close command = %s", - relayd_id_str, session_id, chunk_id, - close_command_name); - - chunk = lttng_trace_chunk_registry_find_chunk( - the_consumer_data.chunk_registry, session_id, chunk_id); - if (!chunk) { - ERR("Failed to find chunk: session_id = %" PRIu64 - ", chunk_id = %" PRIu64, - session_id, chunk_id); - ret_code = LTTCOMM_CONSUMERD_UNKNOWN_TRACE_CHUNK; - goto end; - } - - chunk_status = lttng_trace_chunk_set_close_timestamp(chunk, - chunk_close_timestamp); - if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { - ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED; - goto end; - } - - if (close_command) { - chunk_status = lttng_trace_chunk_set_close_command( - chunk, *close_command); - if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { - 
ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED; - goto end; - } - } - - /* - * chunk is now invalid to access as we no longer hold a reference to - * it; it is only kept around to compare it (by address) to the - * current chunk found in the session's channels. - */ - rcu_read_lock(); - cds_lfht_for_each_entry(the_consumer_data.channel_ht->ht, &iter.iter, - channel, node.node) { - int ret; - - /* - * Only change the channel's chunk to NULL if it still - * references the chunk being closed. The channel may - * reference a newer channel in the case of a session - * rotation. When a session rotation occurs, the "next" - * chunk is created before the "current" chunk is closed. - */ - if (channel->trace_chunk != chunk) { - continue; - } - ret = lttng_consumer_channel_set_trace_chunk(channel, NULL); - if (ret) { - /* - * Attempt to close the chunk on as many channels as - * possible. - */ - ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED; - } - } - - if (relayd_id) { - int ret; - struct consumer_relayd_sock_pair *relayd; - - relayd = consumer_find_relayd(*relayd_id); - if (relayd) { - pthread_mutex_lock(&relayd->ctrl_sock_mutex); - ret = relayd_close_trace_chunk( - &relayd->control_sock, chunk, - path); - pthread_mutex_unlock(&relayd->ctrl_sock_mutex); - } else { - ERR("Failed to find relay daemon socket: relayd_id = %" PRIu64, - *relayd_id); - } - - if (!relayd || ret) { - ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED; - goto error_unlock; - } - } -error_unlock: - rcu_read_unlock(); -end: - /* - * Release the reference returned by the "find" operation and - * the session daemon's implicit reference to the chunk. 
- */ - lttng_trace_chunk_put(chunk); - lttng_trace_chunk_put(chunk); - - return ret_code; -} - -enum lttcomm_return_code lttng_consumer_trace_chunk_exists( - const uint64_t *relayd_id, uint64_t session_id, - uint64_t chunk_id) -{ - int ret; - enum lttcomm_return_code ret_code; - char relayd_id_buffer[MAX_INT_DEC_LEN(*relayd_id)]; - const char *relayd_id_str = "(none)"; - const bool is_local_trace = !relayd_id; - struct consumer_relayd_sock_pair *relayd = NULL; - bool chunk_exists_local, chunk_exists_remote; - - if (relayd_id) { - /* Only used for logging purposes. */ - ret = snprintf(relayd_id_buffer, sizeof(relayd_id_buffer), - "%" PRIu64, *relayd_id); - if (ret > 0 && ret < sizeof(relayd_id_buffer)) { - relayd_id_str = relayd_id_buffer; - } else { - relayd_id_str = "(formatting error)"; - } - } - - DBG("Consumer trace chunk exists command: relayd_id = %s" - ", chunk_id = %" PRIu64, relayd_id_str, - chunk_id); - ret = lttng_trace_chunk_registry_chunk_exists( - the_consumer_data.chunk_registry, session_id, chunk_id, - &chunk_exists_local); - if (ret) { - /* Internal error. */ - ERR("Failed to query the existence of a trace chunk"); - ret_code = LTTCOMM_CONSUMERD_FATAL; - goto end; - } - DBG("Trace chunk %s locally", - chunk_exists_local ? 
"exists" : "does not exist"); - if (chunk_exists_local) { - ret_code = LTTCOMM_CONSUMERD_TRACE_CHUNK_EXISTS_LOCAL; - goto end; - } else if (is_local_trace) { - ret_code = LTTCOMM_CONSUMERD_UNKNOWN_TRACE_CHUNK; - goto end; - } - - rcu_read_lock(); - relayd = consumer_find_relayd(*relayd_id); - if (!relayd) { - ERR("Failed to find relayd %" PRIu64, *relayd_id); - ret_code = LTTCOMM_CONSUMERD_INVALID_PARAMETERS; - goto end_rcu_unlock; - } - DBG("Looking up existence of trace chunk on relay daemon"); - pthread_mutex_lock(&relayd->ctrl_sock_mutex); - ret = relayd_trace_chunk_exists(&relayd->control_sock, chunk_id, - &chunk_exists_remote); - pthread_mutex_unlock(&relayd->ctrl_sock_mutex); - if (ret < 0) { - ERR("Failed to look-up the existence of trace chunk on relay daemon"); - ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL; - goto end_rcu_unlock; - } - - ret_code = chunk_exists_remote ? - LTTCOMM_CONSUMERD_TRACE_CHUNK_EXISTS_REMOTE : - LTTCOMM_CONSUMERD_UNKNOWN_TRACE_CHUNK; - DBG("Trace chunk %s on relay daemon", - chunk_exists_remote ? "exists" : "does not exist"); - -end_rcu_unlock: - rcu_read_unlock(); -end: - return ret_code; -} - -static -int consumer_clear_monitored_channel(struct lttng_consumer_channel *channel) -{ - struct lttng_ht *ht; - struct lttng_consumer_stream *stream; - struct lttng_ht_iter iter; - int ret; - - ht = the_consumer_data.stream_per_chan_id_ht; - - rcu_read_lock(); - cds_lfht_for_each_entry_duplicate(ht->ht, - ht->hash_fct(&channel->key, lttng_ht_seed), - ht->match_fct, &channel->key, - &iter.iter, stream, node_channel_id.node) { - /* - * Protect against teardown with mutex. 
- */ - pthread_mutex_lock(&stream->lock); - if (cds_lfht_is_node_deleted(&stream->node.node)) { - goto next; - } - ret = consumer_clear_stream(stream); - if (ret) { - goto error_unlock; - } - next: - pthread_mutex_unlock(&stream->lock); - } - rcu_read_unlock(); - return LTTCOMM_CONSUMERD_SUCCESS; - -error_unlock: - pthread_mutex_unlock(&stream->lock); - rcu_read_unlock(); - return ret; -} - -int lttng_consumer_clear_channel(struct lttng_consumer_channel *channel) -{ - int ret; - - DBG("Consumer clear channel %" PRIu64, channel->key); - - if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) { - /* - * Nothing to do for the metadata channel/stream. - * Snapshot mechanism already take care of the metadata - * handling/generation, and monitored channels only need to - * have their data stream cleared.. - */ - ret = LTTCOMM_CONSUMERD_SUCCESS; - goto end; - } - - if (!channel->monitor) { - ret = consumer_clear_unmonitored_channel(channel); - } else { - ret = consumer_clear_monitored_channel(channel); - } -end: - return ret; -} - -enum lttcomm_return_code lttng_consumer_open_channel_packets( - struct lttng_consumer_channel *channel) -{ - struct lttng_consumer_stream *stream; - enum lttcomm_return_code ret = LTTCOMM_CONSUMERD_SUCCESS; - - if (channel->metadata_stream) { - ERR("Open channel packets command attempted on a metadata channel"); - ret = LTTCOMM_CONSUMERD_INVALID_PARAMETERS; - goto end; - } - - rcu_read_lock(); - cds_list_for_each_entry(stream, &channel->streams.head, send_node) { - enum consumer_stream_open_packet_status status; - - pthread_mutex_lock(&stream->lock); - if (cds_lfht_is_node_deleted(&stream->node.node)) { - goto next; - } - - status = consumer_stream_open_packet(stream); - switch (status) { - case CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED: - DBG("Opened a packet in \"open channel packets\" command: stream id = %" PRIu64 - ", channel name = %s, session id = %" PRIu64, - stream->key, stream->chan->name, - stream->chan->session_id); - 
stream->opened_packet_in_current_trace_chunk = true; - break; - case CONSUMER_STREAM_OPEN_PACKET_STATUS_NO_SPACE: - DBG("No space left to open a packet in \"open channel packets\" command: stream id = %" PRIu64 - ", channel name = %s, session id = %" PRIu64, - stream->key, stream->chan->name, - stream->chan->session_id); - break; - case CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR: - /* - * Only unexpected internal errors can lead to this - * failing. Report an unknown error. - */ - ERR("Failed to flush empty buffer in \"open channel packets\" command: stream id = %" PRIu64 - ", channel id = %" PRIu64 - ", channel name = %s" - ", session id = %" PRIu64, - stream->key, channel->key, - channel->name, channel->session_id); - ret = LTTCOMM_CONSUMERD_UNKNOWN_ERROR; - goto error_unlock; - default: - abort(); - } - - next: - pthread_mutex_unlock(&stream->lock); - } - -end_rcu_unlock: - rcu_read_unlock(); -end: - return ret; - -error_unlock: - pthread_mutex_unlock(&stream->lock); - goto end_rcu_unlock; -} - -void lttng_consumer_sigbus_handle(void *addr) -{ - lttng_ustconsumer_sigbus_handle(addr); -} diff --git a/src/common/consumer/consumer.cpp b/src/common/consumer/consumer.cpp new file mode 100644 index 000000000..47bb5bd98 --- /dev/null +++ b/src/common/consumer/consumer.cpp @@ -0,0 +1,5255 @@ +/* + * Copyright (C) 2011 Julien Desfossez + * Copyright (C) 2011 Mathieu Desnoyers + * Copyright (C) 2012 David Goulet + * + * SPDX-License-Identifier: GPL-2.0-only + * + */ + +#include "common/index/ctf-index.h" +#define _LGPL_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +lttng_consumer_global_data the_consumer_data; + +enum consumer_channel_action { + CONSUMER_CHANNEL_ADD, + 
CONSUMER_CHANNEL_DEL, + CONSUMER_CHANNEL_QUIT, +}; + +struct consumer_channel_msg { + enum consumer_channel_action action; + struct lttng_consumer_channel *chan; /* add */ + uint64_t key; /* del */ +}; + +/* Flag used to temporarily pause data consumption from testpoints. */ +int data_consumption_paused; + +/* + * Flag to inform the polling thread to quit when all fd hung up. Updated by + * the consumer_thread_receive_fds when it notices that all fds has hung up. + * Also updated by the signal handler (consumer_should_exit()). Read by the + * polling threads. + */ +int consumer_quit; + +/* + * Global hash table containing respectively metadata and data streams. The + * stream element in this ht should only be updated by the metadata poll thread + * for the metadata and the data poll thread for the data. + */ +static struct lttng_ht *metadata_ht; +static struct lttng_ht *data_ht; + +static const char *get_consumer_domain(void) +{ + switch (the_consumer_data.type) { + case LTTNG_CONSUMER_KERNEL: + return DEFAULT_KERNEL_TRACE_DIR; + case LTTNG_CONSUMER64_UST: + /* Fall-through. */ + case LTTNG_CONSUMER32_UST: + return DEFAULT_UST_TRACE_DIR; + default: + abort(); + } +} + +/* + * Notify a thread lttng pipe to poll back again. This usually means that some + * global state has changed so we just send back the thread in a poll wait + * call. 
+ */ +static void notify_thread_lttng_pipe(struct lttng_pipe *pipe) +{ + struct lttng_consumer_stream *null_stream = NULL; + + LTTNG_ASSERT(pipe); + + (void) lttng_pipe_write(pipe, &null_stream, sizeof(null_stream)); +} + +static void notify_health_quit_pipe(int *pipe) +{ + ssize_t ret; + + ret = lttng_write(pipe[1], "4", 1); + if (ret < 1) { + PERROR("write consumer health quit"); + } +} + +static void notify_channel_pipe(struct lttng_consumer_local_data *ctx, + struct lttng_consumer_channel *chan, + uint64_t key, + enum consumer_channel_action action) +{ + struct consumer_channel_msg msg; + ssize_t ret; + + memset(&msg, 0, sizeof(msg)); + + msg.action = action; + msg.chan = chan; + msg.key = key; + ret = lttng_write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg)); + if (ret < sizeof(msg)) { + PERROR("notify_channel_pipe write error"); + } +} + +void notify_thread_del_channel(struct lttng_consumer_local_data *ctx, + uint64_t key) +{ + notify_channel_pipe(ctx, NULL, key, CONSUMER_CHANNEL_DEL); +} + +static int read_channel_pipe(struct lttng_consumer_local_data *ctx, + struct lttng_consumer_channel **chan, + uint64_t *key, + enum consumer_channel_action *action) +{ + struct consumer_channel_msg msg; + ssize_t ret; + + ret = lttng_read(ctx->consumer_channel_pipe[0], &msg, sizeof(msg)); + if (ret < sizeof(msg)) { + ret = -1; + goto error; + } + *action = msg.action; + *chan = msg.chan; + *key = msg.key; +error: + return (int) ret; +} + +/* + * Cleanup the stream list of a channel. Those streams are not yet globally + * visible + */ +static void clean_channel_stream_list(struct lttng_consumer_channel *channel) +{ + struct lttng_consumer_stream *stream, *stmp; + + LTTNG_ASSERT(channel); + + /* Delete streams that might have been left in the stream list. 
*/ + cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head, + send_node) { + cds_list_del(&stream->send_node); + /* + * Once a stream is added to this list, the buffers were created so we + * have a guarantee that this call will succeed. Setting the monitor + * mode to 0 so we don't lock nor try to delete the stream from the + * global hash table. + */ + stream->monitor = 0; + consumer_stream_destroy(stream, NULL); + } +} + +/* + * Find a stream. The consumer_data.lock must be locked during this + * call. + */ +static struct lttng_consumer_stream *find_stream(uint64_t key, + struct lttng_ht *ht) +{ + struct lttng_ht_iter iter; + struct lttng_ht_node_u64 *node; + struct lttng_consumer_stream *stream = NULL; + + LTTNG_ASSERT(ht); + + /* -1ULL keys are lookup failures */ + if (key == (uint64_t) -1ULL) { + return NULL; + } + + rcu_read_lock(); + + lttng_ht_lookup(ht, &key, &iter); + node = lttng_ht_iter_get_node_u64(&iter); + if (node != NULL) { + stream = caa_container_of(node, struct lttng_consumer_stream, node); + } + + rcu_read_unlock(); + + return stream; +} + +static void steal_stream_key(uint64_t key, struct lttng_ht *ht) +{ + struct lttng_consumer_stream *stream; + + rcu_read_lock(); + stream = find_stream(key, ht); + if (stream) { + stream->key = (uint64_t) -1ULL; + /* + * We don't want the lookup to match, but we still need + * to iterate on this stream when iterating over the hash table. Just + * change the node key. + */ + stream->node.key = (uint64_t) -1ULL; + } + rcu_read_unlock(); +} + +/* + * Return a channel object for the given key. + * + * RCU read side lock MUST be acquired before calling this function and + * protects the channel ptr. 
+ */ +struct lttng_consumer_channel *consumer_find_channel(uint64_t key) +{ + struct lttng_ht_iter iter; + struct lttng_ht_node_u64 *node; + struct lttng_consumer_channel *channel = NULL; + + /* -1ULL keys are lookup failures */ + if (key == (uint64_t) -1ULL) { + return NULL; + } + + lttng_ht_lookup(the_consumer_data.channel_ht, &key, &iter); + node = lttng_ht_iter_get_node_u64(&iter); + if (node != NULL) { + channel = caa_container_of(node, struct lttng_consumer_channel, node); + } + + return channel; +} + +/* + * There is a possibility that the consumer does not have enough time between + * the close of the channel on the session daemon and the cleanup in here thus + * once we have a channel add with an existing key, we know for sure that this + * channel will eventually get cleaned up by all streams being closed. + * + * This function just nullifies the already existing channel key. + */ +static void steal_channel_key(uint64_t key) +{ + struct lttng_consumer_channel *channel; + + rcu_read_lock(); + channel = consumer_find_channel(key); + if (channel) { + channel->key = (uint64_t) -1ULL; + /* + * We don't want the lookup to match, but we still need to iterate on + * this channel when iterating over the hash table. Just change the + * node key. + */ + channel->node.key = (uint64_t) -1ULL; + } + rcu_read_unlock(); +} + +static void free_channel_rcu(struct rcu_head *head) +{ + struct lttng_ht_node_u64 *node = + caa_container_of(head, struct lttng_ht_node_u64, head); + struct lttng_consumer_channel *channel = + caa_container_of(node, struct lttng_consumer_channel, node); + + switch (the_consumer_data.type) { + case LTTNG_CONSUMER_KERNEL: + break; + case LTTNG_CONSUMER32_UST: + case LTTNG_CONSUMER64_UST: + lttng_ustconsumer_free_channel(channel); + break; + default: + ERR("Unknown consumer_data type"); + abort(); + } + free(channel); +} + +/* + * RCU protected relayd socket pair free. 
+ */ +static void free_relayd_rcu(struct rcu_head *head) +{ + struct lttng_ht_node_u64 *node = + caa_container_of(head, struct lttng_ht_node_u64, head); + struct consumer_relayd_sock_pair *relayd = + caa_container_of(node, struct consumer_relayd_sock_pair, node); + + /* + * Close all sockets. This is done in the call RCU since we don't want the + * socket fds to be reassigned thus potentially creating bad state of the + * relayd object. + * + * We do not have to lock the control socket mutex here since at this stage + * there is no one referencing to this relayd object. + */ + (void) relayd_close(&relayd->control_sock); + (void) relayd_close(&relayd->data_sock); + + pthread_mutex_destroy(&relayd->ctrl_sock_mutex); + free(relayd); +} + +/* + * Destroy and free relayd socket pair object. + */ +void consumer_destroy_relayd(struct consumer_relayd_sock_pair *relayd) +{ + int ret; + struct lttng_ht_iter iter; + + if (relayd == NULL) { + return; + } + + DBG("Consumer destroy and close relayd socket pair"); + + iter.iter.node = &relayd->node.node; + ret = lttng_ht_del(the_consumer_data.relayd_ht, &iter); + if (ret != 0) { + /* We assume the relayd is being or is destroyed */ + return; + } + + /* RCU free() call */ + call_rcu(&relayd->node.head, free_relayd_rcu); +} + +/* + * Remove a channel from the global list protected by a mutex. This function is + * also responsible for freeing its data structures. + */ +void consumer_del_channel(struct lttng_consumer_channel *channel) +{ + struct lttng_ht_iter iter; + + DBG("Consumer delete channel key %" PRIu64, channel->key); + + pthread_mutex_lock(&the_consumer_data.lock); + pthread_mutex_lock(&channel->lock); + + /* Destroy streams that might have been left in the stream list. 
*/ + clean_channel_stream_list(channel); + + if (channel->live_timer_enabled == 1) { + consumer_timer_live_stop(channel); + } + if (channel->monitor_timer_enabled == 1) { + consumer_timer_monitor_stop(channel); + } + + switch (the_consumer_data.type) { + case LTTNG_CONSUMER_KERNEL: + break; + case LTTNG_CONSUMER32_UST: + case LTTNG_CONSUMER64_UST: + lttng_ustconsumer_del_channel(channel); + break; + default: + ERR("Unknown consumer_data type"); + abort(); + goto end; + } + + lttng_trace_chunk_put(channel->trace_chunk); + channel->trace_chunk = NULL; + + if (channel->is_published) { + int ret; + + rcu_read_lock(); + iter.iter.node = &channel->node.node; + ret = lttng_ht_del(the_consumer_data.channel_ht, &iter); + LTTNG_ASSERT(!ret); + + iter.iter.node = &channel->channels_by_session_id_ht_node.node; + ret = lttng_ht_del(the_consumer_data.channels_by_session_id_ht, + &iter); + LTTNG_ASSERT(!ret); + rcu_read_unlock(); + } + + channel->is_deleted = true; + call_rcu(&channel->node.head, free_channel_rcu); +end: + pthread_mutex_unlock(&channel->lock); + pthread_mutex_unlock(&the_consumer_data.lock); +} + +/* + * Iterate over the relayd hash table and destroy each element. Finally, + * destroy the whole hash table. + */ +static void cleanup_relayd_ht(void) +{ + struct lttng_ht_iter iter; + struct consumer_relayd_sock_pair *relayd; + + rcu_read_lock(); + + cds_lfht_for_each_entry(the_consumer_data.relayd_ht->ht, &iter.iter, + relayd, node.node) { + consumer_destroy_relayd(relayd); + } + + rcu_read_unlock(); + + lttng_ht_destroy(the_consumer_data.relayd_ht); +} + +/* + * Update the end point status of all streams having the given network sequence + * index (relayd index). + * + * It's atomically set without having the stream mutex locked which is fine + * because we handle the write/read race with a pipe wakeup for each thread. 
+ */ +static void update_endpoint_status_by_netidx(uint64_t net_seq_idx, + enum consumer_endpoint_status status) +{ + struct lttng_ht_iter iter; + struct lttng_consumer_stream *stream; + + DBG("Consumer set delete flag on stream by idx %" PRIu64, net_seq_idx); + + rcu_read_lock(); + + /* Let's begin with metadata */ + cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) { + if (stream->net_seq_idx == net_seq_idx) { + uatomic_set(&stream->endpoint_status, status); + DBG("Delete flag set to metadata stream %d", stream->wait_fd); + } + } + + /* Follow up by the data streams */ + cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) { + if (stream->net_seq_idx == net_seq_idx) { + uatomic_set(&stream->endpoint_status, status); + DBG("Delete flag set to data stream %d", stream->wait_fd); + } + } + rcu_read_unlock(); +} + +/* + * Cleanup a relayd object by flagging every associated streams for deletion, + * destroying the object meaning removing it from the relayd hash table, + * closing the sockets and freeing the memory in a RCU call. + * + * If a local data context is available, notify the threads that the streams' + * state have changed. + */ +void lttng_consumer_cleanup_relayd(struct consumer_relayd_sock_pair *relayd) +{ + uint64_t netidx; + + LTTNG_ASSERT(relayd); + + DBG("Cleaning up relayd object ID %" PRIu64, relayd->net_seq_idx); + + /* Save the net sequence index before destroying the object */ + netidx = relayd->net_seq_idx; + + /* + * Delete the relayd from the relayd hash table, close the sockets and free + * the object in a RCU call. + */ + consumer_destroy_relayd(relayd); + + /* Set inactive endpoint to all streams */ + update_endpoint_status_by_netidx(netidx, CONSUMER_ENDPOINT_INACTIVE); + + /* + * With a local data context, notify the threads that the streams' state + * have changed. 
The write() action on the pipe acts as an "implicit" + * memory barrier ordering the updates of the end point status from the + * read of this status which happens AFTER receiving this notify. + */ + notify_thread_lttng_pipe(relayd->ctx->consumer_data_pipe); + notify_thread_lttng_pipe(relayd->ctx->consumer_metadata_pipe); +} + +/* + * Flag a relayd socket pair for destruction. Destroy it if the refcount + * reaches zero. + * + * RCU read side lock MUST be aquired before calling this function. + */ +void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd) +{ + LTTNG_ASSERT(relayd); + + /* Set destroy flag for this object */ + uatomic_set(&relayd->destroy_flag, 1); + + /* Destroy the relayd if refcount is 0 */ + if (uatomic_read(&relayd->refcount) == 0) { + consumer_destroy_relayd(relayd); + } +} + +/* + * Completly destroy stream from every visiable data structure and the given + * hash table if one. + * + * One this call returns, the stream object is not longer usable nor visible. + */ +void consumer_del_stream(struct lttng_consumer_stream *stream, + struct lttng_ht *ht) +{ + consumer_stream_destroy(stream, ht); +} + +/* + * XXX naming of del vs destroy is all mixed up. + */ +void consumer_del_stream_for_data(struct lttng_consumer_stream *stream) +{ + consumer_stream_destroy(stream, data_ht); +} + +void consumer_del_stream_for_metadata(struct lttng_consumer_stream *stream) +{ + consumer_stream_destroy(stream, metadata_ht); +} + +void consumer_stream_update_channel_attributes( + struct lttng_consumer_stream *stream, + struct lttng_consumer_channel *channel) +{ + stream->channel_read_only_attributes.tracefile_size = + channel->tracefile_size; +} + +/* + * Add a stream to the global list protected by a mutex. 
+ */ +void consumer_add_data_stream(struct lttng_consumer_stream *stream) +{ + struct lttng_ht *ht = data_ht; + + LTTNG_ASSERT(stream); + LTTNG_ASSERT(ht); + + DBG3("Adding consumer stream %" PRIu64, stream->key); + + pthread_mutex_lock(&the_consumer_data.lock); + pthread_mutex_lock(&stream->chan->lock); + pthread_mutex_lock(&stream->chan->timer_lock); + pthread_mutex_lock(&stream->lock); + rcu_read_lock(); + + /* Steal stream identifier to avoid having streams with the same key */ + steal_stream_key(stream->key, ht); + + lttng_ht_add_unique_u64(ht, &stream->node); + + lttng_ht_add_u64(the_consumer_data.stream_per_chan_id_ht, + &stream->node_channel_id); + + /* + * Add stream to the stream_list_ht of the consumer data. No need to steal + * the key since the HT does not use it and we allow to add redundant keys + * into this table. + */ + lttng_ht_add_u64(the_consumer_data.stream_list_ht, + &stream->node_session_id); + + /* + * When nb_init_stream_left reaches 0, we don't need to trigger any action + * in terms of destroying the associated channel, because the action that + * causes the count to become 0 also causes a stream to be added. The + * channel deletion will thus be triggered by the following removal of this + * stream. + */ + if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) { + /* Increment refcount before decrementing nb_init_stream_left */ + cmm_smp_wmb(); + uatomic_dec(&stream->chan->nb_init_stream_left); + } + + /* Update consumer data once the node is inserted. */ + the_consumer_data.stream_count++; + the_consumer_data.need_update = 1; + + rcu_read_unlock(); + pthread_mutex_unlock(&stream->lock); + pthread_mutex_unlock(&stream->chan->timer_lock); + pthread_mutex_unlock(&stream->chan->lock); + pthread_mutex_unlock(&the_consumer_data.lock); +} + +/* + * Add relayd socket to global consumer data hashtable. RCU read side lock MUST + * be acquired before calling this. 
+ */ +static int add_relayd(struct consumer_relayd_sock_pair *relayd) +{ + int ret = 0; + struct lttng_ht_node_u64 *node; + struct lttng_ht_iter iter; + + LTTNG_ASSERT(relayd); + + lttng_ht_lookup(the_consumer_data.relayd_ht, &relayd->net_seq_idx, + &iter); + node = lttng_ht_iter_get_node_u64(&iter); + if (node != NULL) { + goto end; + } + lttng_ht_add_unique_u64(the_consumer_data.relayd_ht, &relayd->node); + +end: + return ret; +} + +/* + * Allocate and return a consumer relayd socket. + */ +static struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair( + uint64_t net_seq_idx) +{ + struct consumer_relayd_sock_pair *obj = NULL; + + /* net sequence index of -1 is a failure */ + if (net_seq_idx == (uint64_t) -1ULL) { + goto error; + } + + obj = (consumer_relayd_sock_pair *) zmalloc(sizeof(struct consumer_relayd_sock_pair)); + if (obj == NULL) { + PERROR("zmalloc relayd sock"); + goto error; + } + + obj->net_seq_idx = net_seq_idx; + obj->refcount = 0; + obj->destroy_flag = 0; + obj->control_sock.sock.fd = -1; + obj->data_sock.sock.fd = -1; + lttng_ht_node_init_u64(&obj->node, obj->net_seq_idx); + pthread_mutex_init(&obj->ctrl_sock_mutex, NULL); + +error: + return obj; +} + +/* + * Find a relayd socket pair in the global consumer data. + * + * Return the object if found else NULL. + * RCU read-side lock must be held across this call and while using the + * returned object. 
+ */ +struct consumer_relayd_sock_pair *consumer_find_relayd(uint64_t key) +{ + struct lttng_ht_iter iter; + struct lttng_ht_node_u64 *node; + struct consumer_relayd_sock_pair *relayd = NULL; + + /* Negative keys are lookup failures */ + if (key == (uint64_t) -1ULL) { + goto error; + } + + lttng_ht_lookup(the_consumer_data.relayd_ht, &key, &iter); + node = lttng_ht_iter_get_node_u64(&iter); + if (node != NULL) { + relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node); + } + +error: + return relayd; +} + +/* + * Find a relayd and send the stream + * + * Returns 0 on success, < 0 on error + */ +int consumer_send_relayd_stream(struct lttng_consumer_stream *stream, + char *path) +{ + int ret = 0; + struct consumer_relayd_sock_pair *relayd; + + LTTNG_ASSERT(stream); + LTTNG_ASSERT(stream->net_seq_idx != -1ULL); + LTTNG_ASSERT(path); + + /* The stream is not metadata. Get relayd reference if exists. */ + rcu_read_lock(); + relayd = consumer_find_relayd(stream->net_seq_idx); + if (relayd != NULL) { + /* Add stream on the relayd */ + pthread_mutex_lock(&relayd->ctrl_sock_mutex); + ret = relayd_add_stream(&relayd->control_sock, stream->name, + get_consumer_domain(), path, &stream->relayd_stream_id, + stream->chan->tracefile_size, + stream->chan->tracefile_count, + stream->trace_chunk); + pthread_mutex_unlock(&relayd->ctrl_sock_mutex); + if (ret < 0) { + ERR("Relayd add stream failed. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx); + lttng_consumer_cleanup_relayd(relayd); + goto end; + } + + uatomic_inc(&relayd->refcount); + stream->sent_to_relayd = 1; + } else { + ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. 
Can't send it.", + stream->key, stream->net_seq_idx); + ret = -1; + goto end; + } + + DBG("Stream %s with key %" PRIu64 " sent to relayd id %" PRIu64, + stream->name, stream->key, stream->net_seq_idx); + +end: + rcu_read_unlock(); + return ret; +} + +/* + * Find a relayd and send the streams sent message + * + * Returns 0 on success, < 0 on error + */ +int consumer_send_relayd_streams_sent(uint64_t net_seq_idx) +{ + int ret = 0; + struct consumer_relayd_sock_pair *relayd; + + LTTNG_ASSERT(net_seq_idx != -1ULL); + + /* The stream is not metadata. Get relayd reference if exists. */ + rcu_read_lock(); + relayd = consumer_find_relayd(net_seq_idx); + if (relayd != NULL) { + /* Add stream on the relayd */ + pthread_mutex_lock(&relayd->ctrl_sock_mutex); + ret = relayd_streams_sent(&relayd->control_sock); + pthread_mutex_unlock(&relayd->ctrl_sock_mutex); + if (ret < 0) { + ERR("Relayd streams sent failed. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx); + lttng_consumer_cleanup_relayd(relayd); + goto end; + } + } else { + ERR("Relayd ID %" PRIu64 " unknown. Can't send streams_sent.", + net_seq_idx); + ret = -1; + goto end; + } + + ret = 0; + DBG("All streams sent relayd id %" PRIu64, net_seq_idx); + +end: + rcu_read_unlock(); + return ret; +} + +/* + * Find a relayd and close the stream + */ +void close_relayd_stream(struct lttng_consumer_stream *stream) +{ + struct consumer_relayd_sock_pair *relayd; + + /* The stream is not metadata. Get relayd reference if exists. */ + rcu_read_lock(); + relayd = consumer_find_relayd(stream->net_seq_idx); + if (relayd) { + consumer_stream_relayd_close(stream, relayd); + } + rcu_read_unlock(); +} + +/* + * Handle stream for relayd transmission if the stream applies for network + * streaming where the net sequence index is set. + * + * Return destination file descriptor or negative value on error. 
+ */ +static int write_relayd_stream_header(struct lttng_consumer_stream *stream, + size_t data_size, unsigned long padding, + struct consumer_relayd_sock_pair *relayd) +{ + int outfd = -1, ret; + struct lttcomm_relayd_data_hdr data_hdr; + + /* Safety net */ + LTTNG_ASSERT(stream); + LTTNG_ASSERT(relayd); + + /* Reset data header */ + memset(&data_hdr, 0, sizeof(data_hdr)); + + if (stream->metadata_flag) { + /* Caller MUST acquire the relayd control socket lock */ + ret = relayd_send_metadata(&relayd->control_sock, data_size); + if (ret < 0) { + goto error; + } + + /* Metadata are always sent on the control socket. */ + outfd = relayd->control_sock.sock.fd; + } else { + /* Set header with stream information */ + data_hdr.stream_id = htobe64(stream->relayd_stream_id); + data_hdr.data_size = htobe32(data_size); + data_hdr.padding_size = htobe32(padding); + + /* + * Note that net_seq_num below is assigned with the *current* value of + * next_net_seq_num and only after that the next_net_seq_num will be + * increment. This is why when issuing a command on the relayd using + * this next value, 1 should always be substracted in order to compare + * the last seen sequence number on the relayd side to the last sent. + */ + data_hdr.net_seq_num = htobe64(stream->next_net_seq_num); + /* Other fields are zeroed previously */ + + ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr, + sizeof(data_hdr)); + if (ret < 0) { + goto error; + } + + ++stream->next_net_seq_num; + + /* Set to go on data socket */ + outfd = relayd->data_sock.sock.fd; + } + +error: + return outfd; +} + +/* + * Write a character on the metadata poll pipe to wake the metadata thread. + * Returns 0 on success, -1 on error. 
+ */ +int consumer_metadata_wakeup_pipe(const struct lttng_consumer_channel *channel) +{ + int ret = 0; + + DBG("Waking up metadata poll thread (writing to pipe): channel name = '%s'", + channel->name); + if (channel->monitor && channel->metadata_stream) { + const char dummy = 'c'; + const ssize_t write_ret = lttng_write( + channel->metadata_stream->ust_metadata_poll_pipe[1], + &dummy, 1); + + if (write_ret < 1) { + if (errno == EWOULDBLOCK) { + /* + * This is fine, the metadata poll thread + * is having a hard time keeping-up, but + * it will eventually wake-up and consume + * the available data. + */ + ret = 0; + } else { + PERROR("Failed to write to UST metadata pipe while attempting to wake-up the metadata poll thread"); + ret = -1; + goto end; + } + } + } + +end: + return ret; +} + +/* + * Trigger a dump of the metadata content. Following/during the succesful + * completion of this call, the metadata poll thread will start receiving + * metadata packets to consume. + * + * The caller must hold the channel and stream locks. + */ +static +int consumer_metadata_stream_dump(struct lttng_consumer_stream *stream) +{ + int ret; + + ASSERT_LOCKED(stream->chan->lock); + ASSERT_LOCKED(stream->lock); + LTTNG_ASSERT(stream->metadata_flag); + LTTNG_ASSERT(stream->chan->trace_chunk); + + switch (the_consumer_data.type) { + case LTTNG_CONSUMER_KERNEL: + /* + * Reset the position of what has been read from the + * metadata cache to 0 so we can dump it again. + */ + ret = kernctl_metadata_cache_dump(stream->wait_fd); + break; + case LTTNG_CONSUMER32_UST: + case LTTNG_CONSUMER64_UST: + /* + * Reset the position pushed from the metadata cache so it + * will write from the beginning on the next push. 
+ */ + stream->ust_metadata_pushed = 0; + ret = consumer_metadata_wakeup_pipe(stream->chan); + break; + default: + ERR("Unknown consumer_data type"); + abort(); + } + if (ret < 0) { + ERR("Failed to dump the metadata cache"); + } + return ret; +} + +static +int lttng_consumer_channel_set_trace_chunk( + struct lttng_consumer_channel *channel, + struct lttng_trace_chunk *new_trace_chunk) +{ + pthread_mutex_lock(&channel->lock); + if (channel->is_deleted) { + /* + * The channel has been logically deleted and should no longer + * be used. It has released its reference to its current trace + * chunk and should not acquire a new one. + * + * Return success as there is nothing for the caller to do. + */ + goto end; + } + + /* + * The acquisition of the reference cannot fail (barring + * a severe internal error) since a reference to the published + * chunk is already held by the caller. + */ + if (new_trace_chunk) { + const bool acquired_reference = lttng_trace_chunk_get( + new_trace_chunk); + + LTTNG_ASSERT(acquired_reference); + } + + lttng_trace_chunk_put(channel->trace_chunk); + channel->trace_chunk = new_trace_chunk; +end: + pthread_mutex_unlock(&channel->lock); + return 0; +} + +/* + * Allocate and return a new lttng_consumer_channel object using the given key + * to initialize the hash table node. + * + * On error, return NULL. 
+ */ +struct lttng_consumer_channel *consumer_allocate_channel(uint64_t key, + uint64_t session_id, + const uint64_t *chunk_id, + const char *pathname, + const char *name, + uint64_t relayd_id, + enum lttng_event_output output, + uint64_t tracefile_size, + uint64_t tracefile_count, + uint64_t session_id_per_pid, + unsigned int monitor, + unsigned int live_timer_interval, + bool is_in_live_session, + const char *root_shm_path, + const char *shm_path) +{ + struct lttng_consumer_channel *channel = NULL; + struct lttng_trace_chunk *trace_chunk = NULL; + + if (chunk_id) { + trace_chunk = lttng_trace_chunk_registry_find_chunk( + the_consumer_data.chunk_registry, session_id, + *chunk_id); + if (!trace_chunk) { + ERR("Failed to find trace chunk reference during creation of channel"); + goto end; + } + } + + channel = (lttng_consumer_channel *) zmalloc(sizeof(*channel)); + if (channel == NULL) { + PERROR("malloc struct lttng_consumer_channel"); + goto end; + } + + channel->key = key; + channel->refcount = 0; + channel->session_id = session_id; + channel->session_id_per_pid = session_id_per_pid; + channel->relayd_id = relayd_id; + channel->tracefile_size = tracefile_size; + channel->tracefile_count = tracefile_count; + channel->monitor = monitor; + channel->live_timer_interval = live_timer_interval; + channel->is_live = is_in_live_session; + pthread_mutex_init(&channel->lock, NULL); + pthread_mutex_init(&channel->timer_lock, NULL); + + switch (output) { + case LTTNG_EVENT_SPLICE: + channel->output = CONSUMER_CHANNEL_SPLICE; + break; + case LTTNG_EVENT_MMAP: + channel->output = CONSUMER_CHANNEL_MMAP; + break; + default: + abort(); + free(channel); + channel = NULL; + goto end; + } + + /* + * In monitor mode, the streams associated with the channel will be put in + * a special list ONLY owned by this channel. So, the refcount is set to 1 + * here meaning that the channel itself has streams that are referenced. 
+ * + * On a channel deletion, once the channel is no longer visible, the + * refcount is decremented and checked for a zero value to delete it. With + * streams in no monitor mode, it will now be safe to destroy the channel. + */ + if (!channel->monitor) { + channel->refcount = 1; + } + + strncpy(channel->pathname, pathname, sizeof(channel->pathname)); + channel->pathname[sizeof(channel->pathname) - 1] = '\0'; + + strncpy(channel->name, name, sizeof(channel->name)); + channel->name[sizeof(channel->name) - 1] = '\0'; + + if (root_shm_path) { + strncpy(channel->root_shm_path, root_shm_path, sizeof(channel->root_shm_path)); + channel->root_shm_path[sizeof(channel->root_shm_path) - 1] = '\0'; + } + if (shm_path) { + strncpy(channel->shm_path, shm_path, sizeof(channel->shm_path)); + channel->shm_path[sizeof(channel->shm_path) - 1] = '\0'; + } + + lttng_ht_node_init_u64(&channel->node, channel->key); + lttng_ht_node_init_u64(&channel->channels_by_session_id_ht_node, + channel->session_id); + + channel->wait_fd = -1; + CDS_INIT_LIST_HEAD(&channel->streams.head); + + if (trace_chunk) { + int ret = lttng_consumer_channel_set_trace_chunk(channel, + trace_chunk); + if (ret) { + goto error; + } + } + + DBG("Allocated channel (key %" PRIu64 ")", channel->key); + +end: + lttng_trace_chunk_put(trace_chunk); + return channel; +error: + consumer_del_channel(channel); + channel = NULL; + goto end; +} + +/* + * Add a channel to the global list protected by a mutex. + * + * Always return 0 indicating success. + */ +int consumer_add_channel(struct lttng_consumer_channel *channel, + struct lttng_consumer_local_data *ctx) +{ + pthread_mutex_lock(&the_consumer_data.lock); + pthread_mutex_lock(&channel->lock); + pthread_mutex_lock(&channel->timer_lock); + + /* + * This gives us a guarantee that the channel we are about to add to the + * channel hash table will be unique. See this function comment on the why + * we need to steel the channel key at this stage. 
+ */ + steal_channel_key(channel->key); + + rcu_read_lock(); + lttng_ht_add_unique_u64(the_consumer_data.channel_ht, &channel->node); + lttng_ht_add_u64(the_consumer_data.channels_by_session_id_ht, + &channel->channels_by_session_id_ht_node); + rcu_read_unlock(); + channel->is_published = true; + + pthread_mutex_unlock(&channel->timer_lock); + pthread_mutex_unlock(&channel->lock); + pthread_mutex_unlock(&the_consumer_data.lock); + + if (channel->wait_fd != -1 && channel->type == CONSUMER_CHANNEL_TYPE_DATA) { + notify_channel_pipe(ctx, channel, -1, CONSUMER_CHANNEL_ADD); + } + + return 0; +} + +/* + * Allocate the pollfd structure and the local view of the out fds to avoid + * doing a lookup in the linked list and concurrency issues when writing is + * needed. Called with consumer_data.lock held. + * + * Returns the number of fds in the structures. + */ +static int update_poll_array(struct lttng_consumer_local_data *ctx, + struct pollfd **pollfd, struct lttng_consumer_stream **local_stream, + struct lttng_ht *ht, int *nb_inactive_fd) +{ + int i = 0; + struct lttng_ht_iter iter; + struct lttng_consumer_stream *stream; + + LTTNG_ASSERT(ctx); + LTTNG_ASSERT(ht); + LTTNG_ASSERT(pollfd); + LTTNG_ASSERT(local_stream); + + DBG("Updating poll fd array"); + *nb_inactive_fd = 0; + rcu_read_lock(); + cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) { + /* + * Only active streams with an active end point can be added to the + * poll set and local stream storage of the thread. + * + * There is a potential race here for endpoint_status to be updated + * just after the check. However, this is OK since the stream(s) will + * be deleted once the thread is notified that the end point state has + * changed where this function will be called back again. + * + * We track the number of inactive FDs because they still need to be + * closed by the polling thread after a wakeup on the data_pipe or + * metadata_pipe. 
+ */ + if (stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) { + (*nb_inactive_fd)++; + continue; + } + /* + * This clobbers way too much the debug output. Uncomment that if you + * need it for debugging purposes. + */ + (*pollfd)[i].fd = stream->wait_fd; + (*pollfd)[i].events = POLLIN | POLLPRI; + local_stream[i] = stream; + i++; + } + rcu_read_unlock(); + + /* + * Insert the consumer_data_pipe at the end of the array and don't + * increment i so nb_fd is the number of real FD. + */ + (*pollfd)[i].fd = lttng_pipe_get_readfd(ctx->consumer_data_pipe); + (*pollfd)[i].events = POLLIN | POLLPRI; + + (*pollfd)[i + 1].fd = lttng_pipe_get_readfd(ctx->consumer_wakeup_pipe); + (*pollfd)[i + 1].events = POLLIN | POLLPRI; + return i; +} + +/* + * Poll on the should_quit pipe and the command socket return -1 on + * error, 1 if should exit, 0 if data is available on the command socket + */ +int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll) +{ + int num_rdy; + +restart: + num_rdy = poll(consumer_sockpoll, 2, -1); + if (num_rdy == -1) { + /* + * Restart interrupted system call. + */ + if (errno == EINTR) { + goto restart; + } + PERROR("Poll error"); + return -1; + } + if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) { + DBG("consumer_should_quit wake up"); + return 1; + } + return 0; +} + +/* + * Set the error socket. + */ +void lttng_consumer_set_error_sock(struct lttng_consumer_local_data *ctx, + int sock) +{ + ctx->consumer_error_socket = sock; +} + +/* + * Set the command socket path. + */ +void lttng_consumer_set_command_sock_path( + struct lttng_consumer_local_data *ctx, char *sock) +{ + ctx->consumer_command_sock_path = sock; +} + +/* + * Send return code to the session daemon. 
+ * If the socket is not defined, we return 0, it is not a fatal error + */ +int lttng_consumer_send_error(struct lttng_consumer_local_data *ctx, int cmd) +{ + if (ctx->consumer_error_socket > 0) { + return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd, + sizeof(enum lttcomm_sessiond_command)); + } + + return 0; +} + +/* + * Close all the tracefiles and stream fds and MUST be called when all + * instances are destroyed i.e. when all threads were joined and are ended. + */ +void lttng_consumer_cleanup(void) +{ + struct lttng_ht_iter iter; + struct lttng_consumer_channel *channel; + unsigned int trace_chunks_left; + + rcu_read_lock(); + + cds_lfht_for_each_entry(the_consumer_data.channel_ht->ht, &iter.iter, + channel, node.node) { + consumer_del_channel(channel); + } + + rcu_read_unlock(); + + lttng_ht_destroy(the_consumer_data.channel_ht); + lttng_ht_destroy(the_consumer_data.channels_by_session_id_ht); + + cleanup_relayd_ht(); + + lttng_ht_destroy(the_consumer_data.stream_per_chan_id_ht); + + /* + * This HT contains streams that are freed by either the metadata thread or + * the data thread so we do *nothing* on the hash table and simply destroy + * it. + */ + lttng_ht_destroy(the_consumer_data.stream_list_ht); + + /* + * Trace chunks in the registry may still exist if the session + * daemon has encountered an internal error and could not + * tear down its sessions and/or trace chunks properly. + * + * Release the session daemon's implicit reference to any remaining + * trace chunk and print an error if any trace chunk was found. Note + * that there are _no_ legitimate cases for trace chunks to be left, + * it is a leak. However, it can happen following a crash of the + * session daemon and not emptying the registry would cause an assertion + * to hit. + */ + trace_chunks_left = lttng_trace_chunk_registry_put_each_chunk( + the_consumer_data.chunk_registry); + if (trace_chunks_left) { + ERR("%u trace chunks are leaked by lttng-consumerd. 
" + "This can be caused by an internal error of the session daemon.", + trace_chunks_left); + } + /* Run all callbacks freeing each chunk. */ + rcu_barrier(); + lttng_trace_chunk_registry_destroy(the_consumer_data.chunk_registry); +} + +/* + * Called from signal handler. + */ +void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx) +{ + ssize_t ret; + + CMM_STORE_SHARED(consumer_quit, 1); + ret = lttng_write(ctx->consumer_should_quit[1], "4", 1); + if (ret < 1) { + PERROR("write consumer quit"); + } + + DBG("Consumer flag that it should quit"); +} + + +/* + * Flush pending writes to trace output disk file. + */ +static +void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream, + off_t orig_offset) +{ + int ret; + int outfd = stream->out_fd; + + /* + * This does a blocking write-and-wait on any page that belongs to the + * subbuffer prior to the one we just wrote. + * Don't care about error values, as these are just hints and ways to + * limit the amount of page cache used. + */ + if (orig_offset < stream->max_sb_size) { + return; + } + lttng_sync_file_range(outfd, orig_offset - stream->max_sb_size, + stream->max_sb_size, + SYNC_FILE_RANGE_WAIT_BEFORE + | SYNC_FILE_RANGE_WRITE + | SYNC_FILE_RANGE_WAIT_AFTER); + /* + * Give hints to the kernel about how we access the file: + * POSIX_FADV_DONTNEED : we won't re-access data in a near future after + * we write it. + * + * We need to call fadvise again after the file grows because the + * kernel does not seem to apply fadvise to non-existing parts of the + * file. + * + * Call fadvise _after_ having waited for the page writeback to + * complete because the dirty page writeback semantic is not well + * defined. So it can be expected to lead to lower throughput in + * streaming. 
+ */ + ret = posix_fadvise(outfd, orig_offset - stream->max_sb_size, + stream->max_sb_size, POSIX_FADV_DONTNEED); + if (ret && ret != -ENOSYS) { + errno = ret; + PERROR("posix_fadvise on fd %i", outfd); + } +} + +/* + * Initialise the necessary environnement : + * - create a new context + * - create the poll_pipe + * - create the should_quit pipe (for signal handler) + * - create the thread pipe (for splice) + * + * Takes a function pointer as argument, this function is called when data is + * available on a buffer. This function is responsible to do the + * kernctl_get_next_subbuf, read the data with mmap or splice depending on the + * buffer configuration and then kernctl_put_next_subbuf at the end. + * + * Returns a pointer to the new context or NULL on error. + */ +struct lttng_consumer_local_data *lttng_consumer_create( + enum lttng_consumer_type type, + ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream, + struct lttng_consumer_local_data *ctx, bool locked_by_caller), + int (*recv_channel)(struct lttng_consumer_channel *channel), + int (*recv_stream)(struct lttng_consumer_stream *stream), + int (*update_stream)(uint64_t stream_key, uint32_t state)) +{ + int ret; + struct lttng_consumer_local_data *ctx; + + LTTNG_ASSERT(the_consumer_data.type == LTTNG_CONSUMER_UNKNOWN || + the_consumer_data.type == type); + the_consumer_data.type = type; + + ctx = (lttng_consumer_local_data *) zmalloc(sizeof(struct lttng_consumer_local_data)); + if (ctx == NULL) { + PERROR("allocating context"); + goto error; + } + + ctx->consumer_error_socket = -1; + ctx->consumer_metadata_socket = -1; + pthread_mutex_init(&ctx->metadata_socket_lock, NULL); + /* assign the callbacks */ + ctx->on_buffer_ready = buffer_ready; + ctx->on_recv_channel = recv_channel; + ctx->on_recv_stream = recv_stream; + ctx->on_update_stream = update_stream; + + ctx->consumer_data_pipe = lttng_pipe_open(0); + if (!ctx->consumer_data_pipe) { + goto error_poll_pipe; + } + + ctx->consumer_wakeup_pipe = 
lttng_pipe_open(0); + if (!ctx->consumer_wakeup_pipe) { + goto error_wakeup_pipe; + } + + ret = pipe(ctx->consumer_should_quit); + if (ret < 0) { + PERROR("Error creating recv pipe"); + goto error_quit_pipe; + } + + ret = pipe(ctx->consumer_channel_pipe); + if (ret < 0) { + PERROR("Error creating channel pipe"); + goto error_channel_pipe; + } + + ctx->consumer_metadata_pipe = lttng_pipe_open(0); + if (!ctx->consumer_metadata_pipe) { + goto error_metadata_pipe; + } + + ctx->channel_monitor_pipe = -1; + + return ctx; + +error_metadata_pipe: + utils_close_pipe(ctx->consumer_channel_pipe); +error_channel_pipe: + utils_close_pipe(ctx->consumer_should_quit); +error_quit_pipe: + lttng_pipe_destroy(ctx->consumer_wakeup_pipe); +error_wakeup_pipe: + lttng_pipe_destroy(ctx->consumer_data_pipe); +error_poll_pipe: + free(ctx); +error: + return NULL; +} + +/* + * Iterate over all streams of the hashtable and free them properly. + */ +static void destroy_data_stream_ht(struct lttng_ht *ht) +{ + struct lttng_ht_iter iter; + struct lttng_consumer_stream *stream; + + if (ht == NULL) { + return; + } + + rcu_read_lock(); + cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) { + /* + * Ignore return value since we are currently cleaning up so any error + * can't be handled. + */ + (void) consumer_del_stream(stream, ht); + } + rcu_read_unlock(); + + lttng_ht_destroy(ht); +} + +/* + * Iterate over all streams of the metadata hashtable and free them + * properly. + */ +static void destroy_metadata_stream_ht(struct lttng_ht *ht) +{ + struct lttng_ht_iter iter; + struct lttng_consumer_stream *stream; + + if (ht == NULL) { + return; + } + + rcu_read_lock(); + cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) { + /* + * Ignore return value since we are currently cleaning up so any error + * can't be handled. 
+ */ + (void) consumer_del_metadata_stream(stream, ht); + } + rcu_read_unlock(); + + lttng_ht_destroy(ht); +} + +/* + * Close all fds associated with the instance and free the context. + */ +void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx) +{ + int ret; + + DBG("Consumer destroying it. Closing everything."); + + if (!ctx) { + return; + } + + destroy_data_stream_ht(data_ht); + destroy_metadata_stream_ht(metadata_ht); + + ret = close(ctx->consumer_error_socket); + if (ret) { + PERROR("close"); + } + ret = close(ctx->consumer_metadata_socket); + if (ret) { + PERROR("close"); + } + utils_close_pipe(ctx->consumer_channel_pipe); + lttng_pipe_destroy(ctx->consumer_data_pipe); + lttng_pipe_destroy(ctx->consumer_metadata_pipe); + lttng_pipe_destroy(ctx->consumer_wakeup_pipe); + utils_close_pipe(ctx->consumer_should_quit); + + unlink(ctx->consumer_command_sock_path); + free(ctx); +} + +/* + * Write the metadata stream id on the specified file descriptor. + */ +static int write_relayd_metadata_id(int fd, + struct lttng_consumer_stream *stream, + unsigned long padding) +{ + ssize_t ret; + struct lttcomm_relayd_metadata_payload hdr; + + hdr.stream_id = htobe64(stream->relayd_stream_id); + hdr.padding_size = htobe32(padding); + ret = lttng_write(fd, (void *) &hdr, sizeof(hdr)); + if (ret < sizeof(hdr)) { + /* + * This error means that the fd's end is closed so ignore the PERROR + * not to clubber the error output since this can happen in a normal + * code path. + */ + if (errno != EPIPE) { + PERROR("write metadata stream id"); + } + DBG3("Consumer failed to write relayd metadata id (errno: %d)", errno); + /* + * Set ret to a negative value because if ret != sizeof(hdr), we don't + * handle writting the missing part so report that as an error and + * don't lie to the caller. 
+ */ + ret = -1; + goto end; + } + DBG("Metadata stream id %" PRIu64 " with padding %lu written before data", + stream->relayd_stream_id, padding); + +end: + return (int) ret; +} + +/* + * Mmap the ring buffer, read it and write the data to the tracefile. This is a + * core function for writing trace buffers to either the local filesystem or + * the network. + * + * It must be called with the stream and the channel lock held. + * + * Careful review MUST be put if any changes occur! + * + * Returns the number of bytes written + */ +ssize_t lttng_consumer_on_read_subbuffer_mmap( + struct lttng_consumer_stream *stream, + const struct lttng_buffer_view *buffer, + unsigned long padding) +{ + ssize_t ret = 0; + off_t orig_offset = stream->out_fd_offset; + /* Default is on the disk */ + int outfd = stream->out_fd; + struct consumer_relayd_sock_pair *relayd = NULL; + unsigned int relayd_hang_up = 0; + const size_t subbuf_content_size = buffer->size - padding; + size_t write_len; + + /* RCU lock for the relayd pointer */ + rcu_read_lock(); + LTTNG_ASSERT(stream->net_seq_idx != (uint64_t) -1ULL || + stream->trace_chunk); + + /* Flag that the current stream if set for network streaming. */ + if (stream->net_seq_idx != (uint64_t) -1ULL) { + relayd = consumer_find_relayd(stream->net_seq_idx); + if (relayd == NULL) { + ret = -EPIPE; + goto end; + } + } + + /* Handle stream on the relayd if the output is on the network */ + if (relayd) { + unsigned long netlen = subbuf_content_size; + + /* + * Lock the control socket for the complete duration of the function + * since from this point on we will use the socket. + */ + if (stream->metadata_flag) { + /* Metadata requires the control socket. 
*/ + pthread_mutex_lock(&relayd->ctrl_sock_mutex); + if (stream->reset_metadata_flag) { + ret = relayd_reset_metadata(&relayd->control_sock, + stream->relayd_stream_id, + stream->metadata_version); + if (ret < 0) { + relayd_hang_up = 1; + goto write_error; + } + stream->reset_metadata_flag = 0; + } + netlen += sizeof(struct lttcomm_relayd_metadata_payload); + } + + ret = write_relayd_stream_header(stream, netlen, padding, relayd); + if (ret < 0) { + relayd_hang_up = 1; + goto write_error; + } + /* Use the returned socket. */ + outfd = ret; + + /* Write metadata stream id before payload */ + if (stream->metadata_flag) { + ret = write_relayd_metadata_id(outfd, stream, padding); + if (ret < 0) { + relayd_hang_up = 1; + goto write_error; + } + } + + write_len = subbuf_content_size; + } else { + /* No streaming; we have to write the full padding. */ + if (stream->metadata_flag && stream->reset_metadata_flag) { + ret = utils_truncate_stream_file(stream->out_fd, 0); + if (ret < 0) { + ERR("Reset metadata file"); + goto end; + } + stream->reset_metadata_flag = 0; + } + + /* + * Check if we need to change the tracefile before writing the packet. + */ + if (stream->chan->tracefile_size > 0 && + (stream->tracefile_size_current + buffer->size) > + stream->chan->tracefile_size) { + ret = consumer_stream_rotate_output_files(stream); + if (ret) { + goto end; + } + outfd = stream->out_fd; + orig_offset = 0; + } + stream->tracefile_size_current += buffer->size; + write_len = buffer->size; + } + + /* + * This call guarantee that len or less is returned. It's impossible to + * receive a ret value that is bigger than len. + */ + ret = lttng_write(outfd, buffer->data, write_len); + DBG("Consumer mmap write() ret %zd (len %zu)", ret, write_len); + if (ret < 0 || ((size_t) ret != write_len)) { + /* + * Report error to caller if nothing was written else at least send the + * amount written. + */ + if (ret < 0) { + ret = -errno; + } + relayd_hang_up = 1; + + /* Socket operation failed. 
We consider the relayd dead */ + if (errno == EPIPE) { + /* + * This is possible if the fd is closed on the other side + * (outfd) or any write problem. It can be verbose a bit for a + * normal execution if for instance the relayd is stopped + * abruptly. This can happen so set this to a DBG statement. + */ + DBG("Consumer mmap write detected relayd hang up"); + } else { + /* Unhandled error, print it and stop function right now. */ + PERROR("Error in write mmap (ret %zd != write_len %zu)", ret, + write_len); + } + goto write_error; + } + stream->output_written += ret; + + /* This call is useless on a socket so better save a syscall. */ + if (!relayd) { + /* This won't block, but will start writeout asynchronously */ + lttng_sync_file_range(outfd, stream->out_fd_offset, write_len, + SYNC_FILE_RANGE_WRITE); + stream->out_fd_offset += write_len; + lttng_consumer_sync_trace_file(stream, orig_offset); + } + +write_error: + /* + * This is a special case that the relayd has closed its socket. Let's + * cleanup the relayd object and all associated streams. + */ + if (relayd && relayd_hang_up) { + ERR("Relayd hangup. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx); + lttng_consumer_cleanup_relayd(relayd); + } + +end: + /* Unlock only if ctrl socket used */ + if (relayd && stream->metadata_flag) { + pthread_mutex_unlock(&relayd->ctrl_sock_mutex); + } + + rcu_read_unlock(); + return ret; +} + +/* + * Splice the data from the ring buffer to the tracefile. + * + * It must be called with the stream lock held. + * + * Returns the number of bytes spliced. 
+ */ +ssize_t lttng_consumer_on_read_subbuffer_splice( + struct lttng_consumer_local_data *ctx, + struct lttng_consumer_stream *stream, unsigned long len, + unsigned long padding) +{ + ssize_t ret = 0, written = 0, ret_splice = 0; + loff_t offset = 0; + off_t orig_offset = stream->out_fd_offset; + int fd = stream->wait_fd; + /* Default is on the disk */ + int outfd = stream->out_fd; + struct consumer_relayd_sock_pair *relayd = NULL; + int *splice_pipe; + unsigned int relayd_hang_up = 0; + + switch (the_consumer_data.type) { + case LTTNG_CONSUMER_KERNEL: + break; + case LTTNG_CONSUMER32_UST: + case LTTNG_CONSUMER64_UST: + /* Not supported for user space tracing */ + return -ENOSYS; + default: + ERR("Unknown consumer_data type"); + abort(); + } + + /* RCU lock for the relayd pointer */ + rcu_read_lock(); + + /* Flag that the current stream if set for network streaming. */ + if (stream->net_seq_idx != (uint64_t) -1ULL) { + relayd = consumer_find_relayd(stream->net_seq_idx); + if (relayd == NULL) { + written = -ret; + goto end; + } + } + splice_pipe = stream->splice_pipe; + + /* Write metadata stream id before payload */ + if (relayd) { + unsigned long total_len = len; + + if (stream->metadata_flag) { + /* + * Lock the control socket for the complete duration of the function + * since from this point on we will use the socket. 
+ */ + pthread_mutex_lock(&relayd->ctrl_sock_mutex); + + if (stream->reset_metadata_flag) { + ret = relayd_reset_metadata(&relayd->control_sock, + stream->relayd_stream_id, + stream->metadata_version); + if (ret < 0) { + relayd_hang_up = 1; + goto write_error; + } + stream->reset_metadata_flag = 0; + } + ret = write_relayd_metadata_id(splice_pipe[1], stream, + padding); + if (ret < 0) { + written = ret; + relayd_hang_up = 1; + goto write_error; + } + + total_len += sizeof(struct lttcomm_relayd_metadata_payload); + } + + ret = write_relayd_stream_header(stream, total_len, padding, relayd); + if (ret < 0) { + written = ret; + relayd_hang_up = 1; + goto write_error; + } + /* Use the returned socket. */ + outfd = ret; + } else { + /* No streaming, we have to set the len with the full padding */ + len += padding; + + if (stream->metadata_flag && stream->reset_metadata_flag) { + ret = utils_truncate_stream_file(stream->out_fd, 0); + if (ret < 0) { + ERR("Reset metadata file"); + goto end; + } + stream->reset_metadata_flag = 0; + } + /* + * Check if we need to change the tracefile before writing the packet. 
+ */ + if (stream->chan->tracefile_size > 0 && + (stream->tracefile_size_current + len) > + stream->chan->tracefile_size) { + ret = consumer_stream_rotate_output_files(stream); + if (ret < 0) { + written = ret; + goto end; + } + outfd = stream->out_fd; + orig_offset = 0; + } + stream->tracefile_size_current += len; + } + + while (len > 0) { + DBG("splice chan to pipe offset %lu of len %lu (fd : %d, pipe: %d)", + (unsigned long)offset, len, fd, splice_pipe[1]); + ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len, + SPLICE_F_MOVE | SPLICE_F_MORE); + DBG("splice chan to pipe, ret %zd", ret_splice); + if (ret_splice < 0) { + ret = errno; + written = -ret; + PERROR("Error in relay splice"); + goto splice_error; + } + + /* Handle stream on the relayd if the output is on the network */ + if (relayd && stream->metadata_flag) { + size_t metadata_payload_size = + sizeof(struct lttcomm_relayd_metadata_payload); + + /* Update counter to fit the spliced data */ + ret_splice += metadata_payload_size; + len += metadata_payload_size; + /* + * We do this so the return value can match the len passed as + * argument to this function. + */ + written -= metadata_payload_size; + } + + /* Splice data out */ + ret_splice = splice(splice_pipe[0], NULL, outfd, NULL, + ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE); + DBG("Consumer splice pipe to file (out_fd: %d), ret %zd", + outfd, ret_splice); + if (ret_splice < 0) { + ret = errno; + written = -ret; + relayd_hang_up = 1; + goto write_error; + } else if (ret_splice > len) { + /* + * We don't expect this code path to be executed but you never know + * so this is an extra protection agains a buggy splice(). + */ + ret = errno; + written += ret_splice; + PERROR("Wrote more data than requested %zd (len: %lu)", ret_splice, + len); + goto splice_error; + } else { + /* All good, update current len and continue. */ + len -= ret_splice; + } + + /* This call is useless on a socket so better save a syscall. 
*/ + if (!relayd) { + /* This won't block, but will start writeout asynchronously */ + lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice, + SYNC_FILE_RANGE_WRITE); + stream->out_fd_offset += ret_splice; + } + stream->output_written += ret_splice; + written += ret_splice; + } + if (!relayd) { + lttng_consumer_sync_trace_file(stream, orig_offset); + } + goto end; + +write_error: + /* + * This is a special case that the relayd has closed its socket. Let's + * cleanup the relayd object and all associated streams. + */ + if (relayd && relayd_hang_up) { + ERR("Relayd hangup. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx); + lttng_consumer_cleanup_relayd(relayd); + /* Skip splice error so the consumer does not fail */ + goto end; + } + +splice_error: + /* send the appropriate error description to sessiond */ + switch (ret) { + case EINVAL: + lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL); + break; + case ENOMEM: + lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM); + break; + case ESPIPE: + lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE); + break; + } + +end: + if (relayd && stream->metadata_flag) { + pthread_mutex_unlock(&relayd->ctrl_sock_mutex); + } + + rcu_read_unlock(); + return written; +} + +/* + * Sample the snapshot positions for a specific fd + * + * Returns 0 on success, < 0 on error + */ +int lttng_consumer_sample_snapshot_positions(struct lttng_consumer_stream *stream) +{ + switch (the_consumer_data.type) { + case LTTNG_CONSUMER_KERNEL: + return lttng_kconsumer_sample_snapshot_positions(stream); + case LTTNG_CONSUMER32_UST: + case LTTNG_CONSUMER64_UST: + return lttng_ustconsumer_sample_snapshot_positions(stream); + default: + ERR("Unknown consumer_data type"); + abort(); + return -ENOSYS; + } +} +/* + * Take a snapshot for a specific fd + * + * Returns 0 on success, < 0 on error + */ +int lttng_consumer_take_snapshot(struct lttng_consumer_stream *stream) +{ + switch (the_consumer_data.type) { + 
case LTTNG_CONSUMER_KERNEL: + return lttng_kconsumer_take_snapshot(stream); + case LTTNG_CONSUMER32_UST: + case LTTNG_CONSUMER64_UST: + return lttng_ustconsumer_take_snapshot(stream); + default: + ERR("Unknown consumer_data type"); + abort(); + return -ENOSYS; + } +} + +/* + * Get the produced position + * + * Returns 0 on success, < 0 on error + */ +int lttng_consumer_get_produced_snapshot(struct lttng_consumer_stream *stream, + unsigned long *pos) +{ + switch (the_consumer_data.type) { + case LTTNG_CONSUMER_KERNEL: + return lttng_kconsumer_get_produced_snapshot(stream, pos); + case LTTNG_CONSUMER32_UST: + case LTTNG_CONSUMER64_UST: + return lttng_ustconsumer_get_produced_snapshot(stream, pos); + default: + ERR("Unknown consumer_data type"); + abort(); + return -ENOSYS; + } +} + +/* + * Get the consumed position (free-running counter position in bytes). + * + * Returns 0 on success, < 0 on error + */ +int lttng_consumer_get_consumed_snapshot(struct lttng_consumer_stream *stream, + unsigned long *pos) +{ + switch (the_consumer_data.type) { + case LTTNG_CONSUMER_KERNEL: + return lttng_kconsumer_get_consumed_snapshot(stream, pos); + case LTTNG_CONSUMER32_UST: + case LTTNG_CONSUMER64_UST: + return lttng_ustconsumer_get_consumed_snapshot(stream, pos); + default: + ERR("Unknown consumer_data type"); + abort(); + return -ENOSYS; + } +} + +int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx, + int sock, struct pollfd *consumer_sockpoll) +{ + switch (the_consumer_data.type) { + case LTTNG_CONSUMER_KERNEL: + return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll); + case LTTNG_CONSUMER32_UST: + case LTTNG_CONSUMER64_UST: + return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll); + default: + ERR("Unknown consumer_data type"); + abort(); + return -ENOSYS; + } +} + +static +void lttng_consumer_close_all_metadata(void) +{ + switch (the_consumer_data.type) { + case LTTNG_CONSUMER_KERNEL: + /* + * The Kernel consumer has a different metadata scheme so 
we don't + * close anything because the stream will be closed by the session + * daemon. + */ + break; + case LTTNG_CONSUMER32_UST: + case LTTNG_CONSUMER64_UST: + /* + * Close all metadata streams. The metadata hash table is passed and + * this call iterates over it by closing all wakeup fd. This is safe + * because at this point we are sure that the metadata producer is + * either dead or blocked. + */ + lttng_ustconsumer_close_all_metadata(metadata_ht); + break; + default: + ERR("Unknown consumer_data type"); + abort(); + } +} + +/* + * Clean up a metadata stream and free its memory. + */ +void consumer_del_metadata_stream(struct lttng_consumer_stream *stream, + struct lttng_ht *ht) +{ + struct lttng_consumer_channel *channel = NULL; + bool free_channel = false; + + LTTNG_ASSERT(stream); + /* + * This call should NEVER receive regular stream. It must always be + * metadata stream and this is crucial for data structure synchronization. + */ + LTTNG_ASSERT(stream->metadata_flag); + + DBG3("Consumer delete metadata stream %d", stream->wait_fd); + + pthread_mutex_lock(&the_consumer_data.lock); + /* + * Note that this assumes that a stream's channel is never changed and + * that the stream's lock doesn't need to be taken to sample its + * channel. + */ + channel = stream->chan; + pthread_mutex_lock(&channel->lock); + pthread_mutex_lock(&stream->lock); + if (channel->metadata_cache) { + /* Only applicable to userspace consumers. */ + pthread_mutex_lock(&channel->metadata_cache->lock); + } + + /* Remove any reference to that stream. */ + consumer_stream_delete(stream, ht); + + /* Close down everything including the relayd if one. */ + consumer_stream_close(stream); + /* Destroy tracer buffers of the stream. */ + consumer_stream_destroy_buffers(stream); + + /* Atomically decrement channel refcount since other threads can use it. */ + if (!uatomic_sub_return(&channel->refcount, 1) + && !uatomic_read(&channel->nb_init_stream_left)) { + /* Go for channel deletion! 
*/ + free_channel = true; + } + stream->chan = NULL; + + /* + * Nullify the stream reference so it is not used after deletion. The + * channel lock MUST be acquired before being able to check for a NULL + * pointer value. + */ + channel->metadata_stream = NULL; + + if (channel->metadata_cache) { + pthread_mutex_unlock(&channel->metadata_cache->lock); + } + pthread_mutex_unlock(&stream->lock); + pthread_mutex_unlock(&channel->lock); + pthread_mutex_unlock(&the_consumer_data.lock); + + if (free_channel) { + consumer_del_channel(channel); + } + + lttng_trace_chunk_put(stream->trace_chunk); + stream->trace_chunk = NULL; + consumer_stream_free(stream); +} + +/* + * Action done with the metadata stream when adding it to the consumer internal + * data structures to handle it. + */ +void consumer_add_metadata_stream(struct lttng_consumer_stream *stream) +{ + struct lttng_ht *ht = metadata_ht; + struct lttng_ht_iter iter; + struct lttng_ht_node_u64 *node; + + LTTNG_ASSERT(stream); + LTTNG_ASSERT(ht); + + DBG3("Adding metadata stream %" PRIu64 " to hash table", stream->key); + + pthread_mutex_lock(&the_consumer_data.lock); + pthread_mutex_lock(&stream->chan->lock); + pthread_mutex_lock(&stream->chan->timer_lock); + pthread_mutex_lock(&stream->lock); + + /* + * From here, refcounts are updated so be _careful_ when returning an error + * after this point. + */ + + rcu_read_lock(); + + /* + * Lookup the stream just to make sure it does not exist in our internal + * state. This should NEVER happen. + */ + lttng_ht_lookup(ht, &stream->key, &iter); + node = lttng_ht_iter_get_node_u64(&iter); + LTTNG_ASSERT(!node); + + /* + * When nb_init_stream_left reaches 0, we don't need to trigger any action + * in terms of destroying the associated channel, because the action that + * causes the count to become 0 also causes a stream to be added. The + * channel deletion will thus be triggered by the following removal of this + * stream. 
+ */ + if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) { + /* Increment refcount before decrementing nb_init_stream_left */ + cmm_smp_wmb(); + uatomic_dec(&stream->chan->nb_init_stream_left); + } + + lttng_ht_add_unique_u64(ht, &stream->node); + + lttng_ht_add_u64(the_consumer_data.stream_per_chan_id_ht, + &stream->node_channel_id); + + /* + * Add stream to the stream_list_ht of the consumer data. No need to steal + * the key since the HT does not use it and we allow to add redundant keys + * into this table. + */ + lttng_ht_add_u64(the_consumer_data.stream_list_ht, + &stream->node_session_id); + + rcu_read_unlock(); + + pthread_mutex_unlock(&stream->lock); + pthread_mutex_unlock(&stream->chan->lock); + pthread_mutex_unlock(&stream->chan->timer_lock); + pthread_mutex_unlock(&the_consumer_data.lock); +} + +/* + * Delete data stream that are flagged for deletion (endpoint_status). + */ +static void validate_endpoint_status_data_stream(void) +{ + struct lttng_ht_iter iter; + struct lttng_consumer_stream *stream; + + DBG("Consumer delete flagged data stream"); + + rcu_read_lock(); + cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) { + /* Validate delete flag of the stream */ + if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) { + continue; + } + /* Delete it right now */ + consumer_del_stream(stream, data_ht); + } + rcu_read_unlock(); +} + +/* + * Delete metadata stream that are flagged for deletion (endpoint_status). 
+ */ +static void validate_endpoint_status_metadata_stream( + struct lttng_poll_event *pollset) +{ + struct lttng_ht_iter iter; + struct lttng_consumer_stream *stream; + + DBG("Consumer delete flagged metadata stream"); + + LTTNG_ASSERT(pollset); + + rcu_read_lock(); + cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) { + /* Validate delete flag of the stream */ + if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) { + continue; + } + /* + * Remove from pollset so the metadata thread can continue without + * blocking on a deleted stream. + */ + lttng_poll_del(pollset, stream->wait_fd); + + /* Delete it right now */ + consumer_del_metadata_stream(stream, metadata_ht); + } + rcu_read_unlock(); +} + +/* + * Thread polls on metadata file descriptor and write them on disk or on the + * network. + */ +void *consumer_thread_metadata_poll(void *data) +{ + int ret, i, pollfd, err = -1; + uint32_t revents, nb_fd; + struct lttng_consumer_stream *stream = NULL; + struct lttng_ht_iter iter; + struct lttng_ht_node_u64 *node; + struct lttng_poll_event events; + struct lttng_consumer_local_data *ctx = (lttng_consumer_local_data *) data; + ssize_t len; + + rcu_register_thread(); + + health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_METADATA); + + if (testpoint(consumerd_thread_metadata)) { + goto error_testpoint; + } + + health_code_update(); + + DBG("Thread metadata poll started"); + + /* Size is set to 1 for the consumer_metadata pipe */ + ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC); + if (ret < 0) { + ERR("Poll set creation failed"); + goto end_poll; + } + + ret = lttng_poll_add(&events, + lttng_pipe_get_readfd(ctx->consumer_metadata_pipe), LPOLLIN); + if (ret < 0) { + goto end; + } + + /* Main loop */ + DBG("Metadata main loop started"); + + while (1) { +restart: + health_code_update(); + health_poll_entry(); + DBG("Metadata poll wait"); + ret = lttng_poll_wait(&events, -1); + DBG("Metadata poll return from wait with %d fd(s)", + 
LTTNG_POLL_GETNB(&events)); + health_poll_exit(); + DBG("Metadata event caught in thread"); + if (ret < 0) { + if (errno == EINTR) { + ERR("Poll EINTR caught"); + goto restart; + } + if (LTTNG_POLL_GETNB(&events) == 0) { + err = 0; /* All is OK */ + } + goto end; + } + + nb_fd = ret; + + /* From here, the event is a metadata wait fd */ + for (i = 0; i < nb_fd; i++) { + health_code_update(); + + revents = LTTNG_POLL_GETEV(&events, i); + pollfd = LTTNG_POLL_GETFD(&events, i); + + if (pollfd == lttng_pipe_get_readfd(ctx->consumer_metadata_pipe)) { + if (revents & LPOLLIN) { + ssize_t pipe_len; + + pipe_len = lttng_pipe_read(ctx->consumer_metadata_pipe, + &stream, sizeof(stream)); + if (pipe_len < sizeof(stream)) { + if (pipe_len < 0) { + PERROR("read metadata stream"); + } + /* + * Remove the pipe from the poll set and continue the loop + * since their might be data to consume. + */ + lttng_poll_del(&events, + lttng_pipe_get_readfd(ctx->consumer_metadata_pipe)); + lttng_pipe_read_close(ctx->consumer_metadata_pipe); + continue; + } + + /* A NULL stream means that the state has changed. */ + if (stream == NULL) { + /* Check for deleted streams. */ + validate_endpoint_status_metadata_stream(&events); + goto restart; + } + + DBG("Adding metadata stream %d to poll set", + stream->wait_fd); + + /* Add metadata stream to the global poll events list */ + lttng_poll_add(&events, stream->wait_fd, + LPOLLIN | LPOLLPRI | LPOLLHUP); + } else if (revents & (LPOLLERR | LPOLLHUP)) { + DBG("Metadata thread pipe hung up"); + /* + * Remove the pipe from the poll set and continue the loop + * since their might be data to consume. 
+ */ + lttng_poll_del(&events, + lttng_pipe_get_readfd(ctx->consumer_metadata_pipe)); + lttng_pipe_read_close(ctx->consumer_metadata_pipe); + continue; + } else { + ERR("Unexpected poll events %u for sock %d", revents, pollfd); + goto end; + } + + /* Handle other stream */ + continue; + } + + rcu_read_lock(); + { + uint64_t tmp_id = (uint64_t) pollfd; + + lttng_ht_lookup(metadata_ht, &tmp_id, &iter); + } + node = lttng_ht_iter_get_node_u64(&iter); + LTTNG_ASSERT(node); + + stream = caa_container_of(node, struct lttng_consumer_stream, + node); + + if (revents & (LPOLLIN | LPOLLPRI)) { + /* Get the data out of the metadata file descriptor */ + DBG("Metadata available on fd %d", pollfd); + LTTNG_ASSERT(stream->wait_fd == pollfd); + + do { + health_code_update(); + + len = ctx->on_buffer_ready(stream, ctx, false); + /* + * We don't check the return value here since if we get + * a negative len, it means an error occurred thus we + * simply remove it from the poll set and free the + * stream. + */ + } while (len > 0); + + /* It's ok to have an unavailable sub-buffer */ + if (len < 0 && len != -EAGAIN && len != -ENODATA) { + /* Clean up stream from consumer and free it. */ + lttng_poll_del(&events, stream->wait_fd); + consumer_del_metadata_stream(stream, metadata_ht); + } + } else if (revents & (LPOLLERR | LPOLLHUP)) { + DBG("Metadata fd %d is hup|err.", pollfd); + if (!stream->hangup_flush_done && + (the_consumer_data.type == LTTNG_CONSUMER32_UST || + the_consumer_data.type == + LTTNG_CONSUMER64_UST)) { + DBG("Attempting to flush and consume the UST buffers"); + lttng_ustconsumer_on_stream_hangup(stream); + + /* We just flushed the stream now read it. */ + do { + health_code_update(); + + len = ctx->on_buffer_ready(stream, ctx, false); + /* + * We don't check the return value here since if we get + * a negative len, it means an error occurred thus we + * simply remove it from the poll set and free the + * stream. 
+ */ + } while (len > 0); + } + + lttng_poll_del(&events, stream->wait_fd); + /* + * This call update the channel states, closes file descriptors + * and securely free the stream. + */ + consumer_del_metadata_stream(stream, metadata_ht); + } else { + ERR("Unexpected poll events %u for sock %d", revents, pollfd); + rcu_read_unlock(); + goto end; + } + /* Release RCU lock for the stream looked up */ + rcu_read_unlock(); + } + } + + /* All is OK */ + err = 0; +end: + DBG("Metadata poll thread exiting"); + + lttng_poll_clean(&events); +end_poll: +error_testpoint: + if (err) { + health_error(); + ERR("Health error occurred in %s", __func__); + } + health_unregister(health_consumerd); + rcu_unregister_thread(); + return NULL; +} + +/* + * This thread polls the fds in the set to consume the data and write + * it to tracefile if necessary. + */ +void *consumer_thread_data_poll(void *data) +{ + int num_rdy, num_hup, high_prio, ret, i, err = -1; + struct pollfd *pollfd = NULL; + /* local view of the streams */ + struct lttng_consumer_stream **local_stream = NULL, *new_stream = NULL; + /* local view of consumer_data.fds_count */ + int nb_fd = 0; + /* 2 for the consumer_data_pipe and wake up pipe */ + const int nb_pipes_fd = 2; + /* Number of FDs with CONSUMER_ENDPOINT_INACTIVE but still open. 
*/ + int nb_inactive_fd = 0; + struct lttng_consumer_local_data *ctx = (lttng_consumer_local_data *) data; + ssize_t len; + + rcu_register_thread(); + + health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_DATA); + + if (testpoint(consumerd_thread_data)) { + goto error_testpoint; + } + + health_code_update(); + + local_stream = (lttng_consumer_stream **) zmalloc(sizeof(struct lttng_consumer_stream *)); + if (local_stream == NULL) { + PERROR("local_stream malloc"); + goto end; + } + + while (1) { + health_code_update(); + + high_prio = 0; + num_hup = 0; + + /* + * the fds set has been updated, we need to update our + * local array as well + */ + pthread_mutex_lock(&the_consumer_data.lock); + if (the_consumer_data.need_update) { + free(pollfd); + pollfd = NULL; + + free(local_stream); + local_stream = NULL; + + /* Allocate for all fds */ + pollfd = (struct pollfd *) zmalloc((the_consumer_data.stream_count + + nb_pipes_fd) * + sizeof(struct pollfd)); + if (pollfd == NULL) { + PERROR("pollfd malloc"); + pthread_mutex_unlock(&the_consumer_data.lock); + goto end; + } + + local_stream = (lttng_consumer_stream **) zmalloc((the_consumer_data.stream_count + + nb_pipes_fd) * + sizeof(struct lttng_consumer_stream *)); + if (local_stream == NULL) { + PERROR("local_stream malloc"); + pthread_mutex_unlock(&the_consumer_data.lock); + goto end; + } + ret = update_poll_array(ctx, &pollfd, local_stream, + data_ht, &nb_inactive_fd); + if (ret < 0) { + ERR("Error in allocating pollfd or local_outfds"); + lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR); + pthread_mutex_unlock(&the_consumer_data.lock); + goto end; + } + nb_fd = ret; + the_consumer_data.need_update = 0; + } + pthread_mutex_unlock(&the_consumer_data.lock); + + /* No FDs and consumer_quit, consumer_cleanup the thread */ + if (nb_fd == 0 && nb_inactive_fd == 0 && + CMM_LOAD_SHARED(consumer_quit) == 1) { + err = 0; /* All is OK */ + goto end; + } + /* poll on the array of fds */ + restart: + DBG("polling on %d 
fd", nb_fd + nb_pipes_fd); + if (testpoint(consumerd_thread_data_poll)) { + goto end; + } + health_poll_entry(); + num_rdy = poll(pollfd, nb_fd + nb_pipes_fd, -1); + health_poll_exit(); + DBG("poll num_rdy : %d", num_rdy); + if (num_rdy == -1) { + /* + * Restart interrupted system call. + */ + if (errno == EINTR) { + goto restart; + } + PERROR("Poll error"); + lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR); + goto end; + } else if (num_rdy == 0) { + DBG("Polling thread timed out"); + goto end; + } + + if (caa_unlikely(data_consumption_paused)) { + DBG("Data consumption paused, sleeping..."); + sleep(1); + goto restart; + } + + /* + * If the consumer_data_pipe triggered poll go directly to the + * beginning of the loop to update the array. We want to prioritize + * array update over low-priority reads. + */ + if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) { + ssize_t pipe_readlen; + + DBG("consumer_data_pipe wake up"); + pipe_readlen = lttng_pipe_read(ctx->consumer_data_pipe, + &new_stream, sizeof(new_stream)); + if (pipe_readlen < sizeof(new_stream)) { + PERROR("Consumer data pipe"); + /* Continue so we can at least handle the current stream(s). */ + continue; + } + + /* + * If the stream is NULL, just ignore it. It's also possible that + * the sessiond poll thread changed the consumer_quit state and is + * waking us up to test it. + */ + if (new_stream == NULL) { + validate_endpoint_status_data_stream(); + continue; + } + + /* Continue to update the local streams and handle prio ones */ + continue; + } + + /* Handle wakeup pipe. */ + if (pollfd[nb_fd + 1].revents & (POLLIN | POLLPRI)) { + char dummy; + ssize_t pipe_readlen; + + pipe_readlen = lttng_pipe_read(ctx->consumer_wakeup_pipe, &dummy, + sizeof(dummy)); + if (pipe_readlen < 0) { + PERROR("Consumer data wakeup pipe"); + } + /* We've been awakened to handle stream(s). */ + ctx->has_wakeup = 0; + } + + /* Take care of high priority channels first. 
*/ + for (i = 0; i < nb_fd; i++) { + health_code_update(); + + if (local_stream[i] == NULL) { + continue; + } + if (pollfd[i].revents & POLLPRI) { + DBG("Urgent read on fd %d", pollfd[i].fd); + high_prio = 1; + len = ctx->on_buffer_ready(local_stream[i], ctx, false); + /* it's ok to have an unavailable sub-buffer */ + if (len < 0 && len != -EAGAIN && len != -ENODATA) { + /* Clean the stream and free it. */ + consumer_del_stream(local_stream[i], data_ht); + local_stream[i] = NULL; + } else if (len > 0) { + local_stream[i]->data_read = 1; + } + } + } + + /* + * If we read high prio channel in this loop, try again + * for more high prio data. + */ + if (high_prio) { + continue; + } + + /* Take care of low priority channels. */ + for (i = 0; i < nb_fd; i++) { + health_code_update(); + + if (local_stream[i] == NULL) { + continue; + } + if ((pollfd[i].revents & POLLIN) || + local_stream[i]->hangup_flush_done || + local_stream[i]->has_data) { + DBG("Normal read on fd %d", pollfd[i].fd); + len = ctx->on_buffer_ready(local_stream[i], ctx, false); + /* it's ok to have an unavailable sub-buffer */ + if (len < 0 && len != -EAGAIN && len != -ENODATA) { + /* Clean the stream and free it. */ + consumer_del_stream(local_stream[i], data_ht); + local_stream[i] = NULL; + } else if (len > 0) { + local_stream[i]->data_read = 1; + } + } + } + + /* Handle hangup and errors */ + for (i = 0; i < nb_fd; i++) { + health_code_update(); + + if (local_stream[i] == NULL) { + continue; + } + if (!local_stream[i]->hangup_flush_done + && (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL)) + && (the_consumer_data.type == LTTNG_CONSUMER32_UST + || the_consumer_data.type == LTTNG_CONSUMER64_UST)) { + DBG("fd %d is hup|err|nval. Attempting flush and read.", + pollfd[i].fd); + lttng_ustconsumer_on_stream_hangup(local_stream[i]); + /* Attempt read again, for the data we just flushed. 
*/ + local_stream[i]->data_read = 1; + } + /* + * If the poll flag is HUP/ERR/NVAL and we have + * read no data in this pass, we can remove the + * stream from its hash table. + */ + if ((pollfd[i].revents & POLLHUP)) { + DBG("Polling fd %d tells it has hung up.", pollfd[i].fd); + if (!local_stream[i]->data_read) { + consumer_del_stream(local_stream[i], data_ht); + local_stream[i] = NULL; + num_hup++; + } + } else if (pollfd[i].revents & POLLERR) { + ERR("Error returned in polling fd %d.", pollfd[i].fd); + if (!local_stream[i]->data_read) { + consumer_del_stream(local_stream[i], data_ht); + local_stream[i] = NULL; + num_hup++; + } + } else if (pollfd[i].revents & POLLNVAL) { + ERR("Polling fd %d tells fd is not open.", pollfd[i].fd); + if (!local_stream[i]->data_read) { + consumer_del_stream(local_stream[i], data_ht); + local_stream[i] = NULL; + num_hup++; + } + } + if (local_stream[i] != NULL) { + local_stream[i]->data_read = 0; + } + } + } + /* All is OK */ + err = 0; +end: + DBG("polling thread exiting"); + free(pollfd); + free(local_stream); + + /* + * Close the write side of the pipe so epoll_wait() in + * consumer_thread_metadata_poll can catch it. The thread is monitoring the + * read side of the pipe. If we close them both, epoll_wait strangely does + * not return and could create a endless wait period if the pipe is the + * only tracked fd in the poll set. The thread will take care of closing + * the read side. + */ + (void) lttng_pipe_write_close(ctx->consumer_metadata_pipe); + +error_testpoint: + if (err) { + health_error(); + ERR("Health error occurred in %s", __func__); + } + health_unregister(health_consumerd); + + rcu_unregister_thread(); + return NULL; +} + +/* + * Close wake-up end of each stream belonging to the channel. This will + * allow the poll() on the stream read-side to detect when the + * write-side (application) finally closes them. 
+ */ +static +void consumer_close_channel_streams(struct lttng_consumer_channel *channel) +{ + struct lttng_ht *ht; + struct lttng_consumer_stream *stream; + struct lttng_ht_iter iter; + + ht = the_consumer_data.stream_per_chan_id_ht; + + rcu_read_lock(); + cds_lfht_for_each_entry_duplicate(ht->ht, + ht->hash_fct(&channel->key, lttng_ht_seed), + ht->match_fct, &channel->key, + &iter.iter, stream, node_channel_id.node) { + /* + * Protect against teardown with mutex. + */ + pthread_mutex_lock(&stream->lock); + if (cds_lfht_is_node_deleted(&stream->node.node)) { + goto next; + } + switch (the_consumer_data.type) { + case LTTNG_CONSUMER_KERNEL: + break; + case LTTNG_CONSUMER32_UST: + case LTTNG_CONSUMER64_UST: + if (stream->metadata_flag) { + /* Safe and protected by the stream lock. */ + lttng_ustconsumer_close_metadata(stream->chan); + } else { + /* + * Note: a mutex is taken internally within + * liblttng-ust-ctl to protect timer wakeup_fd + * use from concurrent close. + */ + lttng_ustconsumer_close_stream_wakeup(stream); + } + break; + default: + ERR("Unknown consumer_data type"); + abort(); + } + next: + pthread_mutex_unlock(&stream->lock); + } + rcu_read_unlock(); +} + +static void destroy_channel_ht(struct lttng_ht *ht) +{ + struct lttng_ht_iter iter; + struct lttng_consumer_channel *channel; + int ret; + + if (ht == NULL) { + return; + } + + rcu_read_lock(); + cds_lfht_for_each_entry(ht->ht, &iter.iter, channel, wait_fd_node.node) { + ret = lttng_ht_del(ht, &iter); + LTTNG_ASSERT(ret != 0); + } + rcu_read_unlock(); + + lttng_ht_destroy(ht); +} + +/* + * This thread polls the channel fds to detect when they are being + * closed. It closes all related streams if the channel is detected as + * closed. It is currently only used as a shim layer for UST because the + * consumerd needs to keep the per-stream wakeup end of pipes open for + * periodical flush. 
+ */ +void *consumer_thread_channel_poll(void *data) +{ + int ret, i, pollfd, err = -1; + uint32_t revents, nb_fd; + struct lttng_consumer_channel *chan = NULL; + struct lttng_ht_iter iter; + struct lttng_ht_node_u64 *node; + struct lttng_poll_event events; + struct lttng_consumer_local_data *ctx = (lttng_consumer_local_data *) data; + struct lttng_ht *channel_ht; + + rcu_register_thread(); + + health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_CHANNEL); + + if (testpoint(consumerd_thread_channel)) { + goto error_testpoint; + } + + health_code_update(); + + channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64); + if (!channel_ht) { + /* ENOMEM at this point. Better to bail out. */ + goto end_ht; + } + + DBG("Thread channel poll started"); + + /* Size is set to 1 for the consumer_channel pipe */ + ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC); + if (ret < 0) { + ERR("Poll set creation failed"); + goto end_poll; + } + + ret = lttng_poll_add(&events, ctx->consumer_channel_pipe[0], LPOLLIN); + if (ret < 0) { + goto end; + } + + /* Main loop */ + DBG("Channel main loop started"); + + while (1) { +restart: + health_code_update(); + DBG("Channel poll wait"); + health_poll_entry(); + ret = lttng_poll_wait(&events, -1); + DBG("Channel poll return from wait with %d fd(s)", + LTTNG_POLL_GETNB(&events)); + health_poll_exit(); + DBG("Channel event caught in thread"); + if (ret < 0) { + if (errno == EINTR) { + ERR("Poll EINTR caught"); + goto restart; + } + if (LTTNG_POLL_GETNB(&events) == 0) { + err = 0; /* All is OK */ + } + goto end; + } + + nb_fd = ret; + + /* From here, the event is a channel wait fd */ + for (i = 0; i < nb_fd; i++) { + health_code_update(); + + revents = LTTNG_POLL_GETEV(&events, i); + pollfd = LTTNG_POLL_GETFD(&events, i); + + if (pollfd == ctx->consumer_channel_pipe[0]) { + if (revents & LPOLLIN) { + enum consumer_channel_action action; + uint64_t key; + + ret = read_channel_pipe(ctx, &chan, &key, &action); + if (ret <= 0) { + if (ret < 0) { + 
ERR("Error reading channel pipe"); + } + lttng_poll_del(&events, ctx->consumer_channel_pipe[0]); + continue; + } + + switch (action) { + case CONSUMER_CHANNEL_ADD: + DBG("Adding channel %d to poll set", + chan->wait_fd); + + lttng_ht_node_init_u64(&chan->wait_fd_node, + chan->wait_fd); + rcu_read_lock(); + lttng_ht_add_unique_u64(channel_ht, + &chan->wait_fd_node); + rcu_read_unlock(); + /* Add channel to the global poll events list */ + lttng_poll_add(&events, chan->wait_fd, + LPOLLERR | LPOLLHUP); + break; + case CONSUMER_CHANNEL_DEL: + { + /* + * This command should never be called if the channel + * has streams monitored by either the data or metadata + * thread. The consumer only notify this thread with a + * channel del. command if it receives a destroy + * channel command from the session daemon that send it + * if a command prior to the GET_CHANNEL failed. + */ + + rcu_read_lock(); + chan = consumer_find_channel(key); + if (!chan) { + rcu_read_unlock(); + ERR("UST consumer get channel key %" PRIu64 " not found for del channel", key); + break; + } + lttng_poll_del(&events, chan->wait_fd); + iter.iter.node = &chan->wait_fd_node.node; + ret = lttng_ht_del(channel_ht, &iter); + LTTNG_ASSERT(ret == 0); + + switch (the_consumer_data.type) { + case LTTNG_CONSUMER_KERNEL: + break; + case LTTNG_CONSUMER32_UST: + case LTTNG_CONSUMER64_UST: + health_code_update(); + /* Destroy streams that might have been left in the stream list. */ + clean_channel_stream_list(chan); + break; + default: + ERR("Unknown consumer_data type"); + abort(); + } + + /* + * Release our own refcount. Force channel deletion even if + * streams were not initialized. + */ + if (!uatomic_sub_return(&chan->refcount, 1)) { + consumer_del_channel(chan); + } + rcu_read_unlock(); + goto restart; + } + case CONSUMER_CHANNEL_QUIT: + /* + * Remove the pipe from the poll set and continue the loop + * since their might be data to consume. 
+ */ + lttng_poll_del(&events, ctx->consumer_channel_pipe[0]); + continue; + default: + ERR("Unknown action"); + break; + } + } else if (revents & (LPOLLERR | LPOLLHUP)) { + DBG("Channel thread pipe hung up"); + /* + * Remove the pipe from the poll set and continue the loop + * since their might be data to consume. + */ + lttng_poll_del(&events, ctx->consumer_channel_pipe[0]); + continue; + } else { + ERR("Unexpected poll events %u for sock %d", revents, pollfd); + goto end; + } + + /* Handle other stream */ + continue; + } + + rcu_read_lock(); + { + uint64_t tmp_id = (uint64_t) pollfd; + + lttng_ht_lookup(channel_ht, &tmp_id, &iter); + } + node = lttng_ht_iter_get_node_u64(&iter); + LTTNG_ASSERT(node); + + chan = caa_container_of(node, struct lttng_consumer_channel, + wait_fd_node); + + /* Check for error event */ + if (revents & (LPOLLERR | LPOLLHUP)) { + DBG("Channel fd %d is hup|err.", pollfd); + + lttng_poll_del(&events, chan->wait_fd); + ret = lttng_ht_del(channel_ht, &iter); + LTTNG_ASSERT(ret == 0); + + /* + * This will close the wait fd for each stream associated to + * this channel AND monitored by the data/metadata thread thus + * will be clean by the right thread. 
+ */ + consumer_close_channel_streams(chan); + + /* Release our own refcount */ + if (!uatomic_sub_return(&chan->refcount, 1) + && !uatomic_read(&chan->nb_init_stream_left)) { + consumer_del_channel(chan); + } + } else { + ERR("Unexpected poll events %u for sock %d", revents, pollfd); + rcu_read_unlock(); + goto end; + } + + /* Release RCU lock for the channel looked up */ + rcu_read_unlock(); + } + } + + /* All is OK */ + err = 0; +end: + lttng_poll_clean(&events); +end_poll: + destroy_channel_ht(channel_ht); +end_ht: +error_testpoint: + DBG("Channel poll thread exiting"); + if (err) { + health_error(); + ERR("Health error occurred in %s", __func__); + } + health_unregister(health_consumerd); + rcu_unregister_thread(); + return NULL; +} + +static int set_metadata_socket(struct lttng_consumer_local_data *ctx, + struct pollfd *sockpoll, int client_socket) +{ + int ret; + + LTTNG_ASSERT(ctx); + LTTNG_ASSERT(sockpoll); + + ret = lttng_consumer_poll_socket(sockpoll); + if (ret) { + goto error; + } + DBG("Metadata connection on client_socket"); + + /* Blocking call, waiting for transmission */ + ctx->consumer_metadata_socket = lttcomm_accept_unix_sock(client_socket); + if (ctx->consumer_metadata_socket < 0) { + WARN("On accept metadata"); + ret = -1; + goto error; + } + ret = 0; + +error: + return ret; +} + +/* + * This thread listens on the consumerd socket and receives the file + * descriptors from the session daemon. + */ +void *consumer_thread_sessiond_poll(void *data) +{ + int sock = -1, client_socket, ret, err = -1; + /* + * structure to poll for incoming data on communication socket avoids + * making blocking sockets. 
+ */ + struct pollfd consumer_sockpoll[2]; + struct lttng_consumer_local_data *ctx = (lttng_consumer_local_data *) data; + + rcu_register_thread(); + + health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_SESSIOND); + + if (testpoint(consumerd_thread_sessiond)) { + goto error_testpoint; + } + + health_code_update(); + + DBG("Creating command socket %s", ctx->consumer_command_sock_path); + unlink(ctx->consumer_command_sock_path); + client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path); + if (client_socket < 0) { + ERR("Cannot create command socket"); + goto end; + } + + ret = lttcomm_listen_unix_sock(client_socket); + if (ret < 0) { + goto end; + } + + DBG("Sending ready command to lttng-sessiond"); + ret = lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_COMMAND_SOCK_READY); + /* return < 0 on error, but == 0 is not fatal */ + if (ret < 0) { + ERR("Error sending ready command to lttng-sessiond"); + goto end; + } + + /* prepare the FDs to poll : to client socket and the should_quit pipe */ + consumer_sockpoll[0].fd = ctx->consumer_should_quit[0]; + consumer_sockpoll[0].events = POLLIN | POLLPRI; + consumer_sockpoll[1].fd = client_socket; + consumer_sockpoll[1].events = POLLIN | POLLPRI; + + ret = lttng_consumer_poll_socket(consumer_sockpoll); + if (ret) { + if (ret > 0) { + /* should exit */ + err = 0; + } + goto end; + } + DBG("Connection on client_socket"); + + /* Blocking call, waiting for transmission */ + sock = lttcomm_accept_unix_sock(client_socket); + if (sock < 0) { + WARN("On accept"); + goto end; + } + + /* + * Setup metadata socket which is the second socket connection on the + * command unix socket. + */ + ret = set_metadata_socket(ctx, consumer_sockpoll, client_socket); + if (ret) { + if (ret > 0) { + /* should exit */ + err = 0; + } + goto end; + } + + /* This socket is not useful anymore. 
*/ + ret = close(client_socket); + if (ret < 0) { + PERROR("close client_socket"); + } + client_socket = -1; + + /* update the polling structure to poll on the established socket */ + consumer_sockpoll[1].fd = sock; + consumer_sockpoll[1].events = POLLIN | POLLPRI; + + while (1) { + health_code_update(); + + health_poll_entry(); + ret = lttng_consumer_poll_socket(consumer_sockpoll); + health_poll_exit(); + if (ret) { + if (ret > 0) { + /* should exit */ + err = 0; + } + goto end; + } + DBG("Incoming command on sock"); + ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll); + if (ret <= 0) { + /* + * This could simply be a session daemon quitting. Don't output + * ERR() here. + */ + DBG("Communication interrupted on command socket"); + err = 0; + goto end; + } + if (CMM_LOAD_SHARED(consumer_quit)) { + DBG("consumer_thread_receive_fds received quit from signal"); + err = 0; /* All is OK */ + goto end; + } + DBG("Received command on sock"); + } + /* All is OK */ + err = 0; + +end: + DBG("Consumer thread sessiond poll exiting"); + + /* + * Close metadata streams since the producer is the session daemon which + * just died. + * + * NOTE: for now, this only applies to the UST tracer. + */ + lttng_consumer_close_all_metadata(); + + /* + * when all fds have hung up, the polling thread + * can exit cleanly + */ + CMM_STORE_SHARED(consumer_quit, 1); + + /* + * Notify the data poll thread to poll back again and test the + * consumer_quit state that we just set so to quit gracefully. + */ + notify_thread_lttng_pipe(ctx->consumer_data_pipe); + + notify_channel_pipe(ctx, NULL, -1, CONSUMER_CHANNEL_QUIT); + + notify_health_quit_pipe(health_quit_pipe); + + /* Cleaning up possibly open sockets. 
*/ + if (sock >= 0) { + ret = close(sock); + if (ret < 0) { + PERROR("close sock sessiond poll"); + } + } + if (client_socket >= 0) { + ret = close(client_socket); + if (ret < 0) { + PERROR("close client_socket sessiond poll"); + } + } + +error_testpoint: + if (err) { + health_error(); + ERR("Health error occurred in %s", __func__); + } + health_unregister(health_consumerd); + + rcu_unregister_thread(); + return NULL; +} + +static int post_consume(struct lttng_consumer_stream *stream, + const struct stream_subbuffer *subbuffer, + struct lttng_consumer_local_data *ctx) +{ + size_t i; + int ret = 0; + const size_t count = lttng_dynamic_array_get_count( + &stream->read_subbuffer_ops.post_consume_cbs); + + for (i = 0; i < count; i++) { + const post_consume_cb op = *(post_consume_cb *) lttng_dynamic_array_get_element( + &stream->read_subbuffer_ops.post_consume_cbs, + i); + + ret = op(stream, subbuffer, ctx); + if (ret) { + goto end; + } + } +end: + return ret; +} + +ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream, + struct lttng_consumer_local_data *ctx, + bool locked_by_caller) +{ + ssize_t ret, written_bytes = 0; + int rotation_ret; + struct stream_subbuffer subbuffer = {}; + enum get_next_subbuffer_status get_next_status; + + if (!locked_by_caller) { + stream->read_subbuffer_ops.lock(stream); + } + + if (stream->read_subbuffer_ops.on_wake_up) { + ret = stream->read_subbuffer_ops.on_wake_up(stream); + if (ret) { + goto end; + } + } + + /* + * If the stream was flagged to be ready for rotation before we extract + * the next packet, rotate it now. 
+ */ + if (stream->rotate_ready) { + DBG("Rotate stream before consuming data"); + ret = lttng_consumer_rotate_stream(ctx, stream); + if (ret < 0) { + ERR("Stream rotation error before consuming data"); + goto end; + } + } + + get_next_status = stream->read_subbuffer_ops.get_next_subbuffer( + stream, &subbuffer); + switch (get_next_status) { + case GET_NEXT_SUBBUFFER_STATUS_OK: + break; + case GET_NEXT_SUBBUFFER_STATUS_NO_DATA: + /* Not an error. */ + ret = 0; + goto sleep_stream; + case GET_NEXT_SUBBUFFER_STATUS_ERROR: + ret = -1; + goto end; + default: + abort(); + } + + ret = stream->read_subbuffer_ops.pre_consume_subbuffer( + stream, &subbuffer); + if (ret) { + goto error_put_subbuf; + } + + written_bytes = stream->read_subbuffer_ops.consume_subbuffer( + ctx, stream, &subbuffer); + if (written_bytes <= 0) { + ERR("Error consuming subbuffer: (%zd)", written_bytes); + ret = (int) written_bytes; + goto error_put_subbuf; + } + + ret = stream->read_subbuffer_ops.put_next_subbuffer(stream, &subbuffer); + if (ret) { + goto end; + } + + ret = post_consume(stream, &subbuffer, ctx); + if (ret) { + goto end; + } + + /* + * After extracting the packet, we check if the stream is now ready to + * be rotated and perform the action immediately. + * + * Don't overwrite `ret` as callers expect the number of bytes + * consumed to be returned on success. 
+ */ + rotation_ret = lttng_consumer_stream_is_rotate_ready(stream); + if (rotation_ret == 1) { + rotation_ret = lttng_consumer_rotate_stream(ctx, stream); + if (rotation_ret < 0) { + ret = rotation_ret; + ERR("Stream rotation error after consuming data"); + goto end; + } + + } else if (rotation_ret < 0) { + ret = rotation_ret; + ERR("Failed to check if stream was ready to rotate after consuming data"); + goto end; + } + +sleep_stream: + if (stream->read_subbuffer_ops.on_sleep) { + stream->read_subbuffer_ops.on_sleep(stream, ctx); + } + + ret = written_bytes; +end: + if (!locked_by_caller) { + stream->read_subbuffer_ops.unlock(stream); + } + + return ret; +error_put_subbuf: + (void) stream->read_subbuffer_ops.put_next_subbuffer(stream, &subbuffer); + goto end; +} + +int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream) +{ + switch (the_consumer_data.type) { + case LTTNG_CONSUMER_KERNEL: + return lttng_kconsumer_on_recv_stream(stream); + case LTTNG_CONSUMER32_UST: + case LTTNG_CONSUMER64_UST: + return lttng_ustconsumer_on_recv_stream(stream); + default: + ERR("Unknown consumer_data type"); + abort(); + return -ENOSYS; + } +} + +/* + * Allocate and set consumer data hash tables. 
+ */ +int lttng_consumer_init(void) +{ + the_consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64); + if (!the_consumer_data.channel_ht) { + goto error; + } + + the_consumer_data.channels_by_session_id_ht = + lttng_ht_new(0, LTTNG_HT_TYPE_U64); + if (!the_consumer_data.channels_by_session_id_ht) { + goto error; + } + + the_consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64); + if (!the_consumer_data.relayd_ht) { + goto error; + } + + the_consumer_data.stream_list_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64); + if (!the_consumer_data.stream_list_ht) { + goto error; + } + + the_consumer_data.stream_per_chan_id_ht = + lttng_ht_new(0, LTTNG_HT_TYPE_U64); + if (!the_consumer_data.stream_per_chan_id_ht) { + goto error; + } + + data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64); + if (!data_ht) { + goto error; + } + + metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64); + if (!metadata_ht) { + goto error; + } + + the_consumer_data.chunk_registry = lttng_trace_chunk_registry_create(); + if (!the_consumer_data.chunk_registry) { + goto error; + } + + return 0; + +error: + return -1; +} + +/* + * Process the ADD_RELAYD command receive by a consumer. + * + * This will create a relayd socket pair and add it to the relayd hash table. + * The caller MUST acquire a RCU read side lock before calling it. + */ + void consumer_add_relayd_socket(uint64_t net_seq_idx, int sock_type, + struct lttng_consumer_local_data *ctx, int sock, + struct pollfd *consumer_sockpoll, + struct lttcomm_relayd_sock *relayd_sock, uint64_t sessiond_id, + uint64_t relayd_session_id) +{ + int fd = -1, ret = -1, relayd_created = 0; + enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS; + struct consumer_relayd_sock_pair *relayd = NULL; + + LTTNG_ASSERT(ctx); + LTTNG_ASSERT(relayd_sock); + + DBG("Consumer adding relayd socket (idx: %" PRIu64 ")", net_seq_idx); + + /* Get relayd reference if exists. 
 */
+	relayd = consumer_find_relayd(net_seq_idx);
+	if (relayd == NULL) {
+		LTTNG_ASSERT(sock_type == LTTNG_STREAM_CONTROL);
+		/* Not found. Allocate one. */
+		relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
+		if (relayd == NULL) {
+			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
+			goto error;
+		} else {
+			relayd->sessiond_session_id = sessiond_id;
+			relayd_created = 1;
+		}
+
+		/*
+		 * This code path MUST continue to the consumer send status message so
+		 * we can notify the session daemon and continue our work without
+		 * killing everything.
+		 */
+	} else {
+		/*
+		 * relayd key should never be found for control socket.
+		 */
+		LTTNG_ASSERT(sock_type != LTTNG_STREAM_CONTROL);
+	}
+
+	/* First send a status message before receiving the fds. */
+	ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
+	if (ret < 0) {
+		/* Somehow, the session daemon is not responding anymore. */
+		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
+		goto error_nosignal;
+	}
+
+	/* Poll on consumer socket. */
+	ret = lttng_consumer_poll_socket(consumer_sockpoll);
+	if (ret) {
+		/* Needing to exit in the middle of a command: error. */
+		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
+		goto error_nosignal;
+	}
+
+	/* Get relayd socket from session daemon */
+	ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
+	if (ret != sizeof(fd)) {
+		fd = -1;	/* Just in case it gets set with an invalid value. */
+
+		/*
+		 * Failing to receive FDs might indicate a major problem such as
+		 * reaching a fd limit during the receive where the kernel returns a
+		 * MSG_CTRUNC and fails to cleanup the fd in the queue. Any case, we
+		 * don't take any chances and stop everything.
+		 *
+		 * XXX: Feature request #558 will fix that and avoid this possible
+		 * issue when reaching the fd limit.
+ */ + lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD); + ret_code = LTTCOMM_CONSUMERD_ERROR_RECV_FD; + goto error; + } + + /* Copy socket information and received FD */ + switch (sock_type) { + case LTTNG_STREAM_CONTROL: + /* Copy received lttcomm socket */ + lttcomm_copy_sock(&relayd->control_sock.sock, &relayd_sock->sock); + ret = lttcomm_create_sock(&relayd->control_sock.sock); + /* Handle create_sock error. */ + if (ret < 0) { + ret_code = LTTCOMM_CONSUMERD_ENOMEM; + goto error; + } + /* + * Close the socket created internally by + * lttcomm_create_sock, so we can replace it by the one + * received from sessiond. + */ + if (close(relayd->control_sock.sock.fd)) { + PERROR("close"); + } + + /* Assign new file descriptor */ + relayd->control_sock.sock.fd = fd; + /* Assign version values. */ + relayd->control_sock.major = relayd_sock->major; + relayd->control_sock.minor = relayd_sock->minor; + + relayd->relayd_session_id = relayd_session_id; + + break; + case LTTNG_STREAM_DATA: + /* Copy received lttcomm socket */ + lttcomm_copy_sock(&relayd->data_sock.sock, &relayd_sock->sock); + ret = lttcomm_create_sock(&relayd->data_sock.sock); + /* Handle create_sock error. */ + if (ret < 0) { + ret_code = LTTCOMM_CONSUMERD_ENOMEM; + goto error; + } + /* + * Close the socket created internally by + * lttcomm_create_sock, so we can replace it by the one + * received from sessiond. + */ + if (close(relayd->data_sock.sock.fd)) { + PERROR("close"); + } + + /* Assign new file descriptor */ + relayd->data_sock.sock.fd = fd; + /* Assign version values. */ + relayd->data_sock.major = relayd_sock->major; + relayd->data_sock.minor = relayd_sock->minor; + break; + default: + ERR("Unknown relayd socket type (%d)", sock_type); + ret_code = LTTCOMM_CONSUMERD_FATAL; + goto error; + } + + DBG("Consumer %s socket created successfully with net idx %" PRIu64 " (fd: %d)", + sock_type == LTTNG_STREAM_CONTROL ? 
"control" : "data",
+			relayd->net_seq_idx, fd);
+	/*
+	 * We gave the ownership of the fd to the relayd structure. Set the
+	 * fd to -1 so we don't call close() on it in the error path below.
+	 */
+	fd = -1;
+
+	/* We successfully added the socket. Send status back. */
+	ret = consumer_send_status_msg(sock, ret_code);
+	if (ret < 0) {
+		/* Somehow, the session daemon is not responding anymore. */
+		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
+		goto error_nosignal;
+	}
+
+	/*
+	 * Add relayd socket pair to consumer data hashtable. If object already
+	 * exists or on error, the function gracefully returns.
+	 */
+	relayd->ctx = ctx;
+	add_relayd(relayd);
+
+	/* All good! */
+	return;
+
+error:
+	if (consumer_send_status_msg(sock, ret_code) < 0) {
+		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
+	}
+
+error_nosignal:
+	/* Close received socket if valid. */
+	if (fd >= 0) {
+		if (close(fd)) {
+			PERROR("close received socket");
+		}
+	}
+
+	if (relayd_created) {
+		free(relayd);
+	}
+}
+
+/*
+ * Search for a relayd associated to the session id and return the reference.
+ *
+ * An RCU read side lock MUST be acquired before calling this function and locked
+ * until the relayd object is no longer necessary.
+ */
+static struct consumer_relayd_sock_pair *find_relayd_by_session_id(uint64_t id)
+{
+	struct lttng_ht_iter iter;
+	struct consumer_relayd_sock_pair *relayd = NULL;
+
+	/* Iterate over all relayd since they are indexed by net_seq_idx. */
+	cds_lfht_for_each_entry(the_consumer_data.relayd_ht->ht, &iter.iter,
+			relayd, node.node) {
+		/*
+		 * Check by sessiond id which is unique here where the relayd session
+		 * id might not be when having multiple relayd.
+		 */
+		if (relayd->sessiond_session_id == id) {
+			/* Found the relayd. There can be only one per id. */
+			goto found;
+		}
+	}
+
+	return NULL;
+
+found:
+	return relayd;
+}
+
+/*
+ * Check if for a given session id there is still data needed to be extracted
+ * from the buffers.
+ * + * Return 1 if data is pending or else 0 meaning ready to be read. + */ +int consumer_data_pending(uint64_t id) +{ + int ret; + struct lttng_ht_iter iter; + struct lttng_ht *ht; + struct lttng_consumer_stream *stream; + struct consumer_relayd_sock_pair *relayd = NULL; + int (*data_pending)(struct lttng_consumer_stream *); + + DBG("Consumer data pending command on session id %" PRIu64, id); + + rcu_read_lock(); + pthread_mutex_lock(&the_consumer_data.lock); + + switch (the_consumer_data.type) { + case LTTNG_CONSUMER_KERNEL: + data_pending = lttng_kconsumer_data_pending; + break; + case LTTNG_CONSUMER32_UST: + case LTTNG_CONSUMER64_UST: + data_pending = lttng_ustconsumer_data_pending; + break; + default: + ERR("Unknown consumer data type"); + abort(); + } + + /* Ease our life a bit */ + ht = the_consumer_data.stream_list_ht; + + cds_lfht_for_each_entry_duplicate(ht->ht, + ht->hash_fct(&id, lttng_ht_seed), + ht->match_fct, &id, + &iter.iter, stream, node_session_id.node) { + pthread_mutex_lock(&stream->lock); + + /* + * A removed node from the hash table indicates that the stream has + * been deleted thus having a guarantee that the buffers are closed + * on the consumer side. However, data can still be transmitted + * over the network so don't skip the relayd check. + */ + ret = cds_lfht_is_node_deleted(&stream->node.node); + if (!ret) { + /* Check the stream if there is data in the buffers. */ + ret = data_pending(stream); + if (ret == 1) { + pthread_mutex_unlock(&stream->lock); + goto data_pending; + } + } + + pthread_mutex_unlock(&stream->lock); + } + + relayd = find_relayd_by_session_id(id); + if (relayd) { + unsigned int is_data_inflight = 0; + + /* Send init command for data pending. */ + pthread_mutex_lock(&relayd->ctrl_sock_mutex); + ret = relayd_begin_data_pending(&relayd->control_sock, + relayd->relayd_session_id); + if (ret < 0) { + pthread_mutex_unlock(&relayd->ctrl_sock_mutex); + /* Communication error thus the relayd so no data pending. 
*/ + goto data_not_pending; + } + + cds_lfht_for_each_entry_duplicate(ht->ht, + ht->hash_fct(&id, lttng_ht_seed), + ht->match_fct, &id, + &iter.iter, stream, node_session_id.node) { + if (stream->metadata_flag) { + ret = relayd_quiescent_control(&relayd->control_sock, + stream->relayd_stream_id); + } else { + ret = relayd_data_pending(&relayd->control_sock, + stream->relayd_stream_id, + stream->next_net_seq_num - 1); + } + + if (ret == 1) { + pthread_mutex_unlock(&relayd->ctrl_sock_mutex); + goto data_pending; + } else if (ret < 0) { + ERR("Relayd data pending failed. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx); + lttng_consumer_cleanup_relayd(relayd); + pthread_mutex_unlock(&relayd->ctrl_sock_mutex); + goto data_not_pending; + } + } + + /* Send end command for data pending. */ + ret = relayd_end_data_pending(&relayd->control_sock, + relayd->relayd_session_id, &is_data_inflight); + pthread_mutex_unlock(&relayd->ctrl_sock_mutex); + if (ret < 0) { + ERR("Relayd end data pending failed. Cleaning up relayd %" PRIu64".", relayd->net_seq_idx); + lttng_consumer_cleanup_relayd(relayd); + goto data_not_pending; + } + if (is_data_inflight) { + goto data_pending; + } + } + + /* + * Finding _no_ node in the hash table and no inflight data means that the + * stream(s) have been removed thus data is guaranteed to be available for + * analysis from the trace files. + */ + +data_not_pending: + /* Data is available to be read by a viewer. */ + pthread_mutex_unlock(&the_consumer_data.lock); + rcu_read_unlock(); + return 0; + +data_pending: + /* Data is still being extracted from buffers. */ + pthread_mutex_unlock(&the_consumer_data.lock); + rcu_read_unlock(); + return 1; +} + +/* + * Send a ret code status message to the sessiond daemon. + * + * Return the sendmsg() return value. 
+ */ +int consumer_send_status_msg(int sock, int ret_code) +{ + struct lttcomm_consumer_status_msg msg; + + memset(&msg, 0, sizeof(msg)); + msg.ret_code = (lttcomm_return_code) ret_code; + + return lttcomm_send_unix_sock(sock, &msg, sizeof(msg)); +} + +/* + * Send a channel status message to the sessiond daemon. + * + * Return the sendmsg() return value. + */ +int consumer_send_status_channel(int sock, + struct lttng_consumer_channel *channel) +{ + struct lttcomm_consumer_status_channel msg; + + LTTNG_ASSERT(sock >= 0); + + memset(&msg, 0, sizeof(msg)); + if (!channel) { + msg.ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL; + } else { + msg.ret_code = LTTCOMM_CONSUMERD_SUCCESS; + msg.key = channel->key; + msg.stream_count = channel->streams.count; + } + + return lttcomm_send_unix_sock(sock, &msg, sizeof(msg)); +} + +unsigned long consumer_get_consume_start_pos(unsigned long consumed_pos, + unsigned long produced_pos, uint64_t nb_packets_per_stream, + uint64_t max_sb_size) +{ + unsigned long start_pos; + + if (!nb_packets_per_stream) { + return consumed_pos; /* Grab everything */ + } + start_pos = produced_pos - lttng_offset_align_floor(produced_pos, max_sb_size); + start_pos -= max_sb_size * nb_packets_per_stream; + if ((long) (start_pos - consumed_pos) < 0) { + return consumed_pos; /* Grab everything */ + } + return start_pos; +} + +/* Stream lock must be held by the caller. 
*/ +static int sample_stream_positions(struct lttng_consumer_stream *stream, + unsigned long *produced, unsigned long *consumed) +{ + int ret; + + ASSERT_LOCKED(stream->lock); + + ret = lttng_consumer_sample_snapshot_positions(stream); + if (ret < 0) { + ERR("Failed to sample snapshot positions"); + goto end; + } + + ret = lttng_consumer_get_produced_snapshot(stream, produced); + if (ret < 0) { + ERR("Failed to sample produced position"); + goto end; + } + + ret = lttng_consumer_get_consumed_snapshot(stream, consumed); + if (ret < 0) { + ERR("Failed to sample consumed position"); + goto end; + } + +end: + return ret; +} + +/* + * Sample the rotate position for all the streams of a channel. If a stream + * is already at the rotate position (produced == consumed), we flag it as + * ready for rotation. The rotation of ready streams occurs after we have + * replied to the session daemon that we have finished sampling the positions. + * Must be called with RCU read-side lock held to ensure existence of channel. 
+ * + * Returns 0 on success, < 0 on error + */ +int lttng_consumer_rotate_channel(struct lttng_consumer_channel *channel, + uint64_t key, uint64_t relayd_id, uint32_t metadata, + struct lttng_consumer_local_data *ctx) +{ + int ret; + struct lttng_consumer_stream *stream; + struct lttng_ht_iter iter; + struct lttng_ht *ht = the_consumer_data.stream_per_chan_id_ht; + struct lttng_dynamic_array stream_rotation_positions; + uint64_t next_chunk_id, stream_count = 0; + enum lttng_trace_chunk_status chunk_status; + const bool is_local_trace = relayd_id == -1ULL; + struct consumer_relayd_sock_pair *relayd = NULL; + bool rotating_to_new_chunk = true; + /* Array of `struct lttng_consumer_stream *` */ + struct lttng_dynamic_pointer_array streams_packet_to_open; + size_t stream_idx; + + DBG("Consumer sample rotate position for channel %" PRIu64, key); + + lttng_dynamic_array_init(&stream_rotation_positions, + sizeof(struct relayd_stream_rotation_position), NULL); + lttng_dynamic_pointer_array_init(&streams_packet_to_open, NULL); + + rcu_read_lock(); + + pthread_mutex_lock(&channel->lock); + LTTNG_ASSERT(channel->trace_chunk); + chunk_status = lttng_trace_chunk_get_id(channel->trace_chunk, + &next_chunk_id); + if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { + ret = -1; + goto end_unlock_channel; + } + + cds_lfht_for_each_entry_duplicate(ht->ht, + ht->hash_fct(&channel->key, lttng_ht_seed), + ht->match_fct, &channel->key, &iter.iter, + stream, node_channel_id.node) { + unsigned long produced_pos = 0, consumed_pos = 0; + + health_code_update(); + + /* + * Lock stream because we are about to change its state. + */ + pthread_mutex_lock(&stream->lock); + + if (stream->trace_chunk == stream->chan->trace_chunk) { + rotating_to_new_chunk = false; + } + + /* + * Do not flush a packet when rotating from a NULL trace + * chunk. The stream has no means to output data, and the prior + * rotation which rotated to NULL performed that side-effect + * already. 
No new data can be produced when a stream has no + * associated trace chunk (e.g. a stop followed by a rotate). + */ + if (stream->trace_chunk) { + bool flush_active; + + if (stream->metadata_flag) { + /* + * Don't produce an empty metadata packet, + * simply close the current one. + * + * Metadata is regenerated on every trace chunk + * switch; there is no concern that no data was + * produced. + */ + flush_active = true; + } else { + /* + * Only flush an empty packet if the "packet + * open" could not be performed on transition + * to a new trace chunk and no packets were + * consumed within the chunk's lifetime. + */ + if (stream->opened_packet_in_current_trace_chunk) { + flush_active = true; + } else { + /* + * Stream could have been full at the + * time of rotation, but then have had + * no activity at all. + * + * It is important to flush a packet + * to prevent 0-length files from being + * produced as most viewers choke on + * them. + * + * Unfortunately viewers will not be + * able to know that tracing was active + * for this stream during this trace + * chunk's lifetime. + */ + ret = sample_stream_positions(stream, &produced_pos, &consumed_pos); + if (ret) { + goto end_unlock_stream; + } + + /* + * Don't flush an empty packet if data + * was produced; it will be consumed + * before the rotation completes. + */ + flush_active = produced_pos != consumed_pos; + if (!flush_active) { + const char *trace_chunk_name; + uint64_t trace_chunk_id; + + chunk_status = lttng_trace_chunk_get_name( + stream->trace_chunk, + &trace_chunk_name, + NULL); + if (chunk_status == LTTNG_TRACE_CHUNK_STATUS_NONE) { + trace_chunk_name = "none"; + } + + /* + * Consumer trace chunks are + * never anonymous. + */ + chunk_status = lttng_trace_chunk_get_id( + stream->trace_chunk, + &trace_chunk_id); + LTTNG_ASSERT(chunk_status == + LTTNG_TRACE_CHUNK_STATUS_OK); + + DBG("Unable to open packet for stream during trace chunk's lifetime. 
" + "Flushing an empty packet to prevent an empty file from being created: " + "stream id = %" PRIu64 ", trace chunk name = `%s`, trace chunk id = %" PRIu64, + stream->key, trace_chunk_name, trace_chunk_id); + } + } + } + + /* + * Close the current packet before sampling the + * ring buffer positions. + */ + ret = consumer_stream_flush_buffer(stream, flush_active); + if (ret < 0) { + ERR("Failed to flush stream %" PRIu64 " during channel rotation", + stream->key); + goto end_unlock_stream; + } + } + + ret = lttng_consumer_take_snapshot(stream); + if (ret < 0 && ret != -ENODATA && ret != -EAGAIN) { + ERR("Failed to sample snapshot position during channel rotation"); + goto end_unlock_stream; + } + if (!ret) { + ret = lttng_consumer_get_produced_snapshot(stream, + &produced_pos); + if (ret < 0) { + ERR("Failed to sample produced position during channel rotation"); + goto end_unlock_stream; + } + + ret = lttng_consumer_get_consumed_snapshot(stream, + &consumed_pos); + if (ret < 0) { + ERR("Failed to sample consumed position during channel rotation"); + goto end_unlock_stream; + } + } + /* + * Align produced position on the start-of-packet boundary of the first + * packet going into the next trace chunk. + */ + produced_pos = lttng_align_floor(produced_pos, stream->max_sb_size); + if (consumed_pos == produced_pos) { + DBG("Set rotate ready for stream %" PRIu64 " produced = %lu consumed = %lu", + stream->key, produced_pos, consumed_pos); + stream->rotate_ready = true; + } else { + DBG("Different consumed and produced positions " + "for stream %" PRIu64 " produced = %lu consumed = %lu", + stream->key, produced_pos, consumed_pos); + } + /* + * The rotation position is based on the packet_seq_num of the + * packet following the last packet that was consumed for this + * stream, incremented by the offset between produced and + * consumed positions. This rotation position is a lower bound + * (inclusive) at which the next trace chunk starts. 
Since it + * is a lower bound, it is OK if the packet_seq_num does not + * correspond exactly to the same packet identified by the + * consumed_pos, which can happen in overwrite mode. + */ + if (stream->sequence_number_unavailable) { + /* + * Rotation should never be performed on a session which + * interacts with a pre-2.8 lttng-modules, which does + * not implement packet sequence number. + */ + ERR("Failure to rotate stream %" PRIu64 ": sequence number unavailable", + stream->key); + ret = -1; + goto end_unlock_stream; + } + stream->rotate_position = stream->last_sequence_number + 1 + + ((produced_pos - consumed_pos) / stream->max_sb_size); + DBG("Set rotation position for stream %" PRIu64 " at position %" PRIu64, + stream->key, stream->rotate_position); + + if (!is_local_trace) { + /* + * The relay daemon control protocol expects a rotation + * position as "the sequence number of the first packet + * _after_ the current trace chunk". + */ + const struct relayd_stream_rotation_position position = { + .stream_id = stream->relayd_stream_id, + .rotate_at_seq_num = stream->rotate_position, + }; + + ret = lttng_dynamic_array_add_element( + &stream_rotation_positions, + &position); + if (ret) { + ERR("Failed to allocate stream rotation position"); + goto end_unlock_stream; + } + stream_count++; + } + + stream->opened_packet_in_current_trace_chunk = false; + + if (rotating_to_new_chunk && !stream->metadata_flag) { + /* + * Attempt to flush an empty packet as close to the + * rotation point as possible. In the event where a + * stream remains inactive after the rotation point, + * this ensures that the new trace chunk has a + * beginning timestamp set at the begining of the + * trace chunk instead of only creating an empty + * packet when the trace chunk is stopped. + * + * This indicates to the viewers that the stream + * was being recorded, but more importantly it + * allows viewers to determine a useable trace + * intersection. 
+ * + * This presents a problem in the case where the + * ring-buffer is completely full. + * + * Consider the following scenario: + * - The consumption of data is slow (slow network, + * for instance), + * - The ring buffer is full, + * - A rotation is initiated, + * - The flush below does nothing (no space left to + * open a new packet), + * - The other streams rotate very soon, and new + * data is produced in the new chunk, + * - This stream completes its rotation long after the + * rotation was initiated + * - The session is stopped before any event can be + * produced in this stream's buffers. + * + * The resulting trace chunk will have a single packet + * temporaly at the end of the trace chunk for this + * stream making the stream intersection more narrow + * than it should be. + * + * To work-around this, an empty flush is performed + * after the first consumption of a packet during a + * rotation if open_packet fails. The idea is that + * consuming a packet frees enough space to switch + * packets in this scenario and allows the tracer to + * "stamp" the beginning of the new trace chunk at the + * earliest possible point. + * + * The packet open is performed after the channel + * rotation to ensure that no attempt to open a packet + * is performed in a stream that has no active trace + * chunk. + */ + ret = lttng_dynamic_pointer_array_add_pointer( + &streams_packet_to_open, stream); + if (ret) { + PERROR("Failed to add a stream pointer to array of streams in which to open a packet"); + ret = -1; + goto end_unlock_stream; + } + } + + pthread_mutex_unlock(&stream->lock); + } + stream = NULL; + + if (!is_local_trace) { + relayd = consumer_find_relayd(relayd_id); + if (!relayd) { + ERR("Failed to find relayd %" PRIu64, relayd_id); + ret = -1; + goto end_unlock_channel; + } + + pthread_mutex_lock(&relayd->ctrl_sock_mutex); + ret = relayd_rotate_streams(&relayd->control_sock, stream_count, + rotating_to_new_chunk ? 
&next_chunk_id : NULL, + (const struct relayd_stream_rotation_position *) + stream_rotation_positions.buffer + .data); + pthread_mutex_unlock(&relayd->ctrl_sock_mutex); + if (ret < 0) { + ERR("Relayd rotate stream failed. Cleaning up relayd %" PRIu64, + relayd->net_seq_idx); + lttng_consumer_cleanup_relayd(relayd); + goto end_unlock_channel; + } + } + + for (stream_idx = 0; + stream_idx < lttng_dynamic_pointer_array_get_count( + &streams_packet_to_open); + stream_idx++) { + enum consumer_stream_open_packet_status status; + + stream = (lttng_consumer_stream *) lttng_dynamic_pointer_array_get_pointer( + &streams_packet_to_open, stream_idx); + + pthread_mutex_lock(&stream->lock); + status = consumer_stream_open_packet(stream); + pthread_mutex_unlock(&stream->lock); + switch (status) { + case CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED: + DBG("Opened a packet after a rotation: stream id = %" PRIu64 + ", channel name = %s, session id = %" PRIu64, + stream->key, stream->chan->name, + stream->chan->session_id); + break; + case CONSUMER_STREAM_OPEN_PACKET_STATUS_NO_SPACE: + /* + * Can't open a packet as there is no space left + * in the buffer. A new packet will be opened + * once one has been consumed. + */ + DBG("No space left to open a packet after a rotation: stream id = %" PRIu64 + ", channel name = %s, session id = %" PRIu64, + stream->key, stream->chan->name, + stream->chan->session_id); + break; + case CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR: + /* Logged by callee. 
*/ + ret = -1; + goto end_unlock_channel; + default: + abort(); + } + } + + pthread_mutex_unlock(&channel->lock); + ret = 0; + goto end; + +end_unlock_stream: + pthread_mutex_unlock(&stream->lock); +end_unlock_channel: + pthread_mutex_unlock(&channel->lock); +end: + rcu_read_unlock(); + lttng_dynamic_array_reset(&stream_rotation_positions); + lttng_dynamic_pointer_array_reset(&streams_packet_to_open); + return ret; +} + +static +int consumer_clear_buffer(struct lttng_consumer_stream *stream) +{ + int ret = 0; + unsigned long consumed_pos_before, consumed_pos_after; + + ret = lttng_consumer_sample_snapshot_positions(stream); + if (ret < 0) { + ERR("Taking snapshot positions"); + goto end; + } + + ret = lttng_consumer_get_consumed_snapshot(stream, &consumed_pos_before); + if (ret < 0) { + ERR("Consumed snapshot position"); + goto end; + } + + switch (the_consumer_data.type) { + case LTTNG_CONSUMER_KERNEL: + ret = kernctl_buffer_clear(stream->wait_fd); + if (ret < 0) { + ERR("Failed to clear kernel stream (ret = %d)", ret); + goto end; + } + break; + case LTTNG_CONSUMER32_UST: + case LTTNG_CONSUMER64_UST: + ret = lttng_ustconsumer_clear_buffer(stream); + if (ret < 0) { + ERR("Failed to clear ust stream (ret = %d)", ret); + goto end; + } + break; + default: + ERR("Unknown consumer_data type"); + abort(); + } + + ret = lttng_consumer_sample_snapshot_positions(stream); + if (ret < 0) { + ERR("Taking snapshot positions"); + goto end; + } + ret = lttng_consumer_get_consumed_snapshot(stream, &consumed_pos_after); + if (ret < 0) { + ERR("Consumed snapshot position"); + goto end; + } + DBG("clear: before: %lu after: %lu", consumed_pos_before, consumed_pos_after); +end: + return ret; +} + +static +int consumer_clear_stream(struct lttng_consumer_stream *stream) +{ + int ret; + + ret = consumer_stream_flush_buffer(stream, 1); + if (ret < 0) { + ERR("Failed to flush stream %" PRIu64 " during channel clear", + stream->key); + ret = LTTCOMM_CONSUMERD_FATAL; + goto error; + } + + 
ret = consumer_clear_buffer(stream); + if (ret < 0) { + ERR("Failed to clear stream %" PRIu64 " during channel clear", + stream->key); + ret = LTTCOMM_CONSUMERD_FATAL; + goto error; + } + + ret = LTTCOMM_CONSUMERD_SUCCESS; +error: + return ret; +} + +static +int consumer_clear_unmonitored_channel(struct lttng_consumer_channel *channel) +{ + int ret; + struct lttng_consumer_stream *stream; + + rcu_read_lock(); + pthread_mutex_lock(&channel->lock); + cds_list_for_each_entry(stream, &channel->streams.head, send_node) { + health_code_update(); + pthread_mutex_lock(&stream->lock); + ret = consumer_clear_stream(stream); + if (ret) { + goto error_unlock; + } + pthread_mutex_unlock(&stream->lock); + } + pthread_mutex_unlock(&channel->lock); + rcu_read_unlock(); + return 0; + +error_unlock: + pthread_mutex_unlock(&stream->lock); + pthread_mutex_unlock(&channel->lock); + rcu_read_unlock(); + return ret; +} + +/* + * Check if a stream is ready to be rotated after extracting it. + * + * Return 1 if it is ready for rotation, 0 if it is not, a negative value on + * error. Stream lock must be held. + */ +int lttng_consumer_stream_is_rotate_ready(struct lttng_consumer_stream *stream) +{ + DBG("Check is rotate ready for stream %" PRIu64 + " ready %u rotate_position %" PRIu64 + " last_sequence_number %" PRIu64, + stream->key, stream->rotate_ready, + stream->rotate_position, stream->last_sequence_number); + if (stream->rotate_ready) { + return 1; + } + + /* + * If packet seq num is unavailable, it means we are interacting + * with a pre-2.8 lttng-modules which does not implement the + * sequence number. Rotation should never be used by sessiond in this + * scenario. 
+ */ + if (stream->sequence_number_unavailable) { + ERR("Internal error: rotation used on stream %" PRIu64 + " with unavailable sequence number", + stream->key); + return -1; + } + + if (stream->rotate_position == -1ULL || + stream->last_sequence_number == -1ULL) { + return 0; + } + + /* + * Rotate position not reached yet. The stream rotate position is + * the position of the next packet belonging to the next trace chunk, + * but consumerd considers rotation ready when reaching the last + * packet of the current chunk, hence the "rotate_position - 1". + */ + + DBG("Check is rotate ready for stream %" PRIu64 + " last_sequence_number %" PRIu64 + " rotate_position %" PRIu64, + stream->key, stream->last_sequence_number, + stream->rotate_position); + if (stream->last_sequence_number >= stream->rotate_position - 1) { + return 1; + } + + return 0; +} + +/* + * Reset the state for a stream after a rotation occurred. + */ +void lttng_consumer_reset_stream_rotate_state(struct lttng_consumer_stream *stream) +{ + DBG("lttng_consumer_reset_stream_rotate_state for stream %" PRIu64, + stream->key); + stream->rotate_position = -1ULL; + stream->rotate_ready = false; +} + +/* + * Perform the rotation a local stream file. 
+ */ +static +int rotate_local_stream(struct lttng_consumer_local_data *ctx, + struct lttng_consumer_stream *stream) +{ + int ret = 0; + + DBG("Rotate local stream: stream key %" PRIu64 ", channel key %" PRIu64, + stream->key, + stream->chan->key); + stream->tracefile_size_current = 0; + stream->tracefile_count_current = 0; + + if (stream->out_fd >= 0) { + ret = close(stream->out_fd); + if (ret) { + PERROR("Failed to close stream out_fd of channel \"%s\"", + stream->chan->name); + } + stream->out_fd = -1; + } + + if (stream->index_file) { + lttng_index_file_put(stream->index_file); + stream->index_file = NULL; + } + + if (!stream->trace_chunk) { + goto end; + } + + ret = consumer_stream_create_output_files(stream, true); +end: + return ret; +} + +/* + * Performs the stream rotation for the rotate session feature if needed. + * It must be called with the channel and stream locks held. + * + * Return 0 on success, a negative number of error. + */ +int lttng_consumer_rotate_stream(struct lttng_consumer_local_data *ctx, + struct lttng_consumer_stream *stream) +{ + int ret; + + DBG("Consumer rotate stream %" PRIu64, stream->key); + + /* + * Update the stream's 'current' chunk to the session's (channel) + * now-current chunk. + */ + lttng_trace_chunk_put(stream->trace_chunk); + if (stream->chan->trace_chunk == stream->trace_chunk) { + /* + * A channel can be rotated and not have a "next" chunk + * to transition to. In that case, the channel's "current chunk" + * has not been closed yet, but it has not been updated to + * a "next" trace chunk either. Hence, the stream, like its + * parent channel, becomes part of no chunk and can't output + * anything until a new trace chunk is created. 
+ */ + stream->trace_chunk = NULL; + } else if (stream->chan->trace_chunk && + !lttng_trace_chunk_get(stream->chan->trace_chunk)) { + ERR("Failed to acquire a reference to channel's trace chunk during stream rotation"); + ret = -1; + goto error; + } else { + /* + * Update the stream's trace chunk to its parent channel's + * current trace chunk. + */ + stream->trace_chunk = stream->chan->trace_chunk; + } + + if (stream->net_seq_idx == (uint64_t) -1ULL) { + ret = rotate_local_stream(ctx, stream); + if (ret < 0) { + ERR("Failed to rotate stream, ret = %i", ret); + goto error; + } + } + + if (stream->metadata_flag && stream->trace_chunk) { + /* + * If the stream has transitioned to a new trace + * chunk, the metadata should be re-dumped to the + * newest chunk. + * + * However, it is possible for a stream to transition to + * a "no-chunk" state. This can happen if a rotation + * occurs on an inactive session. In such cases, the metadata + * regeneration will happen when the next trace chunk is + * created. + */ + ret = consumer_metadata_stream_dump(stream); + if (ret) { + goto error; + } + } + lttng_consumer_reset_stream_rotate_state(stream); + + ret = 0; + +error: + return ret; +} + +/* + * Rotate all the ready streams now. + * + * This is especially important for low throughput streams that have already + * been consumed, we cannot wait for their next packet to perform the + * rotation. + * Need to be called with RCU read-side lock held to ensure existence of + * channel. 
+ * + * Returns 0 on success, < 0 on error + */ +int lttng_consumer_rotate_ready_streams(struct lttng_consumer_channel *channel, + uint64_t key, struct lttng_consumer_local_data *ctx) +{ + int ret; + struct lttng_consumer_stream *stream; + struct lttng_ht_iter iter; + struct lttng_ht *ht = the_consumer_data.stream_per_chan_id_ht; + + rcu_read_lock(); + + DBG("Consumer rotate ready streams in channel %" PRIu64, key); + + cds_lfht_for_each_entry_duplicate(ht->ht, + ht->hash_fct(&channel->key, lttng_ht_seed), + ht->match_fct, &channel->key, &iter.iter, + stream, node_channel_id.node) { + health_code_update(); + + pthread_mutex_lock(&stream->chan->lock); + pthread_mutex_lock(&stream->lock); + + if (!stream->rotate_ready) { + pthread_mutex_unlock(&stream->lock); + pthread_mutex_unlock(&stream->chan->lock); + continue; + } + DBG("Consumer rotate ready stream %" PRIu64, stream->key); + + ret = lttng_consumer_rotate_stream(ctx, stream); + pthread_mutex_unlock(&stream->lock); + pthread_mutex_unlock(&stream->chan->lock); + if (ret) { + goto end; + } + } + + ret = 0; + +end: + rcu_read_unlock(); + return ret; +} + +enum lttcomm_return_code lttng_consumer_init_command( + struct lttng_consumer_local_data *ctx, + const lttng_uuid sessiond_uuid) +{ + enum lttcomm_return_code ret; + char uuid_str[LTTNG_UUID_STR_LEN]; + + if (ctx->sessiond_uuid.is_set) { + ret = LTTCOMM_CONSUMERD_ALREADY_SET; + goto end; + } + + ctx->sessiond_uuid.is_set = true; + memcpy(ctx->sessiond_uuid.value, sessiond_uuid, sizeof(lttng_uuid)); + ret = LTTCOMM_CONSUMERD_SUCCESS; + lttng_uuid_to_str(sessiond_uuid, uuid_str); + DBG("Received session daemon UUID: %s", uuid_str); +end: + return ret; +} + +enum lttcomm_return_code lttng_consumer_create_trace_chunk( + const uint64_t *relayd_id, uint64_t session_id, + uint64_t chunk_id, + time_t chunk_creation_timestamp, + const char *chunk_override_name, + const struct lttng_credentials *credentials, + struct lttng_directory_handle *chunk_directory_handle) +{ + int 
ret; + enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS; + struct lttng_trace_chunk *created_chunk = NULL, *published_chunk = NULL; + enum lttng_trace_chunk_status chunk_status; + char relayd_id_buffer[MAX_INT_DEC_LEN(*relayd_id)]; + char creation_timestamp_buffer[ISO8601_STR_LEN]; + const char *relayd_id_str = "(none)"; + const char *creation_timestamp_str; + struct lttng_ht_iter iter; + struct lttng_consumer_channel *channel; + + if (relayd_id) { + /* Only used for logging purposes. */ + ret = snprintf(relayd_id_buffer, sizeof(relayd_id_buffer), + "%" PRIu64, *relayd_id); + if (ret > 0 && ret < sizeof(relayd_id_buffer)) { + relayd_id_str = relayd_id_buffer; + } else { + relayd_id_str = "(formatting error)"; + } + } + + /* Local protocol error. */ + LTTNG_ASSERT(chunk_creation_timestamp); + ret = time_to_iso8601_str(chunk_creation_timestamp, + creation_timestamp_buffer, + sizeof(creation_timestamp_buffer)); + creation_timestamp_str = !ret ? creation_timestamp_buffer : + "(formatting error)"; + + DBG("Consumer create trace chunk command: relay_id = %s" + ", session_id = %" PRIu64 ", chunk_id = %" PRIu64 + ", chunk_override_name = %s" + ", chunk_creation_timestamp = %s", + relayd_id_str, session_id, chunk_id, + chunk_override_name ? : "(none)", + creation_timestamp_str); + + /* + * The trace chunk registry, as used by the consumer daemon, implicitly + * owns the trace chunks. This is only needed in the consumer since + * the consumer has no notion of a session beyond session IDs being + * used to identify other objects. + * + * The lttng_trace_chunk_registry_publish() call below provides a + * reference which is not released; it implicitly becomes the session + * daemon's reference to the chunk in the consumer daemon. + * + * The lifetime of trace chunks in the consumer daemon is managed by + * the session daemon through the LTTNG_CONSUMER_CREATE_TRACE_CHUNK + * and LTTNG_CONSUMER_DESTROY_TRACE_CHUNK commands. 
+ */ + created_chunk = lttng_trace_chunk_create(chunk_id, + chunk_creation_timestamp, NULL); + if (!created_chunk) { + ERR("Failed to create trace chunk"); + ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED; + goto error; + } + + if (chunk_override_name) { + chunk_status = lttng_trace_chunk_override_name(created_chunk, + chunk_override_name); + if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { + ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED; + goto error; + } + } + + if (chunk_directory_handle) { + chunk_status = lttng_trace_chunk_set_credentials(created_chunk, + credentials); + if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { + ERR("Failed to set trace chunk credentials"); + ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED; + goto error; + } + /* + * The consumer daemon has no ownership of the chunk output + * directory. + */ + chunk_status = lttng_trace_chunk_set_as_user(created_chunk, + chunk_directory_handle); + chunk_directory_handle = NULL; + if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { + ERR("Failed to set trace chunk's directory handle"); + ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED; + goto error; + } + } + + published_chunk = lttng_trace_chunk_registry_publish_chunk( + the_consumer_data.chunk_registry, session_id, + created_chunk); + lttng_trace_chunk_put(created_chunk); + created_chunk = NULL; + if (!published_chunk) { + ERR("Failed to publish trace chunk"); + ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED; + goto error; + } + + rcu_read_lock(); + cds_lfht_for_each_entry_duplicate( + the_consumer_data.channels_by_session_id_ht->ht, + the_consumer_data.channels_by_session_id_ht->hash_fct( + &session_id, lttng_ht_seed), + the_consumer_data.channels_by_session_id_ht->match_fct, + &session_id, &iter.iter, channel, + channels_by_session_id_ht_node.node) { + ret = lttng_consumer_channel_set_trace_chunk(channel, + published_chunk); + if (ret) { + /* + * Roll-back the creation of this chunk. 
+ * + * This is important since the session daemon will + * assume that the creation of this chunk failed and + * will never ask for it to be closed, resulting + * in a leak and an inconsistent state for some + * channels. + */ + enum lttcomm_return_code close_ret; + char path[LTTNG_PATH_MAX]; + + DBG("Failed to set new trace chunk on existing channels, rolling back"); + close_ret = lttng_consumer_close_trace_chunk(relayd_id, + session_id, chunk_id, + chunk_creation_timestamp, NULL, + path); + if (close_ret != LTTCOMM_CONSUMERD_SUCCESS) { + ERR("Failed to roll-back the creation of new chunk: session_id = %" PRIu64 ", chunk_id = %" PRIu64, + session_id, chunk_id); + } + + ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED; + break; + } + } + + if (relayd_id) { + struct consumer_relayd_sock_pair *relayd; + + relayd = consumer_find_relayd(*relayd_id); + if (relayd) { + pthread_mutex_lock(&relayd->ctrl_sock_mutex); + ret = relayd_create_trace_chunk( + &relayd->control_sock, published_chunk); + pthread_mutex_unlock(&relayd->ctrl_sock_mutex); + } else { + ERR("Failed to find relay daemon socket: relayd_id = %" PRIu64, *relayd_id); + } + + if (!relayd || ret) { + enum lttcomm_return_code close_ret; + char path[LTTNG_PATH_MAX]; + + close_ret = lttng_consumer_close_trace_chunk(relayd_id, + session_id, + chunk_id, + chunk_creation_timestamp, + NULL, path); + if (close_ret != LTTCOMM_CONSUMERD_SUCCESS) { + ERR("Failed to roll-back the creation of new chunk: session_id = %" PRIu64 ", chunk_id = %" PRIu64, + session_id, + chunk_id); + } + + ret_code = LTTCOMM_CONSUMERD_CREATE_TRACE_CHUNK_FAILED; + goto error_unlock; + } + } +error_unlock: + rcu_read_unlock(); +error: + /* Release the reference returned by the "publish" operation. 
*/ + lttng_trace_chunk_put(published_chunk); + lttng_trace_chunk_put(created_chunk); + return ret_code; +} + +enum lttcomm_return_code lttng_consumer_close_trace_chunk( + const uint64_t *relayd_id, uint64_t session_id, + uint64_t chunk_id, time_t chunk_close_timestamp, + const enum lttng_trace_chunk_command_type *close_command, + char *path) +{ + enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS; + struct lttng_trace_chunk *chunk; + char relayd_id_buffer[MAX_INT_DEC_LEN(*relayd_id)]; + const char *relayd_id_str = "(none)"; + const char *close_command_name = "none"; + struct lttng_ht_iter iter; + struct lttng_consumer_channel *channel; + enum lttng_trace_chunk_status chunk_status; + + if (relayd_id) { + int ret; + + /* Only used for logging purposes. */ + ret = snprintf(relayd_id_buffer, sizeof(relayd_id_buffer), + "%" PRIu64, *relayd_id); + if (ret > 0 && ret < sizeof(relayd_id_buffer)) { + relayd_id_str = relayd_id_buffer; + } else { + relayd_id_str = "(formatting error)"; + } + } + if (close_command) { + close_command_name = lttng_trace_chunk_command_type_get_name( + *close_command); + } + + DBG("Consumer close trace chunk command: relayd_id = %s" + ", session_id = %" PRIu64 ", chunk_id = %" PRIu64 + ", close command = %s", + relayd_id_str, session_id, chunk_id, + close_command_name); + + chunk = lttng_trace_chunk_registry_find_chunk( + the_consumer_data.chunk_registry, session_id, chunk_id); + if (!chunk) { + ERR("Failed to find chunk: session_id = %" PRIu64 + ", chunk_id = %" PRIu64, + session_id, chunk_id); + ret_code = LTTCOMM_CONSUMERD_UNKNOWN_TRACE_CHUNK; + goto end; + } + + chunk_status = lttng_trace_chunk_set_close_timestamp(chunk, + chunk_close_timestamp); + if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { + ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED; + goto end; + } + + if (close_command) { + chunk_status = lttng_trace_chunk_set_close_command( + chunk, *close_command); + if (chunk_status != LTTNG_TRACE_CHUNK_STATUS_OK) { + 
ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED; + goto end; + } + } + + /* + * chunk is now invalid to access as we no longer hold a reference to + * it; it is only kept around to compare it (by address) to the + * current chunk found in the session's channels. + */ + rcu_read_lock(); + cds_lfht_for_each_entry(the_consumer_data.channel_ht->ht, &iter.iter, + channel, node.node) { + int ret; + + /* + * Only change the channel's chunk to NULL if it still + * references the chunk being closed. The channel may + * reference a newer channel in the case of a session + * rotation. When a session rotation occurs, the "next" + * chunk is created before the "current" chunk is closed. + */ + if (channel->trace_chunk != chunk) { + continue; + } + ret = lttng_consumer_channel_set_trace_chunk(channel, NULL); + if (ret) { + /* + * Attempt to close the chunk on as many channels as + * possible. + */ + ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED; + } + } + + if (relayd_id) { + int ret; + struct consumer_relayd_sock_pair *relayd; + + relayd = consumer_find_relayd(*relayd_id); + if (relayd) { + pthread_mutex_lock(&relayd->ctrl_sock_mutex); + ret = relayd_close_trace_chunk( + &relayd->control_sock, chunk, + path); + pthread_mutex_unlock(&relayd->ctrl_sock_mutex); + } else { + ERR("Failed to find relay daemon socket: relayd_id = %" PRIu64, + *relayd_id); + } + + if (!relayd || ret) { + ret_code = LTTCOMM_CONSUMERD_CLOSE_TRACE_CHUNK_FAILED; + goto error_unlock; + } + } +error_unlock: + rcu_read_unlock(); +end: + /* + * Release the reference returned by the "find" operation and + * the session daemon's implicit reference to the chunk. 
+ */ + lttng_trace_chunk_put(chunk); + lttng_trace_chunk_put(chunk); + + return ret_code; +} + +enum lttcomm_return_code lttng_consumer_trace_chunk_exists( + const uint64_t *relayd_id, uint64_t session_id, + uint64_t chunk_id) +{ + int ret; + enum lttcomm_return_code ret_code; + char relayd_id_buffer[MAX_INT_DEC_LEN(*relayd_id)]; + const char *relayd_id_str = "(none)"; + const bool is_local_trace = !relayd_id; + struct consumer_relayd_sock_pair *relayd = NULL; + bool chunk_exists_local, chunk_exists_remote; + + if (relayd_id) { + /* Only used for logging purposes. */ + ret = snprintf(relayd_id_buffer, sizeof(relayd_id_buffer), + "%" PRIu64, *relayd_id); + if (ret > 0 && ret < sizeof(relayd_id_buffer)) { + relayd_id_str = relayd_id_buffer; + } else { + relayd_id_str = "(formatting error)"; + } + } + + DBG("Consumer trace chunk exists command: relayd_id = %s" + ", chunk_id = %" PRIu64, relayd_id_str, + chunk_id); + ret = lttng_trace_chunk_registry_chunk_exists( + the_consumer_data.chunk_registry, session_id, chunk_id, + &chunk_exists_local); + if (ret) { + /* Internal error. */ + ERR("Failed to query the existence of a trace chunk"); + ret_code = LTTCOMM_CONSUMERD_FATAL; + goto end; + } + DBG("Trace chunk %s locally", + chunk_exists_local ? 
"exists" : "does not exist"); + if (chunk_exists_local) { + ret_code = LTTCOMM_CONSUMERD_TRACE_CHUNK_EXISTS_LOCAL; + goto end; + } else if (is_local_trace) { + ret_code = LTTCOMM_CONSUMERD_UNKNOWN_TRACE_CHUNK; + goto end; + } + + rcu_read_lock(); + relayd = consumer_find_relayd(*relayd_id); + if (!relayd) { + ERR("Failed to find relayd %" PRIu64, *relayd_id); + ret_code = LTTCOMM_CONSUMERD_INVALID_PARAMETERS; + goto end_rcu_unlock; + } + DBG("Looking up existence of trace chunk on relay daemon"); + pthread_mutex_lock(&relayd->ctrl_sock_mutex); + ret = relayd_trace_chunk_exists(&relayd->control_sock, chunk_id, + &chunk_exists_remote); + pthread_mutex_unlock(&relayd->ctrl_sock_mutex); + if (ret < 0) { + ERR("Failed to look-up the existence of trace chunk on relay daemon"); + ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL; + goto end_rcu_unlock; + } + + ret_code = chunk_exists_remote ? + LTTCOMM_CONSUMERD_TRACE_CHUNK_EXISTS_REMOTE : + LTTCOMM_CONSUMERD_UNKNOWN_TRACE_CHUNK; + DBG("Trace chunk %s on relay daemon", + chunk_exists_remote ? "exists" : "does not exist"); + +end_rcu_unlock: + rcu_read_unlock(); +end: + return ret_code; +} + +static +int consumer_clear_monitored_channel(struct lttng_consumer_channel *channel) +{ + struct lttng_ht *ht; + struct lttng_consumer_stream *stream; + struct lttng_ht_iter iter; + int ret; + + ht = the_consumer_data.stream_per_chan_id_ht; + + rcu_read_lock(); + cds_lfht_for_each_entry_duplicate(ht->ht, + ht->hash_fct(&channel->key, lttng_ht_seed), + ht->match_fct, &channel->key, + &iter.iter, stream, node_channel_id.node) { + /* + * Protect against teardown with mutex. 
+ */ + pthread_mutex_lock(&stream->lock); + if (cds_lfht_is_node_deleted(&stream->node.node)) { + goto next; + } + ret = consumer_clear_stream(stream); + if (ret) { + goto error_unlock; + } + next: + pthread_mutex_unlock(&stream->lock); + } + rcu_read_unlock(); + return LTTCOMM_CONSUMERD_SUCCESS; + +error_unlock: + pthread_mutex_unlock(&stream->lock); + rcu_read_unlock(); + return ret; +} + +int lttng_consumer_clear_channel(struct lttng_consumer_channel *channel) +{ + int ret; + + DBG("Consumer clear channel %" PRIu64, channel->key); + + if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) { + /* + * Nothing to do for the metadata channel/stream. + * Snapshot mechanism already take care of the metadata + * handling/generation, and monitored channels only need to + * have their data stream cleared.. + */ + ret = LTTCOMM_CONSUMERD_SUCCESS; + goto end; + } + + if (!channel->monitor) { + ret = consumer_clear_unmonitored_channel(channel); + } else { + ret = consumer_clear_monitored_channel(channel); + } +end: + return ret; +} + +enum lttcomm_return_code lttng_consumer_open_channel_packets( + struct lttng_consumer_channel *channel) +{ + struct lttng_consumer_stream *stream; + enum lttcomm_return_code ret = LTTCOMM_CONSUMERD_SUCCESS; + + if (channel->metadata_stream) { + ERR("Open channel packets command attempted on a metadata channel"); + ret = LTTCOMM_CONSUMERD_INVALID_PARAMETERS; + goto end; + } + + rcu_read_lock(); + cds_list_for_each_entry(stream, &channel->streams.head, send_node) { + enum consumer_stream_open_packet_status status; + + pthread_mutex_lock(&stream->lock); + if (cds_lfht_is_node_deleted(&stream->node.node)) { + goto next; + } + + status = consumer_stream_open_packet(stream); + switch (status) { + case CONSUMER_STREAM_OPEN_PACKET_STATUS_OPENED: + DBG("Opened a packet in \"open channel packets\" command: stream id = %" PRIu64 + ", channel name = %s, session id = %" PRIu64, + stream->key, stream->chan->name, + stream->chan->session_id); + 
stream->opened_packet_in_current_trace_chunk = true; + break; + case CONSUMER_STREAM_OPEN_PACKET_STATUS_NO_SPACE: + DBG("No space left to open a packet in \"open channel packets\" command: stream id = %" PRIu64 + ", channel name = %s, session id = %" PRIu64, + stream->key, stream->chan->name, + stream->chan->session_id); + break; + case CONSUMER_STREAM_OPEN_PACKET_STATUS_ERROR: + /* + * Only unexpected internal errors can lead to this + * failing. Report an unknown error. + */ + ERR("Failed to flush empty buffer in \"open channel packets\" command: stream id = %" PRIu64 + ", channel id = %" PRIu64 + ", channel name = %s" + ", session id = %" PRIu64, + stream->key, channel->key, + channel->name, channel->session_id); + ret = LTTCOMM_CONSUMERD_UNKNOWN_ERROR; + goto error_unlock; + default: + abort(); + } + + next: + pthread_mutex_unlock(&stream->lock); + } + +end_rcu_unlock: + rcu_read_unlock(); +end: + return ret; + +error_unlock: + pthread_mutex_unlock(&stream->lock); + goto end_rcu_unlock; +} + +void lttng_consumer_sigbus_handle(void *addr) +{ + lttng_ustconsumer_sigbus_handle(addr); +} diff --git a/src/common/consumer/consumer.h b/src/common/consumer/consumer.h index 1b0ee000b..582385c82 100644 --- a/src/common/consumer/consumer.h +++ b/src/common/consumer/consumer.h @@ -802,30 +802,30 @@ struct lttng_consumer_global_data { * This is nested OUTSIDE the stream lock. * This is nested OUTSIDE the consumer_relayd_sock_pair lock. */ - pthread_mutex_t lock; + pthread_mutex_t lock {}; /* * Number of streams in the data stream hash table declared outside. * Protected by consumer_data.lock. */ - int stream_count; + int stream_count = 0; /* Channel hash table protected by consumer_data.lock. */ - struct lttng_ht *channel_ht; + struct lttng_ht *channel_ht = nullptr; /* Channel hash table indexed by session id. 
*/ - struct lttng_ht *channels_by_session_id_ht; + struct lttng_ht *channels_by_session_id_ht = nullptr; /* * Flag specifying if the local array of FDs needs update in the * poll function. Protected by consumer_data.lock. */ - unsigned int need_update; - enum lttng_consumer_type type; + unsigned int need_update = 1; + enum lttng_consumer_type type = LTTNG_CONSUMER_UNKNOWN; /* * Relayd socket(s) hashtable indexed by network sequence number. Each * stream has an index which associate the right relayd socket to use. */ - struct lttng_ht *relayd_ht; + struct lttng_ht *relayd_ht = nullptr; /* * This hash table contains all streams (metadata and data) indexed by @@ -834,17 +834,17 @@ struct lttng_consumer_global_data { * * This HT uses the "node_session_id" of the consumer stream. */ - struct lttng_ht *stream_list_ht; + struct lttng_ht *stream_list_ht = nullptr; /* * This HT uses the "node_channel_id" of the consumer stream. */ - struct lttng_ht *stream_per_chan_id_ht; + struct lttng_ht *stream_per_chan_id_ht = nullptr; /* * Trace chunk registry indexed by (session_id, chunk_id). 
*/ - struct lttng_trace_chunk_registry *chunk_registry; + struct lttng_trace_chunk_registry *chunk_registry = nullptr; }; /* diff --git a/src/common/consumer/metadata-bucket.c b/src/common/consumer/metadata-bucket.c deleted file mode 100644 index 1ee5022e5..000000000 --- a/src/common/consumer/metadata-bucket.c +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Copyright (C) 2020 Jérémie Galarneau - * - * SPDX-License-Identifier: GPL-2.0-only - * - */ - -#include "metadata-bucket.h" - -#include -#include -#include -#include -#include - -struct metadata_bucket { - struct lttng_dynamic_buffer content; - struct { - metadata_bucket_flush_cb fn; - void *data; - } flush; - unsigned int buffer_count; -}; - -struct metadata_bucket *metadata_bucket_create( - metadata_bucket_flush_cb flush, void *data) -{ - struct metadata_bucket *bucket; - - bucket = zmalloc(sizeof(typeof(*bucket))); - if (!bucket) { - PERROR("Failed to allocate buffer bucket"); - goto end; - } - - bucket->flush.fn = flush; - bucket->flush.data = data; - lttng_dynamic_buffer_init(&bucket->content); -end: - return bucket; -} - -void metadata_bucket_destroy(struct metadata_bucket *bucket) -{ - if (!bucket) { - return; - } - - if (bucket->content.size > 0) { - WARN("Stream metadata bucket destroyed with remaining data: size = %zu, buffer count = %u", - bucket->content.size, bucket->buffer_count); - } - - lttng_dynamic_buffer_reset(&bucket->content); - free(bucket); -} - -void metadata_bucket_reset(struct metadata_bucket *bucket) -{ - lttng_dynamic_buffer_reset(&bucket->content); - lttng_dynamic_buffer_init(&bucket->content); - bucket->buffer_count = 0; -} - -enum metadata_bucket_status metadata_bucket_fill(struct metadata_bucket *bucket, - const struct stream_subbuffer *buffer) -{ - ssize_t ret; - struct lttng_buffer_view flushed_view; - struct stream_subbuffer flushed_subbuffer; - enum metadata_bucket_status status; - const bool should_flush = - LTTNG_OPTIONAL_GET(buffer->info.metadata.coherent); - const size_t 
padding_this_buffer = - buffer->info.metadata.padded_subbuf_size - - buffer->info.metadata.subbuf_size; - size_t flush_size; - - DBG("Metadata bucket filled with %zu bytes buffer view, sub-buffer size: %lu, padded sub-buffer size: %lu, coherent: %s", - buffer->buffer.buffer.size, - buffer->info.metadata.subbuf_size, - buffer->info.metadata.padded_subbuf_size, - buffer->info.metadata.coherent.value ? "true" : "false"); - /* - * If no metadata was accumulated and this buffer should be - * flushed, don't copy it unecessarily; just flush it directly. - */ - if (!should_flush || bucket->buffer_count != 0) { - /* - * Append the _padded_ subbuffer since they are combined - * into a single "virtual" subbuffer that will be - * flushed at once. - * - * This means that some padding will be sent over the - * network, but should not represent a large amount - * of data as incoherent subbuffers are typically - * pretty full. - * - * The padding of the last subbuffer (coherent) added to - * the bucket is not sent, which is what really matters - * from an efficiency point of view. - */ - ret = lttng_dynamic_buffer_append_view( - &bucket->content, &buffer->buffer.buffer); - if (ret) { - status = METADATA_BUCKET_STATUS_ERROR; - goto end; - } - } - - bucket->buffer_count++; - if (!should_flush) { - status = METADATA_BUCKET_STATUS_OK; - goto end; - } - - flushed_view = bucket->content.size != 0 ? - lttng_buffer_view_from_dynamic_buffer(&bucket->content, 0, -1) : - lttng_buffer_view_from_view(&buffer->buffer.buffer, 0, -1); - - /* - * The flush is done with the size of all padded sub-buffers, except - * for the last one which we can safely "trim". The padding of the last - * packet will be reconstructed by the relay daemon. 
- */ - flush_size = flushed_view.size - padding_this_buffer; - - flushed_subbuffer = (typeof(flushed_subbuffer)) { - .buffer.buffer = flushed_view, - .info.metadata.subbuf_size = flush_size, - .info.metadata.padded_subbuf_size = flushed_view.size, - .info.metadata.version = buffer->info.metadata.version, - .info.metadata.coherent = buffer->info.metadata.coherent, - }; - - DBG("Metadata bucket flushing %zu bytes (%u sub-buffer%s)", - flushed_view.size, bucket->buffer_count, - bucket->buffer_count > 1 ? "s" : ""); - ret = bucket->flush.fn(&flushed_subbuffer, bucket->flush.data); - if (ret >= 0) { - status = METADATA_BUCKET_STATUS_OK; - } else { - status = METADATA_BUCKET_STATUS_ERROR; - } - - metadata_bucket_reset(bucket); - -end: - return status; -} diff --git a/src/common/consumer/metadata-bucket.cpp b/src/common/consumer/metadata-bucket.cpp new file mode 100644 index 000000000..160185def --- /dev/null +++ b/src/common/consumer/metadata-bucket.cpp @@ -0,0 +1,156 @@ +/* + * Copyright (C) 2020 Jérémie Galarneau + * + * SPDX-License-Identifier: GPL-2.0-only + * + */ + +#include "metadata-bucket.h" + +#include +#include +#include +#include +#include + +struct metadata_bucket { + struct lttng_dynamic_buffer content; + struct { + metadata_bucket_flush_cb fn; + void *data; + } flush; + unsigned int buffer_count; +}; + +struct metadata_bucket *metadata_bucket_create( + metadata_bucket_flush_cb flush, void *data) +{ + struct metadata_bucket *bucket; + + bucket = (metadata_bucket *) zmalloc(sizeof(typeof(*bucket))); + if (!bucket) { + PERROR("Failed to allocate buffer bucket"); + goto end; + } + + bucket->flush.fn = flush; + bucket->flush.data = data; + lttng_dynamic_buffer_init(&bucket->content); +end: + return bucket; +} + +void metadata_bucket_destroy(struct metadata_bucket *bucket) +{ + if (!bucket) { + return; + } + + if (bucket->content.size > 0) { + WARN("Stream metadata bucket destroyed with remaining data: size = %zu, buffer count = %u", + bucket->content.size, 
bucket->buffer_count); + } + + lttng_dynamic_buffer_reset(&bucket->content); + free(bucket); +} + +void metadata_bucket_reset(struct metadata_bucket *bucket) +{ + lttng_dynamic_buffer_reset(&bucket->content); + lttng_dynamic_buffer_init(&bucket->content); + bucket->buffer_count = 0; +} + +enum metadata_bucket_status metadata_bucket_fill(struct metadata_bucket *bucket, + const struct stream_subbuffer *buffer) +{ + ssize_t ret; + struct lttng_buffer_view flushed_view; + struct stream_subbuffer flushed_subbuffer; + enum metadata_bucket_status status; + const bool should_flush = + LTTNG_OPTIONAL_GET(buffer->info.metadata.coherent); + const size_t padding_this_buffer = + buffer->info.metadata.padded_subbuf_size - + buffer->info.metadata.subbuf_size; + size_t flush_size; + + DBG("Metadata bucket filled with %zu bytes buffer view, sub-buffer size: %lu, padded sub-buffer size: %lu, coherent: %s", + buffer->buffer.buffer.size, + buffer->info.metadata.subbuf_size, + buffer->info.metadata.padded_subbuf_size, + buffer->info.metadata.coherent.value ? "true" : "false"); + /* + * If no metadata was accumulated and this buffer should be + * flushed, don't copy it unecessarily; just flush it directly. + */ + if (!should_flush || bucket->buffer_count != 0) { + /* + * Append the _padded_ subbuffer since they are combined + * into a single "virtual" subbuffer that will be + * flushed at once. + * + * This means that some padding will be sent over the + * network, but should not represent a large amount + * of data as incoherent subbuffers are typically + * pretty full. + * + * The padding of the last subbuffer (coherent) added to + * the bucket is not sent, which is what really matters + * from an efficiency point of view. 
+ */ + ret = lttng_dynamic_buffer_append_view( + &bucket->content, &buffer->buffer.buffer); + if (ret) { + status = METADATA_BUCKET_STATUS_ERROR; + goto end; + } + } + + bucket->buffer_count++; + if (!should_flush) { + status = METADATA_BUCKET_STATUS_OK; + goto end; + } + + flushed_view = bucket->content.size != 0 ? + lttng_buffer_view_from_dynamic_buffer(&bucket->content, 0, -1) : + lttng_buffer_view_from_view(&buffer->buffer.buffer, 0, -1); + + /* + * The flush is done with the size of all padded sub-buffers, except + * for the last one which we can safely "trim". The padding of the last + * packet will be reconstructed by the relay daemon. + */ + flush_size = flushed_view.size - padding_this_buffer; + + flushed_subbuffer = (typeof(flushed_subbuffer)) { + .buffer = { + .buffer = flushed_view, + }, + .info = { + .metadata = { + .subbuf_size = flush_size, + .padded_subbuf_size = flushed_view.size, + .version = buffer->info.metadata.version, + .coherent = buffer->info.metadata.coherent, + }, + }, + }; + + DBG("Metadata bucket flushing %zu bytes (%u sub-buffer%s)", + flushed_view.size, bucket->buffer_count, + bucket->buffer_count > 1 ? 
"s" : ""); + ret = bucket->flush.fn(&flushed_subbuffer, bucket->flush.data); + if (ret >= 0) { + status = METADATA_BUCKET_STATUS_OK; + } else { + status = METADATA_BUCKET_STATUS_ERROR; + } + + metadata_bucket_reset(bucket); + +end: + return status; +} diff --git a/src/common/consumer/metadata-bucket.h b/src/common/consumer/metadata-bucket.h index 0355eb3c0..8868811ef 100644 --- a/src/common/consumer/metadata-bucket.h +++ b/src/common/consumer/metadata-bucket.h @@ -10,6 +10,10 @@ #include +#ifdef __cplusplus +extern "C" { +#endif + struct metadata_bucket; typedef ssize_t (*metadata_bucket_flush_cb)( @@ -30,5 +34,8 @@ enum metadata_bucket_status metadata_bucket_fill(struct metadata_bucket *bucket, void metadata_bucket_reset(struct metadata_bucket *bucket); -#endif /* METADATA_BUCKET_H */ +#ifdef __cplusplus +} +#endif +#endif /* METADATA_BUCKET_H */ diff --git a/src/common/kernel-consumer/Makefile.am b/src/common/kernel-consumer/Makefile.am index cf86e2a63..b1184130e 100644 --- a/src/common/kernel-consumer/Makefile.am +++ b/src/common/kernel-consumer/Makefile.am @@ -2,7 +2,9 @@ noinst_LTLIBRARIES = libkernel-consumer.la -libkernel_consumer_la_SOURCES = kernel-consumer.c kernel-consumer.h +libkernel_consumer_la_SOURCES = \ + kernel-consumer.cpp \ + kernel-consumer.h libkernel_consumer_la_LIBADD = \ $(top_builddir)/src/common/kernel-ctl/libkernel-ctl.la \ diff --git a/src/common/kernel-consumer/kernel-consumer.c b/src/common/kernel-consumer/kernel-consumer.c deleted file mode 100644 index 236c2c98f..000000000 --- a/src/common/kernel-consumer/kernel-consumer.c +++ /dev/null @@ -1,1920 +0,0 @@ -/* - * Copyright (C) 2011 Julien Desfossez - * Copyright (C) 2011 Mathieu Desnoyers - * Copyright (C) 2017 Jérémie Galarneau - * - * SPDX-License-Identifier: GPL-2.0-only - * - */ - -#define _LGPL_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include 
-#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "kernel-consumer.h" - -extern struct lttng_consumer_global_data the_consumer_data; -extern int consumer_poll_timeout; - -/* - * Take a snapshot for a specific fd - * - * Returns 0 on success, < 0 on error - */ -int lttng_kconsumer_take_snapshot(struct lttng_consumer_stream *stream) -{ - int ret = 0; - int infd = stream->wait_fd; - - ret = kernctl_snapshot(infd); - /* - * -EAGAIN is not an error, it just means that there is no data to - * be read. - */ - if (ret != 0 && ret != -EAGAIN) { - PERROR("Getting sub-buffer snapshot."); - } - - return ret; -} - -/* - * Sample consumed and produced positions for a specific fd. - * - * Returns 0 on success, < 0 on error. - */ -int lttng_kconsumer_sample_snapshot_positions( - struct lttng_consumer_stream *stream) -{ - LTTNG_ASSERT(stream); - - return kernctl_snapshot_sample_positions(stream->wait_fd); -} - -/* - * Get the produced position - * - * Returns 0 on success, < 0 on error - */ -int lttng_kconsumer_get_produced_snapshot(struct lttng_consumer_stream *stream, - unsigned long *pos) -{ - int ret; - int infd = stream->wait_fd; - - ret = kernctl_snapshot_get_produced(infd, pos); - if (ret != 0) { - PERROR("kernctl_snapshot_get_produced"); - } - - return ret; -} - -/* - * Get the consumerd position - * - * Returns 0 on success, < 0 on error - */ -int lttng_kconsumer_get_consumed_snapshot(struct lttng_consumer_stream *stream, - unsigned long *pos) -{ - int ret; - int infd = stream->wait_fd; - - ret = kernctl_snapshot_get_consumed(infd, pos); - if (ret != 0) { - PERROR("kernctl_snapshot_get_consumed"); - } - - return ret; -} - -static -int get_current_subbuf_addr(struct lttng_consumer_stream *stream, - const char **addr) -{ - int ret; - unsigned long mmap_offset; - const char *mmap_base = stream->mmap_base; - - ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset); - if (ret < 0) { - 
PERROR("Failed to get mmap read offset"); - goto error; - } - - *addr = mmap_base + mmap_offset; -error: - return ret; -} - -/* - * Take a snapshot of all the stream of a channel - * RCU read-side lock must be held across this function to ensure existence of - * channel. The channel lock must be held by the caller. - * - * Returns 0 on success, < 0 on error - */ -static int lttng_kconsumer_snapshot_channel( - struct lttng_consumer_channel *channel, - uint64_t key, char *path, uint64_t relayd_id, - uint64_t nb_packets_per_stream, - struct lttng_consumer_local_data *ctx) -{ - int ret; - struct lttng_consumer_stream *stream; - - DBG("Kernel consumer snapshot channel %" PRIu64, key); - - rcu_read_lock(); - - /* Splice is not supported yet for channel snapshot. */ - if (channel->output != CONSUMER_CHANNEL_MMAP) { - ERR("Unsupported output type for channel \"%s\": mmap output is required to record a snapshot", - channel->name); - ret = -1; - goto end; - } - - cds_list_for_each_entry(stream, &channel->streams.head, send_node) { - unsigned long consumed_pos, produced_pos; - - health_code_update(); - - /* - * Lock stream because we are about to change its state. - */ - pthread_mutex_lock(&stream->lock); - - LTTNG_ASSERT(channel->trace_chunk); - if (!lttng_trace_chunk_get(channel->trace_chunk)) { - /* - * Can't happen barring an internal error as the channel - * holds a reference to the trace chunk. - */ - ERR("Failed to acquire reference to channel's trace chunk"); - ret = -1; - goto end_unlock; - } - LTTNG_ASSERT(!stream->trace_chunk); - stream->trace_chunk = channel->trace_chunk; - - /* - * Assign the received relayd ID so we can use it for streaming. The streams - * are not visible to anyone so this is OK to change it. 
- */ - stream->net_seq_idx = relayd_id; - channel->relayd_id = relayd_id; - if (relayd_id != (uint64_t) -1ULL) { - ret = consumer_send_relayd_stream(stream, path); - if (ret < 0) { - ERR("sending stream to relayd"); - goto end_unlock; - } - } else { - ret = consumer_stream_create_output_files(stream, - false); - if (ret < 0) { - goto end_unlock; - } - DBG("Kernel consumer snapshot stream (%" PRIu64 ")", - stream->key); - } - - ret = kernctl_buffer_flush_empty(stream->wait_fd); - if (ret < 0) { - /* - * Doing a buffer flush which does not take into - * account empty packets. This is not perfect - * for stream intersection, but required as a - * fall-back when "flush_empty" is not - * implemented by lttng-modules. - */ - ret = kernctl_buffer_flush(stream->wait_fd); - if (ret < 0) { - ERR("Failed to flush kernel stream"); - goto end_unlock; - } - goto end_unlock; - } - - ret = lttng_kconsumer_take_snapshot(stream); - if (ret < 0) { - ERR("Taking kernel snapshot"); - goto end_unlock; - } - - ret = lttng_kconsumer_get_produced_snapshot(stream, &produced_pos); - if (ret < 0) { - ERR("Produced kernel snapshot position"); - goto end_unlock; - } - - ret = lttng_kconsumer_get_consumed_snapshot(stream, &consumed_pos); - if (ret < 0) { - ERR("Consumerd kernel snapshot position"); - goto end_unlock; - } - - consumed_pos = consumer_get_consume_start_pos(consumed_pos, - produced_pos, nb_packets_per_stream, - stream->max_sb_size); - - while ((long) (consumed_pos - produced_pos) < 0) { - ssize_t read_len; - unsigned long len, padded_len; - const char *subbuf_addr; - struct lttng_buffer_view subbuf_view; - - health_code_update(); - DBG("Kernel consumer taking snapshot at pos %lu", consumed_pos); - - ret = kernctl_get_subbuf(stream->wait_fd, &consumed_pos); - if (ret < 0) { - if (ret != -EAGAIN) { - PERROR("kernctl_get_subbuf snapshot"); - goto end_unlock; - } - DBG("Kernel consumer get subbuf failed. 
Skipping it."); - consumed_pos += stream->max_sb_size; - stream->chan->lost_packets++; - continue; - } - - ret = kernctl_get_subbuf_size(stream->wait_fd, &len); - if (ret < 0) { - ERR("Snapshot kernctl_get_subbuf_size"); - goto error_put_subbuf; - } - - ret = kernctl_get_padded_subbuf_size(stream->wait_fd, &padded_len); - if (ret < 0) { - ERR("Snapshot kernctl_get_padded_subbuf_size"); - goto error_put_subbuf; - } - - ret = get_current_subbuf_addr(stream, &subbuf_addr); - if (ret) { - goto error_put_subbuf; - } - - subbuf_view = lttng_buffer_view_init( - subbuf_addr, 0, padded_len); - read_len = lttng_consumer_on_read_subbuffer_mmap( - stream, &subbuf_view, - padded_len - len); - /* - * We write the padded len in local tracefiles but the data len - * when using a relay. Display the error but continue processing - * to try to release the subbuffer. - */ - if (relayd_id != (uint64_t) -1ULL) { - if (read_len != len) { - ERR("Error sending to the relay (ret: %zd != len: %lu)", - read_len, len); - } - } else { - if (read_len != padded_len) { - ERR("Error writing to tracefile (ret: %zd != len: %lu)", - read_len, padded_len); - } - } - - ret = kernctl_put_subbuf(stream->wait_fd); - if (ret < 0) { - ERR("Snapshot kernctl_put_subbuf"); - goto end_unlock; - } - consumed_pos += stream->max_sb_size; - } - - if (relayd_id == (uint64_t) -1ULL) { - if (stream->out_fd >= 0) { - ret = close(stream->out_fd); - if (ret < 0) { - PERROR("Kernel consumer snapshot close out_fd"); - goto end_unlock; - } - stream->out_fd = -1; - } - } else { - close_relayd_stream(stream); - stream->net_seq_idx = (uint64_t) -1ULL; - } - lttng_trace_chunk_put(stream->trace_chunk); - stream->trace_chunk = NULL; - pthread_mutex_unlock(&stream->lock); - } - - /* All good! 
*/ - ret = 0; - goto end; - -error_put_subbuf: - ret = kernctl_put_subbuf(stream->wait_fd); - if (ret < 0) { - ERR("Snapshot kernctl_put_subbuf error path"); - } -end_unlock: - pthread_mutex_unlock(&stream->lock); -end: - rcu_read_unlock(); - return ret; -} - -/* - * Read the whole metadata available for a snapshot. - * RCU read-side lock must be held across this function to ensure existence of - * metadata_channel. The channel lock must be held by the caller. - * - * Returns 0 on success, < 0 on error - */ -static int lttng_kconsumer_snapshot_metadata( - struct lttng_consumer_channel *metadata_channel, - uint64_t key, char *path, uint64_t relayd_id, - struct lttng_consumer_local_data *ctx) -{ - int ret, use_relayd = 0; - ssize_t ret_read; - struct lttng_consumer_stream *metadata_stream; - - LTTNG_ASSERT(ctx); - - DBG("Kernel consumer snapshot metadata with key %" PRIu64 " at path %s", - key, path); - - rcu_read_lock(); - - metadata_stream = metadata_channel->metadata_stream; - LTTNG_ASSERT(metadata_stream); - - pthread_mutex_lock(&metadata_stream->lock); - LTTNG_ASSERT(metadata_channel->trace_chunk); - LTTNG_ASSERT(metadata_stream->trace_chunk); - - /* Flag once that we have a valid relayd for the stream. 
*/ - if (relayd_id != (uint64_t) -1ULL) { - use_relayd = 1; - } - - if (use_relayd) { - ret = consumer_send_relayd_stream(metadata_stream, path); - if (ret < 0) { - goto error_snapshot; - } - } else { - ret = consumer_stream_create_output_files(metadata_stream, - false); - if (ret < 0) { - goto error_snapshot; - } - } - - do { - health_code_update(); - - ret_read = lttng_consumer_read_subbuffer(metadata_stream, ctx, true); - if (ret_read < 0) { - ERR("Kernel snapshot reading metadata subbuffer (ret: %zd)", - ret_read); - ret = ret_read; - goto error_snapshot; - } - } while (ret_read > 0); - - if (use_relayd) { - close_relayd_stream(metadata_stream); - metadata_stream->net_seq_idx = (uint64_t) -1ULL; - } else { - if (metadata_stream->out_fd >= 0) { - ret = close(metadata_stream->out_fd); - if (ret < 0) { - PERROR("Kernel consumer snapshot metadata close out_fd"); - /* - * Don't go on error here since the snapshot was successful at this - * point but somehow the close failed. - */ - } - metadata_stream->out_fd = -1; - lttng_trace_chunk_put(metadata_stream->trace_chunk); - metadata_stream->trace_chunk = NULL; - } - } - - ret = 0; -error_snapshot: - pthread_mutex_unlock(&metadata_stream->lock); - cds_list_del(&metadata_stream->send_node); - consumer_stream_destroy(metadata_stream, NULL); - metadata_channel->metadata_stream = NULL; - rcu_read_unlock(); - return ret; -} - -/* - * Receive command from session daemon and process it. - * - * Return 1 on success else a negative value or 0. 
- */ -int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data *ctx, - int sock, struct pollfd *consumer_sockpoll) -{ - int ret_func; - enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS; - struct lttcomm_consumer_msg msg; - - health_code_update(); - - { - ssize_t ret_recv; - - ret_recv = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg)); - if (ret_recv != sizeof(msg)) { - if (ret_recv > 0) { - lttng_consumer_send_error(ctx, - LTTCOMM_CONSUMERD_ERROR_RECV_CMD); - ret_recv = -1; - } - return ret_recv; - } - } - - health_code_update(); - - /* Deprecated command */ - LTTNG_ASSERT(msg.cmd_type != LTTNG_CONSUMER_STOP); - - health_code_update(); - - /* relayd needs RCU read-side protection */ - rcu_read_lock(); - - switch (msg.cmd_type) { - case LTTNG_CONSUMER_ADD_RELAYD_SOCKET: - { - /* Session daemon status message are handled in the following call. */ - consumer_add_relayd_socket(msg.u.relayd_sock.net_index, - msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll, - &msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id, - msg.u.relayd_sock.relayd_session_id); - goto end_nosignal; - } - case LTTNG_CONSUMER_ADD_CHANNEL: - { - struct lttng_consumer_channel *new_channel; - int ret_send_status, ret_add_channel = 0; - const uint64_t chunk_id = msg.u.channel.chunk_id.value; - - health_code_update(); - - /* First send a status message before receiving the fds. */ - ret_send_status = consumer_send_status_msg(sock, ret_code); - if (ret_send_status < 0) { - /* Somehow, the session daemon is not responding anymore. */ - goto error_fatal; - } - - health_code_update(); - - DBG("consumer_add_channel %" PRIu64, msg.u.channel.channel_key); - new_channel = consumer_allocate_channel(msg.u.channel.channel_key, - msg.u.channel.session_id, - msg.u.channel.chunk_id.is_set ? 
- &chunk_id : NULL, - msg.u.channel.pathname, - msg.u.channel.name, - msg.u.channel.relayd_id, msg.u.channel.output, - msg.u.channel.tracefile_size, - msg.u.channel.tracefile_count, 0, - msg.u.channel.monitor, - msg.u.channel.live_timer_interval, - msg.u.channel.is_live, - NULL, NULL); - if (new_channel == NULL) { - lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR); - goto end_nosignal; - } - new_channel->nb_init_stream_left = msg.u.channel.nb_init_streams; - switch (msg.u.channel.output) { - case LTTNG_EVENT_SPLICE: - new_channel->output = CONSUMER_CHANNEL_SPLICE; - break; - case LTTNG_EVENT_MMAP: - new_channel->output = CONSUMER_CHANNEL_MMAP; - break; - default: - ERR("Channel output unknown %d", msg.u.channel.output); - goto end_nosignal; - } - - /* Translate and save channel type. */ - switch (msg.u.channel.type) { - case CONSUMER_CHANNEL_TYPE_DATA: - case CONSUMER_CHANNEL_TYPE_METADATA: - new_channel->type = msg.u.channel.type; - break; - default: - abort(); - goto end_nosignal; - }; - - health_code_update(); - - if (ctx->on_recv_channel != NULL) { - int ret_recv_channel = - ctx->on_recv_channel(new_channel); - if (ret_recv_channel == 0) { - ret_add_channel = consumer_add_channel( - new_channel, ctx); - } else if (ret_recv_channel < 0) { - goto end_nosignal; - } - } else { - ret_add_channel = - consumer_add_channel(new_channel, ctx); - } - if (msg.u.channel.type == CONSUMER_CHANNEL_TYPE_DATA && - !ret_add_channel) { - int monitor_start_ret; - - DBG("Consumer starting monitor timer"); - consumer_timer_live_start(new_channel, - msg.u.channel.live_timer_interval); - monitor_start_ret = consumer_timer_monitor_start( - new_channel, - msg.u.channel.monitor_timer_interval); - if (monitor_start_ret < 0) { - ERR("Starting channel monitoring timer failed"); - goto end_nosignal; - } - } - - health_code_update(); - - /* If we received an error in add_channel, we need to report it. 
*/ - if (ret_add_channel < 0) { - ret_send_status = consumer_send_status_msg( - sock, ret_add_channel); - if (ret_send_status < 0) { - goto error_fatal; - } - goto end_nosignal; - } - - goto end_nosignal; - } - case LTTNG_CONSUMER_ADD_STREAM: - { - int fd; - struct lttng_pipe *stream_pipe; - struct lttng_consumer_stream *new_stream; - struct lttng_consumer_channel *channel; - int alloc_ret = 0; - int ret_send_status, ret_poll, ret_get_max_subbuf_size; - ssize_t ret_pipe_write, ret_recv; - - /* - * Get stream's channel reference. Needed when adding the stream to the - * global hash table. - */ - channel = consumer_find_channel(msg.u.stream.channel_key); - if (!channel) { - /* - * We could not find the channel. Can happen if cpu hotplug - * happens while tearing down. - */ - ERR("Unable to find channel key %" PRIu64, msg.u.stream.channel_key); - ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; - } - - health_code_update(); - - /* First send a status message before receiving the fds. */ - ret_send_status = consumer_send_status_msg(sock, ret_code); - if (ret_send_status < 0) { - /* Somehow, the session daemon is not responding anymore. */ - goto error_add_stream_fatal; - } - - health_code_update(); - - if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) { - /* Channel was not found. */ - goto error_add_stream_nosignal; - } - - /* Blocking call */ - health_poll_entry(); - ret_poll = lttng_consumer_poll_socket(consumer_sockpoll); - health_poll_exit(); - if (ret_poll) { - goto error_add_stream_fatal; - } - - health_code_update(); - - /* Get stream file descriptor from socket */ - ret_recv = lttcomm_recv_fds_unix_sock(sock, &fd, 1); - if (ret_recv != sizeof(fd)) { - lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD); - ret_func = ret_recv; - goto end; - } - - health_code_update(); - - /* - * Send status code to session daemon only if the recv works. 
If the - * above recv() failed, the session daemon is notified through the - * error socket and the teardown is eventually done. - */ - ret_send_status = consumer_send_status_msg(sock, ret_code); - if (ret_send_status < 0) { - /* Somehow, the session daemon is not responding anymore. */ - goto error_add_stream_nosignal; - } - - health_code_update(); - - pthread_mutex_lock(&channel->lock); - new_stream = consumer_stream_create( - channel, - channel->key, - fd, - channel->name, - channel->relayd_id, - channel->session_id, - channel->trace_chunk, - msg.u.stream.cpu, - &alloc_ret, - channel->type, - channel->monitor); - if (new_stream == NULL) { - switch (alloc_ret) { - case -ENOMEM: - case -EINVAL: - default: - lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR); - break; - } - pthread_mutex_unlock(&channel->lock); - goto error_add_stream_nosignal; - } - - new_stream->wait_fd = fd; - ret_get_max_subbuf_size = kernctl_get_max_subbuf_size( - new_stream->wait_fd, &new_stream->max_sb_size); - if (ret_get_max_subbuf_size < 0) { - pthread_mutex_unlock(&channel->lock); - ERR("Failed to get kernel maximal subbuffer size"); - goto error_add_stream_nosignal; - } - - consumer_stream_update_channel_attributes(new_stream, - channel); - - /* - * We've just assigned the channel to the stream so increment the - * refcount right now. We don't need to increment the refcount for - * streams in no monitor because we handle manually the cleanup of - * those. It is very important to make sure there is NO prior - * consumer_del_stream() calls or else the refcount will be unbalanced. - */ - if (channel->monitor) { - uatomic_inc(&new_stream->chan->refcount); - } - - /* - * The buffer flush is done on the session daemon side for the kernel - * so no need for the stream "hangup_flush_done" variable to be - * tracked. This is important for a kernel stream since we don't rely - * on the flush state of the stream to read data. It's not the case for - * user space tracing. 
- */ - new_stream->hangup_flush_done = 0; - - health_code_update(); - - pthread_mutex_lock(&new_stream->lock); - if (ctx->on_recv_stream) { - int ret_recv_stream = ctx->on_recv_stream(new_stream); - if (ret_recv_stream < 0) { - pthread_mutex_unlock(&new_stream->lock); - pthread_mutex_unlock(&channel->lock); - consumer_stream_free(new_stream); - goto error_add_stream_nosignal; - } - } - health_code_update(); - - if (new_stream->metadata_flag) { - channel->metadata_stream = new_stream; - } - - /* Do not monitor this stream. */ - if (!channel->monitor) { - DBG("Kernel consumer add stream %s in no monitor mode with " - "relayd id %" PRIu64, new_stream->name, - new_stream->net_seq_idx); - cds_list_add(&new_stream->send_node, &channel->streams.head); - pthread_mutex_unlock(&new_stream->lock); - pthread_mutex_unlock(&channel->lock); - goto end_add_stream; - } - - /* Send stream to relayd if the stream has an ID. */ - if (new_stream->net_seq_idx != (uint64_t) -1ULL) { - int ret_send_relayd_stream; - - ret_send_relayd_stream = consumer_send_relayd_stream( - new_stream, new_stream->chan->pathname); - if (ret_send_relayd_stream < 0) { - pthread_mutex_unlock(&new_stream->lock); - pthread_mutex_unlock(&channel->lock); - consumer_stream_free(new_stream); - goto error_add_stream_nosignal; - } - - /* - * If adding an extra stream to an already - * existing channel (e.g. cpu hotplug), we need - * to send the "streams_sent" command to relayd. - */ - if (channel->streams_sent_to_relayd) { - int ret_send_relayd_streams_sent; - - ret_send_relayd_streams_sent = - consumer_send_relayd_streams_sent( - new_stream->net_seq_idx); - if (ret_send_relayd_streams_sent < 0) { - pthread_mutex_unlock(&new_stream->lock); - pthread_mutex_unlock(&channel->lock); - goto error_add_stream_nosignal; - } - } - } - pthread_mutex_unlock(&new_stream->lock); - pthread_mutex_unlock(&channel->lock); - - /* Get the right pipe where the stream will be sent. 
*/ - if (new_stream->metadata_flag) { - consumer_add_metadata_stream(new_stream); - stream_pipe = ctx->consumer_metadata_pipe; - } else { - consumer_add_data_stream(new_stream); - stream_pipe = ctx->consumer_data_pipe; - } - - /* Visible to other threads */ - new_stream->globally_visible = 1; - - health_code_update(); - - ret_pipe_write = lttng_pipe_write( - stream_pipe, &new_stream, sizeof(new_stream)); - if (ret_pipe_write < 0) { - ERR("Consumer write %s stream to pipe %d", - new_stream->metadata_flag ? "metadata" : "data", - lttng_pipe_get_writefd(stream_pipe)); - if (new_stream->metadata_flag) { - consumer_del_stream_for_metadata(new_stream); - } else { - consumer_del_stream_for_data(new_stream); - } - goto error_add_stream_nosignal; - } - - DBG("Kernel consumer ADD_STREAM %s (fd: %d) %s with relayd id %" PRIu64, - new_stream->name, fd, new_stream->chan->pathname, new_stream->relayd_stream_id); -end_add_stream: - break; -error_add_stream_nosignal: - goto end_nosignal; -error_add_stream_fatal: - goto error_fatal; - } - case LTTNG_CONSUMER_STREAMS_SENT: - { - struct lttng_consumer_channel *channel; - int ret_send_status; - - /* - * Get stream's channel reference. Needed when adding the stream to the - * global hash table. - */ - channel = consumer_find_channel(msg.u.sent_streams.channel_key); - if (!channel) { - /* - * We could not find the channel. Can happen if cpu hotplug - * happens while tearing down. - */ - ERR("Unable to find channel key %" PRIu64, - msg.u.sent_streams.channel_key); - ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; - } - - health_code_update(); - - /* - * Send status code to session daemon. - */ - ret_send_status = consumer_send_status_msg(sock, ret_code); - if (ret_send_status < 0 || - ret_code != LTTCOMM_CONSUMERD_SUCCESS) { - /* Somehow, the session daemon is not responding anymore. 
*/ - goto error_streams_sent_nosignal; - } - - health_code_update(); - - /* - * We should not send this message if we don't monitor the - * streams in this channel. - */ - if (!channel->monitor) { - goto end_error_streams_sent; - } - - health_code_update(); - /* Send stream to relayd if the stream has an ID. */ - if (msg.u.sent_streams.net_seq_idx != (uint64_t) -1ULL) { - int ret_send_relay_streams; - - ret_send_relay_streams = consumer_send_relayd_streams_sent( - msg.u.sent_streams.net_seq_idx); - if (ret_send_relay_streams < 0) { - goto error_streams_sent_nosignal; - } - channel->streams_sent_to_relayd = true; - } -end_error_streams_sent: - break; -error_streams_sent_nosignal: - goto end_nosignal; - } - case LTTNG_CONSUMER_UPDATE_STREAM: - { - rcu_read_unlock(); - return -ENOSYS; - } - case LTTNG_CONSUMER_DESTROY_RELAYD: - { - uint64_t index = msg.u.destroy_relayd.net_seq_idx; - struct consumer_relayd_sock_pair *relayd; - int ret_send_status; - - DBG("Kernel consumer destroying relayd %" PRIu64, index); - - /* Get relayd reference if exists. */ - relayd = consumer_find_relayd(index); - if (relayd == NULL) { - DBG("Unable to find relayd %" PRIu64, index); - ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL; - } - - /* - * Each relayd socket pair has a refcount of stream attached to it - * which tells if the relayd is still active or not depending on the - * refcount value. - * - * This will set the destroy flag of the relayd object and destroy it - * if the refcount reaches zero when called. - * - * The destroy can happen either here or when a stream fd hangs up. - */ - if (relayd) { - consumer_flag_relayd_for_destroy(relayd); - } - - health_code_update(); - - ret_send_status = consumer_send_status_msg(sock, ret_code); - if (ret_send_status < 0) { - /* Somehow, the session daemon is not responding anymore. 
*/ - goto error_fatal; - } - - goto end_nosignal; - } - case LTTNG_CONSUMER_DATA_PENDING: - { - int32_t ret_data_pending; - uint64_t id = msg.u.data_pending.session_id; - ssize_t ret_send; - - DBG("Kernel consumer data pending command for id %" PRIu64, id); - - ret_data_pending = consumer_data_pending(id); - - health_code_update(); - - /* Send back returned value to session daemon */ - ret_send = lttcomm_send_unix_sock(sock, &ret_data_pending, - sizeof(ret_data_pending)); - if (ret_send < 0) { - PERROR("send data pending ret code"); - goto error_fatal; - } - - /* - * No need to send back a status message since the data pending - * returned value is the response. - */ - break; - } - case LTTNG_CONSUMER_SNAPSHOT_CHANNEL: - { - struct lttng_consumer_channel *channel; - uint64_t key = msg.u.snapshot_channel.key; - int ret_send_status; - - channel = consumer_find_channel(key); - if (!channel) { - ERR("Channel %" PRIu64 " not found", key); - ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; - } else { - pthread_mutex_lock(&channel->lock); - if (msg.u.snapshot_channel.metadata == 1) { - int ret_snapshot; - - ret_snapshot = lttng_kconsumer_snapshot_metadata( - channel, key, - msg.u.snapshot_channel.pathname, - msg.u.snapshot_channel.relayd_id, - ctx); - if (ret_snapshot < 0) { - ERR("Snapshot metadata failed"); - ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED; - } - } else { - int ret_snapshot; - - ret_snapshot = lttng_kconsumer_snapshot_channel( - channel, key, - msg.u.snapshot_channel.pathname, - msg.u.snapshot_channel.relayd_id, - msg.u.snapshot_channel - .nb_packets_per_stream, - ctx); - if (ret_snapshot < 0) { - ERR("Snapshot channel failed"); - ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED; - } - } - pthread_mutex_unlock(&channel->lock); - } - health_code_update(); - - ret_send_status = consumer_send_status_msg(sock, ret_code); - if (ret_send_status < 0) { - /* Somehow, the session daemon is not responding anymore. 
*/ - goto end_nosignal; - } - break; - } - case LTTNG_CONSUMER_DESTROY_CHANNEL: - { - uint64_t key = msg.u.destroy_channel.key; - struct lttng_consumer_channel *channel; - int ret_send_status; - - channel = consumer_find_channel(key); - if (!channel) { - ERR("Kernel consumer destroy channel %" PRIu64 " not found", key); - ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; - } - - health_code_update(); - - ret_send_status = consumer_send_status_msg(sock, ret_code); - if (ret_send_status < 0) { - /* Somehow, the session daemon is not responding anymore. */ - goto end_destroy_channel; - } - - health_code_update(); - - /* Stop right now if no channel was found. */ - if (!channel) { - goto end_destroy_channel; - } - - /* - * This command should ONLY be issued for channel with streams set in - * no monitor mode. - */ - LTTNG_ASSERT(!channel->monitor); - - /* - * The refcount should ALWAYS be 0 in the case of a channel in no - * monitor mode. - */ - LTTNG_ASSERT(!uatomic_sub_return(&channel->refcount, 1)); - - consumer_del_channel(channel); -end_destroy_channel: - goto end_nosignal; - } - case LTTNG_CONSUMER_DISCARDED_EVENTS: - { - ssize_t ret; - uint64_t count; - struct lttng_consumer_channel *channel; - uint64_t id = msg.u.discarded_events.session_id; - uint64_t key = msg.u.discarded_events.channel_key; - - DBG("Kernel consumer discarded events command for session id %" - PRIu64 ", channel key %" PRIu64, id, key); - - channel = consumer_find_channel(key); - if (!channel) { - ERR("Kernel consumer discarded events channel %" - PRIu64 " not found", key); - count = 0; - } else { - count = channel->discarded_events; - } - - health_code_update(); - - /* Send back returned value to session daemon */ - ret = lttcomm_send_unix_sock(sock, &count, sizeof(count)); - if (ret < 0) { - PERROR("send discarded events"); - goto error_fatal; - } - - break; - } - case LTTNG_CONSUMER_LOST_PACKETS: - { - ssize_t ret; - uint64_t count; - struct lttng_consumer_channel *channel; - uint64_t id = 
msg.u.lost_packets.session_id; - uint64_t key = msg.u.lost_packets.channel_key; - - DBG("Kernel consumer lost packets command for session id %" - PRIu64 ", channel key %" PRIu64, id, key); - - channel = consumer_find_channel(key); - if (!channel) { - ERR("Kernel consumer lost packets channel %" - PRIu64 " not found", key); - count = 0; - } else { - count = channel->lost_packets; - } - - health_code_update(); - - /* Send back returned value to session daemon */ - ret = lttcomm_send_unix_sock(sock, &count, sizeof(count)); - if (ret < 0) { - PERROR("send lost packets"); - goto error_fatal; - } - - break; - } - case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE: - { - int channel_monitor_pipe; - int ret_send_status, ret_set_channel_monitor_pipe; - ssize_t ret_recv; - - ret_code = LTTCOMM_CONSUMERD_SUCCESS; - /* Successfully received the command's type. */ - ret_send_status = consumer_send_status_msg(sock, ret_code); - if (ret_send_status < 0) { - goto error_fatal; - } - - ret_recv = lttcomm_recv_fds_unix_sock( - sock, &channel_monitor_pipe, 1); - if (ret_recv != sizeof(channel_monitor_pipe)) { - ERR("Failed to receive channel monitor pipe"); - goto error_fatal; - } - - DBG("Received channel monitor pipe (%d)", channel_monitor_pipe); - ret_set_channel_monitor_pipe = - consumer_timer_thread_set_channel_monitor_pipe( - channel_monitor_pipe); - if (!ret_set_channel_monitor_pipe) { - int flags; - int ret_fcntl; - - ret_code = LTTCOMM_CONSUMERD_SUCCESS; - /* Set the pipe as non-blocking. 
*/ - ret_fcntl = fcntl(channel_monitor_pipe, F_GETFL, 0); - if (ret_fcntl == -1) { - PERROR("fcntl get flags of the channel monitoring pipe"); - goto error_fatal; - } - flags = ret_fcntl; - - ret_fcntl = fcntl(channel_monitor_pipe, F_SETFL, - flags | O_NONBLOCK); - if (ret_fcntl == -1) { - PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe"); - goto error_fatal; - } - DBG("Channel monitor pipe set as non-blocking"); - } else { - ret_code = LTTCOMM_CONSUMERD_ALREADY_SET; - } - ret_send_status = consumer_send_status_msg(sock, ret_code); - if (ret_send_status < 0) { - goto error_fatal; - } - break; - } - case LTTNG_CONSUMER_ROTATE_CHANNEL: - { - struct lttng_consumer_channel *channel; - uint64_t key = msg.u.rotate_channel.key; - int ret_send_status; - - DBG("Consumer rotate channel %" PRIu64, key); - - channel = consumer_find_channel(key); - if (!channel) { - ERR("Channel %" PRIu64 " not found", key); - ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; - } else { - /* - * Sample the rotate position of all the streams in this channel. - */ - int ret_rotate_channel; - - ret_rotate_channel = lttng_consumer_rotate_channel( - channel, key, - msg.u.rotate_channel.relayd_id, - msg.u.rotate_channel.metadata, ctx); - if (ret_rotate_channel < 0) { - ERR("Rotate channel failed"); - ret_code = LTTCOMM_CONSUMERD_ROTATION_FAIL; - } - - health_code_update(); - } - - ret_send_status = consumer_send_status_msg(sock, ret_code); - if (ret_send_status < 0) { - /* Somehow, the session daemon is not responding anymore. */ - goto error_rotate_channel; - } - if (channel) { - /* Rotate the streams that are ready right now. 
*/ - int ret_rotate; - - ret_rotate = lttng_consumer_rotate_ready_streams( - channel, key, ctx); - if (ret_rotate < 0) { - ERR("Rotate ready streams failed"); - } - } - break; -error_rotate_channel: - goto end_nosignal; - } - case LTTNG_CONSUMER_CLEAR_CHANNEL: - { - struct lttng_consumer_channel *channel; - uint64_t key = msg.u.clear_channel.key; - int ret_send_status; - - channel = consumer_find_channel(key); - if (!channel) { - DBG("Channel %" PRIu64 " not found", key); - ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; - } else { - int ret_clear_channel; - - ret_clear_channel = - lttng_consumer_clear_channel(channel); - if (ret_clear_channel) { - ERR("Clear channel failed"); - ret_code = ret_clear_channel; - } - - health_code_update(); - } - - ret_send_status = consumer_send_status_msg(sock, ret_code); - if (ret_send_status < 0) { - /* Somehow, the session daemon is not responding anymore. */ - goto end_nosignal; - } - - break; - } - case LTTNG_CONSUMER_INIT: - { - int ret_send_status; - - ret_code = lttng_consumer_init_command(ctx, - msg.u.init.sessiond_uuid); - health_code_update(); - ret_send_status = consumer_send_status_msg(sock, ret_code); - if (ret_send_status < 0) { - /* Somehow, the session daemon is not responding anymore. */ - goto end_nosignal; - } - break; - } - case LTTNG_CONSUMER_CREATE_TRACE_CHUNK: - { - const struct lttng_credentials credentials = { - .uid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.uid), - .gid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.gid), - }; - const bool is_local_trace = - !msg.u.create_trace_chunk.relayd_id.is_set; - const uint64_t relayd_id = - msg.u.create_trace_chunk.relayd_id.value; - const char *chunk_override_name = - *msg.u.create_trace_chunk.override_name ? 
- msg.u.create_trace_chunk.override_name : - NULL; - struct lttng_directory_handle *chunk_directory_handle = NULL; - - /* - * The session daemon will only provide a chunk directory file - * descriptor for local traces. - */ - if (is_local_trace) { - int chunk_dirfd; - int ret_send_status; - ssize_t ret_recv; - - /* Acnowledge the reception of the command. */ - ret_send_status = consumer_send_status_msg( - sock, LTTCOMM_CONSUMERD_SUCCESS); - if (ret_send_status < 0) { - /* Somehow, the session daemon is not responding anymore. */ - goto end_nosignal; - } - - ret_recv = lttcomm_recv_fds_unix_sock( - sock, &chunk_dirfd, 1); - if (ret_recv != sizeof(chunk_dirfd)) { - ERR("Failed to receive trace chunk directory file descriptor"); - goto error_fatal; - } - - DBG("Received trace chunk directory fd (%d)", - chunk_dirfd); - chunk_directory_handle = lttng_directory_handle_create_from_dirfd( - chunk_dirfd); - if (!chunk_directory_handle) { - ERR("Failed to initialize chunk directory handle from directory file descriptor"); - if (close(chunk_dirfd)) { - PERROR("Failed to close chunk directory file descriptor"); - } - goto error_fatal; - } - } - - ret_code = lttng_consumer_create_trace_chunk( - !is_local_trace ? &relayd_id : NULL, - msg.u.create_trace_chunk.session_id, - msg.u.create_trace_chunk.chunk_id, - (time_t) msg.u.create_trace_chunk - .creation_timestamp, - chunk_override_name, - msg.u.create_trace_chunk.credentials.is_set ? - &credentials : - NULL, - chunk_directory_handle); - lttng_directory_handle_put(chunk_directory_handle); - goto end_msg_sessiond; - } - case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK: - { - enum lttng_trace_chunk_command_type close_command = - msg.u.close_trace_chunk.close_command.value; - const uint64_t relayd_id = - msg.u.close_trace_chunk.relayd_id.value; - struct lttcomm_consumer_close_trace_chunk_reply reply; - char path[LTTNG_PATH_MAX]; - ssize_t ret_send; - - ret_code = lttng_consumer_close_trace_chunk( - msg.u.close_trace_chunk.relayd_id.is_set ? 
- &relayd_id : - NULL, - msg.u.close_trace_chunk.session_id, - msg.u.close_trace_chunk.chunk_id, - (time_t) msg.u.close_trace_chunk.close_timestamp, - msg.u.close_trace_chunk.close_command.is_set ? - &close_command : - NULL, path); - reply.ret_code = ret_code; - reply.path_length = strlen(path) + 1; - ret_send = lttcomm_send_unix_sock(sock, &reply, sizeof(reply)); - if (ret_send != sizeof(reply)) { - goto error_fatal; - } - ret_send = lttcomm_send_unix_sock( - sock, path, reply.path_length); - if (ret_send != reply.path_length) { - goto error_fatal; - } - goto end_nosignal; - } - case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS: - { - const uint64_t relayd_id = - msg.u.trace_chunk_exists.relayd_id.value; - - ret_code = lttng_consumer_trace_chunk_exists( - msg.u.trace_chunk_exists.relayd_id.is_set ? - &relayd_id : NULL, - msg.u.trace_chunk_exists.session_id, - msg.u.trace_chunk_exists.chunk_id); - goto end_msg_sessiond; - } - case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS: - { - const uint64_t key = msg.u.open_channel_packets.key; - struct lttng_consumer_channel *channel = - consumer_find_channel(key); - - if (channel) { - pthread_mutex_lock(&channel->lock); - ret_code = lttng_consumer_open_channel_packets(channel); - pthread_mutex_unlock(&channel->lock); - } else { - WARN("Channel %" PRIu64 " not found", key); - ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; - } - - health_code_update(); - goto end_msg_sessiond; - } - default: - goto end_nosignal; - } - -end_nosignal: - /* - * Return 1 to indicate success since the 0 value can be a socket - * shutdown during the recv() or send() call. - */ - ret_func = 1; - goto end; -error_fatal: - /* This will issue a consumer stop. */ - ret_func = -1; - goto end; -end_msg_sessiond: - /* - * The returned value here is not useful since either way we'll return 1 to - * the caller because the session daemon socket management is done - * elsewhere. Returning a negative code or 0 will shutdown the consumer. 
- */ - { - int ret_send_status; - - ret_send_status = consumer_send_status_msg(sock, ret_code); - if (ret_send_status < 0) { - goto error_fatal; - } - } - - ret_func = 1; - -end: - health_code_update(); - rcu_read_unlock(); - return ret_func; -} - -/* - * Sync metadata meaning request them to the session daemon and snapshot to the - * metadata thread can consumer them. - * - * Metadata stream lock MUST be acquired. - */ -enum sync_metadata_status lttng_kconsumer_sync_metadata( - struct lttng_consumer_stream *metadata) -{ - int ret; - enum sync_metadata_status status; - - LTTNG_ASSERT(metadata); - - ret = kernctl_buffer_flush(metadata->wait_fd); - if (ret < 0) { - ERR("Failed to flush kernel stream"); - status = SYNC_METADATA_STATUS_ERROR; - goto end; - } - - ret = kernctl_snapshot(metadata->wait_fd); - if (ret < 0) { - if (errno == EAGAIN) { - /* No new metadata, exit. */ - DBG("Sync metadata, no new kernel metadata"); - status = SYNC_METADATA_STATUS_NO_DATA; - } else { - ERR("Sync metadata, taking kernel snapshot failed."); - status = SYNC_METADATA_STATUS_ERROR; - } - } else { - status = SYNC_METADATA_STATUS_NEW_DATA; - } - -end: - return status; -} - -static -int extract_common_subbuffer_info(struct lttng_consumer_stream *stream, - struct stream_subbuffer *subbuf) -{ - int ret; - - ret = kernctl_get_subbuf_size( - stream->wait_fd, &subbuf->info.data.subbuf_size); - if (ret) { - goto end; - } - - ret = kernctl_get_padded_subbuf_size( - stream->wait_fd, &subbuf->info.data.padded_subbuf_size); - if (ret) { - goto end; - } - -end: - return ret; -} - -static -int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream, - struct stream_subbuffer *subbuf) -{ - int ret; - - ret = extract_common_subbuffer_info(stream, subbuf); - if (ret) { - goto end; - } - - ret = kernctl_get_metadata_version( - stream->wait_fd, &subbuf->info.metadata.version); - if (ret) { - goto end; - } - -end: - return ret; -} - -static -int extract_data_subbuffer_info(struct 
lttng_consumer_stream *stream, - struct stream_subbuffer *subbuf) -{ - int ret; - - ret = extract_common_subbuffer_info(stream, subbuf); - if (ret) { - goto end; - } - - ret = kernctl_get_packet_size( - stream->wait_fd, &subbuf->info.data.packet_size); - if (ret < 0) { - PERROR("Failed to get sub-buffer packet size"); - goto end; - } - - ret = kernctl_get_content_size( - stream->wait_fd, &subbuf->info.data.content_size); - if (ret < 0) { - PERROR("Failed to get sub-buffer content size"); - goto end; - } - - ret = kernctl_get_timestamp_begin( - stream->wait_fd, &subbuf->info.data.timestamp_begin); - if (ret < 0) { - PERROR("Failed to get sub-buffer begin timestamp"); - goto end; - } - - ret = kernctl_get_timestamp_end( - stream->wait_fd, &subbuf->info.data.timestamp_end); - if (ret < 0) { - PERROR("Failed to get sub-buffer end timestamp"); - goto end; - } - - ret = kernctl_get_events_discarded( - stream->wait_fd, &subbuf->info.data.events_discarded); - if (ret) { - PERROR("Failed to get sub-buffer events discarded count"); - goto end; - } - - ret = kernctl_get_sequence_number(stream->wait_fd, - &subbuf->info.data.sequence_number.value); - if (ret) { - /* May not be supported by older LTTng-modules. */ - if (ret != -ENOTTY) { - PERROR("Failed to get sub-buffer sequence number"); - goto end; - } - } else { - subbuf->info.data.sequence_number.is_set = true; - } - - ret = kernctl_get_stream_id( - stream->wait_fd, &subbuf->info.data.stream_id); - if (ret < 0) { - PERROR("Failed to get stream id"); - goto end; - } - - ret = kernctl_get_instance_id(stream->wait_fd, - &subbuf->info.data.stream_instance_id.value); - if (ret) { - /* May not be supported by older LTTng-modules. 
*/ - if (ret != -ENOTTY) { - PERROR("Failed to get stream instance id"); - goto end; - } - } else { - subbuf->info.data.stream_instance_id.is_set = true; - } -end: - return ret; -} - -static -enum get_next_subbuffer_status get_subbuffer_common( - struct lttng_consumer_stream *stream, - struct stream_subbuffer *subbuffer) -{ - int ret; - enum get_next_subbuffer_status status; - - ret = kernctl_get_next_subbuf(stream->wait_fd); - switch (ret) { - case 0: - status = GET_NEXT_SUBBUFFER_STATUS_OK; - break; - case -ENODATA: - case -EAGAIN: - /* - * The caller only expects -ENODATA when there is no data to - * read, but the kernel tracer returns -EAGAIN when there is - * currently no data for a non-finalized stream, and -ENODATA - * when there is no data for a finalized stream. Those can be - * combined into a -ENODATA return value. - */ - status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA; - goto end; - default: - status = GET_NEXT_SUBBUFFER_STATUS_ERROR; - goto end; - } - - ret = stream->read_subbuffer_ops.extract_subbuffer_info( - stream, subbuffer); - if (ret) { - status = GET_NEXT_SUBBUFFER_STATUS_ERROR; - } -end: - return status; -} - -static -enum get_next_subbuffer_status get_next_subbuffer_splice( - struct lttng_consumer_stream *stream, - struct stream_subbuffer *subbuffer) -{ - const enum get_next_subbuffer_status status = - get_subbuffer_common(stream, subbuffer); - - if (status != GET_NEXT_SUBBUFFER_STATUS_OK) { - goto end; - } - - subbuffer->buffer.fd = stream->wait_fd; -end: - return status; -} - -static -enum get_next_subbuffer_status get_next_subbuffer_mmap( - struct lttng_consumer_stream *stream, - struct stream_subbuffer *subbuffer) -{ - int ret; - enum get_next_subbuffer_status status; - const char *addr; - - status = get_subbuffer_common(stream, subbuffer); - if (status != GET_NEXT_SUBBUFFER_STATUS_OK) { - goto end; - } - - ret = get_current_subbuf_addr(stream, &addr); - if (ret) { - status = GET_NEXT_SUBBUFFER_STATUS_ERROR; - goto end; - } - - 
subbuffer->buffer.buffer = lttng_buffer_view_init( - addr, 0, subbuffer->info.data.padded_subbuf_size); -end: - return status; -} - -static -enum get_next_subbuffer_status get_next_subbuffer_metadata_check(struct lttng_consumer_stream *stream, - struct stream_subbuffer *subbuffer) -{ - int ret; - const char *addr; - bool coherent; - enum get_next_subbuffer_status status; - - ret = kernctl_get_next_subbuf_metadata_check(stream->wait_fd, - &coherent); - if (ret) { - goto end; - } - - ret = stream->read_subbuffer_ops.extract_subbuffer_info( - stream, subbuffer); - if (ret) { - goto end; - } - - LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent); - - ret = get_current_subbuf_addr(stream, &addr); - if (ret) { - goto end; - } - - subbuffer->buffer.buffer = lttng_buffer_view_init( - addr, 0, subbuffer->info.data.padded_subbuf_size); - DBG("Got metadata packet with padded_subbuf_size = %lu, coherent = %s", - subbuffer->info.metadata.padded_subbuf_size, - coherent ? "true" : "false"); -end: - /* - * The caller only expects -ENODATA when there is no data to read, but - * the kernel tracer returns -EAGAIN when there is currently no data - * for a non-finalized stream, and -ENODATA when there is no data for a - * finalized stream. Those can be combined into a -ENODATA return value. - */ - switch (ret) { - case 0: - status = GET_NEXT_SUBBUFFER_STATUS_OK; - break; - case -ENODATA: - case -EAGAIN: - /* - * The caller only expects -ENODATA when there is no data to - * read, but the kernel tracer returns -EAGAIN when there is - * currently no data for a non-finalized stream, and -ENODATA - * when there is no data for a finalized stream. Those can be - * combined into a -ENODATA return value. 
- */ - status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA; - break; - default: - status = GET_NEXT_SUBBUFFER_STATUS_ERROR; - break; - } - - return status; -} - -static -int put_next_subbuffer(struct lttng_consumer_stream *stream, - struct stream_subbuffer *subbuffer) -{ - const int ret = kernctl_put_next_subbuf(stream->wait_fd); - - if (ret) { - if (ret == -EFAULT) { - PERROR("Error in unreserving sub buffer"); - } else if (ret == -EIO) { - /* Should never happen with newer LTTng versions */ - PERROR("Reader has been pushed by the writer, last sub-buffer corrupted"); - } - } - - return ret; -} - -static -bool is_get_next_check_metadata_available(int tracer_fd) -{ - const int ret = kernctl_get_next_subbuf_metadata_check(tracer_fd, NULL); - const bool available = ret != -ENOTTY; - - if (ret == 0) { - /* get succeeded, make sure to put the subbuffer. */ - kernctl_put_subbuf(tracer_fd); - } - - return available; -} - -static -int signal_metadata(struct lttng_consumer_stream *stream, - struct lttng_consumer_local_data *ctx) -{ - ASSERT_LOCKED(stream->metadata_rdv_lock); - return pthread_cond_broadcast(&stream->metadata_rdv) ? -errno : 0; -} - -static -int lttng_kconsumer_set_stream_ops( - struct lttng_consumer_stream *stream) -{ - int ret = 0; - - if (stream->metadata_flag && stream->chan->is_live) { - DBG("Attempting to enable metadata bucketization for live consumers"); - if (is_get_next_check_metadata_available(stream->wait_fd)) { - DBG("Kernel tracer supports get_next_subbuffer_metadata_check, metadata will be accumulated until a coherent state is reached"); - stream->read_subbuffer_ops.get_next_subbuffer = - get_next_subbuffer_metadata_check; - ret = consumer_stream_enable_metadata_bucketization( - stream); - if (ret) { - goto end; - } - } else { - /* - * The kernel tracer version is too old to indicate - * when the metadata stream has reached a "coherent" - * (parseable) point. 
- * - * This means that a live viewer may see an incoherent - * sequence of metadata and fail to parse it. - */ - WARN("Kernel tracer does not support get_next_subbuffer_metadata_check which may cause live clients to fail to parse the metadata stream"); - metadata_bucket_destroy(stream->metadata_bucket); - stream->metadata_bucket = NULL; - } - - stream->read_subbuffer_ops.on_sleep = signal_metadata; - } - - if (!stream->read_subbuffer_ops.get_next_subbuffer) { - if (stream->chan->output == CONSUMER_CHANNEL_MMAP) { - stream->read_subbuffer_ops.get_next_subbuffer = - get_next_subbuffer_mmap; - } else { - stream->read_subbuffer_ops.get_next_subbuffer = - get_next_subbuffer_splice; - } - } - - if (stream->metadata_flag) { - stream->read_subbuffer_ops.extract_subbuffer_info = - extract_metadata_subbuffer_info; - } else { - stream->read_subbuffer_ops.extract_subbuffer_info = - extract_data_subbuffer_info; - if (stream->chan->is_live) { - stream->read_subbuffer_ops.send_live_beacon = - consumer_flush_kernel_index; - } - } - - stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer; -end: - return ret; -} - -int lttng_kconsumer_on_recv_stream(struct lttng_consumer_stream *stream) -{ - int ret; - - LTTNG_ASSERT(stream); - - /* - * Don't create anything if this is set for streaming or if there is - * no current trace chunk on the parent channel. 
- */ - if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor && - stream->chan->trace_chunk) { - ret = consumer_stream_create_output_files(stream, true); - if (ret) { - goto error; - } - } - - if (stream->output == LTTNG_EVENT_MMAP) { - /* get the len of the mmap region */ - unsigned long mmap_len; - - ret = kernctl_get_mmap_len(stream->wait_fd, &mmap_len); - if (ret != 0) { - PERROR("kernctl_get_mmap_len"); - goto error_close_fd; - } - stream->mmap_len = (size_t) mmap_len; - - stream->mmap_base = mmap(NULL, stream->mmap_len, PROT_READ, - MAP_PRIVATE, stream->wait_fd, 0); - if (stream->mmap_base == MAP_FAILED) { - PERROR("Error mmaping"); - ret = -1; - goto error_close_fd; - } - } - - ret = lttng_kconsumer_set_stream_ops(stream); - if (ret) { - goto error_close_fd; - } - - /* we return 0 to let the library handle the FD internally */ - return 0; - -error_close_fd: - if (stream->out_fd >= 0) { - int err; - - err = close(stream->out_fd); - LTTNG_ASSERT(!err); - stream->out_fd = -1; - } -error: - return ret; -} - -/* - * Check if data is still being extracted from the buffers for a specific - * stream. Consumer data lock MUST be acquired before calling this function - * and the stream lock. - * - * Return 1 if the traced data are still getting read else 0 meaning that the - * data is available for trace viewer reading. - */ -int lttng_kconsumer_data_pending(struct lttng_consumer_stream *stream) -{ - int ret; - - LTTNG_ASSERT(stream); - - if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) { - ret = 0; - goto end; - } - - ret = kernctl_get_next_subbuf(stream->wait_fd); - if (ret == 0) { - /* There is still data so let's put back this subbuffer. */ - ret = kernctl_put_subbuf(stream->wait_fd); - LTTNG_ASSERT(ret == 0); - ret = 1; /* Data is pending */ - goto end; - } - - /* Data is NOT pending and ready to be read. 
*/ - ret = 0; - -end: - return ret; -} diff --git a/src/common/kernel-consumer/kernel-consumer.cpp b/src/common/kernel-consumer/kernel-consumer.cpp new file mode 100644 index 000000000..aa443027e --- /dev/null +++ b/src/common/kernel-consumer/kernel-consumer.cpp @@ -0,0 +1,1920 @@ +/* + * Copyright (C) 2011 Julien Desfossez + * Copyright (C) 2011 Mathieu Desnoyers + * Copyright (C) 2017 Jérémie Galarneau + * + * SPDX-License-Identifier: GPL-2.0-only + * + */ + +#define _LGPL_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "kernel-consumer.h" + +extern struct lttng_consumer_global_data the_consumer_data; +extern int consumer_poll_timeout; + +/* + * Take a snapshot for a specific fd + * + * Returns 0 on success, < 0 on error + */ +int lttng_kconsumer_take_snapshot(struct lttng_consumer_stream *stream) +{ + int ret = 0; + int infd = stream->wait_fd; + + ret = kernctl_snapshot(infd); + /* + * -EAGAIN is not an error, it just means that there is no data to + * be read. + */ + if (ret != 0 && ret != -EAGAIN) { + PERROR("Getting sub-buffer snapshot."); + } + + return ret; +} + +/* + * Sample consumed and produced positions for a specific fd. + * + * Returns 0 on success, < 0 on error. 
+ */ +int lttng_kconsumer_sample_snapshot_positions( + struct lttng_consumer_stream *stream) +{ + LTTNG_ASSERT(stream); + + return kernctl_snapshot_sample_positions(stream->wait_fd); +} + +/* + * Get the produced position + * + * Returns 0 on success, < 0 on error + */ +int lttng_kconsumer_get_produced_snapshot(struct lttng_consumer_stream *stream, + unsigned long *pos) +{ + int ret; + int infd = stream->wait_fd; + + ret = kernctl_snapshot_get_produced(infd, pos); + if (ret != 0) { + PERROR("kernctl_snapshot_get_produced"); + } + + return ret; +} + +/* + * Get the consumerd position + * + * Returns 0 on success, < 0 on error + */ +int lttng_kconsumer_get_consumed_snapshot(struct lttng_consumer_stream *stream, + unsigned long *pos) +{ + int ret; + int infd = stream->wait_fd; + + ret = kernctl_snapshot_get_consumed(infd, pos); + if (ret != 0) { + PERROR("kernctl_snapshot_get_consumed"); + } + + return ret; +} + +static +int get_current_subbuf_addr(struct lttng_consumer_stream *stream, + const char **addr) +{ + int ret; + unsigned long mmap_offset; + const char *mmap_base = (const char *) stream->mmap_base; + + ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset); + if (ret < 0) { + PERROR("Failed to get mmap read offset"); + goto error; + } + + *addr = mmap_base + mmap_offset; +error: + return ret; +} + +/* + * Take a snapshot of all the stream of a channel + * RCU read-side lock must be held across this function to ensure existence of + * channel. The channel lock must be held by the caller. + * + * Returns 0 on success, < 0 on error + */ +static int lttng_kconsumer_snapshot_channel( + struct lttng_consumer_channel *channel, + uint64_t key, char *path, uint64_t relayd_id, + uint64_t nb_packets_per_stream, + struct lttng_consumer_local_data *ctx) +{ + int ret; + struct lttng_consumer_stream *stream; + + DBG("Kernel consumer snapshot channel %" PRIu64, key); + + rcu_read_lock(); + + /* Splice is not supported yet for channel snapshot. 
*/ + if (channel->output != CONSUMER_CHANNEL_MMAP) { + ERR("Unsupported output type for channel \"%s\": mmap output is required to record a snapshot", + channel->name); + ret = -1; + goto end; + } + + cds_list_for_each_entry(stream, &channel->streams.head, send_node) { + unsigned long consumed_pos, produced_pos; + + health_code_update(); + + /* + * Lock stream because we are about to change its state. + */ + pthread_mutex_lock(&stream->lock); + + LTTNG_ASSERT(channel->trace_chunk); + if (!lttng_trace_chunk_get(channel->trace_chunk)) { + /* + * Can't happen barring an internal error as the channel + * holds a reference to the trace chunk. + */ + ERR("Failed to acquire reference to channel's trace chunk"); + ret = -1; + goto end_unlock; + } + LTTNG_ASSERT(!stream->trace_chunk); + stream->trace_chunk = channel->trace_chunk; + + /* + * Assign the received relayd ID so we can use it for streaming. The streams + * are not visible to anyone so this is OK to change it. + */ + stream->net_seq_idx = relayd_id; + channel->relayd_id = relayd_id; + if (relayd_id != (uint64_t) -1ULL) { + ret = consumer_send_relayd_stream(stream, path); + if (ret < 0) { + ERR("sending stream to relayd"); + goto end_unlock; + } + } else { + ret = consumer_stream_create_output_files(stream, + false); + if (ret < 0) { + goto end_unlock; + } + DBG("Kernel consumer snapshot stream (%" PRIu64 ")", + stream->key); + } + + ret = kernctl_buffer_flush_empty(stream->wait_fd); + if (ret < 0) { + /* + * Doing a buffer flush which does not take into + * account empty packets. This is not perfect + * for stream intersection, but required as a + * fall-back when "flush_empty" is not + * implemented by lttng-modules. 
+ */ + ret = kernctl_buffer_flush(stream->wait_fd); + if (ret < 0) { + ERR("Failed to flush kernel stream"); + goto end_unlock; + } + goto end_unlock; + } + + ret = lttng_kconsumer_take_snapshot(stream); + if (ret < 0) { + ERR("Taking kernel snapshot"); + goto end_unlock; + } + + ret = lttng_kconsumer_get_produced_snapshot(stream, &produced_pos); + if (ret < 0) { + ERR("Produced kernel snapshot position"); + goto end_unlock; + } + + ret = lttng_kconsumer_get_consumed_snapshot(stream, &consumed_pos); + if (ret < 0) { + ERR("Consumerd kernel snapshot position"); + goto end_unlock; + } + + consumed_pos = consumer_get_consume_start_pos(consumed_pos, + produced_pos, nb_packets_per_stream, + stream->max_sb_size); + + while ((long) (consumed_pos - produced_pos) < 0) { + ssize_t read_len; + unsigned long len, padded_len; + const char *subbuf_addr; + struct lttng_buffer_view subbuf_view; + + health_code_update(); + DBG("Kernel consumer taking snapshot at pos %lu", consumed_pos); + + ret = kernctl_get_subbuf(stream->wait_fd, &consumed_pos); + if (ret < 0) { + if (ret != -EAGAIN) { + PERROR("kernctl_get_subbuf snapshot"); + goto end_unlock; + } + DBG("Kernel consumer get subbuf failed. Skipping it."); + consumed_pos += stream->max_sb_size; + stream->chan->lost_packets++; + continue; + } + + ret = kernctl_get_subbuf_size(stream->wait_fd, &len); + if (ret < 0) { + ERR("Snapshot kernctl_get_subbuf_size"); + goto error_put_subbuf; + } + + ret = kernctl_get_padded_subbuf_size(stream->wait_fd, &padded_len); + if (ret < 0) { + ERR("Snapshot kernctl_get_padded_subbuf_size"); + goto error_put_subbuf; + } + + ret = get_current_subbuf_addr(stream, &subbuf_addr); + if (ret) { + goto error_put_subbuf; + } + + subbuf_view = lttng_buffer_view_init( + subbuf_addr, 0, padded_len); + read_len = lttng_consumer_on_read_subbuffer_mmap( + stream, &subbuf_view, + padded_len - len); + /* + * We write the padded len in local tracefiles but the data len + * when using a relay. 
Display the error but continue processing + * to try to release the subbuffer. + */ + if (relayd_id != (uint64_t) -1ULL) { + if (read_len != len) { + ERR("Error sending to the relay (ret: %zd != len: %lu)", + read_len, len); + } + } else { + if (read_len != padded_len) { + ERR("Error writing to tracefile (ret: %zd != len: %lu)", + read_len, padded_len); + } + } + + ret = kernctl_put_subbuf(stream->wait_fd); + if (ret < 0) { + ERR("Snapshot kernctl_put_subbuf"); + goto end_unlock; + } + consumed_pos += stream->max_sb_size; + } + + if (relayd_id == (uint64_t) -1ULL) { + if (stream->out_fd >= 0) { + ret = close(stream->out_fd); + if (ret < 0) { + PERROR("Kernel consumer snapshot close out_fd"); + goto end_unlock; + } + stream->out_fd = -1; + } + } else { + close_relayd_stream(stream); + stream->net_seq_idx = (uint64_t) -1ULL; + } + lttng_trace_chunk_put(stream->trace_chunk); + stream->trace_chunk = NULL; + pthread_mutex_unlock(&stream->lock); + } + + /* All good! */ + ret = 0; + goto end; + +error_put_subbuf: + ret = kernctl_put_subbuf(stream->wait_fd); + if (ret < 0) { + ERR("Snapshot kernctl_put_subbuf error path"); + } +end_unlock: + pthread_mutex_unlock(&stream->lock); +end: + rcu_read_unlock(); + return ret; +} + +/* + * Read the whole metadata available for a snapshot. + * RCU read-side lock must be held across this function to ensure existence of + * metadata_channel. The channel lock must be held by the caller. 
+ * + * Returns 0 on success, < 0 on error + */ +static int lttng_kconsumer_snapshot_metadata( + struct lttng_consumer_channel *metadata_channel, + uint64_t key, char *path, uint64_t relayd_id, + struct lttng_consumer_local_data *ctx) +{ + int ret, use_relayd = 0; + ssize_t ret_read; + struct lttng_consumer_stream *metadata_stream; + + LTTNG_ASSERT(ctx); + + DBG("Kernel consumer snapshot metadata with key %" PRIu64 " at path %s", + key, path); + + rcu_read_lock(); + + metadata_stream = metadata_channel->metadata_stream; + LTTNG_ASSERT(metadata_stream); + + pthread_mutex_lock(&metadata_stream->lock); + LTTNG_ASSERT(metadata_channel->trace_chunk); + LTTNG_ASSERT(metadata_stream->trace_chunk); + + /* Flag once that we have a valid relayd for the stream. */ + if (relayd_id != (uint64_t) -1ULL) { + use_relayd = 1; + } + + if (use_relayd) { + ret = consumer_send_relayd_stream(metadata_stream, path); + if (ret < 0) { + goto error_snapshot; + } + } else { + ret = consumer_stream_create_output_files(metadata_stream, + false); + if (ret < 0) { + goto error_snapshot; + } + } + + do { + health_code_update(); + + ret_read = lttng_consumer_read_subbuffer(metadata_stream, ctx, true); + if (ret_read < 0) { + ERR("Kernel snapshot reading metadata subbuffer (ret: %zd)", + ret_read); + ret = ret_read; + goto error_snapshot; + } + } while (ret_read > 0); + + if (use_relayd) { + close_relayd_stream(metadata_stream); + metadata_stream->net_seq_idx = (uint64_t) -1ULL; + } else { + if (metadata_stream->out_fd >= 0) { + ret = close(metadata_stream->out_fd); + if (ret < 0) { + PERROR("Kernel consumer snapshot metadata close out_fd"); + /* + * Don't go on error here since the snapshot was successful at this + * point but somehow the close failed. 
+ */ + } + metadata_stream->out_fd = -1; + lttng_trace_chunk_put(metadata_stream->trace_chunk); + metadata_stream->trace_chunk = NULL; + } + } + + ret = 0; +error_snapshot: + pthread_mutex_unlock(&metadata_stream->lock); + cds_list_del(&metadata_stream->send_node); + consumer_stream_destroy(metadata_stream, NULL); + metadata_channel->metadata_stream = NULL; + rcu_read_unlock(); + return ret; +} + +/* + * Receive command from session daemon and process it. + * + * Return 1 on success else a negative value or 0. + */ +int lttng_kconsumer_recv_cmd(struct lttng_consumer_local_data *ctx, + int sock, struct pollfd *consumer_sockpoll) +{ + int ret_func; + enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS; + struct lttcomm_consumer_msg msg; + + health_code_update(); + + { + ssize_t ret_recv; + + ret_recv = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg)); + if (ret_recv != sizeof(msg)) { + if (ret_recv > 0) { + lttng_consumer_send_error(ctx, + LTTCOMM_CONSUMERD_ERROR_RECV_CMD); + ret_recv = -1; + } + return ret_recv; + } + } + + health_code_update(); + + /* Deprecated command */ + LTTNG_ASSERT(msg.cmd_type != LTTNG_CONSUMER_STOP); + + health_code_update(); + + /* relayd needs RCU read-side protection */ + rcu_read_lock(); + + switch (msg.cmd_type) { + case LTTNG_CONSUMER_ADD_RELAYD_SOCKET: + { + /* Session daemon status message are handled in the following call. */ + consumer_add_relayd_socket(msg.u.relayd_sock.net_index, + msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll, + &msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id, + msg.u.relayd_sock.relayd_session_id); + goto end_nosignal; + } + case LTTNG_CONSUMER_ADD_CHANNEL: + { + struct lttng_consumer_channel *new_channel; + int ret_send_status, ret_add_channel = 0; + const uint64_t chunk_id = msg.u.channel.chunk_id.value; + + health_code_update(); + + /* First send a status message before receiving the fds. 
*/ + ret_send_status = consumer_send_status_msg(sock, ret_code); + if (ret_send_status < 0) { + /* Somehow, the session daemon is not responding anymore. */ + goto error_fatal; + } + + health_code_update(); + + DBG("consumer_add_channel %" PRIu64, msg.u.channel.channel_key); + new_channel = consumer_allocate_channel(msg.u.channel.channel_key, + msg.u.channel.session_id, + msg.u.channel.chunk_id.is_set ? + &chunk_id : NULL, + msg.u.channel.pathname, + msg.u.channel.name, + msg.u.channel.relayd_id, msg.u.channel.output, + msg.u.channel.tracefile_size, + msg.u.channel.tracefile_count, 0, + msg.u.channel.monitor, + msg.u.channel.live_timer_interval, + msg.u.channel.is_live, + NULL, NULL); + if (new_channel == NULL) { + lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR); + goto end_nosignal; + } + new_channel->nb_init_stream_left = msg.u.channel.nb_init_streams; + switch (msg.u.channel.output) { + case LTTNG_EVENT_SPLICE: + new_channel->output = CONSUMER_CHANNEL_SPLICE; + break; + case LTTNG_EVENT_MMAP: + new_channel->output = CONSUMER_CHANNEL_MMAP; + break; + default: + ERR("Channel output unknown %d", msg.u.channel.output); + goto end_nosignal; + } + + /* Translate and save channel type. 
*/ + switch (msg.u.channel.type) { + case CONSUMER_CHANNEL_TYPE_DATA: + case CONSUMER_CHANNEL_TYPE_METADATA: + new_channel->type = (consumer_channel_type) msg.u.channel.type; + break; + default: + abort(); + goto end_nosignal; + }; + + health_code_update(); + + if (ctx->on_recv_channel != NULL) { + int ret_recv_channel = + ctx->on_recv_channel(new_channel); + if (ret_recv_channel == 0) { + ret_add_channel = consumer_add_channel( + new_channel, ctx); + } else if (ret_recv_channel < 0) { + goto end_nosignal; + } + } else { + ret_add_channel = + consumer_add_channel(new_channel, ctx); + } + if (msg.u.channel.type == CONSUMER_CHANNEL_TYPE_DATA && + !ret_add_channel) { + int monitor_start_ret; + + DBG("Consumer starting monitor timer"); + consumer_timer_live_start(new_channel, + msg.u.channel.live_timer_interval); + monitor_start_ret = consumer_timer_monitor_start( + new_channel, + msg.u.channel.monitor_timer_interval); + if (monitor_start_ret < 0) { + ERR("Starting channel monitoring timer failed"); + goto end_nosignal; + } + } + + health_code_update(); + + /* If we received an error in add_channel, we need to report it. */ + if (ret_add_channel < 0) { + ret_send_status = consumer_send_status_msg( + sock, ret_add_channel); + if (ret_send_status < 0) { + goto error_fatal; + } + goto end_nosignal; + } + + goto end_nosignal; + } + case LTTNG_CONSUMER_ADD_STREAM: + { + int fd; + struct lttng_pipe *stream_pipe; + struct lttng_consumer_stream *new_stream; + struct lttng_consumer_channel *channel; + int alloc_ret = 0; + int ret_send_status, ret_poll, ret_get_max_subbuf_size; + ssize_t ret_pipe_write, ret_recv; + + /* + * Get stream's channel reference. Needed when adding the stream to the + * global hash table. + */ + channel = consumer_find_channel(msg.u.stream.channel_key); + if (!channel) { + /* + * We could not find the channel. Can happen if cpu hotplug + * happens while tearing down. 
+ */ + ERR("Unable to find channel key %" PRIu64, msg.u.stream.channel_key); + ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; + } + + health_code_update(); + + /* First send a status message before receiving the fds. */ + ret_send_status = consumer_send_status_msg(sock, ret_code); + if (ret_send_status < 0) { + /* Somehow, the session daemon is not responding anymore. */ + goto error_add_stream_fatal; + } + + health_code_update(); + + if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) { + /* Channel was not found. */ + goto error_add_stream_nosignal; + } + + /* Blocking call */ + health_poll_entry(); + ret_poll = lttng_consumer_poll_socket(consumer_sockpoll); + health_poll_exit(); + if (ret_poll) { + goto error_add_stream_fatal; + } + + health_code_update(); + + /* Get stream file descriptor from socket */ + ret_recv = lttcomm_recv_fds_unix_sock(sock, &fd, 1); + if (ret_recv != sizeof(fd)) { + lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD); + ret_func = ret_recv; + goto end; + } + + health_code_update(); + + /* + * Send status code to session daemon only if the recv works. If the + * above recv() failed, the session daemon is notified through the + * error socket and the teardown is eventually done. + */ + ret_send_status = consumer_send_status_msg(sock, ret_code); + if (ret_send_status < 0) { + /* Somehow, the session daemon is not responding anymore. 
*/ + goto error_add_stream_nosignal; + } + + health_code_update(); + + pthread_mutex_lock(&channel->lock); + new_stream = consumer_stream_create( + channel, + channel->key, + fd, + channel->name, + channel->relayd_id, + channel->session_id, + channel->trace_chunk, + msg.u.stream.cpu, + &alloc_ret, + channel->type, + channel->monitor); + if (new_stream == NULL) { + switch (alloc_ret) { + case -ENOMEM: + case -EINVAL: + default: + lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR); + break; + } + pthread_mutex_unlock(&channel->lock); + goto error_add_stream_nosignal; + } + + new_stream->wait_fd = fd; + ret_get_max_subbuf_size = kernctl_get_max_subbuf_size( + new_stream->wait_fd, &new_stream->max_sb_size); + if (ret_get_max_subbuf_size < 0) { + pthread_mutex_unlock(&channel->lock); + ERR("Failed to get kernel maximal subbuffer size"); + goto error_add_stream_nosignal; + } + + consumer_stream_update_channel_attributes(new_stream, + channel); + + /* + * We've just assigned the channel to the stream so increment the + * refcount right now. We don't need to increment the refcount for + * streams in no monitor because we handle manually the cleanup of + * those. It is very important to make sure there is NO prior + * consumer_del_stream() calls or else the refcount will be unbalanced. + */ + if (channel->monitor) { + uatomic_inc(&new_stream->chan->refcount); + } + + /* + * The buffer flush is done on the session daemon side for the kernel + * so no need for the stream "hangup_flush_done" variable to be + * tracked. This is important for a kernel stream since we don't rely + * on the flush state of the stream to read data. It's not the case for + * user space tracing. 
+ */ + new_stream->hangup_flush_done = 0; + + health_code_update(); + + pthread_mutex_lock(&new_stream->lock); + if (ctx->on_recv_stream) { + int ret_recv_stream = ctx->on_recv_stream(new_stream); + if (ret_recv_stream < 0) { + pthread_mutex_unlock(&new_stream->lock); + pthread_mutex_unlock(&channel->lock); + consumer_stream_free(new_stream); + goto error_add_stream_nosignal; + } + } + health_code_update(); + + if (new_stream->metadata_flag) { + channel->metadata_stream = new_stream; + } + + /* Do not monitor this stream. */ + if (!channel->monitor) { + DBG("Kernel consumer add stream %s in no monitor mode with " + "relayd id %" PRIu64, new_stream->name, + new_stream->net_seq_idx); + cds_list_add(&new_stream->send_node, &channel->streams.head); + pthread_mutex_unlock(&new_stream->lock); + pthread_mutex_unlock(&channel->lock); + goto end_add_stream; + } + + /* Send stream to relayd if the stream has an ID. */ + if (new_stream->net_seq_idx != (uint64_t) -1ULL) { + int ret_send_relayd_stream; + + ret_send_relayd_stream = consumer_send_relayd_stream( + new_stream, new_stream->chan->pathname); + if (ret_send_relayd_stream < 0) { + pthread_mutex_unlock(&new_stream->lock); + pthread_mutex_unlock(&channel->lock); + consumer_stream_free(new_stream); + goto error_add_stream_nosignal; + } + + /* + * If adding an extra stream to an already + * existing channel (e.g. cpu hotplug), we need + * to send the "streams_sent" command to relayd. + */ + if (channel->streams_sent_to_relayd) { + int ret_send_relayd_streams_sent; + + ret_send_relayd_streams_sent = + consumer_send_relayd_streams_sent( + new_stream->net_seq_idx); + if (ret_send_relayd_streams_sent < 0) { + pthread_mutex_unlock(&new_stream->lock); + pthread_mutex_unlock(&channel->lock); + goto error_add_stream_nosignal; + } + } + } + pthread_mutex_unlock(&new_stream->lock); + pthread_mutex_unlock(&channel->lock); + + /* Get the right pipe where the stream will be sent. 
*/ + if (new_stream->metadata_flag) { + consumer_add_metadata_stream(new_stream); + stream_pipe = ctx->consumer_metadata_pipe; + } else { + consumer_add_data_stream(new_stream); + stream_pipe = ctx->consumer_data_pipe; + } + + /* Visible to other threads */ + new_stream->globally_visible = 1; + + health_code_update(); + + ret_pipe_write = lttng_pipe_write( + stream_pipe, &new_stream, sizeof(new_stream)); + if (ret_pipe_write < 0) { + ERR("Consumer write %s stream to pipe %d", + new_stream->metadata_flag ? "metadata" : "data", + lttng_pipe_get_writefd(stream_pipe)); + if (new_stream->metadata_flag) { + consumer_del_stream_for_metadata(new_stream); + } else { + consumer_del_stream_for_data(new_stream); + } + goto error_add_stream_nosignal; + } + + DBG("Kernel consumer ADD_STREAM %s (fd: %d) %s with relayd id %" PRIu64, + new_stream->name, fd, new_stream->chan->pathname, new_stream->relayd_stream_id); +end_add_stream: + break; +error_add_stream_nosignal: + goto end_nosignal; +error_add_stream_fatal: + goto error_fatal; + } + case LTTNG_CONSUMER_STREAMS_SENT: + { + struct lttng_consumer_channel *channel; + int ret_send_status; + + /* + * Get stream's channel reference. Needed when adding the stream to the + * global hash table. + */ + channel = consumer_find_channel(msg.u.sent_streams.channel_key); + if (!channel) { + /* + * We could not find the channel. Can happen if cpu hotplug + * happens while tearing down. + */ + ERR("Unable to find channel key %" PRIu64, + msg.u.sent_streams.channel_key); + ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; + } + + health_code_update(); + + /* + * Send status code to session daemon. + */ + ret_send_status = consumer_send_status_msg(sock, ret_code); + if (ret_send_status < 0 || + ret_code != LTTCOMM_CONSUMERD_SUCCESS) { + /* Somehow, the session daemon is not responding anymore. 
*/ + goto error_streams_sent_nosignal; + } + + health_code_update(); + + /* + * We should not send this message if we don't monitor the + * streams in this channel. + */ + if (!channel->monitor) { + goto end_error_streams_sent; + } + + health_code_update(); + /* Send stream to relayd if the stream has an ID. */ + if (msg.u.sent_streams.net_seq_idx != (uint64_t) -1ULL) { + int ret_send_relay_streams; + + ret_send_relay_streams = consumer_send_relayd_streams_sent( + msg.u.sent_streams.net_seq_idx); + if (ret_send_relay_streams < 0) { + goto error_streams_sent_nosignal; + } + channel->streams_sent_to_relayd = true; + } +end_error_streams_sent: + break; +error_streams_sent_nosignal: + goto end_nosignal; + } + case LTTNG_CONSUMER_UPDATE_STREAM: + { + rcu_read_unlock(); + return -ENOSYS; + } + case LTTNG_CONSUMER_DESTROY_RELAYD: + { + uint64_t index = msg.u.destroy_relayd.net_seq_idx; + struct consumer_relayd_sock_pair *relayd; + int ret_send_status; + + DBG("Kernel consumer destroying relayd %" PRIu64, index); + + /* Get relayd reference if exists. */ + relayd = consumer_find_relayd(index); + if (relayd == NULL) { + DBG("Unable to find relayd %" PRIu64, index); + ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL; + } + + /* + * Each relayd socket pair has a refcount of stream attached to it + * which tells if the relayd is still active or not depending on the + * refcount value. + * + * This will set the destroy flag of the relayd object and destroy it + * if the refcount reaches zero when called. + * + * The destroy can happen either here or when a stream fd hangs up. + */ + if (relayd) { + consumer_flag_relayd_for_destroy(relayd); + } + + health_code_update(); + + ret_send_status = consumer_send_status_msg(sock, ret_code); + if (ret_send_status < 0) { + /* Somehow, the session daemon is not responding anymore. 
*/ + goto error_fatal; + } + + goto end_nosignal; + } + case LTTNG_CONSUMER_DATA_PENDING: + { + int32_t ret_data_pending; + uint64_t id = msg.u.data_pending.session_id; + ssize_t ret_send; + + DBG("Kernel consumer data pending command for id %" PRIu64, id); + + ret_data_pending = consumer_data_pending(id); + + health_code_update(); + + /* Send back returned value to session daemon */ + ret_send = lttcomm_send_unix_sock(sock, &ret_data_pending, + sizeof(ret_data_pending)); + if (ret_send < 0) { + PERROR("send data pending ret code"); + goto error_fatal; + } + + /* + * No need to send back a status message since the data pending + * returned value is the response. + */ + break; + } + case LTTNG_CONSUMER_SNAPSHOT_CHANNEL: + { + struct lttng_consumer_channel *channel; + uint64_t key = msg.u.snapshot_channel.key; + int ret_send_status; + + channel = consumer_find_channel(key); + if (!channel) { + ERR("Channel %" PRIu64 " not found", key); + ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; + } else { + pthread_mutex_lock(&channel->lock); + if (msg.u.snapshot_channel.metadata == 1) { + int ret_snapshot; + + ret_snapshot = lttng_kconsumer_snapshot_metadata( + channel, key, + msg.u.snapshot_channel.pathname, + msg.u.snapshot_channel.relayd_id, + ctx); + if (ret_snapshot < 0) { + ERR("Snapshot metadata failed"); + ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED; + } + } else { + int ret_snapshot; + + ret_snapshot = lttng_kconsumer_snapshot_channel( + channel, key, + msg.u.snapshot_channel.pathname, + msg.u.snapshot_channel.relayd_id, + msg.u.snapshot_channel + .nb_packets_per_stream, + ctx); + if (ret_snapshot < 0) { + ERR("Snapshot channel failed"); + ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED; + } + } + pthread_mutex_unlock(&channel->lock); + } + health_code_update(); + + ret_send_status = consumer_send_status_msg(sock, ret_code); + if (ret_send_status < 0) { + /* Somehow, the session daemon is not responding anymore. 
*/ + goto end_nosignal; + } + break; + } + case LTTNG_CONSUMER_DESTROY_CHANNEL: + { + uint64_t key = msg.u.destroy_channel.key; + struct lttng_consumer_channel *channel; + int ret_send_status; + + channel = consumer_find_channel(key); + if (!channel) { + ERR("Kernel consumer destroy channel %" PRIu64 " not found", key); + ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; + } + + health_code_update(); + + ret_send_status = consumer_send_status_msg(sock, ret_code); + if (ret_send_status < 0) { + /* Somehow, the session daemon is not responding anymore. */ + goto end_destroy_channel; + } + + health_code_update(); + + /* Stop right now if no channel was found. */ + if (!channel) { + goto end_destroy_channel; + } + + /* + * This command should ONLY be issued for channel with streams set in + * no monitor mode. + */ + LTTNG_ASSERT(!channel->monitor); + + /* + * The refcount should ALWAYS be 0 in the case of a channel in no + * monitor mode. + */ + LTTNG_ASSERT(!uatomic_sub_return(&channel->refcount, 1)); + + consumer_del_channel(channel); +end_destroy_channel: + goto end_nosignal; + } + case LTTNG_CONSUMER_DISCARDED_EVENTS: + { + ssize_t ret; + uint64_t count; + struct lttng_consumer_channel *channel; + uint64_t id = msg.u.discarded_events.session_id; + uint64_t key = msg.u.discarded_events.channel_key; + + DBG("Kernel consumer discarded events command for session id %" + PRIu64 ", channel key %" PRIu64, id, key); + + channel = consumer_find_channel(key); + if (!channel) { + ERR("Kernel consumer discarded events channel %" + PRIu64 " not found", key); + count = 0; + } else { + count = channel->discarded_events; + } + + health_code_update(); + + /* Send back returned value to session daemon */ + ret = lttcomm_send_unix_sock(sock, &count, sizeof(count)); + if (ret < 0) { + PERROR("send discarded events"); + goto error_fatal; + } + + break; + } + case LTTNG_CONSUMER_LOST_PACKETS: + { + ssize_t ret; + uint64_t count; + struct lttng_consumer_channel *channel; + uint64_t id = 
msg.u.lost_packets.session_id; + uint64_t key = msg.u.lost_packets.channel_key; + + DBG("Kernel consumer lost packets command for session id %" + PRIu64 ", channel key %" PRIu64, id, key); + + channel = consumer_find_channel(key); + if (!channel) { + ERR("Kernel consumer lost packets channel %" + PRIu64 " not found", key); + count = 0; + } else { + count = channel->lost_packets; + } + + health_code_update(); + + /* Send back returned value to session daemon */ + ret = lttcomm_send_unix_sock(sock, &count, sizeof(count)); + if (ret < 0) { + PERROR("send lost packets"); + goto error_fatal; + } + + break; + } + case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE: + { + int channel_monitor_pipe; + int ret_send_status, ret_set_channel_monitor_pipe; + ssize_t ret_recv; + + ret_code = LTTCOMM_CONSUMERD_SUCCESS; + /* Successfully received the command's type. */ + ret_send_status = consumer_send_status_msg(sock, ret_code); + if (ret_send_status < 0) { + goto error_fatal; + } + + ret_recv = lttcomm_recv_fds_unix_sock( + sock, &channel_monitor_pipe, 1); + if (ret_recv != sizeof(channel_monitor_pipe)) { + ERR("Failed to receive channel monitor pipe"); + goto error_fatal; + } + + DBG("Received channel monitor pipe (%d)", channel_monitor_pipe); + ret_set_channel_monitor_pipe = + consumer_timer_thread_set_channel_monitor_pipe( + channel_monitor_pipe); + if (!ret_set_channel_monitor_pipe) { + int flags; + int ret_fcntl; + + ret_code = LTTCOMM_CONSUMERD_SUCCESS; + /* Set the pipe as non-blocking. 
*/ + ret_fcntl = fcntl(channel_monitor_pipe, F_GETFL, 0); + if (ret_fcntl == -1) { + PERROR("fcntl get flags of the channel monitoring pipe"); + goto error_fatal; + } + flags = ret_fcntl; + + ret_fcntl = fcntl(channel_monitor_pipe, F_SETFL, + flags | O_NONBLOCK); + if (ret_fcntl == -1) { + PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe"); + goto error_fatal; + } + DBG("Channel monitor pipe set as non-blocking"); + } else { + ret_code = LTTCOMM_CONSUMERD_ALREADY_SET; + } + ret_send_status = consumer_send_status_msg(sock, ret_code); + if (ret_send_status < 0) { + goto error_fatal; + } + break; + } + case LTTNG_CONSUMER_ROTATE_CHANNEL: + { + struct lttng_consumer_channel *channel; + uint64_t key = msg.u.rotate_channel.key; + int ret_send_status; + + DBG("Consumer rotate channel %" PRIu64, key); + + channel = consumer_find_channel(key); + if (!channel) { + ERR("Channel %" PRIu64 " not found", key); + ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; + } else { + /* + * Sample the rotate position of all the streams in this channel. + */ + int ret_rotate_channel; + + ret_rotate_channel = lttng_consumer_rotate_channel( + channel, key, + msg.u.rotate_channel.relayd_id, + msg.u.rotate_channel.metadata, ctx); + if (ret_rotate_channel < 0) { + ERR("Rotate channel failed"); + ret_code = LTTCOMM_CONSUMERD_ROTATION_FAIL; + } + + health_code_update(); + } + + ret_send_status = consumer_send_status_msg(sock, ret_code); + if (ret_send_status < 0) { + /* Somehow, the session daemon is not responding anymore. */ + goto error_rotate_channel; + } + if (channel) { + /* Rotate the streams that are ready right now. 
*/ + int ret_rotate; + + ret_rotate = lttng_consumer_rotate_ready_streams( + channel, key, ctx); + if (ret_rotate < 0) { + ERR("Rotate ready streams failed"); + } + } + break; +error_rotate_channel: + goto end_nosignal; + } + case LTTNG_CONSUMER_CLEAR_CHANNEL: + { + struct lttng_consumer_channel *channel; + uint64_t key = msg.u.clear_channel.key; + int ret_send_status; + + channel = consumer_find_channel(key); + if (!channel) { + DBG("Channel %" PRIu64 " not found", key); + ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; + } else { + int ret_clear_channel; + + ret_clear_channel = + lttng_consumer_clear_channel(channel); + if (ret_clear_channel) { + ERR("Clear channel failed"); + ret_code = (lttcomm_return_code) ret_clear_channel; + } + + health_code_update(); + } + + ret_send_status = consumer_send_status_msg(sock, ret_code); + if (ret_send_status < 0) { + /* Somehow, the session daemon is not responding anymore. */ + goto end_nosignal; + } + + break; + } + case LTTNG_CONSUMER_INIT: + { + int ret_send_status; + + ret_code = lttng_consumer_init_command(ctx, + msg.u.init.sessiond_uuid); + health_code_update(); + ret_send_status = consumer_send_status_msg(sock, ret_code); + if (ret_send_status < 0) { + /* Somehow, the session daemon is not responding anymore. */ + goto end_nosignal; + } + break; + } + case LTTNG_CONSUMER_CREATE_TRACE_CHUNK: + { + const struct lttng_credentials credentials = { + .uid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.uid), + .gid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.gid), + }; + const bool is_local_trace = + !msg.u.create_trace_chunk.relayd_id.is_set; + const uint64_t relayd_id = + msg.u.create_trace_chunk.relayd_id.value; + const char *chunk_override_name = + *msg.u.create_trace_chunk.override_name ? 
+ msg.u.create_trace_chunk.override_name : + NULL; + struct lttng_directory_handle *chunk_directory_handle = NULL; + + /* + * The session daemon will only provide a chunk directory file + * descriptor for local traces. + */ + if (is_local_trace) { + int chunk_dirfd; + int ret_send_status; + ssize_t ret_recv; + + /* Acknowledge the reception of the command. */ + ret_send_status = consumer_send_status_msg( + sock, LTTCOMM_CONSUMERD_SUCCESS); + if (ret_send_status < 0) { + /* Somehow, the session daemon is not responding anymore. */ + goto end_nosignal; + } + + ret_recv = lttcomm_recv_fds_unix_sock( + sock, &chunk_dirfd, 1); + if (ret_recv != sizeof(chunk_dirfd)) { + ERR("Failed to receive trace chunk directory file descriptor"); + goto error_fatal; + } + + DBG("Received trace chunk directory fd (%d)", + chunk_dirfd); + chunk_directory_handle = lttng_directory_handle_create_from_dirfd( + chunk_dirfd); + if (!chunk_directory_handle) { + ERR("Failed to initialize chunk directory handle from directory file descriptor"); + if (close(chunk_dirfd)) { + PERROR("Failed to close chunk directory file descriptor"); + } + goto error_fatal; + } + } + + ret_code = lttng_consumer_create_trace_chunk( + !is_local_trace ? &relayd_id : NULL, + msg.u.create_trace_chunk.session_id, + msg.u.create_trace_chunk.chunk_id, + (time_t) msg.u.create_trace_chunk + .creation_timestamp, + chunk_override_name, + msg.u.create_trace_chunk.credentials.is_set ? 
+ &credentials : + NULL, + chunk_directory_handle); + lttng_directory_handle_put(chunk_directory_handle); + goto end_msg_sessiond; + } + case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK: + { + enum lttng_trace_chunk_command_type close_command = + (lttng_trace_chunk_command_type) msg.u.close_trace_chunk.close_command.value; + const uint64_t relayd_id = + msg.u.close_trace_chunk.relayd_id.value; + struct lttcomm_consumer_close_trace_chunk_reply reply; + char path[LTTNG_PATH_MAX]; + ssize_t ret_send; + + ret_code = lttng_consumer_close_trace_chunk( + msg.u.close_trace_chunk.relayd_id.is_set ? + &relayd_id : + NULL, + msg.u.close_trace_chunk.session_id, + msg.u.close_trace_chunk.chunk_id, + (time_t) msg.u.close_trace_chunk.close_timestamp, + msg.u.close_trace_chunk.close_command.is_set ? + &close_command : + NULL, path); + reply.ret_code = ret_code; + reply.path_length = strlen(path) + 1; + ret_send = lttcomm_send_unix_sock(sock, &reply, sizeof(reply)); + if (ret_send != sizeof(reply)) { + goto error_fatal; + } + ret_send = lttcomm_send_unix_sock( + sock, path, reply.path_length); + if (ret_send != reply.path_length) { + goto error_fatal; + } + goto end_nosignal; + } + case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS: + { + const uint64_t relayd_id = + msg.u.trace_chunk_exists.relayd_id.value; + + ret_code = lttng_consumer_trace_chunk_exists( + msg.u.trace_chunk_exists.relayd_id.is_set ? 
+ &relayd_id : NULL, + msg.u.trace_chunk_exists.session_id, + msg.u.trace_chunk_exists.chunk_id); + goto end_msg_sessiond; + } + case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS: + { + const uint64_t key = msg.u.open_channel_packets.key; + struct lttng_consumer_channel *channel = + consumer_find_channel(key); + + if (channel) { + pthread_mutex_lock(&channel->lock); + ret_code = lttng_consumer_open_channel_packets(channel); + pthread_mutex_unlock(&channel->lock); + } else { + WARN("Channel %" PRIu64 " not found", key); + ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; + } + + health_code_update(); + goto end_msg_sessiond; + } + default: + goto end_nosignal; + } + +end_nosignal: + /* + * Return 1 to indicate success since the 0 value can be a socket + * shutdown during the recv() or send() call. + */ + ret_func = 1; + goto end; +error_fatal: + /* This will issue a consumer stop. */ + ret_func = -1; + goto end; +end_msg_sessiond: + /* + * The returned value here is not useful since either way we'll return 1 to + * the caller because the session daemon socket management is done + * elsewhere. Returning a negative code or 0 will shutdown the consumer. + */ + { + int ret_send_status; + + ret_send_status = consumer_send_status_msg(sock, ret_code); + if (ret_send_status < 0) { + goto error_fatal; + } + } + + ret_func = 1; + +end: + health_code_update(); + rcu_read_unlock(); + return ret_func; +} + +/* + * Sync metadata meaning request them to the session daemon and snapshot to the + * metadata thread can consumer them. + * + * Metadata stream lock MUST be acquired. 
+ */ +enum sync_metadata_status lttng_kconsumer_sync_metadata( + struct lttng_consumer_stream *metadata) +{ + int ret; + enum sync_metadata_status status; + + LTTNG_ASSERT(metadata); + + ret = kernctl_buffer_flush(metadata->wait_fd); + if (ret < 0) { + ERR("Failed to flush kernel stream"); + status = SYNC_METADATA_STATUS_ERROR; + goto end; + } + + ret = kernctl_snapshot(metadata->wait_fd); + if (ret < 0) { + if (errno == EAGAIN) { + /* No new metadata, exit. */ + DBG("Sync metadata, no new kernel metadata"); + status = SYNC_METADATA_STATUS_NO_DATA; + } else { + ERR("Sync metadata, taking kernel snapshot failed."); + status = SYNC_METADATA_STATUS_ERROR; + } + } else { + status = SYNC_METADATA_STATUS_NEW_DATA; + } + +end: + return status; +} + +static +int extract_common_subbuffer_info(struct lttng_consumer_stream *stream, + struct stream_subbuffer *subbuf) +{ + int ret; + + ret = kernctl_get_subbuf_size( + stream->wait_fd, &subbuf->info.data.subbuf_size); + if (ret) { + goto end; + } + + ret = kernctl_get_padded_subbuf_size( + stream->wait_fd, &subbuf->info.data.padded_subbuf_size); + if (ret) { + goto end; + } + +end: + return ret; +} + +static +int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream, + struct stream_subbuffer *subbuf) +{ + int ret; + + ret = extract_common_subbuffer_info(stream, subbuf); + if (ret) { + goto end; + } + + ret = kernctl_get_metadata_version( + stream->wait_fd, &subbuf->info.metadata.version); + if (ret) { + goto end; + } + +end: + return ret; +} + +static +int extract_data_subbuffer_info(struct lttng_consumer_stream *stream, + struct stream_subbuffer *subbuf) +{ + int ret; + + ret = extract_common_subbuffer_info(stream, subbuf); + if (ret) { + goto end; + } + + ret = kernctl_get_packet_size( + stream->wait_fd, &subbuf->info.data.packet_size); + if (ret < 0) { + PERROR("Failed to get sub-buffer packet size"); + goto end; + } + + ret = kernctl_get_content_size( + stream->wait_fd, &subbuf->info.data.content_size); + if 
(ret < 0) { + PERROR("Failed to get sub-buffer content size"); + goto end; + } + + ret = kernctl_get_timestamp_begin( + stream->wait_fd, &subbuf->info.data.timestamp_begin); + if (ret < 0) { + PERROR("Failed to get sub-buffer begin timestamp"); + goto end; + } + + ret = kernctl_get_timestamp_end( + stream->wait_fd, &subbuf->info.data.timestamp_end); + if (ret < 0) { + PERROR("Failed to get sub-buffer end timestamp"); + goto end; + } + + ret = kernctl_get_events_discarded( + stream->wait_fd, &subbuf->info.data.events_discarded); + if (ret) { + PERROR("Failed to get sub-buffer events discarded count"); + goto end; + } + + ret = kernctl_get_sequence_number(stream->wait_fd, + &subbuf->info.data.sequence_number.value); + if (ret) { + /* May not be supported by older LTTng-modules. */ + if (ret != -ENOTTY) { + PERROR("Failed to get sub-buffer sequence number"); + goto end; + } + } else { + subbuf->info.data.sequence_number.is_set = true; + } + + ret = kernctl_get_stream_id( + stream->wait_fd, &subbuf->info.data.stream_id); + if (ret < 0) { + PERROR("Failed to get stream id"); + goto end; + } + + ret = kernctl_get_instance_id(stream->wait_fd, + &subbuf->info.data.stream_instance_id.value); + if (ret) { + /* May not be supported by older LTTng-modules. 
*/ + if (ret != -ENOTTY) { + PERROR("Failed to get stream instance id"); + goto end; + } + } else { + subbuf->info.data.stream_instance_id.is_set = true; + } +end: + return ret; +} + +static +enum get_next_subbuffer_status get_subbuffer_common( + struct lttng_consumer_stream *stream, + struct stream_subbuffer *subbuffer) +{ + int ret; + enum get_next_subbuffer_status status; + + ret = kernctl_get_next_subbuf(stream->wait_fd); + switch (ret) { + case 0: + status = GET_NEXT_SUBBUFFER_STATUS_OK; + break; + case -ENODATA: + case -EAGAIN: + /* + * The caller only expects -ENODATA when there is no data to + * read, but the kernel tracer returns -EAGAIN when there is + * currently no data for a non-finalized stream, and -ENODATA + * when there is no data for a finalized stream. Those can be + * combined into a -ENODATA return value. + */ + status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA; + goto end; + default: + status = GET_NEXT_SUBBUFFER_STATUS_ERROR; + goto end; + } + + ret = stream->read_subbuffer_ops.extract_subbuffer_info( + stream, subbuffer); + if (ret) { + status = GET_NEXT_SUBBUFFER_STATUS_ERROR; + } +end: + return status; +} + +static +enum get_next_subbuffer_status get_next_subbuffer_splice( + struct lttng_consumer_stream *stream, + struct stream_subbuffer *subbuffer) +{ + const enum get_next_subbuffer_status status = + get_subbuffer_common(stream, subbuffer); + + if (status != GET_NEXT_SUBBUFFER_STATUS_OK) { + goto end; + } + + subbuffer->buffer.fd = stream->wait_fd; +end: + return status; +} + +static +enum get_next_subbuffer_status get_next_subbuffer_mmap( + struct lttng_consumer_stream *stream, + struct stream_subbuffer *subbuffer) +{ + int ret; + enum get_next_subbuffer_status status; + const char *addr; + + status = get_subbuffer_common(stream, subbuffer); + if (status != GET_NEXT_SUBBUFFER_STATUS_OK) { + goto end; + } + + ret = get_current_subbuf_addr(stream, &addr); + if (ret) { + status = GET_NEXT_SUBBUFFER_STATUS_ERROR; + goto end; + } + + 
subbuffer->buffer.buffer = lttng_buffer_view_init( + addr, 0, subbuffer->info.data.padded_subbuf_size); +end: + return status; +} + +static +enum get_next_subbuffer_status get_next_subbuffer_metadata_check(struct lttng_consumer_stream *stream, + struct stream_subbuffer *subbuffer) +{ + int ret; + const char *addr; + bool coherent; + enum get_next_subbuffer_status status; + + ret = kernctl_get_next_subbuf_metadata_check(stream->wait_fd, + &coherent); + if (ret) { + goto end; + } + + ret = stream->read_subbuffer_ops.extract_subbuffer_info( + stream, subbuffer); + if (ret) { + goto end; + } + + LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent); + + ret = get_current_subbuf_addr(stream, &addr); + if (ret) { + goto end; + } + + subbuffer->buffer.buffer = lttng_buffer_view_init( + addr, 0, subbuffer->info.data.padded_subbuf_size); + DBG("Got metadata packet with padded_subbuf_size = %lu, coherent = %s", + subbuffer->info.metadata.padded_subbuf_size, + coherent ? "true" : "false"); +end: + /* + * The caller only expects -ENODATA when there is no data to read, but + * the kernel tracer returns -EAGAIN when there is currently no data + * for a non-finalized stream, and -ENODATA when there is no data for a + * finalized stream. Those can be combined into a -ENODATA return value. + */ + switch (ret) { + case 0: + status = GET_NEXT_SUBBUFFER_STATUS_OK; + break; + case -ENODATA: + case -EAGAIN: + /* + * The caller only expects -ENODATA when there is no data to + * read, but the kernel tracer returns -EAGAIN when there is + * currently no data for a non-finalized stream, and -ENODATA + * when there is no data for a finalized stream. Those can be + * combined into a -ENODATA return value. 
+ */ + status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA; + break; + default: + status = GET_NEXT_SUBBUFFER_STATUS_ERROR; + break; + } + + return status; +} + +static +int put_next_subbuffer(struct lttng_consumer_stream *stream, + struct stream_subbuffer *subbuffer) +{ + const int ret = kernctl_put_next_subbuf(stream->wait_fd); + + if (ret) { + if (ret == -EFAULT) { + PERROR("Error in unreserving sub buffer"); + } else if (ret == -EIO) { + /* Should never happen with newer LTTng versions */ + PERROR("Reader has been pushed by the writer, last sub-buffer corrupted"); + } + } + + return ret; +} + +static +bool is_get_next_check_metadata_available(int tracer_fd) +{ + const int ret = kernctl_get_next_subbuf_metadata_check(tracer_fd, NULL); + const bool available = ret != -ENOTTY; + + if (ret == 0) { + /* get succeeded, make sure to put the subbuffer. */ + kernctl_put_subbuf(tracer_fd); + } + + return available; +} + +static +int signal_metadata(struct lttng_consumer_stream *stream, + struct lttng_consumer_local_data *ctx) +{ + ASSERT_LOCKED(stream->metadata_rdv_lock); + return pthread_cond_broadcast(&stream->metadata_rdv) ? -errno : 0; +} + +static +int lttng_kconsumer_set_stream_ops( + struct lttng_consumer_stream *stream) +{ + int ret = 0; + + if (stream->metadata_flag && stream->chan->is_live) { + DBG("Attempting to enable metadata bucketization for live consumers"); + if (is_get_next_check_metadata_available(stream->wait_fd)) { + DBG("Kernel tracer supports get_next_subbuffer_metadata_check, metadata will be accumulated until a coherent state is reached"); + stream->read_subbuffer_ops.get_next_subbuffer = + get_next_subbuffer_metadata_check; + ret = consumer_stream_enable_metadata_bucketization( + stream); + if (ret) { + goto end; + } + } else { + /* + * The kernel tracer version is too old to indicate + * when the metadata stream has reached a "coherent" + * (parseable) point. 
+ * + * This means that a live viewer may see an incoherent + * sequence of metadata and fail to parse it. + */ + WARN("Kernel tracer does not support get_next_subbuffer_metadata_check which may cause live clients to fail to parse the metadata stream"); + metadata_bucket_destroy(stream->metadata_bucket); + stream->metadata_bucket = NULL; + } + + stream->read_subbuffer_ops.on_sleep = signal_metadata; + } + + if (!stream->read_subbuffer_ops.get_next_subbuffer) { + if (stream->chan->output == CONSUMER_CHANNEL_MMAP) { + stream->read_subbuffer_ops.get_next_subbuffer = + get_next_subbuffer_mmap; + } else { + stream->read_subbuffer_ops.get_next_subbuffer = + get_next_subbuffer_splice; + } + } + + if (stream->metadata_flag) { + stream->read_subbuffer_ops.extract_subbuffer_info = + extract_metadata_subbuffer_info; + } else { + stream->read_subbuffer_ops.extract_subbuffer_info = + extract_data_subbuffer_info; + if (stream->chan->is_live) { + stream->read_subbuffer_ops.send_live_beacon = + consumer_flush_kernel_index; + } + } + + stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer; +end: + return ret; +} + +int lttng_kconsumer_on_recv_stream(struct lttng_consumer_stream *stream) +{ + int ret; + + LTTNG_ASSERT(stream); + + /* + * Don't create anything if this is set for streaming or if there is + * no current trace chunk on the parent channel. 
+ */ + if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor && + stream->chan->trace_chunk) { + ret = consumer_stream_create_output_files(stream, true); + if (ret) { + goto error; + } + } + + if (stream->output == LTTNG_EVENT_MMAP) { + /* get the len of the mmap region */ + unsigned long mmap_len; + + ret = kernctl_get_mmap_len(stream->wait_fd, &mmap_len); + if (ret != 0) { + PERROR("kernctl_get_mmap_len"); + goto error_close_fd; + } + stream->mmap_len = (size_t) mmap_len; + + stream->mmap_base = mmap(NULL, stream->mmap_len, PROT_READ, + MAP_PRIVATE, stream->wait_fd, 0); + if (stream->mmap_base == MAP_FAILED) { + PERROR("Error mmaping"); + ret = -1; + goto error_close_fd; + } + } + + ret = lttng_kconsumer_set_stream_ops(stream); + if (ret) { + goto error_close_fd; + } + + /* we return 0 to let the library handle the FD internally */ + return 0; + +error_close_fd: + if (stream->out_fd >= 0) { + int err; + + err = close(stream->out_fd); + LTTNG_ASSERT(!err); + stream->out_fd = -1; + } +error: + return ret; +} + +/* + * Check if data is still being extracted from the buffers for a specific + * stream. Consumer data lock MUST be acquired before calling this function + * and the stream lock. + * + * Return 1 if the traced data are still getting read else 0 meaning that the + * data is available for trace viewer reading. + */ +int lttng_kconsumer_data_pending(struct lttng_consumer_stream *stream) +{ + int ret; + + LTTNG_ASSERT(stream); + + if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) { + ret = 0; + goto end; + } + + ret = kernctl_get_next_subbuf(stream->wait_fd); + if (ret == 0) { + /* There is still data so let's put back this subbuffer. */ + ret = kernctl_put_subbuf(stream->wait_fd); + LTTNG_ASSERT(ret == 0); + ret = 1; /* Data is pending */ + goto end; + } + + /* Data is NOT pending and ready to be read. 
*/ + ret = 0; + +end: + return ret; +} diff --git a/src/common/kernel-consumer/kernel-consumer.h b/src/common/kernel-consumer/kernel-consumer.h index 004becb0f..71a33b73f 100644 --- a/src/common/kernel-consumer/kernel-consumer.h +++ b/src/common/kernel-consumer/kernel-consumer.h @@ -13,6 +13,10 @@ #include #include +#ifdef __cplusplus +extern "C" { +#endif + int lttng_kconsumer_take_snapshot(struct lttng_consumer_stream *stream); int lttng_kconsumer_sample_snapshot_positions( struct lttng_consumer_stream *stream); @@ -27,4 +31,8 @@ int lttng_kconsumer_data_pending(struct lttng_consumer_stream *stream); enum sync_metadata_status lttng_kconsumer_sync_metadata( struct lttng_consumer_stream *metadata); +#ifdef __cplusplus +} +#endif + #endif /* _LTTNG_KCONSUMER_H */ diff --git a/src/common/ust-consumer/Makefile.am b/src/common/ust-consumer/Makefile.am index 5ab0fe602..26f166785 100644 --- a/src/common/ust-consumer/Makefile.am +++ b/src/common/ust-consumer/Makefile.am @@ -4,7 +4,9 @@ if HAVE_LIBLTTNG_UST_CTL noinst_LTLIBRARIES = libust-consumer.la -libust_consumer_la_SOURCES = ust-consumer.c ust-consumer.h +libust_consumer_la_SOURCES = \ + ust-consumer.cpp \ + ust-consumer.h libust_consumer_la_LIBADD = \ $(UST_CTL_LIBS) \ diff --git a/src/common/ust-consumer/ust-consumer.c b/src/common/ust-consumer/ust-consumer.c deleted file mode 100644 index 310b670b1..000000000 --- a/src/common/ust-consumer/ust-consumer.c +++ /dev/null @@ -1,3438 +0,0 @@ -/* - * Copyright (C) 2011 Julien Desfossez - * Copyright (C) 2011 Mathieu Desnoyers - * Copyright (C) 2017 Jérémie Galarneau - * - * SPDX-License-Identifier: GPL-2.0-only - * - */ - -#define _LGPL_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "ust-consumer.h" - 
-#define INT_MAX_STR_LEN 12 /* includes \0 */ - -extern struct lttng_consumer_global_data the_consumer_data; -extern int consumer_poll_timeout; - -LTTNG_EXPORT DEFINE_LTTNG_UST_SIGBUS_STATE(); - -/* - * Free channel object and all streams associated with it. This MUST be used - * only and only if the channel has _NEVER_ been added to the global channel - * hash table. - */ -static void destroy_channel(struct lttng_consumer_channel *channel) -{ - struct lttng_consumer_stream *stream, *stmp; - - LTTNG_ASSERT(channel); - - DBG("UST consumer cleaning stream list"); - - cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head, - send_node) { - - health_code_update(); - - cds_list_del(&stream->send_node); - lttng_ust_ctl_destroy_stream(stream->ustream); - lttng_trace_chunk_put(stream->trace_chunk); - free(stream); - } - - /* - * If a channel is available meaning that was created before the streams - * were, delete it. - */ - if (channel->uchan) { - lttng_ustconsumer_del_channel(channel); - lttng_ustconsumer_free_channel(channel); - } - - if (channel->trace_chunk) { - lttng_trace_chunk_put(channel->trace_chunk); - } - - free(channel); -} - -/* - * Add channel to internal consumer state. - * - * Returns 0 on success or else a negative value. - */ -static int add_channel(struct lttng_consumer_channel *channel, - struct lttng_consumer_local_data *ctx) -{ - int ret = 0; - - LTTNG_ASSERT(channel); - LTTNG_ASSERT(ctx); - - if (ctx->on_recv_channel != NULL) { - ret = ctx->on_recv_channel(channel); - if (ret == 0) { - ret = consumer_add_channel(channel, ctx); - } else if (ret < 0) { - /* Most likely an ENOMEM. */ - lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR); - goto error; - } - } else { - ret = consumer_add_channel(channel, ctx); - } - - DBG("UST consumer channel added (key: %" PRIu64 ")", channel->key); - -error: - return ret; -} - -/* - * Allocate and return a consumer stream object. 
If _alloc_ret is not NULL, the - * error value if applicable is set in it else it is kept untouched. - * - * Return NULL on error else the newly allocated stream object. - */ -static struct lttng_consumer_stream *allocate_stream(int cpu, int key, - struct lttng_consumer_channel *channel, - struct lttng_consumer_local_data *ctx, int *_alloc_ret) -{ - int alloc_ret; - struct lttng_consumer_stream *stream = NULL; - - LTTNG_ASSERT(channel); - LTTNG_ASSERT(ctx); - - stream = consumer_stream_create( - channel, - channel->key, - key, - channel->name, - channel->relayd_id, - channel->session_id, - channel->trace_chunk, - cpu, - &alloc_ret, - channel->type, - channel->monitor); - if (stream == NULL) { - switch (alloc_ret) { - case -ENOENT: - /* - * We could not find the channel. Can happen if cpu hotplug - * happens while tearing down. - */ - DBG3("Could not find channel"); - break; - case -ENOMEM: - case -EINVAL: - default: - lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR); - break; - } - goto error; - } - - consumer_stream_update_channel_attributes(stream, channel); - -error: - if (_alloc_ret) { - *_alloc_ret = alloc_ret; - } - return stream; -} - -/* - * Send the given stream pointer to the corresponding thread. - * - * Returns 0 on success else a negative value. - */ -static int send_stream_to_thread(struct lttng_consumer_stream *stream, - struct lttng_consumer_local_data *ctx) -{ - int ret; - struct lttng_pipe *stream_pipe; - - /* Get the right pipe where the stream will be sent. */ - if (stream->metadata_flag) { - consumer_add_metadata_stream(stream); - stream_pipe = ctx->consumer_metadata_pipe; - } else { - consumer_add_data_stream(stream); - stream_pipe = ctx->consumer_data_pipe; - } - - /* - * From this point on, the stream's ownership has been moved away from - * the channel and it becomes globally visible. Hence, remove it from - * the local stream list to prevent the stream from being both local and - * global. 
- */ - stream->globally_visible = 1; - cds_list_del(&stream->send_node); - - ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream)); - if (ret < 0) { - ERR("Consumer write %s stream to pipe %d", - stream->metadata_flag ? "metadata" : "data", - lttng_pipe_get_writefd(stream_pipe)); - if (stream->metadata_flag) { - consumer_del_stream_for_metadata(stream); - } else { - consumer_del_stream_for_data(stream); - } - goto error; - } - -error: - return ret; -} - -static -int get_stream_shm_path(char *stream_shm_path, const char *shm_path, int cpu) -{ - char cpu_nr[INT_MAX_STR_LEN]; /* int max len */ - int ret; - - strncpy(stream_shm_path, shm_path, PATH_MAX); - stream_shm_path[PATH_MAX - 1] = '\0'; - ret = snprintf(cpu_nr, INT_MAX_STR_LEN, "%i", cpu); - if (ret < 0) { - PERROR("snprintf"); - goto end; - } - strncat(stream_shm_path, cpu_nr, - PATH_MAX - strlen(stream_shm_path) - 1); - ret = 0; -end: - return ret; -} - -/* - * Create streams for the given channel using liblttng-ust-ctl. - * The channel lock must be acquired by the caller. - * - * Return 0 on success else a negative value. - */ -static int create_ust_streams(struct lttng_consumer_channel *channel, - struct lttng_consumer_local_data *ctx) -{ - int ret, cpu = 0; - struct lttng_ust_ctl_consumer_stream *ustream; - struct lttng_consumer_stream *stream; - pthread_mutex_t *current_stream_lock = NULL; - - LTTNG_ASSERT(channel); - LTTNG_ASSERT(ctx); - - /* - * While a stream is available from ustctl. When NULL is returned, we've - * reached the end of the possible stream for the channel. 
- */ - while ((ustream = lttng_ust_ctl_create_stream(channel->uchan, cpu))) { - int wait_fd; - int ust_metadata_pipe[2]; - - health_code_update(); - - if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && channel->monitor) { - ret = utils_create_pipe_cloexec_nonblock(ust_metadata_pipe); - if (ret < 0) { - ERR("Create ust metadata poll pipe"); - goto error; - } - wait_fd = ust_metadata_pipe[0]; - } else { - wait_fd = lttng_ust_ctl_stream_get_wait_fd(ustream); - } - - /* Allocate consumer stream object. */ - stream = allocate_stream(cpu, wait_fd, channel, ctx, &ret); - if (!stream) { - goto error_alloc; - } - stream->ustream = ustream; - /* - * Store it so we can save multiple function calls afterwards since - * this value is used heavily in the stream threads. This is UST - * specific so this is why it's done after allocation. - */ - stream->wait_fd = wait_fd; - - /* - * Increment channel refcount since the channel reference has now been - * assigned in the allocation process above. - */ - if (stream->chan->monitor) { - uatomic_inc(&stream->chan->refcount); - } - - pthread_mutex_lock(&stream->lock); - current_stream_lock = &stream->lock; - /* - * Order is important this is why a list is used. On error, the caller - * should clean this list. - */ - cds_list_add_tail(&stream->send_node, &channel->streams.head); - - ret = lttng_ust_ctl_get_max_subbuf_size(stream->ustream, - &stream->max_sb_size); - if (ret < 0) { - ERR("lttng_ust_ctl_get_max_subbuf_size failed for stream %s", - stream->name); - goto error; - } - - /* Do actions once stream has been received. */ - if (ctx->on_recv_stream) { - ret = ctx->on_recv_stream(stream); - if (ret < 0) { - goto error; - } - } - - DBG("UST consumer add stream %s (key: %" PRIu64 ") with relayd id %" PRIu64, - stream->name, stream->key, stream->relayd_stream_id); - - /* Set next CPU stream. */ - channel->streams.count = ++cpu; - - /* Keep stream reference when creating metadata. 
*/ - if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) { - channel->metadata_stream = stream; - if (channel->monitor) { - /* Set metadata poll pipe if we created one */ - memcpy(stream->ust_metadata_poll_pipe, - ust_metadata_pipe, - sizeof(ust_metadata_pipe)); - } - } - pthread_mutex_unlock(&stream->lock); - current_stream_lock = NULL; - } - - return 0; - -error: -error_alloc: - if (current_stream_lock) { - pthread_mutex_unlock(current_stream_lock); - } - return ret; -} - -static int open_ust_stream_fd(struct lttng_consumer_channel *channel, int cpu, - const struct lttng_credentials *session_credentials) -{ - char shm_path[PATH_MAX]; - int ret; - - if (!channel->shm_path[0]) { - return shm_create_anonymous("ust-consumer"); - } - ret = get_stream_shm_path(shm_path, channel->shm_path, cpu); - if (ret) { - goto error_shm_path; - } - return run_as_open(shm_path, - O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR, - lttng_credentials_get_uid(session_credentials), - lttng_credentials_get_gid(session_credentials)); - -error_shm_path: - return -1; -} - -/* - * Create an UST channel with the given attributes and send it to the session - * daemon using the ust ctl API. - * - * Return 0 on success or else a negative value. 
- */ -static int create_ust_channel(struct lttng_consumer_channel *channel, - struct lttng_ust_ctl_consumer_channel_attr *attr, - struct lttng_ust_ctl_consumer_channel **ust_chanp) -{ - int ret, nr_stream_fds, i, j; - int *stream_fds; - struct lttng_ust_ctl_consumer_channel *ust_channel; - - LTTNG_ASSERT(channel); - LTTNG_ASSERT(attr); - LTTNG_ASSERT(ust_chanp); - LTTNG_ASSERT(channel->buffer_credentials.is_set); - - DBG3("Creating channel to ustctl with attr: [overwrite: %d, " - "subbuf_size: %" PRIu64 ", num_subbuf: %" PRIu64 ", " - "switch_timer_interval: %u, read_timer_interval: %u, " - "output: %d, type: %d", attr->overwrite, attr->subbuf_size, - attr->num_subbuf, attr->switch_timer_interval, - attr->read_timer_interval, attr->output, attr->type); - - if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) - nr_stream_fds = 1; - else - nr_stream_fds = lttng_ust_ctl_get_nr_stream_per_channel(); - stream_fds = zmalloc(nr_stream_fds * sizeof(*stream_fds)); - if (!stream_fds) { - ret = -1; - goto error_alloc; - } - for (i = 0; i < nr_stream_fds; i++) { - stream_fds[i] = open_ust_stream_fd(channel, i, - &channel->buffer_credentials.value); - if (stream_fds[i] < 0) { - ret = -1; - goto error_open; - } - } - ust_channel = lttng_ust_ctl_create_channel(attr, stream_fds, nr_stream_fds); - if (!ust_channel) { - ret = -1; - goto error_create; - } - channel->nr_stream_fds = nr_stream_fds; - channel->stream_fds = stream_fds; - *ust_chanp = ust_channel; - - return 0; - -error_create: -error_open: - for (j = i - 1; j >= 0; j--) { - int closeret; - - closeret = close(stream_fds[j]); - if (closeret) { - PERROR("close"); - } - if (channel->shm_path[0]) { - char shm_path[PATH_MAX]; - - closeret = get_stream_shm_path(shm_path, - channel->shm_path, j); - if (closeret) { - ERR("Cannot get stream shm path"); - } - closeret = run_as_unlink(shm_path, - lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR( - channel->buffer_credentials)), - lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR( - 
channel->buffer_credentials))); - if (closeret) { - PERROR("unlink %s", shm_path); - } - } - } - /* Try to rmdir all directories under shm_path root. */ - if (channel->root_shm_path[0]) { - (void) run_as_rmdir_recursive(channel->root_shm_path, - lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR( - channel->buffer_credentials)), - lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR( - channel->buffer_credentials)), - LTTNG_DIRECTORY_HANDLE_SKIP_NON_EMPTY_FLAG); - } - free(stream_fds); -error_alloc: - return ret; -} - -/* - * Send a single given stream to the session daemon using the sock. - * - * Return 0 on success else a negative value. - */ -static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream) -{ - int ret; - - LTTNG_ASSERT(stream); - LTTNG_ASSERT(sock >= 0); - - DBG("UST consumer sending stream %" PRIu64 " to sessiond", stream->key); - - /* Send stream to session daemon. */ - ret = lttng_ust_ctl_send_stream_to_sessiond(sock, stream->ustream); - if (ret < 0) { - goto error; - } - -error: - return ret; -} - -/* - * Send channel to sessiond and relayd if applicable. - * - * Return 0 on success or else a negative value. - */ -static int send_channel_to_sessiond_and_relayd(int sock, - struct lttng_consumer_channel *channel, - struct lttng_consumer_local_data *ctx, int *relayd_error) -{ - int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS; - struct lttng_consumer_stream *stream; - uint64_t net_seq_idx = -1ULL; - - LTTNG_ASSERT(channel); - LTTNG_ASSERT(ctx); - LTTNG_ASSERT(sock >= 0); - - DBG("UST consumer sending channel %s to sessiond", channel->name); - - if (channel->relayd_id != (uint64_t) -1ULL) { - cds_list_for_each_entry(stream, &channel->streams.head, send_node) { - - health_code_update(); - - /* Try to send the stream to the relayd if one is available. 
*/ - DBG("Sending stream %" PRIu64 " of channel \"%s\" to relayd", - stream->key, channel->name); - ret = consumer_send_relayd_stream(stream, stream->chan->pathname); - if (ret < 0) { - /* - * Flag that the relayd was the problem here probably due to a - * communicaton error on the socket. - */ - if (relayd_error) { - *relayd_error = 1; - } - ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL; - } - if (net_seq_idx == -1ULL) { - net_seq_idx = stream->net_seq_idx; - } - } - } - - /* Inform sessiond that we are about to send channel and streams. */ - ret = consumer_send_status_msg(sock, ret_code); - if (ret < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) { - /* - * Either the session daemon is not responding or the relayd died so we - * stop now. - */ - goto error; - } - - /* Send channel to sessiond. */ - ret = lttng_ust_ctl_send_channel_to_sessiond(sock, channel->uchan); - if (ret < 0) { - goto error; - } - - ret = lttng_ust_ctl_channel_close_wakeup_fd(channel->uchan); - if (ret < 0) { - goto error; - } - - /* The channel was sent successfully to the sessiond at this point. */ - cds_list_for_each_entry(stream, &channel->streams.head, send_node) { - - health_code_update(); - - /* Send stream to session daemon. */ - ret = send_sessiond_stream(sock, stream); - if (ret < 0) { - goto error; - } - } - - /* Tell sessiond there is no more stream. */ - ret = lttng_ust_ctl_send_stream_to_sessiond(sock, NULL); - if (ret < 0) { - goto error; - } - - DBG("UST consumer NULL stream sent to sessiond"); - - return 0; - -error: - if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) { - ret = -1; - } - return ret; -} - -/* - * Creates a channel and streams and add the channel it to the channel internal - * state. The created stream must ONLY be sent once the GET_CHANNEL command is - * received. - * - * Return 0 on success or else, a negative value is returned and the channel - * MUST be destroyed by consumer_del_channel(). 
- */ -static int ask_channel(struct lttng_consumer_local_data *ctx, - struct lttng_consumer_channel *channel, - struct lttng_ust_ctl_consumer_channel_attr *attr) -{ - int ret; - - LTTNG_ASSERT(ctx); - LTTNG_ASSERT(channel); - LTTNG_ASSERT(attr); - - /* - * This value is still used by the kernel consumer since for the kernel, - * the stream ownership is not IN the consumer so we need to have the - * number of left stream that needs to be initialized so we can know when - * to delete the channel (see consumer.c). - * - * As for the user space tracer now, the consumer creates and sends the - * stream to the session daemon which only sends them to the application - * once every stream of a channel is received making this value useless - * because we they will be added to the poll thread before the application - * receives them. This ensures that a stream can not hang up during - * initilization of a channel. - */ - channel->nb_init_stream_left = 0; - - /* The reply msg status is handled in the following call. */ - ret = create_ust_channel(channel, attr, &channel->uchan); - if (ret < 0) { - goto end; - } - - channel->wait_fd = lttng_ust_ctl_channel_get_wait_fd(channel->uchan); - - /* - * For the snapshots (no monitor), we create the metadata streams - * on demand, not during the channel creation. - */ - if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && !channel->monitor) { - ret = 0; - goto end; - } - - /* Open all streams for this channel. */ - pthread_mutex_lock(&channel->lock); - ret = create_ust_streams(channel, ctx); - pthread_mutex_unlock(&channel->lock); - if (ret < 0) { - goto end; - } - -end: - return ret; -} - -/* - * Send all stream of a channel to the right thread handling it. - * - * On error, return a negative value else 0 on success. 
- */ -static int send_streams_to_thread(struct lttng_consumer_channel *channel, - struct lttng_consumer_local_data *ctx) -{ - int ret = 0; - struct lttng_consumer_stream *stream, *stmp; - - LTTNG_ASSERT(channel); - LTTNG_ASSERT(ctx); - - /* Send streams to the corresponding thread. */ - cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head, - send_node) { - - health_code_update(); - - /* Sending the stream to the thread. */ - ret = send_stream_to_thread(stream, ctx); - if (ret < 0) { - /* - * If we are unable to send the stream to the thread, there is - * a big problem so just stop everything. - */ - goto error; - } - } - -error: - return ret; -} - -/* - * Flush channel's streams using the given key to retrieve the channel. - * - * Return 0 on success else an LTTng error code. - */ -static int flush_channel(uint64_t chan_key) -{ - int ret = 0; - struct lttng_consumer_channel *channel; - struct lttng_consumer_stream *stream; - struct lttng_ht *ht; - struct lttng_ht_iter iter; - - DBG("UST consumer flush channel key %" PRIu64, chan_key); - - rcu_read_lock(); - channel = consumer_find_channel(chan_key); - if (!channel) { - ERR("UST consumer flush channel %" PRIu64 " not found", chan_key); - ret = LTTNG_ERR_UST_CHAN_NOT_FOUND; - goto error; - } - - ht = the_consumer_data.stream_per_chan_id_ht; - - /* For each stream of the channel id, flush it. */ - cds_lfht_for_each_entry_duplicate(ht->ht, - ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct, - &channel->key, &iter.iter, stream, node_channel_id.node) { - - health_code_update(); - - pthread_mutex_lock(&stream->lock); - - /* - * Protect against concurrent teardown of a stream. 
- */ - if (cds_lfht_is_node_deleted(&stream->node.node)) { - goto next; - } - - if (!stream->quiescent) { - ret = lttng_ust_ctl_flush_buffer(stream->ustream, 0); - if (ret) { - ERR("Failed to flush buffer while flushing channel: channel key = %" PRIu64 ", channel name = '%s'", - chan_key, channel->name); - ret = LTTNG_ERR_BUFFER_FLUSH_FAILED; - pthread_mutex_unlock(&stream->lock); - goto error; - } - stream->quiescent = true; - } -next: - pthread_mutex_unlock(&stream->lock); - } -error: - rcu_read_unlock(); - return ret; -} - -/* - * Clear quiescent state from channel's streams using the given key to - * retrieve the channel. - * - * Return 0 on success else an LTTng error code. - */ -static int clear_quiescent_channel(uint64_t chan_key) -{ - int ret = 0; - struct lttng_consumer_channel *channel; - struct lttng_consumer_stream *stream; - struct lttng_ht *ht; - struct lttng_ht_iter iter; - - DBG("UST consumer clear quiescent channel key %" PRIu64, chan_key); - - rcu_read_lock(); - channel = consumer_find_channel(chan_key); - if (!channel) { - ERR("UST consumer clear quiescent channel %" PRIu64 " not found", chan_key); - ret = LTTNG_ERR_UST_CHAN_NOT_FOUND; - goto error; - } - - ht = the_consumer_data.stream_per_chan_id_ht; - - /* For each stream of the channel id, clear quiescent state. */ - cds_lfht_for_each_entry_duplicate(ht->ht, - ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct, - &channel->key, &iter.iter, stream, node_channel_id.node) { - - health_code_update(); - - pthread_mutex_lock(&stream->lock); - stream->quiescent = false; - pthread_mutex_unlock(&stream->lock); - } -error: - rcu_read_unlock(); - return ret; -} - -/* - * Close metadata stream wakeup_fd using the given key to retrieve the channel. - * - * Return 0 on success else an LTTng error code. 
- */ -static int close_metadata(uint64_t chan_key) -{ - int ret = 0; - struct lttng_consumer_channel *channel; - unsigned int channel_monitor; - - DBG("UST consumer close metadata key %" PRIu64, chan_key); - - channel = consumer_find_channel(chan_key); - if (!channel) { - /* - * This is possible if the metadata thread has issue a delete because - * the endpoint point of the stream hung up. There is no way the - * session daemon can know about it thus use a DBG instead of an actual - * error. - */ - DBG("UST consumer close metadata %" PRIu64 " not found", chan_key); - ret = LTTNG_ERR_UST_CHAN_NOT_FOUND; - goto error; - } - - pthread_mutex_lock(&the_consumer_data.lock); - pthread_mutex_lock(&channel->lock); - channel_monitor = channel->monitor; - if (cds_lfht_is_node_deleted(&channel->node.node)) { - goto error_unlock; - } - - lttng_ustconsumer_close_metadata(channel); - pthread_mutex_unlock(&channel->lock); - pthread_mutex_unlock(&the_consumer_data.lock); - - /* - * The ownership of a metadata channel depends on the type of - * session to which it belongs. In effect, the monitor flag is checked - * to determine if this metadata channel is in "snapshot" mode or not. - * - * In the non-snapshot case, the metadata channel is created along with - * a single stream which will remain present until the metadata channel - * is destroyed (on the destruction of its session). In this case, the - * metadata stream in "monitored" by the metadata poll thread and holds - * the ownership of its channel. - * - * Closing the metadata will cause the metadata stream's "metadata poll - * pipe" to be closed. Closing this pipe will wake-up the metadata poll - * thread which will teardown the metadata stream which, in return, - * deletes the metadata channel. - * - * In the snapshot case, the metadata stream is created and destroyed - * on every snapshot record. 
Since the channel doesn't have an owner - * other than the session daemon, it is safe to destroy it immediately - * on reception of the CLOSE_METADATA command. - */ - if (!channel_monitor) { - /* - * The channel and consumer_data locks must be - * released before this call since consumer_del_channel - * re-acquires the channel and consumer_data locks to teardown - * the channel and queue its reclamation by the "call_rcu" - * worker thread. - */ - consumer_del_channel(channel); - } - - return ret; -error_unlock: - pthread_mutex_unlock(&channel->lock); - pthread_mutex_unlock(&the_consumer_data.lock); -error: - return ret; -} - -/* - * RCU read side lock MUST be acquired before calling this function. - * - * Return 0 on success else an LTTng error code. - */ -static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key) -{ - int ret; - struct lttng_consumer_channel *metadata; - - DBG("UST consumer setup metadata key %" PRIu64, key); - - metadata = consumer_find_channel(key); - if (!metadata) { - ERR("UST consumer push metadata %" PRIu64 " not found", key); - ret = LTTNG_ERR_UST_CHAN_NOT_FOUND; - goto end; - } - - /* - * In no monitor mode, the metadata channel has no stream(s) so skip the - * ownership transfer to the metadata thread. - */ - if (!metadata->monitor) { - DBG("Metadata channel in no monitor"); - ret = 0; - goto end; - } - - /* - * Send metadata stream to relayd if one available. Availability is - * known if the stream is still in the list of the channel. - */ - if (cds_list_empty(&metadata->streams.head)) { - ERR("Metadata channel key %" PRIu64 ", no stream available.", key); - ret = LTTCOMM_CONSUMERD_ERROR_METADATA; - goto error_no_stream; - } - - /* Send metadata stream to relayd if needed. 
*/ - if (metadata->metadata_stream->net_seq_idx != (uint64_t) -1ULL) { - ret = consumer_send_relayd_stream(metadata->metadata_stream, - metadata->pathname); - if (ret < 0) { - ret = LTTCOMM_CONSUMERD_ERROR_METADATA; - goto error; - } - ret = consumer_send_relayd_streams_sent( - metadata->metadata_stream->net_seq_idx); - if (ret < 0) { - ret = LTTCOMM_CONSUMERD_RELAYD_FAIL; - goto error; - } - } - - /* - * Ownership of metadata stream is passed along. Freeing is handled by - * the callee. - */ - ret = send_streams_to_thread(metadata, ctx); - if (ret < 0) { - /* - * If we are unable to send the stream to the thread, there is - * a big problem so just stop everything. - */ - ret = LTTCOMM_CONSUMERD_FATAL; - goto send_streams_error; - } - /* List MUST be empty after or else it could be reused. */ - LTTNG_ASSERT(cds_list_empty(&metadata->streams.head)); - - ret = 0; - goto end; - -error: - /* - * Delete metadata channel on error. At this point, the metadata stream can - * NOT be monitored by the metadata thread thus having the guarantee that - * the stream is still in the local stream list of the channel. This call - * will make sure to clean that list. - */ - consumer_stream_destroy(metadata->metadata_stream, NULL); - cds_list_del(&metadata->metadata_stream->send_node); - metadata->metadata_stream = NULL; -send_streams_error: -error_no_stream: -end: - return ret; -} - -/* - * Snapshot the whole metadata. - * RCU read-side lock must be held by the caller. 
- * - * Returns 0 on success, < 0 on error - */ -static int snapshot_metadata(struct lttng_consumer_channel *metadata_channel, - uint64_t key, char *path, uint64_t relayd_id, - struct lttng_consumer_local_data *ctx) -{ - int ret = 0; - struct lttng_consumer_stream *metadata_stream; - - LTTNG_ASSERT(path); - LTTNG_ASSERT(ctx); - - DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s", - key, path); - - rcu_read_lock(); - - LTTNG_ASSERT(!metadata_channel->monitor); - - health_code_update(); - - /* - * Ask the sessiond if we have new metadata waiting and update the - * consumer metadata cache. - */ - ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 1); - if (ret < 0) { - goto error; - } - - health_code_update(); - - /* - * The metadata stream is NOT created in no monitor mode when the channel - * is created on a sessiond ask channel command. - */ - ret = create_ust_streams(metadata_channel, ctx); - if (ret < 0) { - goto error; - } - - metadata_stream = metadata_channel->metadata_stream; - LTTNG_ASSERT(metadata_stream); - - pthread_mutex_lock(&metadata_stream->lock); - if (relayd_id != (uint64_t) -1ULL) { - metadata_stream->net_seq_idx = relayd_id; - ret = consumer_send_relayd_stream(metadata_stream, path); - } else { - ret = consumer_stream_create_output_files(metadata_stream, - false); - } - pthread_mutex_unlock(&metadata_stream->lock); - if (ret < 0) { - goto error_stream; - } - - do { - health_code_update(); - - ret = lttng_consumer_read_subbuffer(metadata_stream, ctx, true); - if (ret < 0) { - goto error_stream; - } - } while (ret > 0); - -error_stream: - /* - * Clean up the stream completly because the next snapshot will use a new - * metadata stream. 
- */ - consumer_stream_destroy(metadata_stream, NULL); - cds_list_del(&metadata_stream->send_node); - metadata_channel->metadata_stream = NULL; - -error: - rcu_read_unlock(); - return ret; -} - -static -int get_current_subbuf_addr(struct lttng_consumer_stream *stream, - const char **addr) -{ - int ret; - unsigned long mmap_offset; - const char *mmap_base; - - mmap_base = lttng_ust_ctl_get_mmap_base(stream->ustream); - if (!mmap_base) { - ERR("Failed to get mmap base for stream `%s`", - stream->name); - ret = -EPERM; - goto error; - } - - ret = lttng_ust_ctl_get_mmap_read_offset(stream->ustream, &mmap_offset); - if (ret != 0) { - ERR("Failed to get mmap offset for stream `%s`", stream->name); - ret = -EINVAL; - goto error; - } - - *addr = mmap_base + mmap_offset; -error: - return ret; - -} - -/* - * Take a snapshot of all the stream of a channel. - * RCU read-side lock and the channel lock must be held by the caller. - * - * Returns 0 on success, < 0 on error - */ -static int snapshot_channel(struct lttng_consumer_channel *channel, - uint64_t key, char *path, uint64_t relayd_id, - uint64_t nb_packets_per_stream, - struct lttng_consumer_local_data *ctx) -{ - int ret; - unsigned use_relayd = 0; - unsigned long consumed_pos, produced_pos; - struct lttng_consumer_stream *stream; - - LTTNG_ASSERT(path); - LTTNG_ASSERT(ctx); - - rcu_read_lock(); - - if (relayd_id != (uint64_t) -1ULL) { - use_relayd = 1; - } - - LTTNG_ASSERT(!channel->monitor); - DBG("UST consumer snapshot channel %" PRIu64, key); - - cds_list_for_each_entry(stream, &channel->streams.head, send_node) { - health_code_update(); - - /* Lock stream because we are about to change its state. */ - pthread_mutex_lock(&stream->lock); - LTTNG_ASSERT(channel->trace_chunk); - if (!lttng_trace_chunk_get(channel->trace_chunk)) { - /* - * Can't happen barring an internal error as the channel - * holds a reference to the trace chunk. 
- */ - ERR("Failed to acquire reference to channel's trace chunk"); - ret = -1; - goto error_unlock; - } - LTTNG_ASSERT(!stream->trace_chunk); - stream->trace_chunk = channel->trace_chunk; - - stream->net_seq_idx = relayd_id; - - if (use_relayd) { - ret = consumer_send_relayd_stream(stream, path); - if (ret < 0) { - goto error_unlock; - } - } else { - ret = consumer_stream_create_output_files(stream, - false); - if (ret < 0) { - goto error_unlock; - } - DBG("UST consumer snapshot stream (%" PRIu64 ")", - stream->key); - } - - /* - * If tracing is active, we want to perform a "full" buffer flush. - * Else, if quiescent, it has already been done by the prior stop. - */ - if (!stream->quiescent) { - ret = lttng_ust_ctl_flush_buffer(stream->ustream, 0); - if (ret < 0) { - ERR("Failed to flush buffer during snapshot of channel: channel key = %" PRIu64 ", channel name = '%s'", - channel->key, channel->name); - goto error_unlock; - } - } - - ret = lttng_ustconsumer_take_snapshot(stream); - if (ret < 0) { - ERR("Taking UST snapshot"); - goto error_unlock; - } - - ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos); - if (ret < 0) { - ERR("Produced UST snapshot position"); - goto error_unlock; - } - - ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos); - if (ret < 0) { - ERR("Consumerd UST snapshot position"); - goto error_unlock; - } - - /* - * The original value is sent back if max stream size is larger than - * the possible size of the snapshot. Also, we assume that the session - * daemon should never send a maximum stream size that is lower than - * subbuffer size. 
- */ - consumed_pos = consumer_get_consume_start_pos(consumed_pos, - produced_pos, nb_packets_per_stream, - stream->max_sb_size); - - while ((long) (consumed_pos - produced_pos) < 0) { - ssize_t read_len; - unsigned long len, padded_len; - const char *subbuf_addr; - struct lttng_buffer_view subbuf_view; - - health_code_update(); - - DBG("UST consumer taking snapshot at pos %lu", consumed_pos); - - ret = lttng_ust_ctl_get_subbuf(stream->ustream, &consumed_pos); - if (ret < 0) { - if (ret != -EAGAIN) { - PERROR("lttng_ust_ctl_get_subbuf snapshot"); - goto error_close_stream; - } - DBG("UST consumer get subbuf failed. Skipping it."); - consumed_pos += stream->max_sb_size; - stream->chan->lost_packets++; - continue; - } - - ret = lttng_ust_ctl_get_subbuf_size(stream->ustream, &len); - if (ret < 0) { - ERR("Snapshot lttng_ust_ctl_get_subbuf_size"); - goto error_put_subbuf; - } - - ret = lttng_ust_ctl_get_padded_subbuf_size(stream->ustream, &padded_len); - if (ret < 0) { - ERR("Snapshot lttng_ust_ctl_get_padded_subbuf_size"); - goto error_put_subbuf; - } - - ret = get_current_subbuf_addr(stream, &subbuf_addr); - if (ret) { - goto error_put_subbuf; - } - - subbuf_view = lttng_buffer_view_init( - subbuf_addr, 0, padded_len); - read_len = lttng_consumer_on_read_subbuffer_mmap( - stream, &subbuf_view, padded_len - len); - if (use_relayd) { - if (read_len != len) { - ret = -EPERM; - goto error_put_subbuf; - } - } else { - if (read_len != padded_len) { - ret = -EPERM; - goto error_put_subbuf; - } - } - - ret = lttng_ust_ctl_put_subbuf(stream->ustream); - if (ret < 0) { - ERR("Snapshot lttng_ust_ctl_put_subbuf"); - goto error_close_stream; - } - consumed_pos += stream->max_sb_size; - } - - /* Simply close the stream so we can use it on the next snapshot. 
*/ - consumer_stream_close(stream); - pthread_mutex_unlock(&stream->lock); - } - - rcu_read_unlock(); - return 0; - -error_put_subbuf: - if (lttng_ust_ctl_put_subbuf(stream->ustream) < 0) { - ERR("Snapshot lttng_ust_ctl_put_subbuf"); - } -error_close_stream: - consumer_stream_close(stream); -error_unlock: - pthread_mutex_unlock(&stream->lock); - rcu_read_unlock(); - return ret; -} - -static -void metadata_stream_reset_cache_consumed_position( - struct lttng_consumer_stream *stream) -{ - ASSERT_LOCKED(stream->lock); - - DBG("Reset metadata cache of session %" PRIu64, - stream->chan->session_id); - stream->ust_metadata_pushed = 0; -} - -/* - * Receive the metadata updates from the sessiond. Supports receiving - * overlapping metadata, but is needs to always belong to a contiguous - * range starting from 0. - * Be careful about the locks held when calling this function: it needs - * the metadata cache flush to concurrently progress in order to - * complete. - */ -int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset, - uint64_t len, uint64_t version, - struct lttng_consumer_channel *channel, int timer, int wait) -{ - int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS; - char *metadata_str; - enum consumer_metadata_cache_write_status cache_write_status; - - DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, len); - - metadata_str = zmalloc(len * sizeof(char)); - if (!metadata_str) { - PERROR("zmalloc metadata string"); - ret_code = LTTCOMM_CONSUMERD_ENOMEM; - goto end; - } - - health_code_update(); - - /* Receive metadata string. */ - ret = lttcomm_recv_unix_sock(sock, metadata_str, len); - if (ret < 0) { - /* Session daemon is dead so return gracefully. 
*/ - ret_code = ret; - goto end_free; - } - - health_code_update(); - - pthread_mutex_lock(&channel->metadata_cache->lock); - cache_write_status = consumer_metadata_cache_write( - channel->metadata_cache, offset, len, version, - metadata_str); - pthread_mutex_unlock(&channel->metadata_cache->lock); - switch (cache_write_status) { - case CONSUMER_METADATA_CACHE_WRITE_STATUS_NO_CHANGE: - /* - * The write entirely overlapped with existing contents of the - * same metadata version (same content); there is nothing to do. - */ - break; - case CONSUMER_METADATA_CACHE_WRITE_STATUS_INVALIDATED: - /* - * The metadata cache was invalidated (previously pushed - * content has been overwritten). Reset the stream's consumed - * metadata position to ensure the metadata poll thread consumes - * the whole cache. - */ - pthread_mutex_lock(&channel->metadata_stream->lock); - metadata_stream_reset_cache_consumed_position( - channel->metadata_stream); - pthread_mutex_unlock(&channel->metadata_stream->lock); - /* Fall-through. */ - case CONSUMER_METADATA_CACHE_WRITE_STATUS_APPENDED_CONTENT: - /* - * In both cases, the metadata poll thread has new data to - * consume. - */ - ret = consumer_metadata_wakeup_pipe(channel); - if (ret) { - ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA; - goto end_free; - } - break; - case CONSUMER_METADATA_CACHE_WRITE_STATUS_ERROR: - /* Unable to handle metadata. Notify session daemon. */ - ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA; - /* - * Skip metadata flush on write error since the offset and len might - * not have been updated which could create an infinite loop below when - * waiting for the metadata cache to be flushed. 
- */ - goto end_free; - default: - abort(); - } - - if (!wait) { - goto end_free; - } - while (consumer_metadata_cache_flushed(channel, offset + len, timer)) { - DBG("Waiting for metadata to be flushed"); - - health_code_update(); - - usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME); - } - -end_free: - free(metadata_str); -end: - return ret_code; -} - -/* - * Receive command from session daemon and process it. - * - * Return 1 on success else a negative value or 0. - */ -int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx, - int sock, struct pollfd *consumer_sockpoll) -{ - int ret_func; - enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS; - struct lttcomm_consumer_msg msg; - struct lttng_consumer_channel *channel = NULL; - - health_code_update(); - - { - ssize_t ret_recv; - - ret_recv = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg)); - if (ret_recv != sizeof(msg)) { - DBG("Consumer received unexpected message size %zd (expects %zu)", - ret_recv, sizeof(msg)); - /* - * The ret value might 0 meaning an orderly shutdown but this is ok - * since the caller handles this. - */ - if (ret_recv > 0) { - lttng_consumer_send_error(ctx, - LTTCOMM_CONSUMERD_ERROR_RECV_CMD); - ret_recv = -1; - } - return ret_recv; - } - } - - health_code_update(); - - /* deprecated */ - LTTNG_ASSERT(msg.cmd_type != LTTNG_CONSUMER_STOP); - - health_code_update(); - - /* relayd needs RCU read-side lock */ - rcu_read_lock(); - - switch (msg.cmd_type) { - case LTTNG_CONSUMER_ADD_RELAYD_SOCKET: - { - /* Session daemon status message are handled in the following call. 
*/ - consumer_add_relayd_socket(msg.u.relayd_sock.net_index, - msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll, - &msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id, - msg.u.relayd_sock.relayd_session_id); - goto end_nosignal; - } - case LTTNG_CONSUMER_DESTROY_RELAYD: - { - uint64_t index = msg.u.destroy_relayd.net_seq_idx; - struct consumer_relayd_sock_pair *relayd; - - DBG("UST consumer destroying relayd %" PRIu64, index); - - /* Get relayd reference if exists. */ - relayd = consumer_find_relayd(index); - if (relayd == NULL) { - DBG("Unable to find relayd %" PRIu64, index); - ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL; - } - - /* - * Each relayd socket pair has a refcount of stream attached to it - * which tells if the relayd is still active or not depending on the - * refcount value. - * - * This will set the destroy flag of the relayd object and destroy it - * if the refcount reaches zero when called. - * - * The destroy can happen either here or when a stream fd hangs up. - */ - if (relayd) { - consumer_flag_relayd_for_destroy(relayd); - } - - goto end_msg_sessiond; - } - case LTTNG_CONSUMER_UPDATE_STREAM: - { - rcu_read_unlock(); - return -ENOSYS; - } - case LTTNG_CONSUMER_DATA_PENDING: - { - int is_data_pending; - ssize_t ret_send; - uint64_t id = msg.u.data_pending.session_id; - - DBG("UST consumer data pending command for id %" PRIu64, id); - - is_data_pending = consumer_data_pending(id); - - /* Send back returned value to session daemon */ - ret_send = lttcomm_send_unix_sock(sock, &is_data_pending, - sizeof(is_data_pending)); - if (ret_send < 0) { - DBG("Error when sending the data pending ret code: %zd", - ret_send); - goto error_fatal; - } - - /* - * No need to send back a status message since the data pending - * returned value is the response. 
- */ - break; - } - case LTTNG_CONSUMER_ASK_CHANNEL_CREATION: - { - int ret_ask_channel, ret_add_channel, ret_send; - struct lttng_ust_ctl_consumer_channel_attr attr; - const uint64_t chunk_id = msg.u.ask_channel.chunk_id.value; - const struct lttng_credentials buffer_credentials = { - .uid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.ask_channel.buffer_credentials.uid), - .gid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.ask_channel.buffer_credentials.gid), - }; - - /* Create a plain object and reserve a channel key. */ - channel = consumer_allocate_channel( - msg.u.ask_channel.key, - msg.u.ask_channel.session_id, - msg.u.ask_channel.chunk_id.is_set ? - &chunk_id : NULL, - msg.u.ask_channel.pathname, - msg.u.ask_channel.name, - msg.u.ask_channel.relayd_id, - (enum lttng_event_output) msg.u.ask_channel.output, - msg.u.ask_channel.tracefile_size, - msg.u.ask_channel.tracefile_count, - msg.u.ask_channel.session_id_per_pid, - msg.u.ask_channel.monitor, - msg.u.ask_channel.live_timer_interval, - msg.u.ask_channel.is_live, - msg.u.ask_channel.root_shm_path, - msg.u.ask_channel.shm_path); - if (!channel) { - goto end_channel_error; - } - - LTTNG_OPTIONAL_SET(&channel->buffer_credentials, - buffer_credentials); - - /* - * Assign UST application UID to the channel. This value is ignored for - * per PID buffers. This is specific to UST thus setting this after the - * allocation. - */ - channel->ust_app_uid = msg.u.ask_channel.ust_app_uid; - - /* Build channel attributes from received message. */ - attr.subbuf_size = msg.u.ask_channel.subbuf_size; - attr.num_subbuf = msg.u.ask_channel.num_subbuf; - attr.overwrite = msg.u.ask_channel.overwrite; - attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval; - attr.read_timer_interval = msg.u.ask_channel.read_timer_interval; - attr.chan_id = msg.u.ask_channel.chan_id; - memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid)); - attr.blocking_timeout= msg.u.ask_channel.blocking_timeout; - - /* Match channel buffer type to the UST abi. 
*/ - switch (msg.u.ask_channel.output) { - case LTTNG_EVENT_MMAP: - default: - attr.output = LTTNG_UST_ABI_MMAP; - break; - } - - /* Translate and save channel type. */ - switch (msg.u.ask_channel.type) { - case LTTNG_UST_ABI_CHAN_PER_CPU: - channel->type = CONSUMER_CHANNEL_TYPE_DATA; - attr.type = LTTNG_UST_ABI_CHAN_PER_CPU; - /* - * Set refcount to 1 for owner. Below, we will - * pass ownership to the - * consumer_thread_channel_poll() thread. - */ - channel->refcount = 1; - break; - case LTTNG_UST_ABI_CHAN_METADATA: - channel->type = CONSUMER_CHANNEL_TYPE_METADATA; - attr.type = LTTNG_UST_ABI_CHAN_METADATA; - break; - default: - abort(); - goto error_fatal; - }; - - health_code_update(); - - ret_ask_channel = ask_channel(ctx, channel, &attr); - if (ret_ask_channel < 0) { - goto end_channel_error; - } - - if (msg.u.ask_channel.type == LTTNG_UST_ABI_CHAN_METADATA) { - int ret_allocate; - - ret_allocate = consumer_metadata_cache_allocate( - channel); - if (ret_allocate < 0) { - ERR("Allocating metadata cache"); - goto end_channel_error; - } - consumer_timer_switch_start(channel, attr.switch_timer_interval); - attr.switch_timer_interval = 0; - } else { - int monitor_start_ret; - - consumer_timer_live_start(channel, - msg.u.ask_channel.live_timer_interval); - monitor_start_ret = consumer_timer_monitor_start( - channel, - msg.u.ask_channel.monitor_timer_interval); - if (monitor_start_ret < 0) { - ERR("Starting channel monitoring timer failed"); - goto end_channel_error; - } - } - - health_code_update(); - - /* - * Add the channel to the internal state AFTER all streams were created - * and successfully sent to session daemon. This way, all streams must - * be ready before this channel is visible to the threads. - * If add_channel succeeds, ownership of the channel is - * passed to consumer_thread_channel_poll(). 
- */ - ret_add_channel = add_channel(channel, ctx); - if (ret_add_channel < 0) { - if (msg.u.ask_channel.type == LTTNG_UST_ABI_CHAN_METADATA) { - if (channel->switch_timer_enabled == 1) { - consumer_timer_switch_stop(channel); - } - consumer_metadata_cache_destroy(channel); - } - if (channel->live_timer_enabled == 1) { - consumer_timer_live_stop(channel); - } - if (channel->monitor_timer_enabled == 1) { - consumer_timer_monitor_stop(channel); - } - goto end_channel_error; - } - - health_code_update(); - - /* - * Channel and streams are now created. Inform the session daemon that - * everything went well and should wait to receive the channel and - * streams with ustctl API. - */ - ret_send = consumer_send_status_channel(sock, channel); - if (ret_send < 0) { - /* - * There is probably a problem on the socket. - */ - goto error_fatal; - } - - break; - } - case LTTNG_CONSUMER_GET_CHANNEL: - { - int ret, relayd_err = 0; - uint64_t key = msg.u.get_channel.key; - struct lttng_consumer_channel *found_channel; - - found_channel = consumer_find_channel(key); - if (!found_channel) { - ERR("UST consumer get channel key %" PRIu64 " not found", key); - ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; - goto end_get_channel; - } - - health_code_update(); - - /* Send the channel to sessiond (and relayd, if applicable). */ - ret = send_channel_to_sessiond_and_relayd( - sock, found_channel, ctx, &relayd_err); - if (ret < 0) { - if (relayd_err) { - /* - * We were unable to send to the relayd the stream so avoid - * sending back a fatal error to the thread since this is OK - * and the consumer can continue its work. The above call - * has sent the error status message to the sessiond. - */ - goto end_get_channel_nosignal; - } - /* - * The communicaton was broken hence there is a bad state between - * the consumer and sessiond so stop everything. 
- */ - goto error_get_channel_fatal; - } - - health_code_update(); - - /* - * In no monitor mode, the streams ownership is kept inside the channel - * so don't send them to the data thread. - */ - if (!found_channel->monitor) { - goto end_get_channel; - } - - ret = send_streams_to_thread(found_channel, ctx); - if (ret < 0) { - /* - * If we are unable to send the stream to the thread, there is - * a big problem so just stop everything. - */ - goto error_get_channel_fatal; - } - /* List MUST be empty after or else it could be reused. */ - LTTNG_ASSERT(cds_list_empty(&found_channel->streams.head)); -end_get_channel: - goto end_msg_sessiond; -error_get_channel_fatal: - goto error_fatal; -end_get_channel_nosignal: - goto end_nosignal; - } - case LTTNG_CONSUMER_DESTROY_CHANNEL: - { - uint64_t key = msg.u.destroy_channel.key; - - /* - * Only called if streams have not been sent to stream - * manager thread. However, channel has been sent to - * channel manager thread. - */ - notify_thread_del_channel(ctx, key); - goto end_msg_sessiond; - } - case LTTNG_CONSUMER_CLOSE_METADATA: - { - int ret; - - ret = close_metadata(msg.u.close_metadata.key); - if (ret != 0) { - ret_code = ret; - } - - goto end_msg_sessiond; - } - case LTTNG_CONSUMER_FLUSH_CHANNEL: - { - int ret; - - ret = flush_channel(msg.u.flush_channel.key); - if (ret != 0) { - ret_code = ret; - } - - goto end_msg_sessiond; - } - case LTTNG_CONSUMER_CLEAR_QUIESCENT_CHANNEL: - { - int ret; - - ret = clear_quiescent_channel( - msg.u.clear_quiescent_channel.key); - if (ret != 0) { - ret_code = ret; - } - - goto end_msg_sessiond; - } - case LTTNG_CONSUMER_PUSH_METADATA: - { - int ret; - uint64_t len = msg.u.push_metadata.len; - uint64_t key = msg.u.push_metadata.key; - uint64_t offset = msg.u.push_metadata.target_offset; - uint64_t version = msg.u.push_metadata.version; - struct lttng_consumer_channel *found_channel; - - DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, - len); - - found_channel = 
consumer_find_channel(key); - if (!found_channel) { - /* - * This is possible if the metadata creation on the consumer side - * is in flight vis-a-vis a concurrent push metadata from the - * session daemon. Simply return that the channel failed and the - * session daemon will handle that message correctly considering - * that this race is acceptable thus the DBG() statement here. - */ - DBG("UST consumer push metadata %" PRIu64 " not found", key); - ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL; - goto end_push_metadata_msg_sessiond; - } - - health_code_update(); - - if (!len) { - /* - * There is nothing to receive. We have simply - * checked whether the channel can be found. - */ - ret_code = LTTCOMM_CONSUMERD_SUCCESS; - goto end_push_metadata_msg_sessiond; - } - - /* Tell session daemon we are ready to receive the metadata. */ - ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS); - if (ret < 0) { - /* Somehow, the session daemon is not responding anymore. */ - goto error_push_metadata_fatal; - } - - health_code_update(); - - /* Wait for more data. 
*/ - health_poll_entry(); - ret = lttng_consumer_poll_socket(consumer_sockpoll); - health_poll_exit(); - if (ret) { - goto error_push_metadata_fatal; - } - - health_code_update(); - - ret = lttng_ustconsumer_recv_metadata(sock, key, offset, len, - version, found_channel, 0, 1); - if (ret < 0) { - /* error receiving from sessiond */ - goto error_push_metadata_fatal; - } else { - ret_code = ret; - goto end_push_metadata_msg_sessiond; - } -end_push_metadata_msg_sessiond: - goto end_msg_sessiond; -error_push_metadata_fatal: - goto error_fatal; - } - case LTTNG_CONSUMER_SETUP_METADATA: - { - int ret; - - ret = setup_metadata(ctx, msg.u.setup_metadata.key); - if (ret) { - ret_code = ret; - } - goto end_msg_sessiond; - } - case LTTNG_CONSUMER_SNAPSHOT_CHANNEL: - { - struct lttng_consumer_channel *found_channel; - uint64_t key = msg.u.snapshot_channel.key; - int ret_send; - - found_channel = consumer_find_channel(key); - if (!found_channel) { - DBG("UST snapshot channel not found for key %" PRIu64, key); - ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; - } else { - if (msg.u.snapshot_channel.metadata) { - int ret_snapshot; - - ret_snapshot = snapshot_metadata(found_channel, - key, - msg.u.snapshot_channel.pathname, - msg.u.snapshot_channel.relayd_id, - ctx); - if (ret_snapshot < 0) { - ERR("Snapshot metadata failed"); - ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED; - } - } else { - int ret_snapshot; - - ret_snapshot = snapshot_channel(found_channel, - key, - msg.u.snapshot_channel.pathname, - msg.u.snapshot_channel.relayd_id, - msg.u.snapshot_channel - .nb_packets_per_stream, - ctx); - if (ret_snapshot < 0) { - ERR("Snapshot channel failed"); - ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED; - } - } - } - health_code_update(); - ret_send = consumer_send_status_msg(sock, ret_code); - if (ret_send < 0) { - /* Somehow, the session daemon is not responding anymore. 
*/ - goto end_nosignal; - } - health_code_update(); - break; - } - case LTTNG_CONSUMER_DISCARDED_EVENTS: - { - int ret = 0; - uint64_t discarded_events; - struct lttng_ht_iter iter; - struct lttng_ht *ht; - struct lttng_consumer_stream *stream; - uint64_t id = msg.u.discarded_events.session_id; - uint64_t key = msg.u.discarded_events.channel_key; - - DBG("UST consumer discarded events command for session id %" - PRIu64, id); - rcu_read_lock(); - pthread_mutex_lock(&the_consumer_data.lock); - - ht = the_consumer_data.stream_list_ht; - - /* - * We only need a reference to the channel, but they are not - * directly indexed, so we just use the first matching stream - * to extract the information we need, we default to 0 if not - * found (no events are dropped if the channel is not yet in - * use). - */ - discarded_events = 0; - cds_lfht_for_each_entry_duplicate(ht->ht, - ht->hash_fct(&id, lttng_ht_seed), - ht->match_fct, &id, - &iter.iter, stream, node_session_id.node) { - if (stream->chan->key == key) { - discarded_events = stream->chan->discarded_events; - break; - } - } - pthread_mutex_unlock(&the_consumer_data.lock); - rcu_read_unlock(); - - DBG("UST consumer discarded events command for session id %" - PRIu64 ", channel key %" PRIu64, id, key); - - health_code_update(); - - /* Send back returned value to session daemon */ - ret = lttcomm_send_unix_sock(sock, &discarded_events, sizeof(discarded_events)); - if (ret < 0) { - PERROR("send discarded events"); - goto error_fatal; - } - - break; - } - case LTTNG_CONSUMER_LOST_PACKETS: - { - int ret; - uint64_t lost_packets; - struct lttng_ht_iter iter; - struct lttng_ht *ht; - struct lttng_consumer_stream *stream; - uint64_t id = msg.u.lost_packets.session_id; - uint64_t key = msg.u.lost_packets.channel_key; - - DBG("UST consumer lost packets command for session id %" - PRIu64, id); - rcu_read_lock(); - pthread_mutex_lock(&the_consumer_data.lock); - - ht = the_consumer_data.stream_list_ht; - - /* - * We only need a 
reference to the channel, but they are not - * directly indexed, so we just use the first matching stream - * to extract the information we need, we default to 0 if not - * found (no packets lost if the channel is not yet in use). - */ - lost_packets = 0; - cds_lfht_for_each_entry_duplicate(ht->ht, - ht->hash_fct(&id, lttng_ht_seed), - ht->match_fct, &id, - &iter.iter, stream, node_session_id.node) { - if (stream->chan->key == key) { - lost_packets = stream->chan->lost_packets; - break; - } - } - pthread_mutex_unlock(&the_consumer_data.lock); - rcu_read_unlock(); - - DBG("UST consumer lost packets command for session id %" - PRIu64 ", channel key %" PRIu64, id, key); - - health_code_update(); - - /* Send back returned value to session daemon */ - ret = lttcomm_send_unix_sock(sock, &lost_packets, - sizeof(lost_packets)); - if (ret < 0) { - PERROR("send lost packets"); - goto error_fatal; - } - - break; - } - case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE: - { - int channel_monitor_pipe, ret_send, - ret_set_channel_monitor_pipe; - ssize_t ret_recv; - - ret_code = LTTCOMM_CONSUMERD_SUCCESS; - /* Successfully received the command's type. */ - ret_send = consumer_send_status_msg(sock, ret_code); - if (ret_send < 0) { - goto error_fatal; - } - - ret_recv = lttcomm_recv_fds_unix_sock( - sock, &channel_monitor_pipe, 1); - if (ret_recv != sizeof(channel_monitor_pipe)) { - ERR("Failed to receive channel monitor pipe"); - goto error_fatal; - } - - DBG("Received channel monitor pipe (%d)", channel_monitor_pipe); - ret_set_channel_monitor_pipe = - consumer_timer_thread_set_channel_monitor_pipe( - channel_monitor_pipe); - if (!ret_set_channel_monitor_pipe) { - int flags; - int ret_fcntl; - - ret_code = LTTCOMM_CONSUMERD_SUCCESS; - /* Set the pipe as non-blocking. 
*/ - ret_fcntl = fcntl(channel_monitor_pipe, F_GETFL, 0); - if (ret_fcntl == -1) { - PERROR("fcntl get flags of the channel monitoring pipe"); - goto error_fatal; - } - flags = ret_fcntl; - - ret_fcntl = fcntl(channel_monitor_pipe, F_SETFL, - flags | O_NONBLOCK); - if (ret_fcntl == -1) { - PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe"); - goto error_fatal; - } - DBG("Channel monitor pipe set as non-blocking"); - } else { - ret_code = LTTCOMM_CONSUMERD_ALREADY_SET; - } - goto end_msg_sessiond; - } - case LTTNG_CONSUMER_ROTATE_CHANNEL: - { - struct lttng_consumer_channel *found_channel; - uint64_t key = msg.u.rotate_channel.key; - int ret_send_status; - - found_channel = consumer_find_channel(key); - if (!found_channel) { - DBG("Channel %" PRIu64 " not found", key); - ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; - } else { - int rotate_channel; - - /* - * Sample the rotate position of all the streams in - * this channel. - */ - rotate_channel = lttng_consumer_rotate_channel( - found_channel, key, - msg.u.rotate_channel.relayd_id, - msg.u.rotate_channel.metadata, ctx); - if (rotate_channel < 0) { - ERR("Rotate channel failed"); - ret_code = LTTCOMM_CONSUMERD_ROTATION_FAIL; - } - - health_code_update(); - } - - ret_send_status = consumer_send_status_msg(sock, ret_code); - if (ret_send_status < 0) { - /* Somehow, the session daemon is not responding anymore. */ - goto end_rotate_channel_nosignal; - } - - /* - * Rotate the streams that are ready right now. - * FIXME: this is a second consecutive iteration over the - * streams in a channel, there is probably a better way to - * handle this, but it needs to be after the - * consumer_send_status_msg() call. 
- */ - if (found_channel) { - int ret_rotate_read_streams; - - ret_rotate_read_streams = - lttng_consumer_rotate_ready_streams( - found_channel, key, - ctx); - if (ret_rotate_read_streams < 0) { - ERR("Rotate channel failed"); - } - } - break; -end_rotate_channel_nosignal: - goto end_nosignal; - } - case LTTNG_CONSUMER_CLEAR_CHANNEL: - { - struct lttng_consumer_channel *found_channel; - uint64_t key = msg.u.clear_channel.key; - int ret_send_status; - - found_channel = consumer_find_channel(key); - if (!found_channel) { - DBG("Channel %" PRIu64 " not found", key); - ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; - } else { - int ret_clear_channel; - - ret_clear_channel = lttng_consumer_clear_channel( - found_channel); - if (ret_clear_channel) { - ERR("Clear channel failed key %" PRIu64, key); - ret_code = ret_clear_channel; - } - - health_code_update(); - } - ret_send_status = consumer_send_status_msg(sock, ret_code); - if (ret_send_status < 0) { - /* Somehow, the session daemon is not responding anymore. */ - goto end_nosignal; - } - break; - } - case LTTNG_CONSUMER_INIT: - { - int ret_send_status; - - ret_code = lttng_consumer_init_command(ctx, - msg.u.init.sessiond_uuid); - health_code_update(); - ret_send_status = consumer_send_status_msg(sock, ret_code); - if (ret_send_status < 0) { - /* Somehow, the session daemon is not responding anymore. */ - goto end_nosignal; - } - break; - } - case LTTNG_CONSUMER_CREATE_TRACE_CHUNK: - { - const struct lttng_credentials credentials = { - .uid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.uid), - .gid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.gid), - }; - const bool is_local_trace = - !msg.u.create_trace_chunk.relayd_id.is_set; - const uint64_t relayd_id = - msg.u.create_trace_chunk.relayd_id.value; - const char *chunk_override_name = - *msg.u.create_trace_chunk.override_name ? 
- msg.u.create_trace_chunk.override_name : - NULL; - struct lttng_directory_handle *chunk_directory_handle = NULL; - - /* - * The session daemon will only provide a chunk directory file - * descriptor for local traces. - */ - if (is_local_trace) { - int chunk_dirfd; - int ret_send_status; - ssize_t ret_recv; - - /* Acnowledge the reception of the command. */ - ret_send_status = consumer_send_status_msg( - sock, LTTCOMM_CONSUMERD_SUCCESS); - if (ret_send_status < 0) { - /* Somehow, the session daemon is not responding anymore. */ - goto end_nosignal; - } - - /* - * Receive trace chunk domain dirfd. - */ - ret_recv = lttcomm_recv_fds_unix_sock( - sock, &chunk_dirfd, 1); - if (ret_recv != sizeof(chunk_dirfd)) { - ERR("Failed to receive trace chunk domain directory file descriptor"); - goto error_fatal; - } - - DBG("Received trace chunk domain directory fd (%d)", - chunk_dirfd); - chunk_directory_handle = lttng_directory_handle_create_from_dirfd( - chunk_dirfd); - if (!chunk_directory_handle) { - ERR("Failed to initialize chunk domain directory handle from directory file descriptor"); - if (close(chunk_dirfd)) { - PERROR("Failed to close chunk directory file descriptor"); - } - goto error_fatal; - } - } - - ret_code = lttng_consumer_create_trace_chunk( - !is_local_trace ? &relayd_id : NULL, - msg.u.create_trace_chunk.session_id, - msg.u.create_trace_chunk.chunk_id, - (time_t) msg.u.create_trace_chunk - .creation_timestamp, - chunk_override_name, - msg.u.create_trace_chunk.credentials.is_set ? 
- &credentials : - NULL, - chunk_directory_handle); - lttng_directory_handle_put(chunk_directory_handle); - goto end_msg_sessiond; - } - case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK: - { - enum lttng_trace_chunk_command_type close_command = - msg.u.close_trace_chunk.close_command.value; - const uint64_t relayd_id = - msg.u.close_trace_chunk.relayd_id.value; - struct lttcomm_consumer_close_trace_chunk_reply reply; - char closed_trace_chunk_path[LTTNG_PATH_MAX] = {}; - int ret; - - ret_code = lttng_consumer_close_trace_chunk( - msg.u.close_trace_chunk.relayd_id.is_set ? - &relayd_id : - NULL, - msg.u.close_trace_chunk.session_id, - msg.u.close_trace_chunk.chunk_id, - (time_t) msg.u.close_trace_chunk.close_timestamp, - msg.u.close_trace_chunk.close_command.is_set ? - &close_command : - NULL, closed_trace_chunk_path); - reply.ret_code = ret_code; - reply.path_length = strlen(closed_trace_chunk_path) + 1; - ret = lttcomm_send_unix_sock(sock, &reply, sizeof(reply)); - if (ret != sizeof(reply)) { - goto error_fatal; - } - ret = lttcomm_send_unix_sock(sock, closed_trace_chunk_path, - reply.path_length); - if (ret != reply.path_length) { - goto error_fatal; - } - goto end_nosignal; - } - case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS: - { - const uint64_t relayd_id = - msg.u.trace_chunk_exists.relayd_id.value; - - ret_code = lttng_consumer_trace_chunk_exists( - msg.u.trace_chunk_exists.relayd_id.is_set ? - &relayd_id : NULL, - msg.u.trace_chunk_exists.session_id, - msg.u.trace_chunk_exists.chunk_id); - goto end_msg_sessiond; - } - case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS: - { - const uint64_t key = msg.u.open_channel_packets.key; - struct lttng_consumer_channel *found_channel = - consumer_find_channel(key); - - if (found_channel) { - pthread_mutex_lock(&found_channel->lock); - ret_code = lttng_consumer_open_channel_packets( - found_channel); - pthread_mutex_unlock(&found_channel->lock); - } else { - /* - * The channel could have disappeared in per-pid - * buffering mode. 
- */ - DBG("Channel %" PRIu64 " not found", key); - ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; - } - - health_code_update(); - goto end_msg_sessiond; - } - default: - break; - } - -end_nosignal: - /* - * Return 1 to indicate success since the 0 value can be a socket - * shutdown during the recv() or send() call. - */ - ret_func = 1; - goto end; - -end_msg_sessiond: - /* - * The returned value here is not useful since either way we'll return 1 to - * the caller because the session daemon socket management is done - * elsewhere. Returning a negative code or 0 will shutdown the consumer. - */ - { - int ret_send_status; - - ret_send_status = consumer_send_status_msg(sock, ret_code); - if (ret_send_status < 0) { - goto error_fatal; - } - } - - ret_func = 1; - goto end; - -end_channel_error: - if (channel) { - /* - * Free channel here since no one has a reference to it. We don't - * free after that because a stream can store this pointer. - */ - destroy_channel(channel); - } - /* We have to send a status channel message indicating an error. */ - { - int ret_send_status; - - ret_send_status = consumer_send_status_channel(sock, NULL); - if (ret_send_status < 0) { - /* Stop everything if session daemon can not be notified. */ - goto error_fatal; - } - } - - ret_func = 1; - goto end; - -error_fatal: - /* This will issue a consumer stop. */ - ret_func = -1; - goto end; - -end: - rcu_read_unlock(); - health_code_update(); - return ret_func; -} - -int lttng_ust_flush_buffer(struct lttng_consumer_stream *stream, - int producer_active) -{ - LTTNG_ASSERT(stream); - LTTNG_ASSERT(stream->ustream); - - return lttng_ust_ctl_flush_buffer(stream->ustream, producer_active); -} - -/* - * Take a snapshot for a specific stream. 
- * - * Returns 0 on success, < 0 on error - */ -int lttng_ustconsumer_take_snapshot(struct lttng_consumer_stream *stream) -{ - LTTNG_ASSERT(stream); - LTTNG_ASSERT(stream->ustream); - - return lttng_ust_ctl_snapshot(stream->ustream); -} - -/* - * Sample consumed and produced positions for a specific stream. - * - * Returns 0 on success, < 0 on error. - */ -int lttng_ustconsumer_sample_snapshot_positions( - struct lttng_consumer_stream *stream) -{ - LTTNG_ASSERT(stream); - LTTNG_ASSERT(stream->ustream); - - return lttng_ust_ctl_snapshot_sample_positions(stream->ustream); -} - -/* - * Get the produced position - * - * Returns 0 on success, < 0 on error - */ -int lttng_ustconsumer_get_produced_snapshot( - struct lttng_consumer_stream *stream, unsigned long *pos) -{ - LTTNG_ASSERT(stream); - LTTNG_ASSERT(stream->ustream); - LTTNG_ASSERT(pos); - - return lttng_ust_ctl_snapshot_get_produced(stream->ustream, pos); -} - -/* - * Get the consumed position - * - * Returns 0 on success, < 0 on error - */ -int lttng_ustconsumer_get_consumed_snapshot( - struct lttng_consumer_stream *stream, unsigned long *pos) -{ - LTTNG_ASSERT(stream); - LTTNG_ASSERT(stream->ustream); - LTTNG_ASSERT(pos); - - return lttng_ust_ctl_snapshot_get_consumed(stream->ustream, pos); -} - -int lttng_ustconsumer_flush_buffer(struct lttng_consumer_stream *stream, - int producer) -{ - LTTNG_ASSERT(stream); - LTTNG_ASSERT(stream->ustream); - - return lttng_ust_ctl_flush_buffer(stream->ustream, producer); -} - -int lttng_ustconsumer_clear_buffer(struct lttng_consumer_stream *stream) -{ - LTTNG_ASSERT(stream); - LTTNG_ASSERT(stream->ustream); - - return lttng_ust_ctl_clear_buffer(stream->ustream); -} - -int lttng_ustconsumer_get_current_timestamp( - struct lttng_consumer_stream *stream, uint64_t *ts) -{ - LTTNG_ASSERT(stream); - LTTNG_ASSERT(stream->ustream); - LTTNG_ASSERT(ts); - - return lttng_ust_ctl_get_current_timestamp(stream->ustream, ts); -} - -int lttng_ustconsumer_get_sequence_number( - struct 
lttng_consumer_stream *stream, uint64_t *seq) -{ - LTTNG_ASSERT(stream); - LTTNG_ASSERT(stream->ustream); - LTTNG_ASSERT(seq); - - return lttng_ust_ctl_get_sequence_number(stream->ustream, seq); -} - -/* - * Called when the stream signals the consumer that it has hung up. - */ -void lttng_ustconsumer_on_stream_hangup(struct lttng_consumer_stream *stream) -{ - LTTNG_ASSERT(stream); - LTTNG_ASSERT(stream->ustream); - - pthread_mutex_lock(&stream->lock); - if (!stream->quiescent) { - if (lttng_ust_ctl_flush_buffer(stream->ustream, 0) < 0) { - ERR("Failed to flush buffer on stream hang-up"); - } else { - stream->quiescent = true; - } - } - pthread_mutex_unlock(&stream->lock); - stream->hangup_flush_done = 1; -} - -void lttng_ustconsumer_del_channel(struct lttng_consumer_channel *chan) -{ - int i; - - LTTNG_ASSERT(chan); - LTTNG_ASSERT(chan->uchan); - LTTNG_ASSERT(chan->buffer_credentials.is_set); - - if (chan->switch_timer_enabled == 1) { - consumer_timer_switch_stop(chan); - } - for (i = 0; i < chan->nr_stream_fds; i++) { - int ret; - - ret = close(chan->stream_fds[i]); - if (ret) { - PERROR("close"); - } - if (chan->shm_path[0]) { - char shm_path[PATH_MAX]; - - ret = get_stream_shm_path(shm_path, chan->shm_path, i); - if (ret) { - ERR("Cannot get stream shm path"); - } - ret = run_as_unlink(shm_path, - lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR( - chan->buffer_credentials)), - lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR( - chan->buffer_credentials))); - if (ret) { - PERROR("unlink %s", shm_path); - } - } - } -} - -void lttng_ustconsumer_free_channel(struct lttng_consumer_channel *chan) -{ - LTTNG_ASSERT(chan); - LTTNG_ASSERT(chan->uchan); - LTTNG_ASSERT(chan->buffer_credentials.is_set); - - consumer_metadata_cache_destroy(chan); - lttng_ust_ctl_destroy_channel(chan->uchan); - /* Try to rmdir all directories under shm_path root. 
*/ - if (chan->root_shm_path[0]) { - (void) run_as_rmdir_recursive(chan->root_shm_path, - lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR( - chan->buffer_credentials)), - lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR( - chan->buffer_credentials)), - LTTNG_DIRECTORY_HANDLE_SKIP_NON_EMPTY_FLAG); - } - free(chan->stream_fds); -} - -void lttng_ustconsumer_del_stream(struct lttng_consumer_stream *stream) -{ - LTTNG_ASSERT(stream); - LTTNG_ASSERT(stream->ustream); - - if (stream->chan->switch_timer_enabled == 1) { - consumer_timer_switch_stop(stream->chan); - } - lttng_ust_ctl_destroy_stream(stream->ustream); -} - -int lttng_ustconsumer_get_wakeup_fd(struct lttng_consumer_stream *stream) -{ - LTTNG_ASSERT(stream); - LTTNG_ASSERT(stream->ustream); - - return lttng_ust_ctl_stream_get_wakeup_fd(stream->ustream); -} - -int lttng_ustconsumer_close_wakeup_fd(struct lttng_consumer_stream *stream) -{ - LTTNG_ASSERT(stream); - LTTNG_ASSERT(stream->ustream); - - return lttng_ust_ctl_stream_close_wakeup_fd(stream->ustream); -} - -/* - * Write up to one packet from the metadata cache to the channel. - * - * Returns the number of bytes pushed from the cache into the ring buffer, or a - * negative value on error. - */ -static -int commit_one_metadata_packet(struct lttng_consumer_stream *stream) -{ - ssize_t write_len; - int ret; - - pthread_mutex_lock(&stream->chan->metadata_cache->lock); - if (stream->chan->metadata_cache->contents.size == - stream->ust_metadata_pushed) { - /* - * In the context of a user space metadata channel, a - * change in version can be detected in two ways: - * 1) During the pre-consume of the `read_subbuffer` loop, - * 2) When populating the metadata ring buffer (i.e. here). - * - * This function is invoked when there is no metadata - * available in the ring-buffer. If all data was consumed - * up to the size of the metadata cache, there is no metadata - * to insert in the ring-buffer. 
- * - * However, the metadata version could still have changed (a - * regeneration without any new data will yield the same cache - * size). - * - * The cache's version is checked for a version change and the - * consumed position is reset if one occurred. - * - * This check is only necessary for the user space domain as - * it has to manage the cache explicitly. If this reset was not - * performed, no metadata would be consumed (and no reset would - * occur as part of the pre-consume) until the metadata size - * exceeded the cache size. - */ - if (stream->metadata_version != - stream->chan->metadata_cache->version) { - metadata_stream_reset_cache_consumed_position(stream); - consumer_stream_metadata_set_version(stream, - stream->chan->metadata_cache->version); - } else { - ret = 0; - goto end; - } - } - - write_len = lttng_ust_ctl_write_one_packet_to_channel(stream->chan->uchan, - &stream->chan->metadata_cache->contents.data[stream->ust_metadata_pushed], - stream->chan->metadata_cache->contents.size - - stream->ust_metadata_pushed); - LTTNG_ASSERT(write_len != 0); - if (write_len < 0) { - ERR("Writing one metadata packet"); - ret = write_len; - goto end; - } - stream->ust_metadata_pushed += write_len; - - LTTNG_ASSERT(stream->chan->metadata_cache->contents.size >= - stream->ust_metadata_pushed); - ret = write_len; - - /* - * Switch packet (but don't open the next one) on every commit of - * a metadata packet. Since the subbuffer is fully filled (with padding, - * if needed), the stream is "quiescent" after this commit. - */ - if (lttng_ust_ctl_flush_buffer(stream->ustream, 1)) { - ERR("Failed to flush buffer while commiting one metadata packet"); - ret = -EIO; - } else { - stream->quiescent = true; - } -end: - pthread_mutex_unlock(&stream->chan->metadata_cache->lock); - return ret; -} - - -/* - * Sync metadata meaning request them to the session daemon and snapshot to the - * metadata thread can consumer them. 
- * - * Metadata stream lock is held here, but we need to release it when - * interacting with sessiond, else we cause a deadlock with live - * awaiting on metadata to be pushed out. - * - * The RCU read side lock must be held by the caller. - */ -enum sync_metadata_status lttng_ustconsumer_sync_metadata( - struct lttng_consumer_local_data *ctx, - struct lttng_consumer_stream *metadata_stream) -{ - int ret; - enum sync_metadata_status status; - struct lttng_consumer_channel *metadata_channel; - - LTTNG_ASSERT(ctx); - LTTNG_ASSERT(metadata_stream); - - metadata_channel = metadata_stream->chan; - pthread_mutex_unlock(&metadata_stream->lock); - /* - * Request metadata from the sessiond, but don't wait for the flush - * because we locked the metadata thread. - */ - ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 0); - pthread_mutex_lock(&metadata_stream->lock); - if (ret < 0) { - status = SYNC_METADATA_STATUS_ERROR; - goto end; - } - - /* - * The metadata stream and channel can be deleted while the - * metadata stream lock was released. The streamed is checked - * for deletion before we use it further. - * - * Note that it is safe to access a logically-deleted stream since its - * existence is still guaranteed by the RCU read side lock. However, - * it should no longer be used. The close/deletion of the metadata - * channel and stream already guarantees that all metadata has been - * consumed. Therefore, there is nothing left to do in this function. 
- */ - if (consumer_stream_is_deleted(metadata_stream)) { - DBG("Metadata stream %" PRIu64 " was deleted during the metadata synchronization", - metadata_stream->key); - status = SYNC_METADATA_STATUS_NO_DATA; - goto end; - } - - ret = commit_one_metadata_packet(metadata_stream); - if (ret < 0) { - status = SYNC_METADATA_STATUS_ERROR; - goto end; - } else if (ret > 0) { - status = SYNC_METADATA_STATUS_NEW_DATA; - } else /* ret == 0 */ { - status = SYNC_METADATA_STATUS_NO_DATA; - goto end; - } - - ret = lttng_ust_ctl_snapshot(metadata_stream->ustream); - if (ret < 0) { - ERR("Failed to take a snapshot of the metadata ring-buffer positions, ret = %d", ret); - status = SYNC_METADATA_STATUS_ERROR; - goto end; - } - -end: - return status; -} - -/* - * Return 0 on success else a negative value. - */ -static int notify_if_more_data(struct lttng_consumer_stream *stream, - struct lttng_consumer_local_data *ctx) -{ - int ret; - struct lttng_ust_ctl_consumer_stream *ustream; - - LTTNG_ASSERT(stream); - LTTNG_ASSERT(ctx); - - ustream = stream->ustream; - - /* - * First, we are going to check if there is a new subbuffer available - * before reading the stream wait_fd. - */ - /* Get the next subbuffer */ - ret = lttng_ust_ctl_get_next_subbuf(ustream); - if (ret) { - /* No more data found, flag the stream. */ - stream->has_data = 0; - ret = 0; - goto end; - } - - ret = lttng_ust_ctl_put_subbuf(ustream); - LTTNG_ASSERT(!ret); - - /* This stream still has data. Flag it and wake up the data thread. */ - stream->has_data = 1; - - if (stream->monitor && !stream->hangup_flush_done && !ctx->has_wakeup) { - ssize_t writelen; - - writelen = lttng_pipe_write(ctx->consumer_wakeup_pipe, "!", 1); - if (writelen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) { - ret = writelen; - goto end; - } - - /* The wake up pipe has been notified. 
*/ - ctx->has_wakeup = 1; - } - ret = 0; - -end: - return ret; -} - -static int consumer_stream_ust_on_wake_up(struct lttng_consumer_stream *stream) -{ - int ret = 0; - - /* - * We can consume the 1 byte written into the wait_fd by - * UST. Don't trigger error if we cannot read this one byte - * (read returns 0), or if the error is EAGAIN or EWOULDBLOCK. - * - * This is only done when the stream is monitored by a thread, - * before the flush is done after a hangup and if the stream - * is not flagged with data since there might be nothing to - * consume in the wait fd but still have data available - * flagged by the consumer wake up pipe. - */ - if (stream->monitor && !stream->hangup_flush_done && !stream->has_data) { - char dummy; - ssize_t readlen; - - readlen = lttng_read(stream->wait_fd, &dummy, 1); - if (readlen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) { - ret = readlen; - } - } - - return ret; -} - -static int extract_common_subbuffer_info(struct lttng_consumer_stream *stream, - struct stream_subbuffer *subbuf) -{ - int ret; - - ret = lttng_ust_ctl_get_subbuf_size( - stream->ustream, &subbuf->info.data.subbuf_size); - if (ret) { - goto end; - } - - ret = lttng_ust_ctl_get_padded_subbuf_size( - stream->ustream, &subbuf->info.data.padded_subbuf_size); - if (ret) { - goto end; - } - -end: - return ret; -} - -static int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream, - struct stream_subbuffer *subbuf) -{ - int ret; - - ret = extract_common_subbuffer_info(stream, subbuf); - if (ret) { - goto end; - } - - subbuf->info.metadata.version = stream->metadata_version; - -end: - return ret; -} - -static int extract_data_subbuffer_info(struct lttng_consumer_stream *stream, - struct stream_subbuffer *subbuf) -{ - int ret; - - ret = extract_common_subbuffer_info(stream, subbuf); - if (ret) { - goto end; - } - - ret = lttng_ust_ctl_get_packet_size( - stream->ustream, &subbuf->info.data.packet_size); - if (ret < 0) { - PERROR("Failed to get 
sub-buffer packet size"); - goto end; - } - - ret = lttng_ust_ctl_get_content_size( - stream->ustream, &subbuf->info.data.content_size); - if (ret < 0) { - PERROR("Failed to get sub-buffer content size"); - goto end; - } - - ret = lttng_ust_ctl_get_timestamp_begin( - stream->ustream, &subbuf->info.data.timestamp_begin); - if (ret < 0) { - PERROR("Failed to get sub-buffer begin timestamp"); - goto end; - } - - ret = lttng_ust_ctl_get_timestamp_end( - stream->ustream, &subbuf->info.data.timestamp_end); - if (ret < 0) { - PERROR("Failed to get sub-buffer end timestamp"); - goto end; - } - - ret = lttng_ust_ctl_get_events_discarded( - stream->ustream, &subbuf->info.data.events_discarded); - if (ret) { - PERROR("Failed to get sub-buffer events discarded count"); - goto end; - } - - ret = lttng_ust_ctl_get_sequence_number(stream->ustream, - &subbuf->info.data.sequence_number.value); - if (ret) { - /* May not be supported by older LTTng-modules. */ - if (ret != -ENOTTY) { - PERROR("Failed to get sub-buffer sequence number"); - goto end; - } - } else { - subbuf->info.data.sequence_number.is_set = true; - } - - ret = lttng_ust_ctl_get_stream_id( - stream->ustream, &subbuf->info.data.stream_id); - if (ret < 0) { - PERROR("Failed to get stream id"); - goto end; - } - - ret = lttng_ust_ctl_get_instance_id(stream->ustream, - &subbuf->info.data.stream_instance_id.value); - if (ret) { - /* May not be supported by older LTTng-modules. 
*/ - if (ret != -ENOTTY) { - PERROR("Failed to get stream instance id"); - goto end; - } - } else { - subbuf->info.data.stream_instance_id.is_set = true; - } -end: - return ret; -} - -static int get_next_subbuffer_common(struct lttng_consumer_stream *stream, - struct stream_subbuffer *subbuffer) -{ - int ret; - const char *addr; - - ret = stream->read_subbuffer_ops.extract_subbuffer_info( - stream, subbuffer); - if (ret) { - goto end; - } - - ret = get_current_subbuf_addr(stream, &addr); - if (ret) { - goto end; - } - - subbuffer->buffer.buffer = lttng_buffer_view_init( - addr, 0, subbuffer->info.data.padded_subbuf_size); - LTTNG_ASSERT(subbuffer->buffer.buffer.data != NULL); -end: - return ret; -} - -static enum get_next_subbuffer_status get_next_subbuffer( - struct lttng_consumer_stream *stream, - struct stream_subbuffer *subbuffer) -{ - int ret; - enum get_next_subbuffer_status status; - - ret = lttng_ust_ctl_get_next_subbuf(stream->ustream); - switch (ret) { - case 0: - status = GET_NEXT_SUBBUFFER_STATUS_OK; - break; - case -ENODATA: - case -EAGAIN: - /* - * The caller only expects -ENODATA when there is no data to - * read, but the kernel tracer returns -EAGAIN when there is - * currently no data for a non-finalized stream, and -ENODATA - * when there is no data for a finalized stream. Those can be - * combined into a -ENODATA return value. 
- */ - status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA; - goto end; - default: - status = GET_NEXT_SUBBUFFER_STATUS_ERROR; - goto end; - } - - ret = get_next_subbuffer_common(stream, subbuffer); - if (ret) { - status = GET_NEXT_SUBBUFFER_STATUS_ERROR; - goto end; - } -end: - return status; -} - -static enum get_next_subbuffer_status get_next_subbuffer_metadata( - struct lttng_consumer_stream *stream, - struct stream_subbuffer *subbuffer) -{ - int ret; - bool cache_empty; - bool got_subbuffer; - bool coherent; - bool buffer_empty; - unsigned long consumed_pos, produced_pos; - enum get_next_subbuffer_status status; - - do { - ret = lttng_ust_ctl_get_next_subbuf(stream->ustream); - if (ret == 0) { - got_subbuffer = true; - } else { - got_subbuffer = false; - if (ret != -EAGAIN) { - /* Fatal error. */ - status = GET_NEXT_SUBBUFFER_STATUS_ERROR; - goto end; - } - } - - /* - * Determine if the cache is empty and ensure that a sub-buffer - * is made available if the cache is not empty. - */ - if (!got_subbuffer) { - ret = commit_one_metadata_packet(stream); - if (ret < 0 && ret != -ENOBUFS) { - status = GET_NEXT_SUBBUFFER_STATUS_ERROR; - goto end; - } else if (ret == 0) { - /* Not an error, the cache is empty. */ - cache_empty = true; - status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA; - goto end; - } else { - cache_empty = false; - } - } else { - pthread_mutex_lock(&stream->chan->metadata_cache->lock); - cache_empty = stream->chan->metadata_cache->contents.size == - stream->ust_metadata_pushed; - pthread_mutex_unlock(&stream->chan->metadata_cache->lock); - } - } while (!got_subbuffer); - - /* Populate sub-buffer infos and view. */ - ret = get_next_subbuffer_common(stream, subbuffer); - if (ret) { - status = GET_NEXT_SUBBUFFER_STATUS_ERROR; - goto end; - } - - ret = lttng_ustconsumer_sample_snapshot_positions(stream); - if (ret < 0) { - /* - * -EAGAIN is not expected since we got a sub-buffer and haven't - * pushed the consumption position yet (on put_next). 
- */ - PERROR("Failed to take a snapshot of metadata buffer positions"); - status = GET_NEXT_SUBBUFFER_STATUS_ERROR; - goto end; - } - - ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos); - if (ret) { - PERROR("Failed to get metadata consumed position"); - status = GET_NEXT_SUBBUFFER_STATUS_ERROR; - goto end; - } - - ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos); - if (ret) { - PERROR("Failed to get metadata produced position"); - status = GET_NEXT_SUBBUFFER_STATUS_ERROR; - goto end; - } - - /* Last sub-buffer of the ring buffer ? */ - buffer_empty = (consumed_pos + stream->max_sb_size) == produced_pos; - - /* - * The sessiond registry lock ensures that coherent units of metadata - * are pushed to the consumer daemon at once. Hence, if a sub-buffer is - * acquired, the cache is empty, and it is the only available sub-buffer - * available, it is safe to assume that it is "coherent". - */ - coherent = got_subbuffer && cache_empty && buffer_empty; - - LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent); - status = GET_NEXT_SUBBUFFER_STATUS_OK; -end: - return status; -} - -static int put_next_subbuffer(struct lttng_consumer_stream *stream, - struct stream_subbuffer *subbuffer) -{ - const int ret = lttng_ust_ctl_put_next_subbuf(stream->ustream); - - LTTNG_ASSERT(ret == 0); - return ret; -} - -static int signal_metadata(struct lttng_consumer_stream *stream, - struct lttng_consumer_local_data *ctx) -{ - ASSERT_LOCKED(stream->metadata_rdv_lock); - return pthread_cond_broadcast(&stream->metadata_rdv) ? 
-errno : 0; -} - -static int lttng_ustconsumer_set_stream_ops( - struct lttng_consumer_stream *stream) -{ - int ret = 0; - - stream->read_subbuffer_ops.on_wake_up = consumer_stream_ust_on_wake_up; - if (stream->metadata_flag) { - stream->read_subbuffer_ops.get_next_subbuffer = - get_next_subbuffer_metadata; - stream->read_subbuffer_ops.extract_subbuffer_info = - extract_metadata_subbuffer_info; - stream->read_subbuffer_ops.reset_metadata = - metadata_stream_reset_cache_consumed_position; - if (stream->chan->is_live) { - stream->read_subbuffer_ops.on_sleep = signal_metadata; - ret = consumer_stream_enable_metadata_bucketization( - stream); - if (ret) { - goto end; - } - } - } else { - stream->read_subbuffer_ops.get_next_subbuffer = - get_next_subbuffer; - stream->read_subbuffer_ops.extract_subbuffer_info = - extract_data_subbuffer_info; - stream->read_subbuffer_ops.on_sleep = notify_if_more_data; - if (stream->chan->is_live) { - stream->read_subbuffer_ops.send_live_beacon = - consumer_flush_ust_index; - } - } - - stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer; -end: - return ret; -} - -/* - * Called when a stream is created. - * - * Return 0 on success or else a negative value. - */ -int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream) -{ - int ret; - - LTTNG_ASSERT(stream); - - /* - * Don't create anything if this is set for streaming or if there is - * no current trace chunk on the parent channel. - */ - if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor && - stream->chan->trace_chunk) { - ret = consumer_stream_create_output_files(stream, true); - if (ret) { - goto error; - } - } - - lttng_ustconsumer_set_stream_ops(stream); - ret = 0; - -error: - return ret; -} - -/* - * Check if data is still being extracted from the buffers for a specific - * stream. Consumer data lock MUST be acquired before calling this function - * and the stream lock. 
- * - * Return 1 if the traced data are still getting read else 0 meaning that the - * data is available for trace viewer reading. - */ -int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream) -{ - int ret; - - LTTNG_ASSERT(stream); - LTTNG_ASSERT(stream->ustream); - ASSERT_LOCKED(stream->lock); - - DBG("UST consumer checking data pending"); - - if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) { - ret = 0; - goto end; - } - - if (stream->chan->type == CONSUMER_CHANNEL_TYPE_METADATA) { - uint64_t contiguous, pushed; - - /* Ease our life a bit. */ - pthread_mutex_lock(&stream->chan->metadata_cache->lock); - contiguous = stream->chan->metadata_cache->contents.size; - pthread_mutex_unlock(&stream->chan->metadata_cache->lock); - pushed = stream->ust_metadata_pushed; - - /* - * We can simply check whether all contiguously available data - * has been pushed to the ring buffer, since the push operation - * is performed within get_next_subbuf(), and because both - * get_next_subbuf() and put_next_subbuf() are issued atomically - * thanks to the stream lock within - * lttng_ustconsumer_read_subbuffer(). This basically means that - * whetnever ust_metadata_pushed is incremented, the associated - * metadata has been consumed from the metadata stream. - */ - DBG("UST consumer metadata pending check: contiguous %" PRIu64 " vs pushed %" PRIu64, - contiguous, pushed); - LTTNG_ASSERT(((int64_t) (contiguous - pushed)) >= 0); - if ((contiguous != pushed) || - (((int64_t) contiguous - pushed) > 0 || contiguous == 0)) { - ret = 1; /* Data is pending */ - goto end; - } - } else { - ret = lttng_ust_ctl_get_next_subbuf(stream->ustream); - if (ret == 0) { - /* - * There is still data so let's put back this - * subbuffer. - */ - ret = lttng_ust_ctl_put_subbuf(stream->ustream); - LTTNG_ASSERT(ret == 0); - ret = 1; /* Data is pending */ - goto end; - } - } - - /* Data is NOT pending so ready to be read. 
*/ - ret = 0; - -end: - return ret; -} - -/* - * Stop a given metadata channel timer if enabled and close the wait fd which - * is the poll pipe of the metadata stream. - * - * This MUST be called with the metadata channel lock acquired. - */ -void lttng_ustconsumer_close_metadata(struct lttng_consumer_channel *metadata) -{ - int ret; - - LTTNG_ASSERT(metadata); - LTTNG_ASSERT(metadata->type == CONSUMER_CHANNEL_TYPE_METADATA); - - DBG("Closing metadata channel key %" PRIu64, metadata->key); - - if (metadata->switch_timer_enabled == 1) { - consumer_timer_switch_stop(metadata); - } - - if (!metadata->metadata_stream) { - goto end; - } - - /* - * Closing write side so the thread monitoring the stream wakes up if any - * and clean the metadata stream. - */ - if (metadata->metadata_stream->ust_metadata_poll_pipe[1] >= 0) { - ret = close(metadata->metadata_stream->ust_metadata_poll_pipe[1]); - if (ret < 0) { - PERROR("closing metadata pipe write side"); - } - metadata->metadata_stream->ust_metadata_poll_pipe[1] = -1; - } - -end: - return; -} - -/* - * Close every metadata stream wait fd of the metadata hash table. This - * function MUST be used very carefully so not to run into a race between the - * metadata thread handling streams and this function closing their wait fd. - * - * For UST, this is used when the session daemon hangs up. Its the metadata - * producer so calling this is safe because we are assured that no state change - * can occur in the metadata thread for the streams in the hash table. 
- */ -void lttng_ustconsumer_close_all_metadata(struct lttng_ht *metadata_ht) -{ - struct lttng_ht_iter iter; - struct lttng_consumer_stream *stream; - - LTTNG_ASSERT(metadata_ht); - LTTNG_ASSERT(metadata_ht->ht); - - DBG("UST consumer closing all metadata streams"); - - rcu_read_lock(); - cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, - node.node) { - - health_code_update(); - - pthread_mutex_lock(&stream->chan->lock); - lttng_ustconsumer_close_metadata(stream->chan); - pthread_mutex_unlock(&stream->chan->lock); - - } - rcu_read_unlock(); -} - -void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream) -{ - int ret; - - ret = lttng_ust_ctl_stream_close_wakeup_fd(stream->ustream); - if (ret < 0) { - ERR("Unable to close wakeup fd"); - } -} - -/* - * Please refer to consumer-timer.c before adding any lock within this - * function or any of its callees. Timers have a very strict locking - * semantic with respect to teardown. Failure to respect this semantic - * introduces deadlocks. - * - * DON'T hold the metadata lock when calling this function, else this - * can cause deadlock involving consumer awaiting for metadata to be - * pushed out due to concurrent interaction with the session daemon. 
- */ -int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx, - struct lttng_consumer_channel *channel, int timer, int wait) -{ - struct lttcomm_metadata_request_msg request; - struct lttcomm_consumer_msg msg; - enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS; - uint64_t len, key, offset, version; - int ret; - - LTTNG_ASSERT(channel); - LTTNG_ASSERT(channel->metadata_cache); - - memset(&request, 0, sizeof(request)); - - /* send the metadata request to sessiond */ - switch (the_consumer_data.type) { - case LTTNG_CONSUMER64_UST: - request.bits_per_long = 64; - break; - case LTTNG_CONSUMER32_UST: - request.bits_per_long = 32; - break; - default: - request.bits_per_long = 0; - break; - } - - request.session_id = channel->session_id; - request.session_id_per_pid = channel->session_id_per_pid; - /* - * Request the application UID here so the metadata of that application can - * be sent back. The channel UID corresponds to the user UID of the session - * used for the rights on the stream file(s). 
- */ - request.uid = channel->ust_app_uid; - request.key = channel->key; - - DBG("Sending metadata request to sessiond, session id %" PRIu64 - ", per-pid %" PRIu64 ", app UID %u and channel key %" PRIu64, - request.session_id, request.session_id_per_pid, request.uid, - request.key); - - pthread_mutex_lock(&ctx->metadata_socket_lock); - - health_code_update(); - - ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request, - sizeof(request)); - if (ret < 0) { - ERR("Asking metadata to sessiond"); - goto end; - } - - health_code_update(); - - /* Receive the metadata from sessiond */ - ret = lttcomm_recv_unix_sock(ctx->consumer_metadata_socket, &msg, - sizeof(msg)); - if (ret != sizeof(msg)) { - DBG("Consumer received unexpected message size %d (expects %zu)", - ret, sizeof(msg)); - lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD); - /* - * The ret value might 0 meaning an orderly shutdown but this is ok - * since the caller handles this. - */ - goto end; - } - - health_code_update(); - - if (msg.cmd_type == LTTNG_ERR_UND) { - /* No registry found */ - (void) consumer_send_status_msg(ctx->consumer_metadata_socket, - ret_code); - ret = 0; - goto end; - } else if (msg.cmd_type != LTTNG_CONSUMER_PUSH_METADATA) { - ERR("Unexpected cmd_type received %d", msg.cmd_type); - ret = -1; - goto end; - } - - len = msg.u.push_metadata.len; - key = msg.u.push_metadata.key; - offset = msg.u.push_metadata.target_offset; - version = msg.u.push_metadata.version; - - LTTNG_ASSERT(key == channel->key); - if (len == 0) { - DBG("No new metadata to receive for key %" PRIu64, key); - } - - health_code_update(); - - /* Tell session daemon we are ready to receive the metadata. */ - ret = consumer_send_status_msg(ctx->consumer_metadata_socket, - LTTCOMM_CONSUMERD_SUCCESS); - if (ret < 0 || len == 0) { - /* - * Somehow, the session daemon is not responding anymore or there is - * nothing to receive. 
- */ - goto end; - } - - health_code_update(); - - ret = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket, - key, offset, len, version, channel, timer, wait); - if (ret >= 0) { - /* - * Only send the status msg if the sessiond is alive meaning a positive - * ret code. - */ - (void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret); - } - ret = 0; - -end: - health_code_update(); - - pthread_mutex_unlock(&ctx->metadata_socket_lock); - return ret; -} - -/* - * Return the ustctl call for the get stream id. - */ -int lttng_ustconsumer_get_stream_id(struct lttng_consumer_stream *stream, - uint64_t *stream_id) -{ - LTTNG_ASSERT(stream); - LTTNG_ASSERT(stream_id); - - return lttng_ust_ctl_get_stream_id(stream->ustream, stream_id); -} - -void lttng_ustconsumer_sigbus_handle(void *addr) -{ - lttng_ust_ctl_sigbus_handle(addr); -} diff --git a/src/common/ust-consumer/ust-consumer.cpp b/src/common/ust-consumer/ust-consumer.cpp new file mode 100644 index 000000000..c9ec3c557 --- /dev/null +++ b/src/common/ust-consumer/ust-consumer.cpp @@ -0,0 +1,3439 @@ +/* + * Copyright (C) 2011 Julien Desfossez + * Copyright (C) 2011 Mathieu Desnoyers + * Copyright (C) 2017 Jérémie Galarneau + * + * SPDX-License-Identifier: GPL-2.0-only + * + */ + +#define _LGPL_SOURCE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ust-consumer.h" + +#define INT_MAX_STR_LEN 12 /* includes \0 */ + +extern struct lttng_consumer_global_data the_consumer_data; +extern int consumer_poll_timeout; + +LTTNG_EXPORT DEFINE_LTTNG_UST_SIGBUS_STATE(); + +/* + * Free channel object and all streams associated with it. This MUST be used + * only and only if the channel has _NEVER_ been added to the global channel + * hash table. 
+ */ +static void destroy_channel(struct lttng_consumer_channel *channel) +{ + struct lttng_consumer_stream *stream, *stmp; + + LTTNG_ASSERT(channel); + + DBG("UST consumer cleaning stream list"); + + cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head, + send_node) { + + health_code_update(); + + cds_list_del(&stream->send_node); + lttng_ust_ctl_destroy_stream(stream->ustream); + lttng_trace_chunk_put(stream->trace_chunk); + free(stream); + } + + /* + * If a channel is available meaning that was created before the streams + * were, delete it. + */ + if (channel->uchan) { + lttng_ustconsumer_del_channel(channel); + lttng_ustconsumer_free_channel(channel); + } + + if (channel->trace_chunk) { + lttng_trace_chunk_put(channel->trace_chunk); + } + + free(channel); +} + +/* + * Add channel to internal consumer state. + * + * Returns 0 on success or else a negative value. + */ +static int add_channel(struct lttng_consumer_channel *channel, + struct lttng_consumer_local_data *ctx) +{ + int ret = 0; + + LTTNG_ASSERT(channel); + LTTNG_ASSERT(ctx); + + if (ctx->on_recv_channel != NULL) { + ret = ctx->on_recv_channel(channel); + if (ret == 0) { + ret = consumer_add_channel(channel, ctx); + } else if (ret < 0) { + /* Most likely an ENOMEM. */ + lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR); + goto error; + } + } else { + ret = consumer_add_channel(channel, ctx); + } + + DBG("UST consumer channel added (key: %" PRIu64 ")", channel->key); + +error: + return ret; +} + +/* + * Allocate and return a consumer stream object. If _alloc_ret is not NULL, the + * error value if applicable is set in it else it is kept untouched. + * + * Return NULL on error else the newly allocated stream object. 
+ */ +static struct lttng_consumer_stream *allocate_stream(int cpu, int key, + struct lttng_consumer_channel *channel, + struct lttng_consumer_local_data *ctx, int *_alloc_ret) +{ + int alloc_ret; + struct lttng_consumer_stream *stream = NULL; + + LTTNG_ASSERT(channel); + LTTNG_ASSERT(ctx); + + stream = consumer_stream_create( + channel, + channel->key, + key, + channel->name, + channel->relayd_id, + channel->session_id, + channel->trace_chunk, + cpu, + &alloc_ret, + channel->type, + channel->monitor); + if (stream == NULL) { + switch (alloc_ret) { + case -ENOENT: + /* + * We could not find the channel. Can happen if cpu hotplug + * happens while tearing down. + */ + DBG3("Could not find channel"); + break; + case -ENOMEM: + case -EINVAL: + default: + lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR); + break; + } + goto error; + } + + consumer_stream_update_channel_attributes(stream, channel); + +error: + if (_alloc_ret) { + *_alloc_ret = alloc_ret; + } + return stream; +} + +/* + * Send the given stream pointer to the corresponding thread. + * + * Returns 0 on success else a negative value. + */ +static int send_stream_to_thread(struct lttng_consumer_stream *stream, + struct lttng_consumer_local_data *ctx) +{ + int ret; + struct lttng_pipe *stream_pipe; + + /* Get the right pipe where the stream will be sent. */ + if (stream->metadata_flag) { + consumer_add_metadata_stream(stream); + stream_pipe = ctx->consumer_metadata_pipe; + } else { + consumer_add_data_stream(stream); + stream_pipe = ctx->consumer_data_pipe; + } + + /* + * From this point on, the stream's ownership has been moved away from + * the channel and it becomes globally visible. Hence, remove it from + * the local stream list to prevent the stream from being both local and + * global. 
+ */ + stream->globally_visible = 1; + cds_list_del(&stream->send_node); + + ret = lttng_pipe_write(stream_pipe, &stream, sizeof(stream)); + if (ret < 0) { + ERR("Consumer write %s stream to pipe %d", + stream->metadata_flag ? "metadata" : "data", + lttng_pipe_get_writefd(stream_pipe)); + if (stream->metadata_flag) { + consumer_del_stream_for_metadata(stream); + } else { + consumer_del_stream_for_data(stream); + } + goto error; + } + +error: + return ret; +} + +static +int get_stream_shm_path(char *stream_shm_path, const char *shm_path, int cpu) +{ + char cpu_nr[INT_MAX_STR_LEN]; /* int max len */ + int ret; + + strncpy(stream_shm_path, shm_path, PATH_MAX); + stream_shm_path[PATH_MAX - 1] = '\0'; + ret = snprintf(cpu_nr, INT_MAX_STR_LEN, "%i", cpu); + if (ret < 0) { + PERROR("snprintf"); + goto end; + } + strncat(stream_shm_path, cpu_nr, + PATH_MAX - strlen(stream_shm_path) - 1); + ret = 0; +end: + return ret; +} + +/* + * Create streams for the given channel using liblttng-ust-ctl. + * The channel lock must be acquired by the caller. + * + * Return 0 on success else a negative value. + */ +static int create_ust_streams(struct lttng_consumer_channel *channel, + struct lttng_consumer_local_data *ctx) +{ + int ret, cpu = 0; + struct lttng_ust_ctl_consumer_stream *ustream; + struct lttng_consumer_stream *stream; + pthread_mutex_t *current_stream_lock = NULL; + + LTTNG_ASSERT(channel); + LTTNG_ASSERT(ctx); + + /* + * While a stream is available from ustctl. When NULL is returned, we've + * reached the end of the possible stream for the channel. 
+ */ + while ((ustream = lttng_ust_ctl_create_stream(channel->uchan, cpu))) { + int wait_fd; + int ust_metadata_pipe[2]; + + health_code_update(); + + if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && channel->monitor) { + ret = utils_create_pipe_cloexec_nonblock(ust_metadata_pipe); + if (ret < 0) { + ERR("Create ust metadata poll pipe"); + goto error; + } + wait_fd = ust_metadata_pipe[0]; + } else { + wait_fd = lttng_ust_ctl_stream_get_wait_fd(ustream); + } + + /* Allocate consumer stream object. */ + stream = allocate_stream(cpu, wait_fd, channel, ctx, &ret); + if (!stream) { + goto error_alloc; + } + stream->ustream = ustream; + /* + * Store it so we can save multiple function calls afterwards since + * this value is used heavily in the stream threads. This is UST + * specific so this is why it's done after allocation. + */ + stream->wait_fd = wait_fd; + + /* + * Increment channel refcount since the channel reference has now been + * assigned in the allocation process above. + */ + if (stream->chan->monitor) { + uatomic_inc(&stream->chan->refcount); + } + + pthread_mutex_lock(&stream->lock); + current_stream_lock = &stream->lock; + /* + * Order is important this is why a list is used. On error, the caller + * should clean this list. + */ + cds_list_add_tail(&stream->send_node, &channel->streams.head); + + ret = lttng_ust_ctl_get_max_subbuf_size(stream->ustream, + &stream->max_sb_size); + if (ret < 0) { + ERR("lttng_ust_ctl_get_max_subbuf_size failed for stream %s", + stream->name); + goto error; + } + + /* Do actions once stream has been received. */ + if (ctx->on_recv_stream) { + ret = ctx->on_recv_stream(stream); + if (ret < 0) { + goto error; + } + } + + DBG("UST consumer add stream %s (key: %" PRIu64 ") with relayd id %" PRIu64, + stream->name, stream->key, stream->relayd_stream_id); + + /* Set next CPU stream. */ + channel->streams.count = ++cpu; + + /* Keep stream reference when creating metadata. 
*/ + if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) { + channel->metadata_stream = stream; + if (channel->monitor) { + /* Set metadata poll pipe if we created one */ + memcpy(stream->ust_metadata_poll_pipe, + ust_metadata_pipe, + sizeof(ust_metadata_pipe)); + } + } + pthread_mutex_unlock(&stream->lock); + current_stream_lock = NULL; + } + + return 0; + +error: +error_alloc: + if (current_stream_lock) { + pthread_mutex_unlock(current_stream_lock); + } + return ret; +} + +static int open_ust_stream_fd(struct lttng_consumer_channel *channel, int cpu, + const struct lttng_credentials *session_credentials) +{ + char shm_path[PATH_MAX]; + int ret; + + if (!channel->shm_path[0]) { + return shm_create_anonymous("ust-consumer"); + } + ret = get_stream_shm_path(shm_path, channel->shm_path, cpu); + if (ret) { + goto error_shm_path; + } + return run_as_open(shm_path, + O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR, + lttng_credentials_get_uid(session_credentials), + lttng_credentials_get_gid(session_credentials)); + +error_shm_path: + return -1; +} + +/* + * Create an UST channel with the given attributes and send it to the session + * daemon using the ust ctl API. + * + * Return 0 on success or else a negative value. 
+ */ +static int create_ust_channel(struct lttng_consumer_channel *channel, + struct lttng_ust_ctl_consumer_channel_attr *attr, + struct lttng_ust_ctl_consumer_channel **ust_chanp) +{ + int ret, nr_stream_fds, i, j; + int *stream_fds; + struct lttng_ust_ctl_consumer_channel *ust_channel; + + LTTNG_ASSERT(channel); + LTTNG_ASSERT(attr); + LTTNG_ASSERT(ust_chanp); + LTTNG_ASSERT(channel->buffer_credentials.is_set); + + DBG3("Creating channel to ustctl with attr: [overwrite: %d, " + "subbuf_size: %" PRIu64 ", num_subbuf: %" PRIu64 ", " + "switch_timer_interval: %u, read_timer_interval: %u, " + "output: %d, type: %d", attr->overwrite, attr->subbuf_size, + attr->num_subbuf, attr->switch_timer_interval, + attr->read_timer_interval, attr->output, attr->type); + + if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA) + nr_stream_fds = 1; + else + nr_stream_fds = lttng_ust_ctl_get_nr_stream_per_channel(); + stream_fds = (int *) zmalloc(nr_stream_fds * sizeof(*stream_fds)); + if (!stream_fds) { + ret = -1; + goto error_alloc; + } + for (i = 0; i < nr_stream_fds; i++) { + stream_fds[i] = open_ust_stream_fd(channel, i, + &channel->buffer_credentials.value); + if (stream_fds[i] < 0) { + ret = -1; + goto error_open; + } + } + ust_channel = lttng_ust_ctl_create_channel(attr, stream_fds, nr_stream_fds); + if (!ust_channel) { + ret = -1; + goto error_create; + } + channel->nr_stream_fds = nr_stream_fds; + channel->stream_fds = stream_fds; + *ust_chanp = ust_channel; + + return 0; + +error_create: +error_open: + for (j = i - 1; j >= 0; j--) { + int closeret; + + closeret = close(stream_fds[j]); + if (closeret) { + PERROR("close"); + } + if (channel->shm_path[0]) { + char shm_path[PATH_MAX]; + + closeret = get_stream_shm_path(shm_path, + channel->shm_path, j); + if (closeret) { + ERR("Cannot get stream shm path"); + } + closeret = run_as_unlink(shm_path, + lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR( + channel->buffer_credentials)), + 
lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR( + channel->buffer_credentials))); + if (closeret) { + PERROR("unlink %s", shm_path); + } + } + } + /* Try to rmdir all directories under shm_path root. */ + if (channel->root_shm_path[0]) { + (void) run_as_rmdir_recursive(channel->root_shm_path, + lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR( + channel->buffer_credentials)), + lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR( + channel->buffer_credentials)), + LTTNG_DIRECTORY_HANDLE_SKIP_NON_EMPTY_FLAG); + } + free(stream_fds); +error_alloc: + return ret; +} + +/* + * Send a single given stream to the session daemon using the sock. + * + * Return 0 on success else a negative value. + */ +static int send_sessiond_stream(int sock, struct lttng_consumer_stream *stream) +{ + int ret; + + LTTNG_ASSERT(stream); + LTTNG_ASSERT(sock >= 0); + + DBG("UST consumer sending stream %" PRIu64 " to sessiond", stream->key); + + /* Send stream to session daemon. */ + ret = lttng_ust_ctl_send_stream_to_sessiond(sock, stream->ustream); + if (ret < 0) { + goto error; + } + +error: + return ret; +} + +/* + * Send channel to sessiond and relayd if applicable. + * + * Return 0 on success or else a negative value. + */ +static int send_channel_to_sessiond_and_relayd(int sock, + struct lttng_consumer_channel *channel, + struct lttng_consumer_local_data *ctx, int *relayd_error) +{ + int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS; + struct lttng_consumer_stream *stream; + uint64_t net_seq_idx = -1ULL; + + LTTNG_ASSERT(channel); + LTTNG_ASSERT(ctx); + LTTNG_ASSERT(sock >= 0); + + DBG("UST consumer sending channel %s to sessiond", channel->name); + + if (channel->relayd_id != (uint64_t) -1ULL) { + cds_list_for_each_entry(stream, &channel->streams.head, send_node) { + + health_code_update(); + + /* Try to send the stream to the relayd if one is available. 
*/ + DBG("Sending stream %" PRIu64 " of channel \"%s\" to relayd", + stream->key, channel->name); + ret = consumer_send_relayd_stream(stream, stream->chan->pathname); + if (ret < 0) { + /* + * Flag that the relayd was the problem here probably due to a + * communicaton error on the socket. + */ + if (relayd_error) { + *relayd_error = 1; + } + ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL; + } + if (net_seq_idx == -1ULL) { + net_seq_idx = stream->net_seq_idx; + } + } + } + + /* Inform sessiond that we are about to send channel and streams. */ + ret = consumer_send_status_msg(sock, ret_code); + if (ret < 0 || ret_code != LTTCOMM_CONSUMERD_SUCCESS) { + /* + * Either the session daemon is not responding or the relayd died so we + * stop now. + */ + goto error; + } + + /* Send channel to sessiond. */ + ret = lttng_ust_ctl_send_channel_to_sessiond(sock, channel->uchan); + if (ret < 0) { + goto error; + } + + ret = lttng_ust_ctl_channel_close_wakeup_fd(channel->uchan); + if (ret < 0) { + goto error; + } + + /* The channel was sent successfully to the sessiond at this point. */ + cds_list_for_each_entry(stream, &channel->streams.head, send_node) { + + health_code_update(); + + /* Send stream to session daemon. */ + ret = send_sessiond_stream(sock, stream); + if (ret < 0) { + goto error; + } + } + + /* Tell sessiond there is no more stream. */ + ret = lttng_ust_ctl_send_stream_to_sessiond(sock, NULL); + if (ret < 0) { + goto error; + } + + DBG("UST consumer NULL stream sent to sessiond"); + + return 0; + +error: + if (ret_code != LTTCOMM_CONSUMERD_SUCCESS) { + ret = -1; + } + return ret; +} + +/* + * Creates a channel and streams and add the channel it to the channel internal + * state. The created stream must ONLY be sent once the GET_CHANNEL command is + * received. + * + * Return 0 on success or else, a negative value is returned and the channel + * MUST be destroyed by consumer_del_channel(). 
+ */ +static int ask_channel(struct lttng_consumer_local_data *ctx, + struct lttng_consumer_channel *channel, + struct lttng_ust_ctl_consumer_channel_attr *attr) +{ + int ret; + + LTTNG_ASSERT(ctx); + LTTNG_ASSERT(channel); + LTTNG_ASSERT(attr); + + /* + * This value is still used by the kernel consumer since for the kernel, + * the stream ownership is not IN the consumer so we need to have the + * number of left stream that needs to be initialized so we can know when + * to delete the channel (see consumer.c). + * + * As for the user space tracer now, the consumer creates and sends the + * stream to the session daemon which only sends them to the application + * once every stream of a channel is received making this value useless + * because we they will be added to the poll thread before the application + * receives them. This ensures that a stream can not hang up during + * initilization of a channel. + */ + channel->nb_init_stream_left = 0; + + /* The reply msg status is handled in the following call. */ + ret = create_ust_channel(channel, attr, &channel->uchan); + if (ret < 0) { + goto end; + } + + channel->wait_fd = lttng_ust_ctl_channel_get_wait_fd(channel->uchan); + + /* + * For the snapshots (no monitor), we create the metadata streams + * on demand, not during the channel creation. + */ + if (channel->type == CONSUMER_CHANNEL_TYPE_METADATA && !channel->monitor) { + ret = 0; + goto end; + } + + /* Open all streams for this channel. */ + pthread_mutex_lock(&channel->lock); + ret = create_ust_streams(channel, ctx); + pthread_mutex_unlock(&channel->lock); + if (ret < 0) { + goto end; + } + +end: + return ret; +} + +/* + * Send all stream of a channel to the right thread handling it. + * + * On error, return a negative value else 0 on success. 
+ */ +static int send_streams_to_thread(struct lttng_consumer_channel *channel, + struct lttng_consumer_local_data *ctx) +{ + int ret = 0; + struct lttng_consumer_stream *stream, *stmp; + + LTTNG_ASSERT(channel); + LTTNG_ASSERT(ctx); + + /* Send streams to the corresponding thread. */ + cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head, + send_node) { + + health_code_update(); + + /* Sending the stream to the thread. */ + ret = send_stream_to_thread(stream, ctx); + if (ret < 0) { + /* + * If we are unable to send the stream to the thread, there is + * a big problem so just stop everything. + */ + goto error; + } + } + +error: + return ret; +} + +/* + * Flush channel's streams using the given key to retrieve the channel. + * + * Return 0 on success else an LTTng error code. + */ +static int flush_channel(uint64_t chan_key) +{ + int ret = 0; + struct lttng_consumer_channel *channel; + struct lttng_consumer_stream *stream; + struct lttng_ht *ht; + struct lttng_ht_iter iter; + + DBG("UST consumer flush channel key %" PRIu64, chan_key); + + rcu_read_lock(); + channel = consumer_find_channel(chan_key); + if (!channel) { + ERR("UST consumer flush channel %" PRIu64 " not found", chan_key); + ret = LTTNG_ERR_UST_CHAN_NOT_FOUND; + goto error; + } + + ht = the_consumer_data.stream_per_chan_id_ht; + + /* For each stream of the channel id, flush it. */ + cds_lfht_for_each_entry_duplicate(ht->ht, + ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct, + &channel->key, &iter.iter, stream, node_channel_id.node) { + + health_code_update(); + + pthread_mutex_lock(&stream->lock); + + /* + * Protect against concurrent teardown of a stream. 
+ */ + if (cds_lfht_is_node_deleted(&stream->node.node)) { + goto next; + } + + if (!stream->quiescent) { + ret = lttng_ust_ctl_flush_buffer(stream->ustream, 0); + if (ret) { + ERR("Failed to flush buffer while flushing channel: channel key = %" PRIu64 ", channel name = '%s'", + chan_key, channel->name); + ret = LTTNG_ERR_BUFFER_FLUSH_FAILED; + pthread_mutex_unlock(&stream->lock); + goto error; + } + stream->quiescent = true; + } +next: + pthread_mutex_unlock(&stream->lock); + } +error: + rcu_read_unlock(); + return ret; +} + +/* + * Clear quiescent state from channel's streams using the given key to + * retrieve the channel. + * + * Return 0 on success else an LTTng error code. + */ +static int clear_quiescent_channel(uint64_t chan_key) +{ + int ret = 0; + struct lttng_consumer_channel *channel; + struct lttng_consumer_stream *stream; + struct lttng_ht *ht; + struct lttng_ht_iter iter; + + DBG("UST consumer clear quiescent channel key %" PRIu64, chan_key); + + rcu_read_lock(); + channel = consumer_find_channel(chan_key); + if (!channel) { + ERR("UST consumer clear quiescent channel %" PRIu64 " not found", chan_key); + ret = LTTNG_ERR_UST_CHAN_NOT_FOUND; + goto error; + } + + ht = the_consumer_data.stream_per_chan_id_ht; + + /* For each stream of the channel id, clear quiescent state. */ + cds_lfht_for_each_entry_duplicate(ht->ht, + ht->hash_fct(&channel->key, lttng_ht_seed), ht->match_fct, + &channel->key, &iter.iter, stream, node_channel_id.node) { + + health_code_update(); + + pthread_mutex_lock(&stream->lock); + stream->quiescent = false; + pthread_mutex_unlock(&stream->lock); + } +error: + rcu_read_unlock(); + return ret; +} + +/* + * Close metadata stream wakeup_fd using the given key to retrieve the channel. + * + * Return 0 on success else an LTTng error code. 
+ */ +static int close_metadata(uint64_t chan_key) +{ + int ret = 0; + struct lttng_consumer_channel *channel; + unsigned int channel_monitor; + + DBG("UST consumer close metadata key %" PRIu64, chan_key); + + channel = consumer_find_channel(chan_key); + if (!channel) { + /* + * This is possible if the metadata thread has issue a delete because + * the endpoint point of the stream hung up. There is no way the + * session daemon can know about it thus use a DBG instead of an actual + * error. + */ + DBG("UST consumer close metadata %" PRIu64 " not found", chan_key); + ret = LTTNG_ERR_UST_CHAN_NOT_FOUND; + goto error; + } + + pthread_mutex_lock(&the_consumer_data.lock); + pthread_mutex_lock(&channel->lock); + channel_monitor = channel->monitor; + if (cds_lfht_is_node_deleted(&channel->node.node)) { + goto error_unlock; + } + + lttng_ustconsumer_close_metadata(channel); + pthread_mutex_unlock(&channel->lock); + pthread_mutex_unlock(&the_consumer_data.lock); + + /* + * The ownership of a metadata channel depends on the type of + * session to which it belongs. In effect, the monitor flag is checked + * to determine if this metadata channel is in "snapshot" mode or not. + * + * In the non-snapshot case, the metadata channel is created along with + * a single stream which will remain present until the metadata channel + * is destroyed (on the destruction of its session). In this case, the + * metadata stream in "monitored" by the metadata poll thread and holds + * the ownership of its channel. + * + * Closing the metadata will cause the metadata stream's "metadata poll + * pipe" to be closed. Closing this pipe will wake-up the metadata poll + * thread which will teardown the metadata stream which, in return, + * deletes the metadata channel. + * + * In the snapshot case, the metadata stream is created and destroyed + * on every snapshot record. 
Since the channel doesn't have an owner + * other than the session daemon, it is safe to destroy it immediately + * on reception of the CLOSE_METADATA command. + */ + if (!channel_monitor) { + /* + * The channel and consumer_data locks must be + * released before this call since consumer_del_channel + * re-acquires the channel and consumer_data locks to teardown + * the channel and queue its reclamation by the "call_rcu" + * worker thread. + */ + consumer_del_channel(channel); + } + + return ret; +error_unlock: + pthread_mutex_unlock(&channel->lock); + pthread_mutex_unlock(&the_consumer_data.lock); +error: + return ret; +} + +/* + * RCU read side lock MUST be acquired before calling this function. + * + * Return 0 on success else an LTTng error code. + */ +static int setup_metadata(struct lttng_consumer_local_data *ctx, uint64_t key) +{ + int ret; + struct lttng_consumer_channel *metadata; + + DBG("UST consumer setup metadata key %" PRIu64, key); + + metadata = consumer_find_channel(key); + if (!metadata) { + ERR("UST consumer push metadata %" PRIu64 " not found", key); + ret = LTTNG_ERR_UST_CHAN_NOT_FOUND; + goto end; + } + + /* + * In no monitor mode, the metadata channel has no stream(s) so skip the + * ownership transfer to the metadata thread. + */ + if (!metadata->monitor) { + DBG("Metadata channel in no monitor"); + ret = 0; + goto end; + } + + /* + * Send metadata stream to relayd if one available. Availability is + * known if the stream is still in the list of the channel. + */ + if (cds_list_empty(&metadata->streams.head)) { + ERR("Metadata channel key %" PRIu64 ", no stream available.", key); + ret = LTTCOMM_CONSUMERD_ERROR_METADATA; + goto error_no_stream; + } + + /* Send metadata stream to relayd if needed. 
*/ + if (metadata->metadata_stream->net_seq_idx != (uint64_t) -1ULL) { + ret = consumer_send_relayd_stream(metadata->metadata_stream, + metadata->pathname); + if (ret < 0) { + ret = LTTCOMM_CONSUMERD_ERROR_METADATA; + goto error; + } + ret = consumer_send_relayd_streams_sent( + metadata->metadata_stream->net_seq_idx); + if (ret < 0) { + ret = LTTCOMM_CONSUMERD_RELAYD_FAIL; + goto error; + } + } + + /* + * Ownership of metadata stream is passed along. Freeing is handled by + * the callee. + */ + ret = send_streams_to_thread(metadata, ctx); + if (ret < 0) { + /* + * If we are unable to send the stream to the thread, there is + * a big problem so just stop everything. + */ + ret = LTTCOMM_CONSUMERD_FATAL; + goto send_streams_error; + } + /* List MUST be empty after or else it could be reused. */ + LTTNG_ASSERT(cds_list_empty(&metadata->streams.head)); + + ret = 0; + goto end; + +error: + /* + * Delete metadata channel on error. At this point, the metadata stream can + * NOT be monitored by the metadata thread thus having the guarantee that + * the stream is still in the local stream list of the channel. This call + * will make sure to clean that list. + */ + consumer_stream_destroy(metadata->metadata_stream, NULL); + cds_list_del(&metadata->metadata_stream->send_node); + metadata->metadata_stream = NULL; +send_streams_error: +error_no_stream: +end: + return ret; +} + +/* + * Snapshot the whole metadata. + * RCU read-side lock must be held by the caller. 
+ * + * Returns 0 on success, < 0 on error + */ +static int snapshot_metadata(struct lttng_consumer_channel *metadata_channel, + uint64_t key, char *path, uint64_t relayd_id, + struct lttng_consumer_local_data *ctx) +{ + int ret = 0; + struct lttng_consumer_stream *metadata_stream; + + LTTNG_ASSERT(path); + LTTNG_ASSERT(ctx); + + DBG("UST consumer snapshot metadata with key %" PRIu64 " at path %s", + key, path); + + rcu_read_lock(); + + LTTNG_ASSERT(!metadata_channel->monitor); + + health_code_update(); + + /* + * Ask the sessiond if we have new metadata waiting and update the + * consumer metadata cache. + */ + ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 1); + if (ret < 0) { + goto error; + } + + health_code_update(); + + /* + * The metadata stream is NOT created in no monitor mode when the channel + * is created on a sessiond ask channel command. + */ + ret = create_ust_streams(metadata_channel, ctx); + if (ret < 0) { + goto error; + } + + metadata_stream = metadata_channel->metadata_stream; + LTTNG_ASSERT(metadata_stream); + + pthread_mutex_lock(&metadata_stream->lock); + if (relayd_id != (uint64_t) -1ULL) { + metadata_stream->net_seq_idx = relayd_id; + ret = consumer_send_relayd_stream(metadata_stream, path); + } else { + ret = consumer_stream_create_output_files(metadata_stream, + false); + } + pthread_mutex_unlock(&metadata_stream->lock); + if (ret < 0) { + goto error_stream; + } + + do { + health_code_update(); + + ret = lttng_consumer_read_subbuffer(metadata_stream, ctx, true); + if (ret < 0) { + goto error_stream; + } + } while (ret > 0); + +error_stream: + /* + * Clean up the stream completly because the next snapshot will use a new + * metadata stream. 
+ */ + consumer_stream_destroy(metadata_stream, NULL); + cds_list_del(&metadata_stream->send_node); + metadata_channel->metadata_stream = NULL; + +error: + rcu_read_unlock(); + return ret; +} + +static +int get_current_subbuf_addr(struct lttng_consumer_stream *stream, + const char **addr) +{ + int ret; + unsigned long mmap_offset; + const char *mmap_base; + + mmap_base = (const char *) lttng_ust_ctl_get_mmap_base(stream->ustream); + if (!mmap_base) { + ERR("Failed to get mmap base for stream `%s`", + stream->name); + ret = -EPERM; + goto error; + } + + ret = lttng_ust_ctl_get_mmap_read_offset(stream->ustream, &mmap_offset); + if (ret != 0) { + ERR("Failed to get mmap offset for stream `%s`", stream->name); + ret = -EINVAL; + goto error; + } + + *addr = mmap_base + mmap_offset; +error: + return ret; + +} + +/* + * Take a snapshot of all the stream of a channel. + * RCU read-side lock and the channel lock must be held by the caller. + * + * Returns 0 on success, < 0 on error + */ +static int snapshot_channel(struct lttng_consumer_channel *channel, + uint64_t key, char *path, uint64_t relayd_id, + uint64_t nb_packets_per_stream, + struct lttng_consumer_local_data *ctx) +{ + int ret; + unsigned use_relayd = 0; + unsigned long consumed_pos, produced_pos; + struct lttng_consumer_stream *stream; + + LTTNG_ASSERT(path); + LTTNG_ASSERT(ctx); + + rcu_read_lock(); + + if (relayd_id != (uint64_t) -1ULL) { + use_relayd = 1; + } + + LTTNG_ASSERT(!channel->monitor); + DBG("UST consumer snapshot channel %" PRIu64, key); + + cds_list_for_each_entry(stream, &channel->streams.head, send_node) { + health_code_update(); + + /* Lock stream because we are about to change its state. */ + pthread_mutex_lock(&stream->lock); + LTTNG_ASSERT(channel->trace_chunk); + if (!lttng_trace_chunk_get(channel->trace_chunk)) { + /* + * Can't happen barring an internal error as the channel + * holds a reference to the trace chunk. 
+ */ + ERR("Failed to acquire reference to channel's trace chunk"); + ret = -1; + goto error_unlock; + } + LTTNG_ASSERT(!stream->trace_chunk); + stream->trace_chunk = channel->trace_chunk; + + stream->net_seq_idx = relayd_id; + + if (use_relayd) { + ret = consumer_send_relayd_stream(stream, path); + if (ret < 0) { + goto error_unlock; + } + } else { + ret = consumer_stream_create_output_files(stream, + false); + if (ret < 0) { + goto error_unlock; + } + DBG("UST consumer snapshot stream (%" PRIu64 ")", + stream->key); + } + + /* + * If tracing is active, we want to perform a "full" buffer flush. + * Else, if quiescent, it has already been done by the prior stop. + */ + if (!stream->quiescent) { + ret = lttng_ust_ctl_flush_buffer(stream->ustream, 0); + if (ret < 0) { + ERR("Failed to flush buffer during snapshot of channel: channel key = %" PRIu64 ", channel name = '%s'", + channel->key, channel->name); + goto error_unlock; + } + } + + ret = lttng_ustconsumer_take_snapshot(stream); + if (ret < 0) { + ERR("Taking UST snapshot"); + goto error_unlock; + } + + ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos); + if (ret < 0) { + ERR("Produced UST snapshot position"); + goto error_unlock; + } + + ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos); + if (ret < 0) { + ERR("Consumerd UST snapshot position"); + goto error_unlock; + } + + /* + * The original value is sent back if max stream size is larger than + * the possible size of the snapshot. Also, we assume that the session + * daemon should never send a maximum stream size that is lower than + * subbuffer size. 
+ */ + consumed_pos = consumer_get_consume_start_pos(consumed_pos, + produced_pos, nb_packets_per_stream, + stream->max_sb_size); + + while ((long) (consumed_pos - produced_pos) < 0) { + ssize_t read_len; + unsigned long len, padded_len; + const char *subbuf_addr; + struct lttng_buffer_view subbuf_view; + + health_code_update(); + + DBG("UST consumer taking snapshot at pos %lu", consumed_pos); + + ret = lttng_ust_ctl_get_subbuf(stream->ustream, &consumed_pos); + if (ret < 0) { + if (ret != -EAGAIN) { + PERROR("lttng_ust_ctl_get_subbuf snapshot"); + goto error_close_stream; + } + DBG("UST consumer get subbuf failed. Skipping it."); + consumed_pos += stream->max_sb_size; + stream->chan->lost_packets++; + continue; + } + + ret = lttng_ust_ctl_get_subbuf_size(stream->ustream, &len); + if (ret < 0) { + ERR("Snapshot lttng_ust_ctl_get_subbuf_size"); + goto error_put_subbuf; + } + + ret = lttng_ust_ctl_get_padded_subbuf_size(stream->ustream, &padded_len); + if (ret < 0) { + ERR("Snapshot lttng_ust_ctl_get_padded_subbuf_size"); + goto error_put_subbuf; + } + + ret = get_current_subbuf_addr(stream, &subbuf_addr); + if (ret) { + goto error_put_subbuf; + } + + subbuf_view = lttng_buffer_view_init( + subbuf_addr, 0, padded_len); + read_len = lttng_consumer_on_read_subbuffer_mmap( + stream, &subbuf_view, padded_len - len); + if (use_relayd) { + if (read_len != len) { + ret = -EPERM; + goto error_put_subbuf; + } + } else { + if (read_len != padded_len) { + ret = -EPERM; + goto error_put_subbuf; + } + } + + ret = lttng_ust_ctl_put_subbuf(stream->ustream); + if (ret < 0) { + ERR("Snapshot lttng_ust_ctl_put_subbuf"); + goto error_close_stream; + } + consumed_pos += stream->max_sb_size; + } + + /* Simply close the stream so we can use it on the next snapshot. 
*/ + consumer_stream_close(stream); + pthread_mutex_unlock(&stream->lock); + } + + rcu_read_unlock(); + return 0; + +error_put_subbuf: + if (lttng_ust_ctl_put_subbuf(stream->ustream) < 0) { + ERR("Snapshot lttng_ust_ctl_put_subbuf"); + } +error_close_stream: + consumer_stream_close(stream); +error_unlock: + pthread_mutex_unlock(&stream->lock); + rcu_read_unlock(); + return ret; +} + +static +void metadata_stream_reset_cache_consumed_position( + struct lttng_consumer_stream *stream) +{ + ASSERT_LOCKED(stream->lock); + + DBG("Reset metadata cache of session %" PRIu64, + stream->chan->session_id); + stream->ust_metadata_pushed = 0; +} + +/* + * Receive the metadata updates from the sessiond. Supports receiving + * overlapping metadata, but is needs to always belong to a contiguous + * range starting from 0. + * Be careful about the locks held when calling this function: it needs + * the metadata cache flush to concurrently progress in order to + * complete. + */ +int lttng_ustconsumer_recv_metadata(int sock, uint64_t key, uint64_t offset, + uint64_t len, uint64_t version, + struct lttng_consumer_channel *channel, int timer, int wait) +{ + int ret, ret_code = LTTCOMM_CONSUMERD_SUCCESS; + char *metadata_str; + enum consumer_metadata_cache_write_status cache_write_status; + + DBG("UST consumer push metadata key %" PRIu64 " of len %" PRIu64, key, len); + + metadata_str = (char *) zmalloc(len * sizeof(char)); + if (!metadata_str) { + PERROR("zmalloc metadata string"); + ret_code = LTTCOMM_CONSUMERD_ENOMEM; + goto end; + } + + health_code_update(); + + /* Receive metadata string. */ + ret = lttcomm_recv_unix_sock(sock, metadata_str, len); + if (ret < 0) { + /* Session daemon is dead so return gracefully. 
*/ + ret_code = ret; + goto end_free; + } + + health_code_update(); + + pthread_mutex_lock(&channel->metadata_cache->lock); + cache_write_status = consumer_metadata_cache_write( + channel->metadata_cache, offset, len, version, + metadata_str); + pthread_mutex_unlock(&channel->metadata_cache->lock); + switch (cache_write_status) { + case CONSUMER_METADATA_CACHE_WRITE_STATUS_NO_CHANGE: + /* + * The write entirely overlapped with existing contents of the + * same metadata version (same content); there is nothing to do. + */ + break; + case CONSUMER_METADATA_CACHE_WRITE_STATUS_INVALIDATED: + /* + * The metadata cache was invalidated (previously pushed + * content has been overwritten). Reset the stream's consumed + * metadata position to ensure the metadata poll thread consumes + * the whole cache. + */ + pthread_mutex_lock(&channel->metadata_stream->lock); + metadata_stream_reset_cache_consumed_position( + channel->metadata_stream); + pthread_mutex_unlock(&channel->metadata_stream->lock); + /* Fall-through. */ + case CONSUMER_METADATA_CACHE_WRITE_STATUS_APPENDED_CONTENT: + /* + * In both cases, the metadata poll thread has new data to + * consume. + */ + ret = consumer_metadata_wakeup_pipe(channel); + if (ret) { + ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA; + goto end_free; + } + break; + case CONSUMER_METADATA_CACHE_WRITE_STATUS_ERROR: + /* Unable to handle metadata. Notify session daemon. */ + ret_code = LTTCOMM_CONSUMERD_ERROR_METADATA; + /* + * Skip metadata flush on write error since the offset and len might + * not have been updated which could create an infinite loop below when + * waiting for the metadata cache to be flushed. 
+ */ + goto end_free; + default: + abort(); + } + + if (!wait) { + goto end_free; + } + while (consumer_metadata_cache_flushed(channel, offset + len, timer)) { + DBG("Waiting for metadata to be flushed"); + + health_code_update(); + + usleep(DEFAULT_METADATA_AVAILABILITY_WAIT_TIME); + } + +end_free: + free(metadata_str); +end: + return ret_code; +} + +/* + * Receive command from session daemon and process it. + * + * Return 1 on success else a negative value or 0. + */ +int lttng_ustconsumer_recv_cmd(struct lttng_consumer_local_data *ctx, + int sock, struct pollfd *consumer_sockpoll) +{ + int ret_func; + enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS; + struct lttcomm_consumer_msg msg; + struct lttng_consumer_channel *channel = NULL; + + health_code_update(); + + { + ssize_t ret_recv; + + ret_recv = lttcomm_recv_unix_sock(sock, &msg, sizeof(msg)); + if (ret_recv != sizeof(msg)) { + DBG("Consumer received unexpected message size %zd (expects %zu)", + ret_recv, sizeof(msg)); + /* + * The ret value might 0 meaning an orderly shutdown but this is ok + * since the caller handles this. + */ + if (ret_recv > 0) { + lttng_consumer_send_error(ctx, + LTTCOMM_CONSUMERD_ERROR_RECV_CMD); + ret_recv = -1; + } + return ret_recv; + } + } + + health_code_update(); + + /* deprecated */ + LTTNG_ASSERT(msg.cmd_type != LTTNG_CONSUMER_STOP); + + health_code_update(); + + /* relayd needs RCU read-side lock */ + rcu_read_lock(); + + switch (msg.cmd_type) { + case LTTNG_CONSUMER_ADD_RELAYD_SOCKET: + { + /* Session daemon status message are handled in the following call. 
*/ + consumer_add_relayd_socket(msg.u.relayd_sock.net_index, + msg.u.relayd_sock.type, ctx, sock, consumer_sockpoll, + &msg.u.relayd_sock.sock, msg.u.relayd_sock.session_id, + msg.u.relayd_sock.relayd_session_id); + goto end_nosignal; + } + case LTTNG_CONSUMER_DESTROY_RELAYD: + { + uint64_t index = msg.u.destroy_relayd.net_seq_idx; + struct consumer_relayd_sock_pair *relayd; + + DBG("UST consumer destroying relayd %" PRIu64, index); + + /* Get relayd reference if exists. */ + relayd = consumer_find_relayd(index); + if (relayd == NULL) { + DBG("Unable to find relayd %" PRIu64, index); + ret_code = LTTCOMM_CONSUMERD_RELAYD_FAIL; + } + + /* + * Each relayd socket pair has a refcount of stream attached to it + * which tells if the relayd is still active or not depending on the + * refcount value. + * + * This will set the destroy flag of the relayd object and destroy it + * if the refcount reaches zero when called. + * + * The destroy can happen either here or when a stream fd hangs up. + */ + if (relayd) { + consumer_flag_relayd_for_destroy(relayd); + } + + goto end_msg_sessiond; + } + case LTTNG_CONSUMER_UPDATE_STREAM: + { + rcu_read_unlock(); + return -ENOSYS; + } + case LTTNG_CONSUMER_DATA_PENDING: + { + int is_data_pending; + ssize_t ret_send; + uint64_t id = msg.u.data_pending.session_id; + + DBG("UST consumer data pending command for id %" PRIu64, id); + + is_data_pending = consumer_data_pending(id); + + /* Send back returned value to session daemon */ + ret_send = lttcomm_send_unix_sock(sock, &is_data_pending, + sizeof(is_data_pending)); + if (ret_send < 0) { + DBG("Error when sending the data pending ret code: %zd", + ret_send); + goto error_fatal; + } + + /* + * No need to send back a status message since the data pending + * returned value is the response. 
+ */ + break; + } + case LTTNG_CONSUMER_ASK_CHANNEL_CREATION: + { + int ret_ask_channel, ret_add_channel, ret_send; + struct lttng_ust_ctl_consumer_channel_attr attr; + const uint64_t chunk_id = msg.u.ask_channel.chunk_id.value; + const struct lttng_credentials buffer_credentials = { + .uid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.ask_channel.buffer_credentials.uid), + .gid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.ask_channel.buffer_credentials.gid), + }; + + /* Create a plain object and reserve a channel key. */ + channel = consumer_allocate_channel( + msg.u.ask_channel.key, + msg.u.ask_channel.session_id, + msg.u.ask_channel.chunk_id.is_set ? + &chunk_id : NULL, + msg.u.ask_channel.pathname, + msg.u.ask_channel.name, + msg.u.ask_channel.relayd_id, + (enum lttng_event_output) msg.u.ask_channel.output, + msg.u.ask_channel.tracefile_size, + msg.u.ask_channel.tracefile_count, + msg.u.ask_channel.session_id_per_pid, + msg.u.ask_channel.monitor, + msg.u.ask_channel.live_timer_interval, + msg.u.ask_channel.is_live, + msg.u.ask_channel.root_shm_path, + msg.u.ask_channel.shm_path); + if (!channel) { + goto end_channel_error; + } + + LTTNG_OPTIONAL_SET(&channel->buffer_credentials, + buffer_credentials); + + /* + * Assign UST application UID to the channel. This value is ignored for + * per PID buffers. This is specific to UST thus setting this after the + * allocation. + */ + channel->ust_app_uid = msg.u.ask_channel.ust_app_uid; + + /* Build channel attributes from received message. */ + attr.subbuf_size = msg.u.ask_channel.subbuf_size; + attr.num_subbuf = msg.u.ask_channel.num_subbuf; + attr.overwrite = msg.u.ask_channel.overwrite; + attr.switch_timer_interval = msg.u.ask_channel.switch_timer_interval; + attr.read_timer_interval = msg.u.ask_channel.read_timer_interval; + attr.chan_id = msg.u.ask_channel.chan_id; + memcpy(attr.uuid, msg.u.ask_channel.uuid, sizeof(attr.uuid)); + attr.blocking_timeout= msg.u.ask_channel.blocking_timeout; + + /* Match channel buffer type to the UST abi. 
*/ + switch (msg.u.ask_channel.output) { + case LTTNG_EVENT_MMAP: + default: + attr.output = LTTNG_UST_ABI_MMAP; + break; + } + + /* Translate and save channel type. */ + switch (msg.u.ask_channel.type) { + case LTTNG_UST_ABI_CHAN_PER_CPU: + channel->type = CONSUMER_CHANNEL_TYPE_DATA; + attr.type = LTTNG_UST_ABI_CHAN_PER_CPU; + /* + * Set refcount to 1 for owner. Below, we will + * pass ownership to the + * consumer_thread_channel_poll() thread. + */ + channel->refcount = 1; + break; + case LTTNG_UST_ABI_CHAN_METADATA: + channel->type = CONSUMER_CHANNEL_TYPE_METADATA; + attr.type = LTTNG_UST_ABI_CHAN_METADATA; + break; + default: + abort(); + goto error_fatal; + }; + + health_code_update(); + + ret_ask_channel = ask_channel(ctx, channel, &attr); + if (ret_ask_channel < 0) { + goto end_channel_error; + } + + if (msg.u.ask_channel.type == LTTNG_UST_ABI_CHAN_METADATA) { + int ret_allocate; + + ret_allocate = consumer_metadata_cache_allocate( + channel); + if (ret_allocate < 0) { + ERR("Allocating metadata cache"); + goto end_channel_error; + } + consumer_timer_switch_start(channel, attr.switch_timer_interval); + attr.switch_timer_interval = 0; + } else { + int monitor_start_ret; + + consumer_timer_live_start(channel, + msg.u.ask_channel.live_timer_interval); + monitor_start_ret = consumer_timer_monitor_start( + channel, + msg.u.ask_channel.monitor_timer_interval); + if (monitor_start_ret < 0) { + ERR("Starting channel monitoring timer failed"); + goto end_channel_error; + } + } + + health_code_update(); + + /* + * Add the channel to the internal state AFTER all streams were created + * and successfully sent to session daemon. This way, all streams must + * be ready before this channel is visible to the threads. + * If add_channel succeeds, ownership of the channel is + * passed to consumer_thread_channel_poll(). 
+ */ + ret_add_channel = add_channel(channel, ctx); + if (ret_add_channel < 0) { + if (msg.u.ask_channel.type == LTTNG_UST_ABI_CHAN_METADATA) { + if (channel->switch_timer_enabled == 1) { + consumer_timer_switch_stop(channel); + } + consumer_metadata_cache_destroy(channel); + } + if (channel->live_timer_enabled == 1) { + consumer_timer_live_stop(channel); + } + if (channel->monitor_timer_enabled == 1) { + consumer_timer_monitor_stop(channel); + } + goto end_channel_error; + } + + health_code_update(); + + /* + * Channel and streams are now created. Inform the session daemon that + * everything went well and should wait to receive the channel and + * streams with ustctl API. + */ + ret_send = consumer_send_status_channel(sock, channel); + if (ret_send < 0) { + /* + * There is probably a problem on the socket. + */ + goto error_fatal; + } + + break; + } + case LTTNG_CONSUMER_GET_CHANNEL: + { + int ret, relayd_err = 0; + uint64_t key = msg.u.get_channel.key; + struct lttng_consumer_channel *found_channel; + + found_channel = consumer_find_channel(key); + if (!found_channel) { + ERR("UST consumer get channel key %" PRIu64 " not found", key); + ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; + goto end_get_channel; + } + + health_code_update(); + + /* Send the channel to sessiond (and relayd, if applicable). */ + ret = send_channel_to_sessiond_and_relayd( + sock, found_channel, ctx, &relayd_err); + if (ret < 0) { + if (relayd_err) { + /* + * We were unable to send to the relayd the stream so avoid + * sending back a fatal error to the thread since this is OK + * and the consumer can continue its work. The above call + * has sent the error status message to the sessiond. + */ + goto end_get_channel_nosignal; + } + /* + * The communicaton was broken hence there is a bad state between + * the consumer and sessiond so stop everything. 
+ */ + goto error_get_channel_fatal; + } + + health_code_update(); + + /* + * In no monitor mode, the streams ownership is kept inside the channel + * so don't send them to the data thread. + */ + if (!found_channel->monitor) { + goto end_get_channel; + } + + ret = send_streams_to_thread(found_channel, ctx); + if (ret < 0) { + /* + * If we are unable to send the stream to the thread, there is + * a big problem so just stop everything. + */ + goto error_get_channel_fatal; + } + /* List MUST be empty after or else it could be reused. */ + LTTNG_ASSERT(cds_list_empty(&found_channel->streams.head)); +end_get_channel: + goto end_msg_sessiond; +error_get_channel_fatal: + goto error_fatal; +end_get_channel_nosignal: + goto end_nosignal; + } + case LTTNG_CONSUMER_DESTROY_CHANNEL: + { + uint64_t key = msg.u.destroy_channel.key; + + /* + * Only called if streams have not been sent to stream + * manager thread. However, channel has been sent to + * channel manager thread. + */ + notify_thread_del_channel(ctx, key); + goto end_msg_sessiond; + } + case LTTNG_CONSUMER_CLOSE_METADATA: + { + int ret; + + ret = close_metadata(msg.u.close_metadata.key); + if (ret != 0) { + ret_code = (lttcomm_return_code) ret; + } + + goto end_msg_sessiond; + } + case LTTNG_CONSUMER_FLUSH_CHANNEL: + { + int ret; + + ret = flush_channel(msg.u.flush_channel.key); + if (ret != 0) { + ret_code = (lttcomm_return_code) ret; + } + + goto end_msg_sessiond; + } + case LTTNG_CONSUMER_CLEAR_QUIESCENT_CHANNEL: + { + int ret; + + ret = clear_quiescent_channel( + msg.u.clear_quiescent_channel.key); + if (ret != 0) { + ret_code = (lttcomm_return_code) ret; + } + + goto end_msg_sessiond; + } + case LTTNG_CONSUMER_PUSH_METADATA: + { + int ret; + uint64_t len = msg.u.push_metadata.len; + uint64_t key = msg.u.push_metadata.key; + uint64_t offset = msg.u.push_metadata.target_offset; + uint64_t version = msg.u.push_metadata.version; + struct lttng_consumer_channel *found_channel; + + DBG("UST consumer push metadata key 
%" PRIu64 " of len %" PRIu64, key, + len); + + found_channel = consumer_find_channel(key); + if (!found_channel) { + /* + * This is possible if the metadata creation on the consumer side + * is in flight vis-a-vis a concurrent push metadata from the + * session daemon. Simply return that the channel failed and the + * session daemon will handle that message correctly considering + * that this race is acceptable thus the DBG() statement here. + */ + DBG("UST consumer push metadata %" PRIu64 " not found", key); + ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL; + goto end_push_metadata_msg_sessiond; + } + + health_code_update(); + + if (!len) { + /* + * There is nothing to receive. We have simply + * checked whether the channel can be found. + */ + ret_code = LTTCOMM_CONSUMERD_SUCCESS; + goto end_push_metadata_msg_sessiond; + } + + /* Tell session daemon we are ready to receive the metadata. */ + ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS); + if (ret < 0) { + /* Somehow, the session daemon is not responding anymore. */ + goto error_push_metadata_fatal; + } + + health_code_update(); + + /* Wait for more data. 
*/ + health_poll_entry(); + ret = lttng_consumer_poll_socket(consumer_sockpoll); + health_poll_exit(); + if (ret) { + goto error_push_metadata_fatal; + } + + health_code_update(); + + ret = lttng_ustconsumer_recv_metadata(sock, key, offset, len, + version, found_channel, 0, 1); + if (ret < 0) { + /* error receiving from sessiond */ + goto error_push_metadata_fatal; + } else { + ret_code = (lttcomm_return_code) ret; + goto end_push_metadata_msg_sessiond; + } +end_push_metadata_msg_sessiond: + goto end_msg_sessiond; +error_push_metadata_fatal: + goto error_fatal; + } + case LTTNG_CONSUMER_SETUP_METADATA: + { + int ret; + + ret = setup_metadata(ctx, msg.u.setup_metadata.key); + if (ret) { + ret_code = (lttcomm_return_code) ret; + } + goto end_msg_sessiond; + } + case LTTNG_CONSUMER_SNAPSHOT_CHANNEL: + { + struct lttng_consumer_channel *found_channel; + uint64_t key = msg.u.snapshot_channel.key; + int ret_send; + + found_channel = consumer_find_channel(key); + if (!found_channel) { + DBG("UST snapshot channel not found for key %" PRIu64, key); + ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; + } else { + if (msg.u.snapshot_channel.metadata) { + int ret_snapshot; + + ret_snapshot = snapshot_metadata(found_channel, + key, + msg.u.snapshot_channel.pathname, + msg.u.snapshot_channel.relayd_id, + ctx); + if (ret_snapshot < 0) { + ERR("Snapshot metadata failed"); + ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED; + } + } else { + int ret_snapshot; + + ret_snapshot = snapshot_channel(found_channel, + key, + msg.u.snapshot_channel.pathname, + msg.u.snapshot_channel.relayd_id, + msg.u.snapshot_channel + .nb_packets_per_stream, + ctx); + if (ret_snapshot < 0) { + ERR("Snapshot channel failed"); + ret_code = LTTCOMM_CONSUMERD_SNAPSHOT_FAILED; + } + } + } + health_code_update(); + ret_send = consumer_send_status_msg(sock, ret_code); + if (ret_send < 0) { + /* Somehow, the session daemon is not responding anymore. 
*/ + goto end_nosignal; + } + health_code_update(); + break; + } + case LTTNG_CONSUMER_DISCARDED_EVENTS: + { + int ret = 0; + uint64_t discarded_events; + struct lttng_ht_iter iter; + struct lttng_ht *ht; + struct lttng_consumer_stream *stream; + uint64_t id = msg.u.discarded_events.session_id; + uint64_t key = msg.u.discarded_events.channel_key; + + DBG("UST consumer discarded events command for session id %" + PRIu64, id); + rcu_read_lock(); + pthread_mutex_lock(&the_consumer_data.lock); + + ht = the_consumer_data.stream_list_ht; + + /* + * We only need a reference to the channel, but they are not + * directly indexed, so we just use the first matching stream + * to extract the information we need, we default to 0 if not + * found (no events are dropped if the channel is not yet in + * use). + */ + discarded_events = 0; + cds_lfht_for_each_entry_duplicate(ht->ht, + ht->hash_fct(&id, lttng_ht_seed), + ht->match_fct, &id, + &iter.iter, stream, node_session_id.node) { + if (stream->chan->key == key) { + discarded_events = stream->chan->discarded_events; + break; + } + } + pthread_mutex_unlock(&the_consumer_data.lock); + rcu_read_unlock(); + + DBG("UST consumer discarded events command for session id %" + PRIu64 ", channel key %" PRIu64, id, key); + + health_code_update(); + + /* Send back returned value to session daemon */ + ret = lttcomm_send_unix_sock(sock, &discarded_events, sizeof(discarded_events)); + if (ret < 0) { + PERROR("send discarded events"); + goto error_fatal; + } + + break; + } + case LTTNG_CONSUMER_LOST_PACKETS: + { + int ret; + uint64_t lost_packets; + struct lttng_ht_iter iter; + struct lttng_ht *ht; + struct lttng_consumer_stream *stream; + uint64_t id = msg.u.lost_packets.session_id; + uint64_t key = msg.u.lost_packets.channel_key; + + DBG("UST consumer lost packets command for session id %" + PRIu64, id); + rcu_read_lock(); + pthread_mutex_lock(&the_consumer_data.lock); + + ht = the_consumer_data.stream_list_ht; + + /* + * We only need a 
reference to the channel, but they are not + * directly indexed, so we just use the first matching stream + * to extract the information we need, we default to 0 if not + * found (no packets lost if the channel is not yet in use). + */ + lost_packets = 0; + cds_lfht_for_each_entry_duplicate(ht->ht, + ht->hash_fct(&id, lttng_ht_seed), + ht->match_fct, &id, + &iter.iter, stream, node_session_id.node) { + if (stream->chan->key == key) { + lost_packets = stream->chan->lost_packets; + break; + } + } + pthread_mutex_unlock(&the_consumer_data.lock); + rcu_read_unlock(); + + DBG("UST consumer lost packets command for session id %" + PRIu64 ", channel key %" PRIu64, id, key); + + health_code_update(); + + /* Send back returned value to session daemon */ + ret = lttcomm_send_unix_sock(sock, &lost_packets, + sizeof(lost_packets)); + if (ret < 0) { + PERROR("send lost packets"); + goto error_fatal; + } + + break; + } + case LTTNG_CONSUMER_SET_CHANNEL_MONITOR_PIPE: + { + int channel_monitor_pipe, ret_send, + ret_set_channel_monitor_pipe; + ssize_t ret_recv; + + ret_code = LTTCOMM_CONSUMERD_SUCCESS; + /* Successfully received the command's type. */ + ret_send = consumer_send_status_msg(sock, ret_code); + if (ret_send < 0) { + goto error_fatal; + } + + ret_recv = lttcomm_recv_fds_unix_sock( + sock, &channel_monitor_pipe, 1); + if (ret_recv != sizeof(channel_monitor_pipe)) { + ERR("Failed to receive channel monitor pipe"); + goto error_fatal; + } + + DBG("Received channel monitor pipe (%d)", channel_monitor_pipe); + ret_set_channel_monitor_pipe = + consumer_timer_thread_set_channel_monitor_pipe( + channel_monitor_pipe); + if (!ret_set_channel_monitor_pipe) { + int flags; + int ret_fcntl; + + ret_code = LTTCOMM_CONSUMERD_SUCCESS; + /* Set the pipe as non-blocking. 
*/ + ret_fcntl = fcntl(channel_monitor_pipe, F_GETFL, 0); + if (ret_fcntl == -1) { + PERROR("fcntl get flags of the channel monitoring pipe"); + goto error_fatal; + } + flags = ret_fcntl; + + ret_fcntl = fcntl(channel_monitor_pipe, F_SETFL, + flags | O_NONBLOCK); + if (ret_fcntl == -1) { + PERROR("fcntl set O_NONBLOCK flag of the channel monitoring pipe"); + goto error_fatal; + } + DBG("Channel monitor pipe set as non-blocking"); + } else { + ret_code = LTTCOMM_CONSUMERD_ALREADY_SET; + } + goto end_msg_sessiond; + } + case LTTNG_CONSUMER_ROTATE_CHANNEL: + { + struct lttng_consumer_channel *found_channel; + uint64_t key = msg.u.rotate_channel.key; + int ret_send_status; + + found_channel = consumer_find_channel(key); + if (!found_channel) { + DBG("Channel %" PRIu64 " not found", key); + ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; + } else { + int rotate_channel; + + /* + * Sample the rotate position of all the streams in + * this channel. + */ + rotate_channel = lttng_consumer_rotate_channel( + found_channel, key, + msg.u.rotate_channel.relayd_id, + msg.u.rotate_channel.metadata, ctx); + if (rotate_channel < 0) { + ERR("Rotate channel failed"); + ret_code = LTTCOMM_CONSUMERD_ROTATION_FAIL; + } + + health_code_update(); + } + + ret_send_status = consumer_send_status_msg(sock, ret_code); + if (ret_send_status < 0) { + /* Somehow, the session daemon is not responding anymore. */ + goto end_rotate_channel_nosignal; + } + + /* + * Rotate the streams that are ready right now. + * FIXME: this is a second consecutive iteration over the + * streams in a channel, there is probably a better way to + * handle this, but it needs to be after the + * consumer_send_status_msg() call. 
+ */ + if (found_channel) { + int ret_rotate_read_streams; + + ret_rotate_read_streams = + lttng_consumer_rotate_ready_streams( + found_channel, key, + ctx); + if (ret_rotate_read_streams < 0) { + ERR("Rotate channel failed"); + } + } + break; +end_rotate_channel_nosignal: + goto end_nosignal; + } + case LTTNG_CONSUMER_CLEAR_CHANNEL: + { + struct lttng_consumer_channel *found_channel; + uint64_t key = msg.u.clear_channel.key; + int ret_send_status; + + found_channel = consumer_find_channel(key); + if (!found_channel) { + DBG("Channel %" PRIu64 " not found", key); + ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; + } else { + int ret_clear_channel; + + ret_clear_channel = lttng_consumer_clear_channel( + found_channel); + if (ret_clear_channel) { + ERR("Clear channel failed key %" PRIu64, key); + ret_code = (lttcomm_return_code) ret_clear_channel; + } + + health_code_update(); + } + ret_send_status = consumer_send_status_msg(sock, ret_code); + if (ret_send_status < 0) { + /* Somehow, the session daemon is not responding anymore. */ + goto end_nosignal; + } + break; + } + case LTTNG_CONSUMER_INIT: + { + int ret_send_status; + + ret_code = lttng_consumer_init_command(ctx, + msg.u.init.sessiond_uuid); + health_code_update(); + ret_send_status = consumer_send_status_msg(sock, ret_code); + if (ret_send_status < 0) { + /* Somehow, the session daemon is not responding anymore. */ + goto end_nosignal; + } + break; + } + case LTTNG_CONSUMER_CREATE_TRACE_CHUNK: + { + const struct lttng_credentials credentials = { + .uid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.uid), + .gid = LTTNG_OPTIONAL_INIT_VALUE(msg.u.create_trace_chunk.credentials.value.gid), + }; + const bool is_local_trace = + !msg.u.create_trace_chunk.relayd_id.is_set; + const uint64_t relayd_id = + msg.u.create_trace_chunk.relayd_id.value; + const char *chunk_override_name = + *msg.u.create_trace_chunk.override_name ? 
+ msg.u.create_trace_chunk.override_name : + NULL; + struct lttng_directory_handle *chunk_directory_handle = NULL; + + /* + * The session daemon will only provide a chunk directory file + * descriptor for local traces. + */ + if (is_local_trace) { + int chunk_dirfd; + int ret_send_status; + ssize_t ret_recv; + + /* Acnowledge the reception of the command. */ + ret_send_status = consumer_send_status_msg( + sock, LTTCOMM_CONSUMERD_SUCCESS); + if (ret_send_status < 0) { + /* Somehow, the session daemon is not responding anymore. */ + goto end_nosignal; + } + + /* + * Receive trace chunk domain dirfd. + */ + ret_recv = lttcomm_recv_fds_unix_sock( + sock, &chunk_dirfd, 1); + if (ret_recv != sizeof(chunk_dirfd)) { + ERR("Failed to receive trace chunk domain directory file descriptor"); + goto error_fatal; + } + + DBG("Received trace chunk domain directory fd (%d)", + chunk_dirfd); + chunk_directory_handle = lttng_directory_handle_create_from_dirfd( + chunk_dirfd); + if (!chunk_directory_handle) { + ERR("Failed to initialize chunk domain directory handle from directory file descriptor"); + if (close(chunk_dirfd)) { + PERROR("Failed to close chunk directory file descriptor"); + } + goto error_fatal; + } + } + + ret_code = lttng_consumer_create_trace_chunk( + !is_local_trace ? &relayd_id : NULL, + msg.u.create_trace_chunk.session_id, + msg.u.create_trace_chunk.chunk_id, + (time_t) msg.u.create_trace_chunk + .creation_timestamp, + chunk_override_name, + msg.u.create_trace_chunk.credentials.is_set ? 
+ &credentials : + NULL, + chunk_directory_handle); + lttng_directory_handle_put(chunk_directory_handle); + goto end_msg_sessiond; + } + case LTTNG_CONSUMER_CLOSE_TRACE_CHUNK: + { + enum lttng_trace_chunk_command_type close_command = + (lttng_trace_chunk_command_type) + msg.u.close_trace_chunk.close_command.value; + const uint64_t relayd_id = + msg.u.close_trace_chunk.relayd_id.value; + struct lttcomm_consumer_close_trace_chunk_reply reply; + char closed_trace_chunk_path[LTTNG_PATH_MAX] = {}; + int ret; + + ret_code = lttng_consumer_close_trace_chunk( + msg.u.close_trace_chunk.relayd_id.is_set ? + &relayd_id : + NULL, + msg.u.close_trace_chunk.session_id, + msg.u.close_trace_chunk.chunk_id, + (time_t) msg.u.close_trace_chunk.close_timestamp, + msg.u.close_trace_chunk.close_command.is_set ? + &close_command : + NULL, closed_trace_chunk_path); + reply.ret_code = ret_code; + reply.path_length = strlen(closed_trace_chunk_path) + 1; + ret = lttcomm_send_unix_sock(sock, &reply, sizeof(reply)); + if (ret != sizeof(reply)) { + goto error_fatal; + } + ret = lttcomm_send_unix_sock(sock, closed_trace_chunk_path, + reply.path_length); + if (ret != reply.path_length) { + goto error_fatal; + } + goto end_nosignal; + } + case LTTNG_CONSUMER_TRACE_CHUNK_EXISTS: + { + const uint64_t relayd_id = + msg.u.trace_chunk_exists.relayd_id.value; + + ret_code = lttng_consumer_trace_chunk_exists( + msg.u.trace_chunk_exists.relayd_id.is_set ? 
+ &relayd_id : NULL, + msg.u.trace_chunk_exists.session_id, + msg.u.trace_chunk_exists.chunk_id); + goto end_msg_sessiond; + } + case LTTNG_CONSUMER_OPEN_CHANNEL_PACKETS: + { + const uint64_t key = msg.u.open_channel_packets.key; + struct lttng_consumer_channel *found_channel = + consumer_find_channel(key); + + if (found_channel) { + pthread_mutex_lock(&found_channel->lock); + ret_code = lttng_consumer_open_channel_packets( + found_channel); + pthread_mutex_unlock(&found_channel->lock); + } else { + /* + * The channel could have disappeared in per-pid + * buffering mode. + */ + DBG("Channel %" PRIu64 " not found", key); + ret_code = LTTCOMM_CONSUMERD_CHAN_NOT_FOUND; + } + + health_code_update(); + goto end_msg_sessiond; + } + default: + break; + } + +end_nosignal: + /* + * Return 1 to indicate success since the 0 value can be a socket + * shutdown during the recv() or send() call. + */ + ret_func = 1; + goto end; + +end_msg_sessiond: + /* + * The returned value here is not useful since either way we'll return 1 to + * the caller because the session daemon socket management is done + * elsewhere. Returning a negative code or 0 will shutdown the consumer. + */ + { + int ret_send_status; + + ret_send_status = consumer_send_status_msg(sock, ret_code); + if (ret_send_status < 0) { + goto error_fatal; + } + } + + ret_func = 1; + goto end; + +end_channel_error: + if (channel) { + /* + * Free channel here since no one has a reference to it. We don't + * free after that because a stream can store this pointer. + */ + destroy_channel(channel); + } + /* We have to send a status channel message indicating an error. */ + { + int ret_send_status; + + ret_send_status = consumer_send_status_channel(sock, NULL); + if (ret_send_status < 0) { + /* Stop everything if session daemon can not be notified. */ + goto error_fatal; + } + } + + ret_func = 1; + goto end; + +error_fatal: + /* This will issue a consumer stop. 
*/ + ret_func = -1; + goto end; + +end: + rcu_read_unlock(); + health_code_update(); + return ret_func; +} + +int lttng_ust_flush_buffer(struct lttng_consumer_stream *stream, + int producer_active) +{ + LTTNG_ASSERT(stream); + LTTNG_ASSERT(stream->ustream); + + return lttng_ust_ctl_flush_buffer(stream->ustream, producer_active); +} + +/* + * Take a snapshot for a specific stream. + * + * Returns 0 on success, < 0 on error + */ +int lttng_ustconsumer_take_snapshot(struct lttng_consumer_stream *stream) +{ + LTTNG_ASSERT(stream); + LTTNG_ASSERT(stream->ustream); + + return lttng_ust_ctl_snapshot(stream->ustream); +} + +/* + * Sample consumed and produced positions for a specific stream. + * + * Returns 0 on success, < 0 on error. + */ +int lttng_ustconsumer_sample_snapshot_positions( + struct lttng_consumer_stream *stream) +{ + LTTNG_ASSERT(stream); + LTTNG_ASSERT(stream->ustream); + + return lttng_ust_ctl_snapshot_sample_positions(stream->ustream); +} + +/* + * Get the produced position + * + * Returns 0 on success, < 0 on error + */ +int lttng_ustconsumer_get_produced_snapshot( + struct lttng_consumer_stream *stream, unsigned long *pos) +{ + LTTNG_ASSERT(stream); + LTTNG_ASSERT(stream->ustream); + LTTNG_ASSERT(pos); + + return lttng_ust_ctl_snapshot_get_produced(stream->ustream, pos); +} + +/* + * Get the consumed position + * + * Returns 0 on success, < 0 on error + */ +int lttng_ustconsumer_get_consumed_snapshot( + struct lttng_consumer_stream *stream, unsigned long *pos) +{ + LTTNG_ASSERT(stream); + LTTNG_ASSERT(stream->ustream); + LTTNG_ASSERT(pos); + + return lttng_ust_ctl_snapshot_get_consumed(stream->ustream, pos); +} + +int lttng_ustconsumer_flush_buffer(struct lttng_consumer_stream *stream, + int producer) +{ + LTTNG_ASSERT(stream); + LTTNG_ASSERT(stream->ustream); + + return lttng_ust_ctl_flush_buffer(stream->ustream, producer); +} + +int lttng_ustconsumer_clear_buffer(struct lttng_consumer_stream *stream) +{ + LTTNG_ASSERT(stream); + 
LTTNG_ASSERT(stream->ustream); + + return lttng_ust_ctl_clear_buffer(stream->ustream); +} + +int lttng_ustconsumer_get_current_timestamp( + struct lttng_consumer_stream *stream, uint64_t *ts) +{ + LTTNG_ASSERT(stream); + LTTNG_ASSERT(stream->ustream); + LTTNG_ASSERT(ts); + + return lttng_ust_ctl_get_current_timestamp(stream->ustream, ts); +} + +int lttng_ustconsumer_get_sequence_number( + struct lttng_consumer_stream *stream, uint64_t *seq) +{ + LTTNG_ASSERT(stream); + LTTNG_ASSERT(stream->ustream); + LTTNG_ASSERT(seq); + + return lttng_ust_ctl_get_sequence_number(stream->ustream, seq); +} + +/* + * Called when the stream signals the consumer that it has hung up. + */ +void lttng_ustconsumer_on_stream_hangup(struct lttng_consumer_stream *stream) +{ + LTTNG_ASSERT(stream); + LTTNG_ASSERT(stream->ustream); + + pthread_mutex_lock(&stream->lock); + if (!stream->quiescent) { + if (lttng_ust_ctl_flush_buffer(stream->ustream, 0) < 0) { + ERR("Failed to flush buffer on stream hang-up"); + } else { + stream->quiescent = true; + } + } + pthread_mutex_unlock(&stream->lock); + stream->hangup_flush_done = 1; +} + +void lttng_ustconsumer_del_channel(struct lttng_consumer_channel *chan) +{ + int i; + + LTTNG_ASSERT(chan); + LTTNG_ASSERT(chan->uchan); + LTTNG_ASSERT(chan->buffer_credentials.is_set); + + if (chan->switch_timer_enabled == 1) { + consumer_timer_switch_stop(chan); + } + for (i = 0; i < chan->nr_stream_fds; i++) { + int ret; + + ret = close(chan->stream_fds[i]); + if (ret) { + PERROR("close"); + } + if (chan->shm_path[0]) { + char shm_path[PATH_MAX]; + + ret = get_stream_shm_path(shm_path, chan->shm_path, i); + if (ret) { + ERR("Cannot get stream shm path"); + } + ret = run_as_unlink(shm_path, + lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR( + chan->buffer_credentials)), + lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR( + chan->buffer_credentials))); + if (ret) { + PERROR("unlink %s", shm_path); + } + } + } +} + +void lttng_ustconsumer_free_channel(struct 
lttng_consumer_channel *chan) +{ + LTTNG_ASSERT(chan); + LTTNG_ASSERT(chan->uchan); + LTTNG_ASSERT(chan->buffer_credentials.is_set); + + consumer_metadata_cache_destroy(chan); + lttng_ust_ctl_destroy_channel(chan->uchan); + /* Try to rmdir all directories under shm_path root. */ + if (chan->root_shm_path[0]) { + (void) run_as_rmdir_recursive(chan->root_shm_path, + lttng_credentials_get_uid(LTTNG_OPTIONAL_GET_PTR( + chan->buffer_credentials)), + lttng_credentials_get_gid(LTTNG_OPTIONAL_GET_PTR( + chan->buffer_credentials)), + LTTNG_DIRECTORY_HANDLE_SKIP_NON_EMPTY_FLAG); + } + free(chan->stream_fds); +} + +void lttng_ustconsumer_del_stream(struct lttng_consumer_stream *stream) +{ + LTTNG_ASSERT(stream); + LTTNG_ASSERT(stream->ustream); + + if (stream->chan->switch_timer_enabled == 1) { + consumer_timer_switch_stop(stream->chan); + } + lttng_ust_ctl_destroy_stream(stream->ustream); +} + +int lttng_ustconsumer_get_wakeup_fd(struct lttng_consumer_stream *stream) +{ + LTTNG_ASSERT(stream); + LTTNG_ASSERT(stream->ustream); + + return lttng_ust_ctl_stream_get_wakeup_fd(stream->ustream); +} + +int lttng_ustconsumer_close_wakeup_fd(struct lttng_consumer_stream *stream) +{ + LTTNG_ASSERT(stream); + LTTNG_ASSERT(stream->ustream); + + return lttng_ust_ctl_stream_close_wakeup_fd(stream->ustream); +} + +/* + * Write up to one packet from the metadata cache to the channel. + * + * Returns the number of bytes pushed from the cache into the ring buffer, or a + * negative value on error. + */ +static +int commit_one_metadata_packet(struct lttng_consumer_stream *stream) +{ + ssize_t write_len; + int ret; + + pthread_mutex_lock(&stream->chan->metadata_cache->lock); + if (stream->chan->metadata_cache->contents.size == + stream->ust_metadata_pushed) { + /* + * In the context of a user space metadata channel, a + * change in version can be detected in two ways: + * 1) During the pre-consume of the `read_subbuffer` loop, + * 2) When populating the metadata ring buffer (i.e. here). 
+ * + * This function is invoked when there is no metadata + * available in the ring-buffer. If all data was consumed + * up to the size of the metadata cache, there is no metadata + * to insert in the ring-buffer. + * + * However, the metadata version could still have changed (a + * regeneration without any new data will yield the same cache + * size). + * + * The cache's version is checked for a version change and the + * consumed position is reset if one occurred. + * + * This check is only necessary for the user space domain as + * it has to manage the cache explicitly. If this reset was not + * performed, no metadata would be consumed (and no reset would + * occur as part of the pre-consume) until the metadata size + * exceeded the cache size. + */ + if (stream->metadata_version != + stream->chan->metadata_cache->version) { + metadata_stream_reset_cache_consumed_position(stream); + consumer_stream_metadata_set_version(stream, + stream->chan->metadata_cache->version); + } else { + ret = 0; + goto end; + } + } + + write_len = lttng_ust_ctl_write_one_packet_to_channel(stream->chan->uchan, + &stream->chan->metadata_cache->contents.data[stream->ust_metadata_pushed], + stream->chan->metadata_cache->contents.size - + stream->ust_metadata_pushed); + LTTNG_ASSERT(write_len != 0); + if (write_len < 0) { + ERR("Writing one metadata packet"); + ret = write_len; + goto end; + } + stream->ust_metadata_pushed += write_len; + + LTTNG_ASSERT(stream->chan->metadata_cache->contents.size >= + stream->ust_metadata_pushed); + ret = write_len; + + /* + * Switch packet (but don't open the next one) on every commit of + * a metadata packet. Since the subbuffer is fully filled (with padding, + * if needed), the stream is "quiescent" after this commit. 
+ */ + if (lttng_ust_ctl_flush_buffer(stream->ustream, 1)) { + ERR("Failed to flush buffer while commiting one metadata packet"); + ret = -EIO; + } else { + stream->quiescent = true; + } +end: + pthread_mutex_unlock(&stream->chan->metadata_cache->lock); + return ret; +} + + +/* + * Sync metadata meaning request them to the session daemon and snapshot to the + * metadata thread can consumer them. + * + * Metadata stream lock is held here, but we need to release it when + * interacting with sessiond, else we cause a deadlock with live + * awaiting on metadata to be pushed out. + * + * The RCU read side lock must be held by the caller. + */ +enum sync_metadata_status lttng_ustconsumer_sync_metadata( + struct lttng_consumer_local_data *ctx, + struct lttng_consumer_stream *metadata_stream) +{ + int ret; + enum sync_metadata_status status; + struct lttng_consumer_channel *metadata_channel; + + LTTNG_ASSERT(ctx); + LTTNG_ASSERT(metadata_stream); + + metadata_channel = metadata_stream->chan; + pthread_mutex_unlock(&metadata_stream->lock); + /* + * Request metadata from the sessiond, but don't wait for the flush + * because we locked the metadata thread. + */ + ret = lttng_ustconsumer_request_metadata(ctx, metadata_channel, 0, 0); + pthread_mutex_lock(&metadata_stream->lock); + if (ret < 0) { + status = SYNC_METADATA_STATUS_ERROR; + goto end; + } + + /* + * The metadata stream and channel can be deleted while the + * metadata stream lock was released. The streamed is checked + * for deletion before we use it further. + * + * Note that it is safe to access a logically-deleted stream since its + * existence is still guaranteed by the RCU read side lock. However, + * it should no longer be used. The close/deletion of the metadata + * channel and stream already guarantees that all metadata has been + * consumed. Therefore, there is nothing left to do in this function. 
+ */ + if (consumer_stream_is_deleted(metadata_stream)) { + DBG("Metadata stream %" PRIu64 " was deleted during the metadata synchronization", + metadata_stream->key); + status = SYNC_METADATA_STATUS_NO_DATA; + goto end; + } + + ret = commit_one_metadata_packet(metadata_stream); + if (ret < 0) { + status = SYNC_METADATA_STATUS_ERROR; + goto end; + } else if (ret > 0) { + status = SYNC_METADATA_STATUS_NEW_DATA; + } else /* ret == 0 */ { + status = SYNC_METADATA_STATUS_NO_DATA; + goto end; + } + + ret = lttng_ust_ctl_snapshot(metadata_stream->ustream); + if (ret < 0) { + ERR("Failed to take a snapshot of the metadata ring-buffer positions, ret = %d", ret); + status = SYNC_METADATA_STATUS_ERROR; + goto end; + } + +end: + return status; +} + +/* + * Return 0 on success else a negative value. + */ +static int notify_if_more_data(struct lttng_consumer_stream *stream, + struct lttng_consumer_local_data *ctx) +{ + int ret; + struct lttng_ust_ctl_consumer_stream *ustream; + + LTTNG_ASSERT(stream); + LTTNG_ASSERT(ctx); + + ustream = stream->ustream; + + /* + * First, we are going to check if there is a new subbuffer available + * before reading the stream wait_fd. + */ + /* Get the next subbuffer */ + ret = lttng_ust_ctl_get_next_subbuf(ustream); + if (ret) { + /* No more data found, flag the stream. */ + stream->has_data = 0; + ret = 0; + goto end; + } + + ret = lttng_ust_ctl_put_subbuf(ustream); + LTTNG_ASSERT(!ret); + + /* This stream still has data. Flag it and wake up the data thread. */ + stream->has_data = 1; + + if (stream->monitor && !stream->hangup_flush_done && !ctx->has_wakeup) { + ssize_t writelen; + + writelen = lttng_pipe_write(ctx->consumer_wakeup_pipe, "!", 1); + if (writelen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) { + ret = writelen; + goto end; + } + + /* The wake up pipe has been notified. 
*/ + ctx->has_wakeup = 1; + } + ret = 0; + +end: + return ret; +} + +static int consumer_stream_ust_on_wake_up(struct lttng_consumer_stream *stream) +{ + int ret = 0; + + /* + * We can consume the 1 byte written into the wait_fd by + * UST. Don't trigger error if we cannot read this one byte + * (read returns 0), or if the error is EAGAIN or EWOULDBLOCK. + * + * This is only done when the stream is monitored by a thread, + * before the flush is done after a hangup and if the stream + * is not flagged with data since there might be nothing to + * consume in the wait fd but still have data available + * flagged by the consumer wake up pipe. + */ + if (stream->monitor && !stream->hangup_flush_done && !stream->has_data) { + char dummy; + ssize_t readlen; + + readlen = lttng_read(stream->wait_fd, &dummy, 1); + if (readlen < 0 && errno != EAGAIN && errno != EWOULDBLOCK) { + ret = readlen; + } + } + + return ret; +} + +static int extract_common_subbuffer_info(struct lttng_consumer_stream *stream, + struct stream_subbuffer *subbuf) +{ + int ret; + + ret = lttng_ust_ctl_get_subbuf_size( + stream->ustream, &subbuf->info.data.subbuf_size); + if (ret) { + goto end; + } + + ret = lttng_ust_ctl_get_padded_subbuf_size( + stream->ustream, &subbuf->info.data.padded_subbuf_size); + if (ret) { + goto end; + } + +end: + return ret; +} + +static int extract_metadata_subbuffer_info(struct lttng_consumer_stream *stream, + struct stream_subbuffer *subbuf) +{ + int ret; + + ret = extract_common_subbuffer_info(stream, subbuf); + if (ret) { + goto end; + } + + subbuf->info.metadata.version = stream->metadata_version; + +end: + return ret; +} + +static int extract_data_subbuffer_info(struct lttng_consumer_stream *stream, + struct stream_subbuffer *subbuf) +{ + int ret; + + ret = extract_common_subbuffer_info(stream, subbuf); + if (ret) { + goto end; + } + + ret = lttng_ust_ctl_get_packet_size( + stream->ustream, &subbuf->info.data.packet_size); + if (ret < 0) { + PERROR("Failed to get 
sub-buffer packet size"); + goto end; + } + + ret = lttng_ust_ctl_get_content_size( + stream->ustream, &subbuf->info.data.content_size); + if (ret < 0) { + PERROR("Failed to get sub-buffer content size"); + goto end; + } + + ret = lttng_ust_ctl_get_timestamp_begin( + stream->ustream, &subbuf->info.data.timestamp_begin); + if (ret < 0) { + PERROR("Failed to get sub-buffer begin timestamp"); + goto end; + } + + ret = lttng_ust_ctl_get_timestamp_end( + stream->ustream, &subbuf->info.data.timestamp_end); + if (ret < 0) { + PERROR("Failed to get sub-buffer end timestamp"); + goto end; + } + + ret = lttng_ust_ctl_get_events_discarded( + stream->ustream, &subbuf->info.data.events_discarded); + if (ret) { + PERROR("Failed to get sub-buffer events discarded count"); + goto end; + } + + ret = lttng_ust_ctl_get_sequence_number(stream->ustream, + &subbuf->info.data.sequence_number.value); + if (ret) { + /* May not be supported by older LTTng-modules. */ + if (ret != -ENOTTY) { + PERROR("Failed to get sub-buffer sequence number"); + goto end; + } + } else { + subbuf->info.data.sequence_number.is_set = true; + } + + ret = lttng_ust_ctl_get_stream_id( + stream->ustream, &subbuf->info.data.stream_id); + if (ret < 0) { + PERROR("Failed to get stream id"); + goto end; + } + + ret = lttng_ust_ctl_get_instance_id(stream->ustream, + &subbuf->info.data.stream_instance_id.value); + if (ret) { + /* May not be supported by older LTTng-modules. 
*/ + if (ret != -ENOTTY) { + PERROR("Failed to get stream instance id"); + goto end; + } + } else { + subbuf->info.data.stream_instance_id.is_set = true; + } +end: + return ret; +} + +static int get_next_subbuffer_common(struct lttng_consumer_stream *stream, + struct stream_subbuffer *subbuffer) +{ + int ret; + const char *addr; + + ret = stream->read_subbuffer_ops.extract_subbuffer_info( + stream, subbuffer); + if (ret) { + goto end; + } + + ret = get_current_subbuf_addr(stream, &addr); + if (ret) { + goto end; + } + + subbuffer->buffer.buffer = lttng_buffer_view_init( + addr, 0, subbuffer->info.data.padded_subbuf_size); + LTTNG_ASSERT(subbuffer->buffer.buffer.data != NULL); +end: + return ret; +} + +static enum get_next_subbuffer_status get_next_subbuffer( + struct lttng_consumer_stream *stream, + struct stream_subbuffer *subbuffer) +{ + int ret; + enum get_next_subbuffer_status status; + + ret = lttng_ust_ctl_get_next_subbuf(stream->ustream); + switch (ret) { + case 0: + status = GET_NEXT_SUBBUFFER_STATUS_OK; + break; + case -ENODATA: + case -EAGAIN: + /* + * The caller only expects -ENODATA when there is no data to + * read, but the kernel tracer returns -EAGAIN when there is + * currently no data for a non-finalized stream, and -ENODATA + * when there is no data for a finalized stream. Those can be + * combined into a -ENODATA return value. 
+ */ + status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA; + goto end; + default: + status = GET_NEXT_SUBBUFFER_STATUS_ERROR; + goto end; + } + + ret = get_next_subbuffer_common(stream, subbuffer); + if (ret) { + status = GET_NEXT_SUBBUFFER_STATUS_ERROR; + goto end; + } +end: + return status; +} + +static enum get_next_subbuffer_status get_next_subbuffer_metadata( + struct lttng_consumer_stream *stream, + struct stream_subbuffer *subbuffer) +{ + int ret; + bool cache_empty; + bool got_subbuffer; + bool coherent; + bool buffer_empty; + unsigned long consumed_pos, produced_pos; + enum get_next_subbuffer_status status; + + do { + ret = lttng_ust_ctl_get_next_subbuf(stream->ustream); + if (ret == 0) { + got_subbuffer = true; + } else { + got_subbuffer = false; + if (ret != -EAGAIN) { + /* Fatal error. */ + status = GET_NEXT_SUBBUFFER_STATUS_ERROR; + goto end; + } + } + + /* + * Determine if the cache is empty and ensure that a sub-buffer + * is made available if the cache is not empty. + */ + if (!got_subbuffer) { + ret = commit_one_metadata_packet(stream); + if (ret < 0 && ret != -ENOBUFS) { + status = GET_NEXT_SUBBUFFER_STATUS_ERROR; + goto end; + } else if (ret == 0) { + /* Not an error, the cache is empty. */ + cache_empty = true; + status = GET_NEXT_SUBBUFFER_STATUS_NO_DATA; + goto end; + } else { + cache_empty = false; + } + } else { + pthread_mutex_lock(&stream->chan->metadata_cache->lock); + cache_empty = stream->chan->metadata_cache->contents.size == + stream->ust_metadata_pushed; + pthread_mutex_unlock(&stream->chan->metadata_cache->lock); + } + } while (!got_subbuffer); + + /* Populate sub-buffer infos and view. */ + ret = get_next_subbuffer_common(stream, subbuffer); + if (ret) { + status = GET_NEXT_SUBBUFFER_STATUS_ERROR; + goto end; + } + + ret = lttng_ustconsumer_sample_snapshot_positions(stream); + if (ret < 0) { + /* + * -EAGAIN is not expected since we got a sub-buffer and haven't + * pushed the consumption position yet (on put_next). 
+ */ + PERROR("Failed to take a snapshot of metadata buffer positions"); + status = GET_NEXT_SUBBUFFER_STATUS_ERROR; + goto end; + } + + ret = lttng_ustconsumer_get_consumed_snapshot(stream, &consumed_pos); + if (ret) { + PERROR("Failed to get metadata consumed position"); + status = GET_NEXT_SUBBUFFER_STATUS_ERROR; + goto end; + } + + ret = lttng_ustconsumer_get_produced_snapshot(stream, &produced_pos); + if (ret) { + PERROR("Failed to get metadata produced position"); + status = GET_NEXT_SUBBUFFER_STATUS_ERROR; + goto end; + } + + /* Last sub-buffer of the ring buffer ? */ + buffer_empty = (consumed_pos + stream->max_sb_size) == produced_pos; + + /* + * The sessiond registry lock ensures that coherent units of metadata + * are pushed to the consumer daemon at once. Hence, if a sub-buffer is + * acquired, the cache is empty, and it is the only available sub-buffer + * available, it is safe to assume that it is "coherent". + */ + coherent = got_subbuffer && cache_empty && buffer_empty; + + LTTNG_OPTIONAL_SET(&subbuffer->info.metadata.coherent, coherent); + status = GET_NEXT_SUBBUFFER_STATUS_OK; +end: + return status; +} + +static int put_next_subbuffer(struct lttng_consumer_stream *stream, + struct stream_subbuffer *subbuffer) +{ + const int ret = lttng_ust_ctl_put_next_subbuf(stream->ustream); + + LTTNG_ASSERT(ret == 0); + return ret; +} + +static int signal_metadata(struct lttng_consumer_stream *stream, + struct lttng_consumer_local_data *ctx) +{ + ASSERT_LOCKED(stream->metadata_rdv_lock); + return pthread_cond_broadcast(&stream->metadata_rdv) ? 
-errno : 0; +} + +static int lttng_ustconsumer_set_stream_ops( + struct lttng_consumer_stream *stream) +{ + int ret = 0; + + stream->read_subbuffer_ops.on_wake_up = consumer_stream_ust_on_wake_up; + if (stream->metadata_flag) { + stream->read_subbuffer_ops.get_next_subbuffer = + get_next_subbuffer_metadata; + stream->read_subbuffer_ops.extract_subbuffer_info = + extract_metadata_subbuffer_info; + stream->read_subbuffer_ops.reset_metadata = + metadata_stream_reset_cache_consumed_position; + if (stream->chan->is_live) { + stream->read_subbuffer_ops.on_sleep = signal_metadata; + ret = consumer_stream_enable_metadata_bucketization( + stream); + if (ret) { + goto end; + } + } + } else { + stream->read_subbuffer_ops.get_next_subbuffer = + get_next_subbuffer; + stream->read_subbuffer_ops.extract_subbuffer_info = + extract_data_subbuffer_info; + stream->read_subbuffer_ops.on_sleep = notify_if_more_data; + if (stream->chan->is_live) { + stream->read_subbuffer_ops.send_live_beacon = + consumer_flush_ust_index; + } + } + + stream->read_subbuffer_ops.put_next_subbuffer = put_next_subbuffer; +end: + return ret; +} + +/* + * Called when a stream is created. + * + * Return 0 on success or else a negative value. + */ +int lttng_ustconsumer_on_recv_stream(struct lttng_consumer_stream *stream) +{ + int ret; + + LTTNG_ASSERT(stream); + + /* + * Don't create anything if this is set for streaming or if there is + * no current trace chunk on the parent channel. + */ + if (stream->net_seq_idx == (uint64_t) -1ULL && stream->chan->monitor && + stream->chan->trace_chunk) { + ret = consumer_stream_create_output_files(stream, true); + if (ret) { + goto error; + } + } + + lttng_ustconsumer_set_stream_ops(stream); + ret = 0; + +error: + return ret; +} + +/* + * Check if data is still being extracted from the buffers for a specific + * stream. Consumer data lock MUST be acquired before calling this function + * and the stream lock. 
+ * + * Return 1 if the traced data are still getting read else 0 meaning that the + * data is available for trace viewer reading. + */ +int lttng_ustconsumer_data_pending(struct lttng_consumer_stream *stream) +{ + int ret; + + LTTNG_ASSERT(stream); + LTTNG_ASSERT(stream->ustream); + ASSERT_LOCKED(stream->lock); + + DBG("UST consumer checking data pending"); + + if (stream->endpoint_status != CONSUMER_ENDPOINT_ACTIVE) { + ret = 0; + goto end; + } + + if (stream->chan->type == CONSUMER_CHANNEL_TYPE_METADATA) { + uint64_t contiguous, pushed; + + /* Ease our life a bit. */ + pthread_mutex_lock(&stream->chan->metadata_cache->lock); + contiguous = stream->chan->metadata_cache->contents.size; + pthread_mutex_unlock(&stream->chan->metadata_cache->lock); + pushed = stream->ust_metadata_pushed; + + /* + * We can simply check whether all contiguously available data + * has been pushed to the ring buffer, since the push operation + * is performed within get_next_subbuf(), and because both + * get_next_subbuf() and put_next_subbuf() are issued atomically + * thanks to the stream lock within + * lttng_ustconsumer_read_subbuffer(). This basically means that + * whetnever ust_metadata_pushed is incremented, the associated + * metadata has been consumed from the metadata stream. + */ + DBG("UST consumer metadata pending check: contiguous %" PRIu64 " vs pushed %" PRIu64, + contiguous, pushed); + LTTNG_ASSERT(((int64_t) (contiguous - pushed)) >= 0); + if ((contiguous != pushed) || + (((int64_t) contiguous - pushed) > 0 || contiguous == 0)) { + ret = 1; /* Data is pending */ + goto end; + } + } else { + ret = lttng_ust_ctl_get_next_subbuf(stream->ustream); + if (ret == 0) { + /* + * There is still data so let's put back this + * subbuffer. + */ + ret = lttng_ust_ctl_put_subbuf(stream->ustream); + LTTNG_ASSERT(ret == 0); + ret = 1; /* Data is pending */ + goto end; + } + } + + /* Data is NOT pending so ready to be read. 
*/ + ret = 0; + +end: + return ret; +} + +/* + * Stop a given metadata channel timer if enabled and close the wait fd which + * is the poll pipe of the metadata stream. + * + * This MUST be called with the metadata channel lock acquired. + */ +void lttng_ustconsumer_close_metadata(struct lttng_consumer_channel *metadata) +{ + int ret; + + LTTNG_ASSERT(metadata); + LTTNG_ASSERT(metadata->type == CONSUMER_CHANNEL_TYPE_METADATA); + + DBG("Closing metadata channel key %" PRIu64, metadata->key); + + if (metadata->switch_timer_enabled == 1) { + consumer_timer_switch_stop(metadata); + } + + if (!metadata->metadata_stream) { + goto end; + } + + /* + * Closing write side so the thread monitoring the stream wakes up if any + * and clean the metadata stream. + */ + if (metadata->metadata_stream->ust_metadata_poll_pipe[1] >= 0) { + ret = close(metadata->metadata_stream->ust_metadata_poll_pipe[1]); + if (ret < 0) { + PERROR("closing metadata pipe write side"); + } + metadata->metadata_stream->ust_metadata_poll_pipe[1] = -1; + } + +end: + return; +} + +/* + * Close every metadata stream wait fd of the metadata hash table. This + * function MUST be used very carefully so not to run into a race between the + * metadata thread handling streams and this function closing their wait fd. + * + * For UST, this is used when the session daemon hangs up. Its the metadata + * producer so calling this is safe because we are assured that no state change + * can occur in the metadata thread for the streams in the hash table. 
+ */ +void lttng_ustconsumer_close_all_metadata(struct lttng_ht *metadata_ht) +{ + struct lttng_ht_iter iter; + struct lttng_consumer_stream *stream; + + LTTNG_ASSERT(metadata_ht); + LTTNG_ASSERT(metadata_ht->ht); + + DBG("UST consumer closing all metadata streams"); + + rcu_read_lock(); + cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, + node.node) { + + health_code_update(); + + pthread_mutex_lock(&stream->chan->lock); + lttng_ustconsumer_close_metadata(stream->chan); + pthread_mutex_unlock(&stream->chan->lock); + + } + rcu_read_unlock(); +} + +void lttng_ustconsumer_close_stream_wakeup(struct lttng_consumer_stream *stream) +{ + int ret; + + ret = lttng_ust_ctl_stream_close_wakeup_fd(stream->ustream); + if (ret < 0) { + ERR("Unable to close wakeup fd"); + } +} + +/* + * Please refer to consumer-timer.c before adding any lock within this + * function or any of its callees. Timers have a very strict locking + * semantic with respect to teardown. Failure to respect this semantic + * introduces deadlocks. + * + * DON'T hold the metadata lock when calling this function, else this + * can cause deadlock involving consumer awaiting for metadata to be + * pushed out due to concurrent interaction with the session daemon. 
+ */ +int lttng_ustconsumer_request_metadata(struct lttng_consumer_local_data *ctx, + struct lttng_consumer_channel *channel, int timer, int wait) +{ + struct lttcomm_metadata_request_msg request; + struct lttcomm_consumer_msg msg; + enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS; + uint64_t len, key, offset, version; + int ret; + + LTTNG_ASSERT(channel); + LTTNG_ASSERT(channel->metadata_cache); + + memset(&request, 0, sizeof(request)); + + /* send the metadata request to sessiond */ + switch (the_consumer_data.type) { + case LTTNG_CONSUMER64_UST: + request.bits_per_long = 64; + break; + case LTTNG_CONSUMER32_UST: + request.bits_per_long = 32; + break; + default: + request.bits_per_long = 0; + break; + } + + request.session_id = channel->session_id; + request.session_id_per_pid = channel->session_id_per_pid; + /* + * Request the application UID here so the metadata of that application can + * be sent back. The channel UID corresponds to the user UID of the session + * used for the rights on the stream file(s). 
+ */ + request.uid = channel->ust_app_uid; + request.key = channel->key; + + DBG("Sending metadata request to sessiond, session id %" PRIu64 + ", per-pid %" PRIu64 ", app UID %u and channel key %" PRIu64, + request.session_id, request.session_id_per_pid, request.uid, + request.key); + + pthread_mutex_lock(&ctx->metadata_socket_lock); + + health_code_update(); + + ret = lttcomm_send_unix_sock(ctx->consumer_metadata_socket, &request, + sizeof(request)); + if (ret < 0) { + ERR("Asking metadata to sessiond"); + goto end; + } + + health_code_update(); + + /* Receive the metadata from sessiond */ + ret = lttcomm_recv_unix_sock(ctx->consumer_metadata_socket, &msg, + sizeof(msg)); + if (ret != sizeof(msg)) { + DBG("Consumer received unexpected message size %d (expects %zu)", + ret, sizeof(msg)); + lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_CMD); + /* + * The ret value might 0 meaning an orderly shutdown but this is ok + * since the caller handles this. + */ + goto end; + } + + health_code_update(); + + if (msg.cmd_type == LTTNG_ERR_UND) { + /* No registry found */ + (void) consumer_send_status_msg(ctx->consumer_metadata_socket, + ret_code); + ret = 0; + goto end; + } else if (msg.cmd_type != LTTNG_CONSUMER_PUSH_METADATA) { + ERR("Unexpected cmd_type received %d", msg.cmd_type); + ret = -1; + goto end; + } + + len = msg.u.push_metadata.len; + key = msg.u.push_metadata.key; + offset = msg.u.push_metadata.target_offset; + version = msg.u.push_metadata.version; + + LTTNG_ASSERT(key == channel->key); + if (len == 0) { + DBG("No new metadata to receive for key %" PRIu64, key); + } + + health_code_update(); + + /* Tell session daemon we are ready to receive the metadata. */ + ret = consumer_send_status_msg(ctx->consumer_metadata_socket, + LTTCOMM_CONSUMERD_SUCCESS); + if (ret < 0 || len == 0) { + /* + * Somehow, the session daemon is not responding anymore or there is + * nothing to receive. 
+ */ + goto end; + } + + health_code_update(); + + ret = lttng_ustconsumer_recv_metadata(ctx->consumer_metadata_socket, + key, offset, len, version, channel, timer, wait); + if (ret >= 0) { + /* + * Only send the status msg if the sessiond is alive meaning a positive + * ret code. + */ + (void) consumer_send_status_msg(ctx->consumer_metadata_socket, ret); + } + ret = 0; + +end: + health_code_update(); + + pthread_mutex_unlock(&ctx->metadata_socket_lock); + return ret; +} + +/* + * Return the ustctl call for the get stream id. + */ +int lttng_ustconsumer_get_stream_id(struct lttng_consumer_stream *stream, + uint64_t *stream_id) +{ + LTTNG_ASSERT(stream); + LTTNG_ASSERT(stream_id); + + return lttng_ust_ctl_get_stream_id(stream->ustream, stream_id); +} + +void lttng_ustconsumer_sigbus_handle(void *addr) +{ + lttng_ust_ctl_sigbus_handle(addr); +} diff --git a/src/common/ust-consumer/ust-consumer.h b/src/common/ust-consumer/ust-consumer.h index e2507a7f4..29ae853da 100644 --- a/src/common/ust-consumer/ust-consumer.h +++ b/src/common/ust-consumer/ust-consumer.h @@ -13,6 +13,10 @@ #include #include +#ifdef __cplusplus +extern "C" { +#endif + #ifdef HAVE_LIBLTTNG_UST_CTL int lttng_ustconsumer_take_snapshot(struct lttng_consumer_stream *stream); @@ -240,4 +244,8 @@ void lttng_ustconsumer_sigbus_handle(void *addr) } #endif /* HAVE_LIBLTTNG_UST_CTL */ +#ifdef __cplusplus +} +#endif + #endif /* _LTTNG_USTCONSUMER_H */ diff --git a/tests/regression/tools/notification/Makefile.am b/tests/regression/tools/notification/Makefile.am index b9e4b8ac2..eaab5314a 100644 --- a/tests/regression/tools/notification/Makefile.am +++ b/tests/regression/tools/notification/Makefile.am @@ -12,8 +12,8 @@ if NO_SHARED CLEANFILES = libpause_consumer.so libpause_consumer.so.debug libpause_sessiond.so libpause_sessiond.so.debug EXTRA_DIST = \ base_client.c \ - consumer_testpoints.c \ - sessiond_testpoints.c \ + consumer_testpoints.cpp \ + sessiond_testpoints.cpp \ notification.c \ 
test_notification_kernel_buffer_usage \ test_notification_kernel_capture \ @@ -35,7 +35,7 @@ else FORCE_SHARED_LIB_OPTIONS = -module -shared -avoid-version \ -rpath $(abs_builddir) -libpause_consumer_la_SOURCES = consumer_testpoints.c +libpause_consumer_la_SOURCES = consumer_testpoints.cpp libpause_consumer_la_LIBADD = \ $(top_builddir)/src/common/sessiond-comm/libsessiond-comm.la \ $(top_builddir)/src/common/libcommon.la \ @@ -43,7 +43,7 @@ libpause_consumer_la_LIBADD = \ $(DL_LIBS) libpause_consumer_la_LDFLAGS = $(FORCE_SHARED_LIB_OPTIONS) -libpause_sessiond_la_SOURCES = sessiond_testpoints.c +libpause_sessiond_la_SOURCES = sessiond_testpoints.cpp libpause_sessiond_la_LIBADD = \ $(top_builddir)/src/common/libcommon.la \ $(top_builddir)/src/lib/lttng-ctl/liblttng-ctl.la \ diff --git a/tests/regression/tools/notification/consumer_testpoints.c b/tests/regression/tools/notification/consumer_testpoints.c deleted file mode 100644 index 6294683f9..000000000 --- a/tests/regression/tools/notification/consumer_testpoints.c +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright (C) 2017 Jérémie Galarneau - * - * SPDX-License-Identifier: GPL-2.0-only - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static char *pause_pipe_path; -static struct lttng_pipe *pause_pipe; -static int *data_consumption_state; -static enum lttng_consumer_type (*lttng_consumer_get_type)(void); - -int lttng_opt_verbose; -int lttng_opt_mi; -int lttng_opt_quiet; - -static -void __attribute__((destructor)) pause_pipe_fini(void) -{ - int ret; - - if (pause_pipe_path) { - ret = unlink(pause_pipe_path); - if (ret) { - PERROR("unlink pause pipe"); - } - } - - free(pause_pipe_path); - lttng_pipe_destroy(pause_pipe); -} - -/* - * We use this testpoint, invoked at the start of the consumerd's data handling - * thread to create a named pipe/FIFO which a test application can use to either - * pause or resume the consumption of data. 
- */ -LTTNG_EXPORT int __testpoint_consumerd_thread_data(void); -int __testpoint_consumerd_thread_data(void) -{ - int ret = 0; - const char *pause_pipe_path_prefix, *domain; - - pause_pipe_path_prefix = lttng_secure_getenv( - "CONSUMER_PAUSE_PIPE_PATH"); - if (!pause_pipe_path_prefix) { - ret = -1; - goto end; - } - - /* - * These symbols are exclusive to the consumerd process, hence we can't - * rely on their presence in the sessiond. Not looking-up these symbols - * dynamically would not allow this shared object to be LD_PRELOAD-ed - * when launching the session daemon. - */ - data_consumption_state = dlsym(NULL, "data_consumption_paused"); - LTTNG_ASSERT(data_consumption_state); - lttng_consumer_get_type = dlsym(NULL, "lttng_consumer_get_type"); - LTTNG_ASSERT(lttng_consumer_get_type); - - switch (lttng_consumer_get_type()) { - case LTTNG_CONSUMER_KERNEL: - domain = "kernel"; - break; - case LTTNG_CONSUMER32_UST: - domain = "ust32"; - break; - case LTTNG_CONSUMER64_UST: - domain = "ust64"; - break; - default: - abort(); - } - - ret = asprintf(&pause_pipe_path, "%s-%s", pause_pipe_path_prefix, - domain); - if (ret < 1) { - ERR("Failed to allocate pause pipe path"); - goto end; - } - - DBG("Creating pause pipe at %s", pause_pipe_path); - pause_pipe = lttng_pipe_named_open(pause_pipe_path, - S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP, O_NONBLOCK); - if (!pause_pipe) { - ERR("Failed to create pause pipe at %s", pause_pipe_path); - ret = -1; - goto end; - } - - /* Only the read end of the pipe is useful to us. */ - ret = lttng_pipe_write_close(pause_pipe); -end: - return ret; -} - -LTTNG_EXPORT int __testpoint_consumerd_thread_data_poll(void); -int __testpoint_consumerd_thread_data_poll(void) -{ - int ret = 0; - uint8_t value; - bool value_read = false; - - if (!pause_pipe) { - ret = -1; - goto end; - } - - /* Purge pipe and only consider the freshest value. 
*/ - do { - ret = lttng_pipe_read(pause_pipe, &value, sizeof(value)); - if (ret == sizeof(value)) { - value_read = true; - } - } while (ret == sizeof(value)); - - ret = (errno == EAGAIN) ? 0 : -errno; - - if (value_read) { - *data_consumption_state = !!value; - DBG("Message received on pause pipe: %s data consumption", - *data_consumption_state ? "paused" : "resumed"); - } -end: - return ret; -} diff --git a/tests/regression/tools/notification/consumer_testpoints.cpp b/tests/regression/tools/notification/consumer_testpoints.cpp new file mode 100644 index 000000000..07c855c9c --- /dev/null +++ b/tests/regression/tools/notification/consumer_testpoints.cpp @@ -0,0 +1,139 @@ +/* + * Copyright (C) 2017 Jérémie Galarneau + * + * SPDX-License-Identifier: GPL-2.0-only + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static char *pause_pipe_path; +static struct lttng_pipe *pause_pipe; +static int *data_consumption_state; +static enum lttng_consumer_type (*lttng_consumer_get_type)(void); + +int lttng_opt_verbose; +int lttng_opt_mi; +int lttng_opt_quiet; + +static +void __attribute__((destructor)) pause_pipe_fini(void) +{ + int ret; + + if (pause_pipe_path) { + ret = unlink(pause_pipe_path); + if (ret) { + PERROR("unlink pause pipe"); + } + } + + free(pause_pipe_path); + lttng_pipe_destroy(pause_pipe); +} + +/* + * We use this testpoint, invoked at the start of the consumerd's data handling + * thread to create a named pipe/FIFO which a test application can use to either + * pause or resume the consumption of data. 
+ */ +extern "C" LTTNG_EXPORT int __testpoint_consumerd_thread_data(void); +int __testpoint_consumerd_thread_data(void) +{ + int ret = 0; + const char *pause_pipe_path_prefix, *domain; + + pause_pipe_path_prefix = lttng_secure_getenv( + "CONSUMER_PAUSE_PIPE_PATH"); + if (!pause_pipe_path_prefix) { + ret = -1; + goto end; + } + + /* + * These symbols are exclusive to the consumerd process, hence we can't + * rely on their presence in the sessiond. Not looking-up these symbols + * dynamically would not allow this shared object to be LD_PRELOAD-ed + * when launching the session daemon. + */ + data_consumption_state = (int *) dlsym(NULL, "data_consumption_paused"); + LTTNG_ASSERT(data_consumption_state); + lttng_consumer_get_type = (lttng_consumer_type (*)()) dlsym(NULL, "lttng_consumer_get_type"); + LTTNG_ASSERT(lttng_consumer_get_type); + + switch (lttng_consumer_get_type()) { + case LTTNG_CONSUMER_KERNEL: + domain = "kernel"; + break; + case LTTNG_CONSUMER32_UST: + domain = "ust32"; + break; + case LTTNG_CONSUMER64_UST: + domain = "ust64"; + break; + default: + abort(); + } + + ret = asprintf(&pause_pipe_path, "%s-%s", pause_pipe_path_prefix, + domain); + if (ret < 1) { + ERR("Failed to allocate pause pipe path"); + goto end; + } + + DBG("Creating pause pipe at %s", pause_pipe_path); + pause_pipe = lttng_pipe_named_open(pause_pipe_path, + S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP, O_NONBLOCK); + if (!pause_pipe) { + ERR("Failed to create pause pipe at %s", pause_pipe_path); + ret = -1; + goto end; + } + + /* Only the read end of the pipe is useful to us. */ + ret = lttng_pipe_write_close(pause_pipe); +end: + return ret; +} + +extern "C" LTTNG_EXPORT int __testpoint_consumerd_thread_data_poll(void); +int __testpoint_consumerd_thread_data_poll(void) +{ + int ret = 0; + uint8_t value; + bool value_read = false; + + if (!pause_pipe) { + ret = -1; + goto end; + } + + /* Purge pipe and only consider the freshest value. 
*/ + do { + ret = lttng_pipe_read(pause_pipe, &value, sizeof(value)); + if (ret == sizeof(value)) { + value_read = true; + } + } while (ret == sizeof(value)); + + ret = (errno == EAGAIN) ? 0 : -errno; + + if (value_read) { + *data_consumption_state = !!value; + DBG("Message received on pause pipe: %s data consumption", + *data_consumption_state ? "paused" : "resumed"); + } +end: + return ret; +} diff --git a/tests/regression/tools/notification/sessiond_testpoints.c b/tests/regression/tools/notification/sessiond_testpoints.c deleted file mode 100644 index 2bfd7cab7..000000000 --- a/tests/regression/tools/notification/sessiond_testpoints.c +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Copyright (C) 2017 Jérémie Galarneau - * Copyright (C) 2020 Francis Deslauriers - * - * SPDX-License-Identifier: GPL-2.0-only - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -static char *pause_pipe_path; -static struct lttng_pipe *pause_pipe; -static int *notifier_notif_consumption_state;; - -int lttng_opt_verbose; -int lttng_opt_mi; -int lttng_opt_quiet; - -static -void __attribute__((destructor)) pause_pipe_fini(void) -{ - int ret; - - if (pause_pipe_path) { - ret = unlink(pause_pipe_path); - if (ret) { - PERROR("Failed to unlink pause pipe: path = %s", - pause_pipe_path); - } - } - - free(pause_pipe_path); - lttng_pipe_destroy(pause_pipe); -} - -LTTNG_EXPORT int __testpoint_sessiond_thread_notification(void); -int __testpoint_sessiond_thread_notification(void) -{ - int ret = 0; - const char *pause_pipe_path_prefix; - - pause_pipe_path_prefix = lttng_secure_getenv( - "NOTIFIER_PAUSE_PIPE_PATH"); - if (!pause_pipe_path_prefix) { - ret = -1; - goto end; - } - - notifier_notif_consumption_state = dlsym(NULL, "notifier_consumption_paused"); - LTTNG_ASSERT(notifier_notif_consumption_state); - - ret = asprintf(&pause_pipe_path, "%s", pause_pipe_path_prefix); - if (ret < 1) { - ERR("Failed to allocate pause pipe path"); - 
goto end; - } - - DBG("Creating pause pipe at %s", pause_pipe_path); - pause_pipe = lttng_pipe_named_open(pause_pipe_path, - S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP, O_NONBLOCK); - if (!pause_pipe) { - ERR("Failed to create pause pipe at %s", pause_pipe_path); - ret = -1; - goto end; - } - - /* Only the read end of the pipe is useful to us. */ - ret = lttng_pipe_write_close(pause_pipe); -end: - return ret; -} - -LTTNG_EXPORT int __testpoint_sessiond_handle_notifier_event_pipe(void); -int __testpoint_sessiond_handle_notifier_event_pipe(void) -{ - int ret = 0; - uint8_t value; - bool value_read = false; - - if (!pause_pipe) { - ret = -1; - goto end; - } - - /* Purge pipe and only consider the freshest value. */ - do { - ret = lttng_pipe_read(pause_pipe, &value, sizeof(value)); - if (ret == sizeof(value)) { - value_read = true; - } - } while (ret == sizeof(value)); - - ret = (errno == EAGAIN) ? 0 : -errno; - - if (value_read) { - *notifier_notif_consumption_state = !!value; - DBG("Message received on pause pipe: %s data consumption", - *notifier_notif_consumption_state ? 
"paused" : "resumed"); - } -end: - return ret; -} diff --git a/tests/regression/tools/notification/sessiond_testpoints.cpp b/tests/regression/tools/notification/sessiond_testpoints.cpp new file mode 100644 index 000000000..fa3fe7f41 --- /dev/null +++ b/tests/regression/tools/notification/sessiond_testpoints.cpp @@ -0,0 +1,112 @@ +/* + * Copyright (C) 2017 Jérémie Galarneau + * Copyright (C) 2020 Francis Deslauriers + * + * SPDX-License-Identifier: GPL-2.0-only + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static char *pause_pipe_path; +static struct lttng_pipe *pause_pipe; +static int *notifier_notif_consumption_state;; + +int lttng_opt_verbose; +int lttng_opt_mi; +int lttng_opt_quiet; + +static +void __attribute__((destructor)) pause_pipe_fini(void) +{ + int ret; + + if (pause_pipe_path) { + ret = unlink(pause_pipe_path); + if (ret) { + PERROR("Failed to unlink pause pipe: path = %s", + pause_pipe_path); + } + } + + free(pause_pipe_path); + lttng_pipe_destroy(pause_pipe); +} + +extern "C" LTTNG_EXPORT int __testpoint_sessiond_thread_notification(void); +int __testpoint_sessiond_thread_notification(void) +{ + int ret = 0; + const char *pause_pipe_path_prefix; + + pause_pipe_path_prefix = lttng_secure_getenv( + "NOTIFIER_PAUSE_PIPE_PATH"); + if (!pause_pipe_path_prefix) { + ret = -1; + goto end; + } + + notifier_notif_consumption_state = (int *) dlsym(NULL, "notifier_consumption_paused"); + LTTNG_ASSERT(notifier_notif_consumption_state); + + ret = asprintf(&pause_pipe_path, "%s", pause_pipe_path_prefix); + if (ret < 1) { + ERR("Failed to allocate pause pipe path"); + goto end; + } + + DBG("Creating pause pipe at %s", pause_pipe_path); + pause_pipe = lttng_pipe_named_open(pause_pipe_path, + S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP, O_NONBLOCK); + if (!pause_pipe) { + ERR("Failed to create pause pipe at %s", pause_pipe_path); + ret = -1; + goto end; + } + + /* Only the read end of the pipe is 
useful to us. */ + ret = lttng_pipe_write_close(pause_pipe); +end: + return ret; +} + +extern "C" LTTNG_EXPORT int __testpoint_sessiond_handle_notifier_event_pipe(void); +int __testpoint_sessiond_handle_notifier_event_pipe(void) +{ + int ret = 0; + uint8_t value; + bool value_read = false; + + if (!pause_pipe) { + ret = -1; + goto end; + } + + /* Purge pipe and only consider the freshest value. */ + do { + ret = lttng_pipe_read(pause_pipe, &value, sizeof(value)); + if (ret == sizeof(value)) { + value_read = true; + } + } while (ret == sizeof(value)); + + ret = (errno == EAGAIN) ? 0 : -errno; + + if (value_read) { + *notifier_notif_consumption_state = !!value; + DBG("Message received on pause pipe: %s data consumption", + *notifier_notif_consumption_state ? "paused" : "resumed"); + } +end: + return ret; +}