X-Git-Url: https://git.lttng.org/?p=lttng-tools.git;a=blobdiff_plain;f=src%2Fcommon%2Fconsumer.c;h=70cde9c1028d978d7deb1865ff84fe9455988a56;hp=53c618067f8d6546b91ccc8f77854d7d2b36ed72;hb=228b5bf7fa0bd5cf9e709717e578e28bccd57a73;hpb=ab1027f48fa7e2dd29b3c85548ec42f26d06be25 diff --git a/src/common/consumer.c b/src/common/consumer.c index 53c618067..70cde9c10 100644 --- a/src/common/consumer.c +++ b/src/common/consumer.c @@ -47,9 +47,6 @@ struct lttng_consumer_global_data consumer_data = { .type = LTTNG_CONSUMER_UNKNOWN, }; -/* timeout parameter, to control the polling thread grace period. */ -int consumer_poll_timeout = -1; - /* * Flag to inform the polling thread to quit when all fd hung up. Updated by * the consumer_thread_receive_fds when it notices that all fds has hung up. @@ -59,15 +56,27 @@ int consumer_poll_timeout = -1; volatile int consumer_quit; /* - * The following two hash tables are visible by all threads which are separated - * in different source files. - * * Global hash table containing respectively metadata and data streams. The * stream element in this ht should only be updated by the metadata poll thread * for the metadata and the data poll thread for the data. */ -struct lttng_ht *metadata_ht; -struct lttng_ht *data_ht; +static struct lttng_ht *metadata_ht; +static struct lttng_ht *data_ht; + +/* + * Notify a thread pipe to poll back again. This usually means that some global + * state has changed so we just send back the thread in a poll wait call. + */ +static void notify_thread_pipe(int wpipe) +{ + int ret; + + do { + struct lttng_consumer_stream *null_stream = NULL; + + ret = write(wpipe, &null_stream, sizeof(null_stream)); + } while (ret < 0 && errno == EINTR); +} /* * Find a stream. The consumer_data.lock must be locked during this @@ -118,6 +127,12 @@ void consumer_steal_stream_key(int key, struct lttng_ht *ht) rcu_read_unlock(); } +/* + * Return a channel object for the given key. + * + * RCU read side lock MUST be acquired before calling this function and + * protects the channel ptr. + */ static struct lttng_consumer_channel *consumer_find_channel(int key) { struct lttng_ht_iter iter; @@ -129,8 +144,6 @@ static struct lttng_consumer_channel *consumer_find_channel(int key) return NULL; } - rcu_read_lock(); - lttng_ht_lookup(consumer_data.channel_ht, (void *)((unsigned long) key), &iter); node = lttng_ht_iter_get_node_ulong(&iter); @@ -138,8 +151,6 @@ static struct lttng_consumer_channel *consumer_find_channel(int key) channel = caa_container_of(node, struct lttng_consumer_channel, node); } - rcu_read_unlock(); - return channel; } @@ -182,6 +193,17 @@ static void consumer_rcu_free_relayd(struct rcu_head *head) struct consumer_relayd_sock_pair *relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node); + /* + * Close all sockets. This is done in the call RCU since we don't want the + * socket fds to be reassigned thus potentially creating bad state of the + * relayd object. + * + * We do not have to lock the control socket mutex here since at this stage + * there is no one referencing to this relayd object. 
+ */ + (void) relayd_close(&relayd->control_sock); + (void) relayd_close(&relayd->data_sock); + free(relayd); } @@ -204,20 +226,111 @@ static void destroy_relayd(struct consumer_relayd_sock_pair *relayd) iter.iter.node = &relayd->node.node; ret = lttng_ht_del(consumer_data.relayd_ht, &iter); if (ret != 0) { - /* We assume the relayd was already destroyed */ + /* We assume the relayd is being or is destroyed */ return; } - /* Close all sockets */ - pthread_mutex_lock(&relayd->ctrl_sock_mutex); - (void) relayd_close(&relayd->control_sock); - pthread_mutex_unlock(&relayd->ctrl_sock_mutex); - (void) relayd_close(&relayd->data_sock); - /* RCU free() call */ call_rcu(&relayd->node.head, consumer_rcu_free_relayd); } +/* + * Iterate over the relayd hash table and destroy each element. Finally, + * destroy the whole hash table. + */ +static void cleanup_relayd_ht(void) +{ + struct lttng_ht_iter iter; + struct consumer_relayd_sock_pair *relayd; + + rcu_read_lock(); + + cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd, + node.node) { + destroy_relayd(relayd); + } + + lttng_ht_destroy(consumer_data.relayd_ht); + + rcu_read_unlock(); +} + +/* + * Update the end point status of all streams having the given network sequence + * index (relayd index). + * + * It's atomically set without having the stream mutex locked which is fine + * because we handle the write/read race with a pipe wakeup for each thread. + */ +static void update_endpoint_status_by_netidx(int net_seq_idx, + enum consumer_endpoint_status status) +{ + struct lttng_ht_iter iter; + struct lttng_consumer_stream *stream; + + DBG("Consumer set delete flag on stream by idx %d", net_seq_idx); + + rcu_read_lock(); + + /* Let's begin with metadata */ + cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) { + if (stream->net_seq_idx == net_seq_idx) { + uatomic_set(&stream->endpoint_status, status); + DBG("Delete flag set to metadata stream %d", stream->wait_fd); + } + } + + /* Follow up by the data streams */ + cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) { + if (stream->net_seq_idx == net_seq_idx) { + uatomic_set(&stream->endpoint_status, status); + DBG("Delete flag set to data stream %d", stream->wait_fd); + } + } + rcu_read_unlock(); +} + +/* + * Cleanup a relayd object by flagging every associated streams for deletion, + * destroying the object meaning removing it from the relayd hash table, + * closing the sockets and freeing the memory in a RCU call. + * + * If a local data context is available, notify the threads that the streams' + * state have changed. + */ +static void cleanup_relayd(struct consumer_relayd_sock_pair *relayd, + struct lttng_consumer_local_data *ctx) +{ + int netidx; + + assert(relayd); + + DBG("Cleaning up relayd sockets"); + + /* Save the net sequence index before destroying the object */ + netidx = relayd->net_seq_idx; + + /* + * Delete the relayd from the relayd hash table, close the sockets and free + * the object in a RCU call. + */ + destroy_relayd(relayd); + + /* Set inactive endpoint to all streams */ + update_endpoint_status_by_netidx(netidx, CONSUMER_ENDPOINT_INACTIVE); + + /* + * With a local data context, notify the threads that the streams' state + * have changed. The write() action on the pipe acts as an "implicit" + * memory barrier ordering the updates of the end point status from the + * read of this status which happens AFTER receiving this notify. 
+ */ + if (ctx) { + notify_thread_pipe(ctx->consumer_data_pipe[1]); + notify_thread_pipe(ctx->consumer_metadata_pipe[1]); + } +} + /* * Flag a relayd socket pair for destruction. Destroy it if the refcount * reaches zero. @@ -251,12 +364,15 @@ void consumer_del_stream(struct lttng_consumer_stream *stream, assert(stream); + DBG("Consumer del stream %d", stream->wait_fd); + if (ht == NULL) { /* Means the stream was allocated but not successfully added */ goto free_stream; } pthread_mutex_lock(&consumer_data.lock); + pthread_mutex_lock(&stream->lock); switch (consumer_data.type) { case LTTNG_CONSUMER_KERNEL: @@ -348,6 +464,7 @@ void consumer_del_stream(struct lttng_consumer_stream *stream, end: consumer_data.need_update = 1; + pthread_mutex_unlock(&stream->lock); pthread_mutex_unlock(&consumer_data.lock); if (free_chan) { @@ -381,6 +498,8 @@ struct lttng_consumer_stream *consumer_allocate_stream( goto end; } + rcu_read_lock(); + /* * Get stream's channel reference. Needed when adding the stream to the * global hash table. @@ -437,9 +556,12 @@ struct lttng_consumer_stream *consumer_allocate_stream( stream->path_name, stream->key, stream->shm_fd, stream->wait_fd, (unsigned long long) stream->mmap_len, stream->out_fd, stream->net_seq_idx, stream->session_id); + + rcu_read_unlock(); return stream; error: + rcu_read_unlock(); free(stream); end: return NULL; @@ -460,6 +582,7 @@ static int consumer_add_stream(struct lttng_consumer_stream *stream, DBG3("Adding consumer stream %d", stream->key); pthread_mutex_lock(&consumer_data.lock); + pthread_mutex_lock(&stream->lock); rcu_read_lock(); /* Steal stream identifier to avoid having streams with the same key */ @@ -499,6 +622,7 @@ static int consumer_add_stream(struct lttng_consumer_stream *stream, consumer_data.need_update = 1; rcu_read_unlock(); + pthread_mutex_unlock(&stream->lock); pthread_mutex_unlock(&consumer_data.lock); return ret; @@ -661,6 +785,8 @@ void consumer_del_channel(struct lttng_consumer_channel *channel) int ret; struct lttng_ht_iter iter; + DBG("Consumer delete channel key %d", channel->key); + pthread_mutex_lock(&consumer_data.lock); switch (consumer_data.type) { @@ -804,7 +930,17 @@ static int consumer_update_poll_array( DBG("Updating poll fd array"); rcu_read_lock(); cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) { - if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM) { + /* + * Only active streams with an active end point can be added to the + * poll set and local stream storage of the thread. + * + * There is a potential race here for endpoint_status to be updated + * just after the check. However, this is OK since the stream(s) will + * be deleted once the thread is notified that the end point state has + * changed where this function will be called back again. + */ + if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM || + stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) { continue; } DBG("Active FD %d", stream->wait_fd); @@ -888,8 +1024,8 @@ int lttng_consumer_send_error( } /* - * Close all the tracefiles and stream fds, should be called when all instances - * are destroyed. + * Close all the tracefiles and stream fds and MUST be called when all + * instances are destroyed i.e. when all threads were joined and are ended. 
*/ void lttng_consumer_cleanup(void) { @@ -908,6 +1044,15 @@ void lttng_consumer_cleanup(void) rcu_read_unlock(); lttng_ht_destroy(consumer_data.channel_ht); + + cleanup_relayd_ht(); + + /* + * This HT contains streams that are freed by either the metadata thread or + * the data thread so we do *nothing* on the hash table and simply destroy + * it. + */ + lttng_ht_destroy(consumer_data.stream_list_ht); } /* @@ -1154,6 +1299,8 @@ end: * core function for writing trace buffers to either the local filesystem or * the network. * + * It must be called with the stream lock held. + * * Careful review MUST be put if any changes occur! * * Returns the number of bytes written @@ -1169,12 +1316,11 @@ ssize_t lttng_consumer_on_read_subbuffer_mmap( /* Default is on the disk */ int outfd = stream->out_fd; struct consumer_relayd_sock_pair *relayd = NULL; + unsigned int relayd_hang_up = 0; /* RCU lock for the relayd pointer */ rcu_read_lock(); - pthread_mutex_lock(&stream->lock); - /* Flag that the current stream if set for network streaming. */ if (stream->net_seq_idx != -1) { relayd = consumer_find_relayd(stream->net_seq_idx); @@ -1228,11 +1374,22 @@ ssize_t lttng_consumer_on_read_subbuffer_mmap( ret = write_relayd_metadata_id(outfd, stream, relayd, padding); if (ret < 0) { written = ret; + /* Socket operation failed. We consider the relayd dead */ + if (ret == -EPIPE || ret == -EINVAL) { + relayd_hang_up = 1; + goto write_error; + } goto end; } } + } else { + /* Socket operation failed. We consider the relayd dead */ + if (ret == -EPIPE || ret == -EINVAL) { + relayd_hang_up = 1; + goto write_error; + } + /* Else, use the default set before which is the filesystem. */ } - /* Else, use the default set before which is the filesystem. */ } else { /* No streaming, we have to set the len with the full padding */ len += padding; @@ -1248,6 +1405,11 @@ ssize_t lttng_consumer_on_read_subbuffer_mmap( if (written == 0) { written = ret; } + /* Socket operation failed. We consider the relayd dead */ + if (errno == EPIPE || errno == EINVAL) { + relayd_hang_up = 1; + goto write_error; + } goto end; } else if (ret > len) { PERROR("Error in file write (ret %zd > len %lu)", ret, len); @@ -1269,12 +1431,20 @@ ssize_t lttng_consumer_on_read_subbuffer_mmap( } lttng_consumer_sync_trace_file(stream, orig_offset); +write_error: + /* + * This is a special case that the relayd has closed its socket. Let's + * cleanup the relayd object and all associated streams. + */ + if (relayd && relayd_hang_up) { + cleanup_relayd(relayd, ctx); + } + end: /* Unlock only if ctrl socket used */ if (relayd && stream->metadata_flag) { pthread_mutex_unlock(&relayd->ctrl_sock_mutex); } - pthread_mutex_unlock(&stream->lock); rcu_read_unlock(); return written; @@ -1283,6 +1453,8 @@ end: /* * Splice the data from the ring buffer to the tracefile. * + * It must be called with the stream lock held. + * * Returns the number of bytes spliced. */ ssize_t lttng_consumer_on_read_subbuffer_splice( @@ -1298,6 +1470,7 @@ ssize_t lttng_consumer_on_read_subbuffer_splice( int outfd = stream->out_fd; struct consumer_relayd_sock_pair *relayd = NULL; int *splice_pipe; + unsigned int relayd_hang_up = 0; switch (consumer_data.type) { case LTTNG_CONSUMER_KERNEL: @@ -1314,8 +1487,6 @@ ssize_t lttng_consumer_on_read_subbuffer_splice( /* RCU lock for the relayd pointer */ rcu_read_lock(); - pthread_mutex_lock(&stream->lock); - /* Flag that the current stream if set for network streaming. 
*/ if (stream->net_seq_idx != -1) { relayd = consumer_find_relayd(stream->net_seq_idx); @@ -1350,6 +1521,12 @@ ssize_t lttng_consumer_on_read_subbuffer_splice( padding); if (ret < 0) { written = ret; + /* Socket operation failed. We consider the relayd dead */ + if (ret == -EBADF) { + WARN("Remote relayd disconnected. Stopping"); + relayd_hang_up = 1; + goto write_error; + } goto end; } @@ -1361,7 +1538,12 @@ ssize_t lttng_consumer_on_read_subbuffer_splice( /* Use the returned socket. */ outfd = ret; } else { - ERR("Remote relayd disconnected. Stopping"); + /* Socket operation failed. We consider the relayd dead */ + if (ret == -EBADF) { + WARN("Remote relayd disconnected. Stopping"); + relayd_hang_up = 1; + goto write_error; + } goto end; } } else { @@ -1410,6 +1592,12 @@ ssize_t lttng_consumer_on_read_subbuffer_splice( if (written == 0) { written = ret_splice; } + /* Socket operation failed. We consider the relayd dead */ + if (errno == EBADF || errno == EPIPE) { + WARN("Remote relayd disconnected. Stopping"); + relayd_hang_up = 1; + goto write_error; + } ret = errno; goto splice_error; } else if (ret_splice > len) { @@ -1437,12 +1625,20 @@ ssize_t lttng_consumer_on_read_subbuffer_splice( goto end; +write_error: + /* + * This is a special case that the relayd has closed its socket. Let's + * cleanup the relayd object and all associated streams. + */ + if (relayd && relayd_hang_up) { + cleanup_relayd(relayd, ctx); + /* Skip splice error so the consumer does not fail */ + goto end; + } + splice_error: /* send the appropriate error description to sessiond */ switch (ret) { - case EBADF: - lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EBADF); - break; case EINVAL: lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL); break; @@ -1458,7 +1654,6 @@ end: if (relayd && stream->metadata_flag) { pthread_mutex_unlock(&relayd->ctrl_sock_mutex); } - pthread_mutex_unlock(&stream->lock); rcu_read_unlock(); return written; @@ -1532,7 +1727,6 @@ int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx, */ static void destroy_data_stream_ht(struct lttng_ht *ht) { - int ret; struct lttng_ht_iter iter; struct lttng_consumer_stream *stream; @@ -1542,10 +1736,11 @@ static void destroy_data_stream_ht(struct lttng_ht *ht) rcu_read_lock(); cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) { - ret = lttng_ht_del(ht, &iter); - assert(!ret); - - call_rcu(&stream->node.head, consumer_free_stream); + /* + * Ignore return value since we are currently cleaning up so any error + * can't be handled. + */ + (void) consumer_del_stream(stream, ht); } rcu_read_unlock(); @@ -1559,7 +1754,6 @@ static void destroy_data_stream_ht(struct lttng_ht *ht) */ static void destroy_stream_ht(struct lttng_ht *ht) { - int ret; struct lttng_ht_iter iter; struct lttng_consumer_stream *stream; @@ -1569,10 +1763,11 @@ static void destroy_stream_ht(struct lttng_ht *ht) rcu_read_lock(); cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) { - ret = lttng_ht_del(ht, &iter); - assert(!ret); - - call_rcu(&stream->node.head, consumer_free_stream); + /* + * Ignore return value since we are currently cleaning up so any error + * can't be handled. 
+ */ + (void) consumer_del_metadata_stream(stream, ht); } rcu_read_unlock(); @@ -1605,6 +1800,8 @@ void consumer_del_metadata_stream(struct lttng_consumer_stream *stream, } pthread_mutex_lock(&consumer_data.lock); + pthread_mutex_lock(&stream->lock); + switch (consumer_data.type) { case LTTNG_CONSUMER_KERNEL: if (stream->mmap_base != NULL) { @@ -1694,6 +1891,7 @@ void consumer_del_metadata_stream(struct lttng_consumer_stream *stream, } end: + pthread_mutex_unlock(&stream->lock); pthread_mutex_unlock(&consumer_data.lock); if (free_chan) { @@ -1713,6 +1911,8 @@ static int consumer_add_metadata_stream(struct lttng_consumer_stream *stream, { int ret = 0; struct consumer_relayd_sock_pair *relayd; + struct lttng_ht_iter iter; + struct lttng_ht_node_ulong *node; assert(stream); assert(ht); @@ -1720,6 +1920,7 @@ static int consumer_add_metadata_stream(struct lttng_consumer_stream *stream, DBG3("Adding metadata stream %d to hash table", stream->wait_fd); pthread_mutex_lock(&consumer_data.lock); + pthread_mutex_lock(&stream->lock); /* * From here, refcounts are updated so be _careful_ when returning an error @@ -1727,6 +1928,15 @@ static int consumer_add_metadata_stream(struct lttng_consumer_stream *stream, */ rcu_read_lock(); + + /* + * Lookup the stream just to make sure it does not exist in our internal + * state. This should NEVER happen. + */ + lttng_ht_lookup(ht, (void *)((unsigned long) stream->wait_fd), &iter); + node = lttng_ht_iter_get_node_ulong(&iter); + assert(!node); + /* Find relayd and, if one is found, increment refcount. */ relayd = consumer_find_relayd(stream->net_seq_idx); if (relayd != NULL) { @@ -1747,9 +1957,6 @@ static int consumer_add_metadata_stream(struct lttng_consumer_stream *stream, uatomic_dec(&stream->chan->nb_init_streams); } - /* Steal stream identifier to avoid having streams with the same key */ - consumer_steal_stream_key(stream->key, ht); - lttng_ht_add_unique_ulong(ht, &stream->node); /* @@ -1761,10 +1968,64 @@ static int consumer_add_metadata_stream(struct lttng_consumer_stream *stream, rcu_read_unlock(); + pthread_mutex_unlock(&stream->lock); pthread_mutex_unlock(&consumer_data.lock); return ret; } +/* + * Delete data stream that are flagged for deletion (endpoint_status). + */ +static void validate_endpoint_status_data_stream(void) +{ + struct lttng_ht_iter iter; + struct lttng_consumer_stream *stream; + + DBG("Consumer delete flagged data stream"); + + rcu_read_lock(); + cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) { + /* Validate delete flag of the stream */ + if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) { + continue; + } + /* Delete it right now */ + consumer_del_stream(stream, data_ht); + } + rcu_read_unlock(); +} + +/* + * Delete metadata stream that are flagged for deletion (endpoint_status). + */ +static void validate_endpoint_status_metadata_stream( + struct lttng_poll_event *pollset) +{ + struct lttng_ht_iter iter; + struct lttng_consumer_stream *stream; + + DBG("Consumer delete flagged metadata stream"); + + assert(pollset); + + rcu_read_lock(); + cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) { + /* Validate delete flag of the stream */ + if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) { + continue; + } + /* + * Remove from pollset so the metadata thread can continue without + * blocking on a deleted stream. 
+ */ + lttng_poll_del(pollset, stream->wait_fd); + + /* Delete it right now */ + consumer_del_metadata_stream(stream, metadata_ht); + } + rcu_read_unlock(); +} + /* * Thread polls on metadata file descriptor and write them on disk or on the * network. @@ -1782,6 +2043,12 @@ void *consumer_thread_metadata_poll(void *data) rcu_register_thread(); + metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG); + if (!metadata_ht) { + /* ENOMEM at this point. Better to bail out. */ + goto error; + } + DBG("Thread metadata poll started"); /* Size is set to 1 for the consumer_metadata pipe */ @@ -1839,7 +2106,10 @@ restart: * since their might be data to consume. */ lttng_poll_del(&events, ctx->consumer_metadata_pipe[0]); - close(ctx->consumer_metadata_pipe[0]); + ret = close(ctx->consumer_metadata_pipe[0]); + if (ret < 0) { + PERROR("close metadata pipe"); + } continue; } else if (revents & LPOLLIN) { do { @@ -1856,6 +2126,13 @@ restart: continue; } + /* A NULL stream means that the state has changed. */ + if (stream == NULL) { + /* Check for deleted streams. */ + validate_endpoint_status_metadata_stream(&events); + continue; + } + DBG("Adding metadata stream %d to poll set", stream->wait_fd); @@ -1938,9 +2215,7 @@ end: DBG("Metadata poll thread exiting"); lttng_poll_clean(&events); - if (metadata_ht) { - destroy_stream_ht(metadata_ht); - } + destroy_stream_ht(metadata_ht); rcu_unregister_thread(); return NULL; @@ -1965,6 +2240,7 @@ void *consumer_thread_data_poll(void *data) data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG); if (data_ht == NULL) { + /* ENOMEM at this point. Better to bail out. */ goto end; } @@ -2025,7 +2301,7 @@ void *consumer_thread_data_poll(void *data) /* poll on the array of fds */ restart: DBG("polling on %d fd", nb_fd + 1); - num_rdy = poll(pollfd, nb_fd + 1, consumer_poll_timeout); + num_rdy = poll(pollfd, nb_fd + 1, -1); DBG("poll num_rdy : %d", num_rdy); if (num_rdy == -1) { /* @@ -2063,6 +2339,7 @@ void *consumer_thread_data_poll(void *data) * waking us up to test it. */ if (new_stream == NULL) { + validate_endpoint_status_data_stream(); continue; } @@ -2083,6 +2360,9 @@ void *consumer_thread_data_poll(void *data) /* Take care of high priority channels first. */ for (i = 0; i < nb_fd; i++) { + if (local_stream[i] == NULL) { + continue; + } if (pollfd[i].revents & POLLPRI) { DBG("Urgent read on fd %d", pollfd[i].fd); high_prio = 1; @@ -2091,6 +2371,7 @@ void *consumer_thread_data_poll(void *data) if (len < 0 && len != -EAGAIN && len != -ENODATA) { /* Clean the stream and free it. */ consumer_del_stream(local_stream[i], data_ht); + local_stream[i] = NULL; } else if (len > 0) { local_stream[i]->data_read = 1; } @@ -2107,6 +2388,9 @@ void *consumer_thread_data_poll(void *data) /* Take care of low priority channels. */ for (i = 0; i < nb_fd; i++) { + if (local_stream[i] == NULL) { + continue; + } if ((pollfd[i].revents & POLLIN) || local_stream[i]->hangup_flush_done) { DBG("Normal read on fd %d", pollfd[i].fd); @@ -2115,6 +2399,7 @@ void *consumer_thread_data_poll(void *data) if (len < 0 && len != -EAGAIN && len != -ENODATA) { /* Clean the stream and free it. 
*/ consumer_del_stream(local_stream[i], data_ht); + local_stream[i] = NULL; } else if (len > 0) { local_stream[i]->data_read = 1; } @@ -2123,12 +2408,15 @@ void *consumer_thread_data_poll(void *data) /* Handle hangup and errors */ for (i = 0; i < nb_fd; i++) { + if (local_stream[i] == NULL) { + continue; + } if (!local_stream[i]->hangup_flush_done && (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL)) && (consumer_data.type == LTTNG_CONSUMER32_UST || consumer_data.type == LTTNG_CONSUMER64_UST)) { DBG("fd %d is hup|err|nval. Attempting flush and read.", - pollfd[i].fd); + pollfd[i].fd); lttng_ustconsumer_on_stream_hangup(local_stream[i]); /* Attempt read again, for the data we just flushed. */ local_stream[i]->data_read = 1; @@ -2142,22 +2430,27 @@ void *consumer_thread_data_poll(void *data) DBG("Polling fd %d tells it has hung up.", pollfd[i].fd); if (!local_stream[i]->data_read) { consumer_del_stream(local_stream[i], data_ht); + local_stream[i] = NULL; num_hup++; } } else if (pollfd[i].revents & POLLERR) { ERR("Error returned in polling fd %d.", pollfd[i].fd); if (!local_stream[i]->data_read) { consumer_del_stream(local_stream[i], data_ht); + local_stream[i] = NULL; num_hup++; } } else if (pollfd[i].revents & POLLNVAL) { ERR("Polling fd %d tells fd is not open.", pollfd[i].fd); if (!local_stream[i]->data_read) { consumer_del_stream(local_stream[i], data_ht); + local_stream[i] = NULL; num_hup++; } } - local_stream[i]->data_read = 0; + if (local_stream[i] != NULL) { + local_stream[i]->data_read = 0; + } } } end: @@ -2179,12 +2472,13 @@ end: * only tracked fd in the poll set. The thread will take care of closing * the read side. */ - close(ctx->consumer_metadata_pipe[1]); - - if (data_ht) { - destroy_data_stream_ht(data_ht); + ret = close(ctx->consumer_metadata_pipe[1]); + if (ret < 0) { + PERROR("close data pipe"); } + destroy_data_stream_ht(data_ht); + rcu_unregister_thread(); return NULL; } @@ -2195,7 +2489,7 @@ end: */ void *consumer_thread_sessiond_poll(void *data) { - int sock, client_socket, ret; + int sock = -1, client_socket, ret; /* * structure to poll for incoming data on communication socket avoids * making blocking sockets. @@ -2255,6 +2549,13 @@ void *consumer_thread_sessiond_poll(void *data) goto end; } + /* This socket is not useful anymore. */ + ret = close(client_socket); + if (ret < 0) { + PERROR("close client_socket"); + } + client_socket = -1; + /* update the polling structure to poll on the established socket */ consumer_sockpoll[1].fd = sock; consumer_sockpoll[1].events = POLLIN | POLLPRI; @@ -2292,23 +2593,25 @@ end: */ consumer_quit = 1; - /* - * 2s of grace period, if no polling events occur during - * this period, the polling thread will exit even if there - * are still open FDs (should not happen, but safety mechanism). - */ - consumer_poll_timeout = LTTNG_CONSUMER_POLL_TIMEOUT; - /* * Notify the data poll thread to poll back again and test the - * consumer_quit state to quit gracefully. + * consumer_quit state that we just set so to quit gracefully. */ - do { - struct lttng_consumer_stream *null_stream = NULL; + notify_thread_pipe(ctx->consumer_data_pipe[1]); - ret = write(ctx->consumer_data_pipe[1], &null_stream, - sizeof(null_stream)); - } while (ret < 0 && errno == EINTR); + /* Cleaning up possibly open sockets. 
*/ + if (sock >= 0) { + ret = close(sock); + if (ret < 0) { + PERROR("close sock sessiond poll"); + } + } + if (client_socket >= 0) { + ret = close(sock); + if (ret < 0) { + PERROR("close client_socket sessiond poll"); + } + } rcu_unregister_thread(); return NULL; @@ -2317,17 +2620,27 @@ end: ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream, struct lttng_consumer_local_data *ctx) { + ssize_t ret; + + pthread_mutex_lock(&stream->lock); + switch (consumer_data.type) { case LTTNG_CONSUMER_KERNEL: - return lttng_kconsumer_read_subbuffer(stream, ctx); + ret = lttng_kconsumer_read_subbuffer(stream, ctx); + break; case LTTNG_CONSUMER32_UST: case LTTNG_CONSUMER64_UST: - return lttng_ustconsumer_read_subbuffer(stream, ctx); + ret = lttng_ustconsumer_read_subbuffer(stream, ctx); + break; default: ERR("Unknown consumer_data type"); assert(0); - return -ENOSYS; + ret = -ENOSYS; + break; } + + pthread_mutex_unlock(&stream->lock); + return ret; } int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream) @@ -2353,11 +2666,6 @@ void lttng_consumer_init(void) consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG); consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG); consumer_data.stream_list_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG); - - metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG); - assert(metadata_ht); - data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_ULONG); - assert(data_ht); } /* @@ -2370,7 +2678,7 @@ int consumer_add_relayd_socket(int net_seq_idx, int sock_type, struct lttng_consumer_local_data *ctx, int sock, struct pollfd *consumer_sockpoll, struct lttcomm_sock *relayd_sock) { - int fd, ret = -1; + int fd = -1, ret = -1; struct consumer_relayd_sock_pair *relayd; DBG("Consumer adding relayd socket (idx: %d)", net_seq_idx); @@ -2397,6 +2705,7 @@ int consumer_add_relayd_socket(int net_seq_idx, int sock_type, if (ret != sizeof(fd)) { lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD); ret = -1; + fd = -1; /* Just in case it gets set with an invalid value. */ goto error; } @@ -2406,13 +2715,17 @@ int consumer_add_relayd_socket(int net_seq_idx, int sock_type, /* Copy received lttcomm socket */ lttcomm_copy_sock(&relayd->control_sock, relayd_sock); ret = lttcomm_create_sock(&relayd->control_sock); + /* Immediately try to close the created socket if valid. */ + if (relayd->control_sock.fd >= 0) { + if (close(relayd->control_sock.fd)) { + PERROR("close relayd control socket"); + } + } + /* Handle create_sock error. */ if (ret < 0) { goto error; } - /* Close the created socket fd which is useless */ - close(relayd->control_sock.fd); - /* Assign new file descriptor */ relayd->control_sock.fd = fd; break; @@ -2420,13 +2733,17 @@ int consumer_add_relayd_socket(int net_seq_idx, int sock_type, /* Copy received lttcomm socket */ lttcomm_copy_sock(&relayd->data_sock, relayd_sock); ret = lttcomm_create_sock(&relayd->data_sock); + /* Immediately try to close the created socket if valid. */ + if (relayd->data_sock.fd >= 0) { + if (close(relayd->data_sock.fd)) { + PERROR("close relayd data socket"); + } + } + /* Handle create_sock error. */ if (ret < 0) { goto error; } - /* Close the created socket fd which is useless */ - close(relayd->data_sock.fd); - /* Assign new file descriptor */ relayd->data_sock.fd = fd; break; @@ -2446,9 +2763,43 @@ int consumer_add_relayd_socket(int net_seq_idx, int sock_type, add_relayd(relayd); /* All good! */ - ret = 0; + return 0; error: + /* Close received socket if valid. 
*/ + if (fd >= 0) { + if (close(fd)) { + PERROR("close received socket"); + } + } + return ret; +} + +/* + * Try to lock the stream mutex. + * + * On success, 1 is returned else 0 indicating that the mutex is NOT lock. + */ +static int stream_try_lock(struct lttng_consumer_stream *stream) +{ + int ret; + + assert(stream); + + /* + * Try to lock the stream mutex. On failure, we know that the stream is + * being used else where hence there is data still being extracted. + */ + ret = pthread_mutex_trylock(&stream->lock); + if (ret) { + /* For both EBUSY and EINVAL error, the mutex is NOT locked. */ + ret = 0; + goto end; + } + + ret = 1; + +end: return ret; } @@ -2456,29 +2807,29 @@ error: * Check if for a given session id there is still data needed to be extract * from the buffers. * - * Return 1 if data is in fact available to be read or else 0. + * Return 1 if data is pending or else 0 meaning ready to be read. */ -int consumer_data_available(uint64_t id) +int consumer_data_pending(uint64_t id) { int ret; struct lttng_ht_iter iter; struct lttng_ht *ht; struct lttng_consumer_stream *stream; struct consumer_relayd_sock_pair *relayd; - int (*data_available)(struct lttng_consumer_stream *); + int (*data_pending)(struct lttng_consumer_stream *); - DBG("Consumer data available command on session id %" PRIu64, id); + DBG("Consumer data pending command on session id %" PRIu64, id); rcu_read_lock(); pthread_mutex_lock(&consumer_data.lock); switch (consumer_data.type) { case LTTNG_CONSUMER_KERNEL: - data_available = lttng_kconsumer_data_available; + data_pending = lttng_kconsumer_data_pending; break; case LTTNG_CONSUMER32_UST: case LTTNG_CONSUMER64_UST: - data_available = lttng_ustconsumer_data_available; + data_pending = lttng_ustconsumer_data_pending; break; default: ERR("Unknown consumer data type"); @@ -2489,33 +2840,59 @@ int consumer_data_available(uint64_t id) ht = consumer_data.stream_list_ht; cds_lfht_for_each_entry_duplicate(ht->ht, - ht->hash_fct((void *)((unsigned long) id), 0x42UL), + ht->hash_fct((void *)((unsigned long) id), lttng_ht_seed), ht->match_fct, (void *)((unsigned long) id), &iter.iter, stream, node_session_id.node) { - /* Check the stream for data. */ - ret = data_available(stream); - if (ret == 0) { - goto data_not_available; + /* If this call fails, the stream is being used hence data pending. */ + ret = stream_try_lock(stream); + if (!ret) { + goto data_not_pending; + } + + /* + * A removed node from the hash table indicates that the stream has + * been deleted thus having a guarantee that the buffers are closed + * on the consumer side. However, data can still be transmitted + * over the network so don't skip the relayd check. + */ + ret = cds_lfht_is_node_deleted(&stream->node.node); + if (!ret) { + /* Check the stream if there is data in the buffers. */ + ret = data_pending(stream); + if (ret == 1) { + pthread_mutex_unlock(&stream->lock); + goto data_not_pending; + } } + /* Relayd check */ if (stream->net_seq_idx != -1) { relayd = consumer_find_relayd(stream->net_seq_idx); - assert(relayd); + if (!relayd) { + /* + * At this point, if the relayd object is not available for the + * given stream, it is because the relayd is being cleaned up + * so every stream associated with it (for a session id value) + * are or will be marked for deletion hence no data pending. 
+ */ + pthread_mutex_unlock(&stream->lock); + goto data_not_pending; + } - pthread_mutex_lock(&stream->lock); pthread_mutex_lock(&relayd->ctrl_sock_mutex); if (stream->metadata_flag) { ret = relayd_quiescent_control(&relayd->control_sock); } else { - ret = relayd_data_available(&relayd->control_sock, + ret = relayd_data_pending(&relayd->control_sock, stream->relayd_stream_id, stream->next_net_seq_num); } pthread_mutex_unlock(&relayd->ctrl_sock_mutex); - pthread_mutex_unlock(&stream->lock); - if (ret == 0) { - goto data_not_available; + if (ret == 1) { + pthread_mutex_unlock(&stream->lock); + goto data_not_pending; } } + pthread_mutex_unlock(&stream->lock); } /* @@ -2528,11 +2905,11 @@ int consumer_data_available(uint64_t id) /* Data is available to be read by a viewer. */ pthread_mutex_unlock(&consumer_data.lock); rcu_read_unlock(); - return 1; + return 0; -data_not_available: +data_not_pending: /* Data is still being extracted from buffers. */ pthread_mutex_unlock(&consumer_data.lock); rcu_read_unlock(); - return 0; + return 1; }
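
The hunks above factor the EINTR-retried pipe write out of consumer_thread_sessiond_poll() into a shared notify_thread_pipe() helper, and cleanup_relayd() uses it to wake both the data and metadata poll threads after flagging their streams inactive. What follows is a minimal standalone sketch of that wake-up pattern, outside the lttng-tools tree and with hypothetical names (notify_pipe, fake_stream); it is an illustration of the idea, not the project's actual API:

#include <errno.h>
#include <stddef.h>
#include <unistd.h>

struct fake_stream;	/* stands in for struct lttng_consumer_stream */

/*
 * Write a NULL stream pointer to a thread's wake-up pipe. The polling
 * thread reads a pointer-sized value; NULL means "global state changed,
 * revalidate the streams" rather than "new stream to add". The write is
 * retried when a signal interrupts it before any byte was written (EINTR).
 */
static void notify_pipe(int wpipe)
{
	ssize_t ret;
	struct fake_stream *null_stream = NULL;

	do {
		ret = write(wpipe, &null_stream, sizeof(null_stream));
	} while (ret < 0 && errno == EINTR);
}

On the read side in the patch, a NULL value received on consumer_data_pipe or consumer_metadata_pipe triggers validate_endpoint_status_data_stream() or validate_endpoint_status_metadata_stream(), which delete the streams whose endpoint was flagged CONSUMER_ENDPOINT_INACTIVE.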
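
The consumer_data_pending() rework also relies on pthread_mutex_trylock() through stream_try_lock(): if a stream's mutex is already held, data is still being extracted, so the session daemon can be told that data is pending without blocking the command thread. A short self-contained illustration of that return convention (1 = lock acquired, 0 = busy or invalid), using a plain mutex rather than the stream structure:

#include <pthread.h>

/*
 * Try to take the lock without blocking. Returns 1 when acquired, 0 when
 * pthread_mutex_trylock() fails (typically EBUSY, i.e. another thread is
 * consuming the stream), in which case the caller treats the stream as busy.
 */
static int try_lock(pthread_mutex_t *lock)
{
	if (pthread_mutex_trylock(lock) != 0) {
		return 0;
	}
	return 1;
}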