			if (local_stream[i] != NULL) {
				/* Reset per-iteration read flag for the next poll round. */
				local_stream[i]->data_read = 0;
			}
		}
	}
end:
	/* Thread teardown: release poll bookkeeping owned by this thread. */
	DBG("polling thread exiting");
	free(pollfd);
	free(local_stream);

	/*
	 * Close the write side of the pipe so epoll_wait() in
	 * consumer_thread_metadata_poll can catch it. The thread is monitoring the
	 * read side of the pipe. If we close them both, epoll_wait strangely does
	 * not return and could create a endless wait period if the pipe is the
	 * only tracked fd in the poll set. The thread will take care of closing
	 * the read side.
	 */
	(void) lttng_pipe_write_close(ctx->consumer_metadata_pipe);

	/* NOTE(review): presumably the data-poll thread's stream ht — confirm owner. */
	destroy_data_stream_ht(data_ht);

	rcu_unregister_thread();
	return NULL;
}
+
+/*
+ * Close wake-up end of each stream belonging to the channel. This will
+ * allow the poll() on the stream read-side to detect when the
+ * write-side (application) finally closes them.
+ */
+static
+void consumer_close_channel_streams(struct lttng_consumer_channel *channel)
+{
+ struct lttng_ht *ht;
+ struct lttng_consumer_stream *stream;
+ struct lttng_ht_iter iter;
+
+ ht = consumer_data.stream_per_chan_id_ht;
+
+ rcu_read_lock();
+ cds_lfht_for_each_entry_duplicate(ht->ht,
+ ht->hash_fct(&channel->key, lttng_ht_seed),
+ ht->match_fct, &channel->key,
+ &iter.iter, stream, node_channel_id.node) {
+ /*
+ * Protect against teardown with mutex.
+ */
+ pthread_mutex_lock(&stream->lock);
+ if (cds_lfht_is_node_deleted(&stream->node.node)) {
+ goto next;
+ }
+ switch (consumer_data.type) {
+ case LTTNG_CONSUMER_KERNEL:
+ break;
+ case LTTNG_CONSUMER32_UST:
+ case LTTNG_CONSUMER64_UST:
+ /*
+ * Note: a mutex is taken internally within
+ * liblttng-ust-ctl to protect timer wakeup_fd
+ * use from concurrent close.
+ */
+ lttng_ustconsumer_close_stream_wakeup(stream);
+ break;
+ default:
+ ERR("Unknown consumer_data type");
+ assert(0);
+ }
+ next:
+ pthread_mutex_unlock(&stream->lock);
+ }
+ rcu_read_unlock();
+}
+
+static void destroy_channel_ht(struct lttng_ht *ht)
+{
+ struct lttng_ht_iter iter;
+ struct lttng_consumer_channel *channel;
+ int ret;
+
+ if (ht == NULL) {
+ return;
+ }
+
+ rcu_read_lock();
+ cds_lfht_for_each_entry(ht->ht, &iter.iter, channel, wait_fd_node.node) {
+ ret = lttng_ht_del(ht, &iter);
+ assert(ret != 0);
+ }
+ rcu_read_unlock();
+
+ lttng_ht_destroy(ht);
+}
+
/*
 * This thread polls the channel fds to detect when they are being
 * closed. It closes all related streams if the channel is detected as
 * closed. It is currently only used as a shim layer for UST because the
 * consumerd needs to keep the per-stream wakeup end of pipes open for
 * periodical flush.
 *
 * Always returns NULL; all work happens through side effects.
 */
void *consumer_thread_channel_poll(void *data)
{
	int ret, i, pollfd;
	uint32_t revents, nb_fd;
	struct lttng_consumer_channel *chan = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	struct lttng_ht *channel_ht;

	rcu_register_thread();

	/* Hash table indexing the tracked channels by wait_fd (u64 key). */
	channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!channel_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread channel poll started");

	/*
	 * Initial poll-set size hint. NOTE(review): the original comment
	 * claimed "size is set to 1" but 2 is requested below — confirm the
	 * intended initial size.
	 */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	/* Watch the channel pipe: the session daemon side sends channel actions. */
	ret = lttng_poll_add(&events, ctx->consumer_channel_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Channel main loop started");

	while (1) {
		/* Exit once the quit flag is set and no fd remains in the poll set. */
		if (LTTNG_POLL_GETNB(&events) == 0 && consumer_quit == 1) {
			goto end;
		}

restart:
		DBG("Channel poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events));
		ret = lttng_poll_wait(&events, -1);
		DBG("Channel event catched in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				/* Interrupted by a signal; simply retry the wait. */
				ERR("Poll EINTR catched");
				goto restart;
			}
			goto end;
		}

		/* On success, ret is the number of fds with pending events. */
		nb_fd = ret;

		/* From here, the event is a channel wait fd */
		for (i = 0; i < nb_fd; i++) {
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Just don't waste time if no returned events for the fd */
			if (!revents) {
				continue;
			}
			if (pollfd == ctx->consumer_channel_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Channel thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
					continue;
				} else if (revents & LPOLLIN) {
					enum consumer_channel_action action;
					uint64_t key;

					ret = read_channel_pipe(ctx, &chan, &key, &action);
					if (ret <= 0) {
						ERR("Error reading channel pipe");
						continue;
					}

					switch (action) {
					case CONSUMER_CHANNEL_ADD:
						/* Start tracking a new channel's wait_fd. */
						DBG("Adding channel %d to poll set",
							chan->wait_fd);

						lttng_ht_node_init_u64(&chan->wait_fd_node,
							chan->wait_fd);
						rcu_read_lock();
						lttng_ht_add_unique_u64(channel_ht,
							&chan->wait_fd_node);
						rcu_read_unlock();
						/*
						 * Add channel to the global poll events list.
						 * NOTE(review): the lttng_poll_add() return value
						 * is ignored here, unlike the checked call above —
						 * confirm failure is acceptable at this point.
						 */
						lttng_poll_add(&events, chan->wait_fd,
								LPOLLIN | LPOLLPRI);
						break;
					case CONSUMER_CHANNEL_DEL:
					{
						struct lttng_consumer_stream *stream, *stmp;

						rcu_read_lock();
						chan = consumer_find_channel(key);
						if (!chan) {
							rcu_read_unlock();
							ERR("UST consumer get channel key %" PRIu64 " not found for del channel", key);
							break;
						}
						/* Stop polling the channel and drop it from our table. */
						lttng_poll_del(&events, chan->wait_fd);
						iter.iter.node = &chan->wait_fd_node.node;
						ret = lttng_ht_del(channel_ht, &iter);
						assert(ret == 0);
						consumer_close_channel_streams(chan);

						switch (consumer_data.type) {
						case LTTNG_CONSUMER_KERNEL:
							break;
						case LTTNG_CONSUMER32_UST:
						case LTTNG_CONSUMER64_UST:
							/* Delete streams that might have been left in the stream list. */
							cds_list_for_each_entry_safe(stream, stmp, &chan->streams.head,
									send_node) {
								cds_list_del(&stream->send_node);
								lttng_ustconsumer_del_stream(stream);
								uatomic_sub(&stream->chan->refcount, 1);
								/*
								 * NOTE(review): asserts the ADDRESS of refcount,
								 * which is always non-NULL (always true). Likely
								 * meant to check the refcount value — confirm.
								 */
								assert(&chan->refcount);
								free(stream);
							}
							break;
						default:
							ERR("Unknown consumer_data type");
							assert(0);
						}

						/*
						 * Release our own refcount. Force channel deletion even if
						 * streams were not initialized.
						 */
						if (!uatomic_sub_return(&chan->refcount, 1)) {
							consumer_del_channel(chan);
						}
						rcu_read_unlock();
						/* Poll set changed; restart the wait from scratch. */
						goto restart;
					}
					case CONSUMER_CHANNEL_QUIT:
						/*
						 * Remove the pipe from the poll set and continue the loop
						 * since there might be data to consume.
						 */
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					default:
						ERR("Unknown action");
						break;
					}
				}

				/* Handle other stream */
				continue;
			}

			/* Not the pipe: look up the channel owning this wait_fd. */
			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(channel_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			chan = caa_container_of(node, struct lttng_consumer_channel,
					wait_fd_node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Channel fd %d is hup|err.", pollfd);

				lttng_poll_del(&events, chan->wait_fd);
				ret = lttng_ht_del(channel_ht, &iter);
				assert(ret == 0);
				consumer_close_channel_streams(chan);

				/* Release our own refcount */
				if (!uatomic_sub_return(&chan->refcount, 1)
						&& !uatomic_read(&chan->nb_init_stream_left)) {
					consumer_del_channel(chan);
				}
			}

			/* Release RCU lock for the channel looked up */
			rcu_read_unlock();