+ /* Take care of high priority channels first. */
+ for (i = 0; i < nb_fd; i++) {
+ if (local_stream[i] == NULL) {
+ continue;
+ }
+ if (pollfd[i].revents & POLLPRI) {
+ DBG("Urgent read on fd %d", pollfd[i].fd);
+ high_prio = 1;
+ len = ctx->on_buffer_ready(local_stream[i], ctx);
+ /* it's ok to have an unavailable sub-buffer */
+ if (len < 0 && len != -EAGAIN && len != -ENODATA) {
+ /* Clean the stream and free it. */
+ consumer_del_stream(local_stream[i], data_ht);
+ local_stream[i] = NULL;
+ } else if (len > 0) {
+ local_stream[i]->data_read = 1;
+ }
+ }
+ }
+
+ /*
+ * If we read high prio channel in this loop, try again
+ * for more high prio data.
+ */
+ if (high_prio) {
+ continue;
+ }
+
+ /* Take care of low priority channels. */
+ for (i = 0; i < nb_fd; i++) {
+ if (local_stream[i] == NULL) {
+ continue;
+ }
+ if ((pollfd[i].revents & POLLIN) ||
+ local_stream[i]->hangup_flush_done) {
+ DBG("Normal read on fd %d", pollfd[i].fd);
+ len = ctx->on_buffer_ready(local_stream[i], ctx);
+ /* it's ok to have an unavailable sub-buffer */
+ if (len < 0 && len != -EAGAIN && len != -ENODATA) {
+ /* Clean the stream and free it. */
+ consumer_del_stream(local_stream[i], data_ht);
+ local_stream[i] = NULL;
+ } else if (len > 0) {
+ local_stream[i]->data_read = 1;
+ }
+ }
+ }
+
+ /* Handle hangup and errors */
+ for (i = 0; i < nb_fd; i++) {
+ if (local_stream[i] == NULL) {
+ continue;
+ }
+ if (!local_stream[i]->hangup_flush_done
+ && (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
+ && (consumer_data.type == LTTNG_CONSUMER32_UST
+ || consumer_data.type == LTTNG_CONSUMER64_UST)) {
+ DBG("fd %d is hup|err|nval. Attempting flush and read.",
+ pollfd[i].fd);
+ lttng_ustconsumer_on_stream_hangup(local_stream[i]);
+ /* Attempt read again, for the data we just flushed. */
+ local_stream[i]->data_read = 1;
+ }
+ /*
+ * If the poll flag is HUP/ERR/NVAL and we have
+ * read no data in this pass, we can remove the
+ * stream from its hash table.
+ */
+ if ((pollfd[i].revents & POLLHUP)) {
+ DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
+ if (!local_stream[i]->data_read) {
+ consumer_del_stream(local_stream[i], data_ht);
+ local_stream[i] = NULL;
+ num_hup++;
+ }
+ } else if (pollfd[i].revents & POLLERR) {
+ ERR("Error returned in polling fd %d.", pollfd[i].fd);
+ if (!local_stream[i]->data_read) {
+ consumer_del_stream(local_stream[i], data_ht);
+ local_stream[i] = NULL;
+ num_hup++;
+ }
+ } else if (pollfd[i].revents & POLLNVAL) {
+ ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
+ if (!local_stream[i]->data_read) {
+ consumer_del_stream(local_stream[i], data_ht);
+ local_stream[i] = NULL;
+ num_hup++;
+ }
+ }
+ if (local_stream[i] != NULL) {
+ local_stream[i]->data_read = 0;
+ }
+ }
+ }
+end:
+ DBG("polling thread exiting");
+ free(pollfd);
+ free(local_stream);
+
+ /*
+ * Close the write side of the pipe so epoll_wait() in
+ * consumer_thread_metadata_poll can catch it. The thread is monitoring the
+ * read side of the pipe. If we close them both, epoll_wait strangely does
+ * not return and could create a endless wait period if the pipe is the
+ * only tracked fd in the poll set. The thread will take care of closing
+ * the read side.
+ */
+ (void) lttng_pipe_write_close(ctx->consumer_metadata_pipe);
+
+ destroy_data_stream_ht(data_ht);
+
+ rcu_unregister_thread();
+ return NULL;
+}
+
+/*
+ * Close wake-up end of each stream belonging to the channel. This will
+ * allow the poll() on the stream read-side to detect when the
+ * write-side (application) finally closes them.
+ */
+static
+void consumer_close_channel_streams(struct lttng_consumer_channel *channel)
+{
+ struct lttng_ht *ht;
+ struct lttng_consumer_stream *stream;
+ struct lttng_ht_iter iter;
+
+ ht = consumer_data.stream_per_chan_id_ht;
+
+ rcu_read_lock();
+ cds_lfht_for_each_entry_duplicate(ht->ht,
+ ht->hash_fct(&channel->key, lttng_ht_seed),
+ ht->match_fct, &channel->key,
+ &iter.iter, stream, node_channel_id.node) {
+ /*
+ * Protect against teardown with mutex.
+ */
+ pthread_mutex_lock(&stream->lock);
+ if (cds_lfht_is_node_deleted(&stream->node.node)) {
+ goto next;
+ }
+ switch (consumer_data.type) {
+ case LTTNG_CONSUMER_KERNEL:
+ break;
+ case LTTNG_CONSUMER32_UST:
+ case LTTNG_CONSUMER64_UST:
+ /*
+ * Note: a mutex is taken internally within
+ * liblttng-ust-ctl to protect timer wakeup_fd
+ * use from concurrent close.
+ */
+ lttng_ustconsumer_close_stream_wakeup(stream);
+ break;
+ default:
+ ERR("Unknown consumer_data type");
+ assert(0);
+ }
+ next:
+ pthread_mutex_unlock(&stream->lock);
+ }
+ rcu_read_unlock();
+}
+
+ /*
+  * Empty and destroy a channel hash table used for local bookkeeping by
+  * the channel poll thread. Each entry is removed under the RCU
+  * read-side lock before the table itself is destroyed. A NULL table is
+  * accepted and ignored.
+  */
+ static void destroy_channel_ht(struct lttng_ht *ht)
+ {
+ 	struct lttng_ht_iter iter;
+ 	struct lttng_consumer_channel *channel;
+ 	int ret;
+ 
+ 	if (ht == NULL) {
+ 		return;
+ 	}
+ 
+ 	rcu_read_lock();
+ 	cds_lfht_for_each_entry(ht->ht, &iter.iter, channel, wait_fd_node.node) {
+ 		ret = lttng_ht_del(ht, &iter);
+ 		/*
+ 		 * lttng_ht_del() returns 0 on success (it wraps
+ 		 * cds_lfht_del()), and the node was just handed to us by the
+ 		 * iterator so removal must succeed. The previous
+ 		 * assert(ret != 0) was inverted: it fired on every
+ 		 * successful deletion.
+ 		 */
+ 		assert(!ret);
+ 	}
+ 	rcu_read_unlock();
+ 
+ 	lttng_ht_destroy(ht);
+ }
+
+/*
+ * This thread polls the channel fds to detect when they are being
+ * closed. It closes all related streams if the channel is detected as
+ * closed. It is currently only used as a shim layer for UST because the
+ * consumerd needs to keep the per-stream wakeup end of pipes open for
+ * periodical flush.
+ */
+void *consumer_thread_channel_poll(void *data)
+{
+ int ret, i, pollfd;
+ uint32_t revents, nb_fd;
+ struct lttng_consumer_channel *chan = NULL;
+ struct lttng_ht_iter iter;
+ struct lttng_ht_node_u64 *node;
+ struct lttng_poll_event events;
+ struct lttng_consumer_local_data *ctx = data;
+ struct lttng_ht *channel_ht;
+
+ rcu_register_thread();
+
+ channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
+ if (!channel_ht) {
+ /* ENOMEM at this point. Better to bail out. */
+ goto end_ht;
+ }
+
+ DBG("Thread channel poll started");
+
+ /* Poll set created with a size of 2 for the consumer_channel pipe */
+ ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
+ if (ret < 0) {
+ ERR("Poll set creation failed");
+ goto end_poll;
+ }
+
+ ret = lttng_poll_add(&events, ctx->consumer_channel_pipe[0], LPOLLIN);
+ if (ret < 0) {
+ goto end;
+ }
+
+ /* Main loop */
+ DBG("Channel main loop started");
+
+ while (1) {
+ /* Only the channel pipe is set */
+ if (LTTNG_POLL_GETNB(&events) == 0 && consumer_quit == 1) {
+ goto end;
+ }
+
+restart:
+ DBG("Channel poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events));
+ ret = lttng_poll_wait(&events, -1);
+ DBG("Channel event catched in thread");
+ if (ret < 0) {
+ if (errno == EINTR) {
+ ERR("Poll EINTR catched");
+ goto restart;