/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2012 - David Goulet <dgoulet@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#define _GNU_SOURCE
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>
#include <inttypes.h>
#include <signal.h>

#include <common/common.h>
#include <common/utils.h>
#include <common/compat/poll.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/sessiond-comm/relayd.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>

#include "consumer.h"
struct lttng_consumer_global_data consumer_data = {
	.stream_count = 0,
	.need_update = 1,
	.type = LTTNG_CONSUMER_UNKNOWN,
};
enum consumer_channel_action {
	CONSUMER_CHANNEL_ADD,
	CONSUMER_CHANNEL_QUIT,
};
struct consumer_channel_msg {
	enum consumer_channel_action action;
	struct lttng_consumer_channel *chan;
};
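/*
 * Channel messages carry a requested action on a channel from one thread to
 * another; they are written and read over ctx->consumer_channel_pipe by
 * notify_channel_pipe() and read_channel_pipe() below.
 */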
/*
 * Flag to inform the polling thread to quit when all fds have hung up.
 * Updated by the consumer_thread_receive_fds when it notices that all fds
 * have hung up. Also updated by the signal handler (consumer_should_exit()).
 * Read by the polling threads.
 */
volatile int consumer_quit;
/*
 * Global hash tables containing respectively metadata and data streams. The
 * stream element in this ht should only be updated by the metadata poll
 * thread for the metadata and the data poll thread for the data.
 */
static struct lttng_ht *metadata_ht;
static struct lttng_ht *data_ht;
/*
 * Notify a thread pipe to poll back again. This usually means that some
 * global state has changed so we just send the thread back into a poll wait
 * call.
 */
static void notify_thread_pipe(int wpipe)
{
	int ret;

	do {
		struct lttng_consumer_stream *null_stream = NULL;

		ret = write(wpipe, &null_stream, sizeof(null_stream));
	} while (ret < 0 && errno == EINTR);
}
static void notify_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *chan,
		enum consumer_channel_action action)
{
	struct consumer_channel_msg msg;
	int ret;

	msg.action = action;
	msg.chan = chan;

	do {
		ret = write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg));
	} while (ret < 0 && errno == EINTR);
}
static int read_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel **chan,
		enum consumer_channel_action *action)
{
	struct consumer_channel_msg msg;
	int ret;

	do {
		ret = read(ctx->consumer_channel_pipe[0], &msg, sizeof(msg));
	} while (ret < 0 && errno == EINTR);
	if (ret > 0) {
		*action = msg.action;
		*chan = msg.chan;
	}
	return ret;
}
/*
 * Find a stream. The consumer_data.lock must be locked during this
 * call.
 */
static struct lttng_consumer_stream *find_stream(uint64_t key,
		struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_stream *stream = NULL;

	assert(ht);

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	rcu_read_lock();

	lttng_ht_lookup(ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		stream = caa_container_of(node, struct lttng_consumer_stream, node);
	}

	rcu_read_unlock();

	return stream;
}
static void steal_stream_key(int key, struct lttng_ht *ht)
{
	struct lttng_consumer_stream *stream;

	rcu_read_lock();
	stream = find_stream(key, ht);
	if (stream) {
		stream->key = -1ULL;
		/*
		 * We don't want the lookup to match, but we still need
		 * to iterate on this stream when iterating over the hash table. Just
		 * change the node key.
		 */
		stream->node.key = -1ULL;
	}
	rcu_read_unlock();
}
/*
 * Return a channel object for the given key.
 *
 * RCU read side lock MUST be acquired before calling this function and
 * protects the channel ptr.
 */
struct lttng_consumer_channel *consumer_find_channel(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_channel *channel = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.channel_ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		channel = caa_container_of(node, struct lttng_consumer_channel, node);
	}

	return channel;
}
static void free_stream_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_stream *stream =
		caa_container_of(node, struct lttng_consumer_stream, node);

	free(stream);
}
static void free_channel_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_channel *channel =
		caa_container_of(node, struct lttng_consumer_channel, node);

	free(channel);
}
/*
 * RCU protected relayd socket pair free.
 */
static void free_relayd_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct consumer_relayd_sock_pair *relayd =
		caa_container_of(node, struct consumer_relayd_sock_pair, node);

	/*
	 * Close all sockets. This is done in the RCU callback since we don't
	 * want the socket fds to be reassigned thus potentially creating bad
	 * state of the relayd object.
	 *
	 * We do not have to lock the control socket mutex here since at this
	 * stage there is no one referencing this relayd object.
	 */
	(void) relayd_close(&relayd->control_sock);
	(void) relayd_close(&relayd->data_sock);

	free(relayd);
}
/*
 * Destroy and free relayd socket pair object.
 *
 * This function MUST be called with the consumer_data lock acquired.
 */
static void destroy_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret;
	struct lttng_ht_iter iter;

	if (relayd == NULL) {
		return;
	}

	DBG("Consumer destroy and close relayd socket pair");

	iter.iter.node = &relayd->node.node;
	ret = lttng_ht_del(consumer_data.relayd_ht, &iter);
	if (ret != 0) {
		/* We assume the relayd is being or is destroyed */
		return;
	}

	/* RCU free() call */
	call_rcu(&relayd->node.head, free_relayd_rcu);
}
/*
 * Remove a channel from the global list protected by a mutex. This function
 * is also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_ht_iter iter;

	DBG("Consumer delete channel key %" PRIu64, channel->key);

	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	rcu_read_lock();
	iter.iter.node = &channel->node.node;
	ret = lttng_ht_del(consumer_data.channel_ht, &iter);
	assert(!ret);
	rcu_read_unlock();

	call_rcu(&channel->node.head, free_channel_rcu);
end:
	pthread_mutex_unlock(&consumer_data.lock);
}
/*
 * Iterate over the relayd hash table and destroy each element. Finally,
 * destroy the whole hash table.
 */
static void cleanup_relayd_ht(void)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		destroy_relayd(relayd);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.relayd_ht);
}
/*
 * Update the end point status of all streams having the given network
 * sequence index (relayd index).
 *
 * It's atomically set without having the stream mutex locked which is fine
 * because we handle the write/read race with a pipe wakeup for each thread.
 */
static void update_endpoint_status_by_netidx(int net_seq_idx,
		enum consumer_endpoint_status status)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer set delete flag on stream by idx %d", net_seq_idx);

	rcu_read_lock();

	/* Let's begin with metadata */
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to metadata stream %d", stream->wait_fd);
		}
	}

	/* Follow up by the data streams */
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to data stream %d", stream->wait_fd);
		}
	}
	rcu_read_unlock();
}
/*
 * Cleanup a relayd object by flagging every associated stream for deletion,
 * destroying the object meaning removing it from the relayd hash table,
 * closing the sockets and freeing the memory in an RCU call.
 *
 * If a local data context is available, notify the threads that the streams'
 * state has changed.
 */
static void cleanup_relayd(struct consumer_relayd_sock_pair *relayd,
		struct lttng_consumer_local_data *ctx)
{
	int netidx;

	assert(relayd);

	DBG("Cleaning up relayd sockets");

	/* Save the net sequence index before destroying the object */
	netidx = relayd->net_seq_idx;

	/*
	 * Delete the relayd from the relayd hash table, close the sockets and
	 * free the object in an RCU call.
	 */
	destroy_relayd(relayd);

	/* Set inactive endpoint to all streams */
	update_endpoint_status_by_netidx(netidx, CONSUMER_ENDPOINT_INACTIVE);

	/*
	 * With a local data context, notify the threads that the streams' state
	 * have changed. The write() action on the pipe acts as an "implicit"
	 * memory barrier ordering the updates of the end point status from the
	 * read of this status which happens AFTER receiving this notify.
	 */
	if (ctx) {
		notify_thread_pipe(ctx->consumer_data_pipe[1]);
		notify_thread_pipe(ctx->consumer_metadata_pipe[1]);
	}
}
/*
 * Flag a relayd socket pair for destruction. Destroy it if the refcount
 * reaches zero.
 *
 * RCU read side lock MUST be acquired before calling this function.
 */
void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd)
{
	assert(relayd);

	/* Set destroy flag for this object */
	uatomic_set(&relayd->destroy_flag, 1);

	/* Destroy the relayd if refcount is 0 */
	if (uatomic_read(&relayd->refcount) == 0) {
		destroy_relayd(relayd);
	}
}
/*
 * Remove a stream from the global list protected by a mutex. This
 * function is also responsible for freeing its data structures.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *free_chan = NULL;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);

	DBG("Consumer del stream %d", stream->wait_fd);

	if (ht == NULL) {
		/* Means the stream was allocated but not successfully added */
		goto free_stream_rcu;
	}

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				PERROR("munmap");
			}
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_stream(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	rcu_read_lock();
	iter.iter.node = &stream->node.node;
	ret = lttng_ht_del(ht, &iter);
	assert(!ret);

	iter.iter.node = &stream->node_channel_id.node;
	ret = lttng_ht_del(consumer_data.stream_per_chan_id_ht, &iter);
	assert(!ret);

	iter.iter.node = &stream->node_session_id.node;
	ret = lttng_ht_del(consumer_data.stream_list_ht, &iter);
	assert(!ret);
	rcu_read_unlock();

	assert(consumer_data.stream_count > 0);
	consumer_data.stream_count--;

	if (stream->out_fd >= 0) {
		ret = close(stream->out_fd);
		if (ret) {
			PERROR("close");
		}
	}

	/* Check and cleanup relayd */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		uatomic_dec(&relayd->refcount);
		assert(uatomic_read(&relayd->refcount) >= 0);

		/* Closing streams requires locking the control socket. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_send_close_stream(&relayd->control_sock,
				stream->relayd_stream_id,
				stream->next_net_seq_num - 1);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			DBG("Unable to close stream on the relayd. Continuing");
			/*
			 * Continue here. There is nothing we can do for the relayd.
			 * Chances are that the relayd has closed the socket so we just
			 * continue cleaning up.
			 */
		}

		/* Both conditions are met, we destroy the relayd. */
		if (uatomic_read(&relayd->refcount) == 0 &&
				uatomic_read(&relayd->destroy_flag)) {
			destroy_relayd(relayd);
		}
	}
	rcu_read_unlock();

	uatomic_dec(&stream->chan->refcount);
	if (!uatomic_read(&stream->chan->refcount)
			&& !uatomic_read(&stream->chan->nb_init_stream_left)) {
		free_chan = stream->chan;
	}

end:
	consumer_data.need_update = 1;
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (free_chan) {
		consumer_del_channel(free_chan);
	}

free_stream_rcu:
	call_rcu(&stream->node.head, free_stream_rcu);
}
struct lttng_consumer_stream *consumer_allocate_stream(uint64_t channel_key,
		uint64_t stream_key,
		enum lttng_consumer_stream_state state,
		const char *channel_name,
		uid_t uid,
		gid_t gid,
		int relayd_id,
		uint64_t session_id,
		int cpu,
		int *alloc_ret,
		enum consumer_channel_type type)
{
	int ret;
	struct lttng_consumer_stream *stream;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("malloc struct lttng_consumer_stream");
		ret = -ENOMEM;
		goto end;
	}

	rcu_read_lock();

	stream->key = stream_key;
	stream->out_fd = -1;
	stream->out_fd_offset = 0;
	stream->state = state;
	stream->uid = uid;
	stream->gid = gid;
	stream->net_seq_idx = relayd_id;
	stream->session_id = session_id;
	pthread_mutex_init(&stream->lock, NULL);

	/* If channel is the metadata, flag this stream as metadata. */
	if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
		stream->metadata_flag = 1;
		/* Metadata is flat out. */
		strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
	} else {
		/* Format stream name to <channel_name>_<cpu_number> */
		ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
				channel_name, cpu);
		if (ret < 0) {
			PERROR("snprintf stream name");
			goto error;
		}
	}

	/* Key is always the wait_fd for streams. */
	lttng_ht_node_init_u64(&stream->node, stream->key);

	/* Init node per channel id key */
	lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);

	/* Init session id node with the stream session id */
	lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);

	DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
			" relayd_id %" PRIu64 ", session_id %" PRIu64,
			stream->name, stream->key, channel_key, stream->net_seq_idx,
			stream->session_id);

	rcu_read_unlock();
	return stream;

error:
	rcu_read_unlock();
	free(stream);
end:
	if (alloc_ret) {
		*alloc_ret = ret;
	}
	return NULL;
}
/*
 * Add a stream to the global list protected by a mutex.
 */
static int add_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);
	assert(ht);

	DBG3("Adding consumer stream %" PRIu64, stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->lock);
	rcu_read_lock();

	/* Steal stream identifier to avoid having streams with the same key */
	steal_stream_key(stream->key, ht);

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to
	 * steal the key since the HT does not use it and we allow adding
	 * redundant keys into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	/* Check and cleanup relayd */
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		uatomic_inc(&relayd->refcount);
	}

	/* Update channel refcount once added without error(s). */
	uatomic_inc(&stream->chan->refcount);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any
	 * action in terms of destroying the associated channel, because the
	 * action that causes the count to become 0 also causes a stream to be
	 * added. The channel deletion will thus be triggered by the following
	 * removal of this stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	/* Update consumer data once the node is inserted. */
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	rcu_read_unlock();
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	return ret;
}
/*
 * Add relayd socket to global consumer data hashtable. RCU read side lock
 * MUST be acquired before calling this.
 */
static int add_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	assert(relayd);

	lttng_ht_lookup(consumer_data.relayd_ht,
			&relayd->net_seq_idx, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		goto end;
	}
	lttng_ht_add_unique_u64(consumer_data.relayd_ht, &relayd->node);

end:
	return ret;
}
/*
 * Allocate and return a consumer relayd socket.
 */
struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
		int net_seq_idx)
{
	struct consumer_relayd_sock_pair *obj = NULL;

	/* Negative net sequence index is a failure */
	if (net_seq_idx < 0) {
		goto error;
	}

	obj = zmalloc(sizeof(struct consumer_relayd_sock_pair));
	if (obj == NULL) {
		PERROR("zmalloc relayd sock");
		goto error;
	}

	obj->net_seq_idx = net_seq_idx;
	obj->refcount = 0;
	obj->destroy_flag = 0;
	lttng_ht_node_init_u64(&obj->node, obj->net_seq_idx);
	pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);

error:
	return obj;
}
/*
 * Find a relayd socket pair in the global consumer data.
 *
 * Return the object if found else NULL.
 * RCU read-side lock must be held across this call and while using the
 * returned object.
 */
struct consumer_relayd_sock_pair *consumer_find_relayd(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Negative keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		goto error;
	}

	lttng_ht_lookup(consumer_data.relayd_ht, &key,
			&iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node);
	}

error:
	return relayd;
}
/*
 * Handle stream for relayd transmission if the stream applies for network
 * streaming where the net sequence index is set.
 *
 * Return destination file descriptor or negative value on error.
 */
static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
		size_t data_size, unsigned long padding,
		struct consumer_relayd_sock_pair *relayd)
{
	int outfd = -1, ret;
	struct lttcomm_relayd_data_hdr data_hdr;

	assert(stream);
	assert(relayd);

	/* Reset data header */
	memset(&data_hdr, 0, sizeof(data_hdr));

	if (stream->metadata_flag) {
		/* Caller MUST acquire the relayd control socket lock */
		ret = relayd_send_metadata(&relayd->control_sock, data_size);
		if (ret < 0) {
			goto error;
		}

		/* Metadata are always sent on the control socket. */
		outfd = relayd->control_sock.fd;
	} else {
		/* Set header with stream information */
		data_hdr.stream_id = htobe64(stream->relayd_stream_id);
		data_hdr.data_size = htobe32(data_size);
		data_hdr.padding_size = htobe32(padding);
		/*
		 * Note that net_seq_num below is assigned with the *current* value
		 * of next_net_seq_num and only after that the next_net_seq_num will
		 * be incremented. This is why when issuing a command on the relayd
		 * using this next value, 1 should always be subtracted in order to
		 * compare the last seen sequence number on the relayd side to the
		 * last sent.
		 */
		data_hdr.net_seq_num = htobe64(stream->next_net_seq_num);
		/* Other fields are zeroed previously */

		ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr,
				sizeof(data_hdr));
		if (ret < 0) {
			goto error;
		}

		++stream->next_net_seq_num;

		/* Set to go on data socket */
		outfd = relayd->data_sock.fd;
	}

error:
	return outfd;
}
/*
 * Allocate and return a new lttng_consumer_channel object using the given key
 * to initialize the hash table node.
 *
 * On error, return NULL.
 */
struct lttng_consumer_channel *consumer_allocate_channel(unsigned long key,
		uint64_t session_id,
		const char *pathname,
		const char *name,
		uid_t uid,
		gid_t gid,
		int relayd_id,
		enum lttng_event_output output)
{
	struct lttng_consumer_channel *channel;

	channel = zmalloc(sizeof(*channel));
	if (channel == NULL) {
		PERROR("malloc struct lttng_consumer_channel");
		goto end;
	}

	channel->key = key;
	channel->refcount = 0;
	channel->session_id = session_id;
	channel->uid = uid;
	channel->gid = gid;
	channel->relayd_id = relayd_id;
	channel->output = output;

	strncpy(channel->pathname, pathname, sizeof(channel->pathname));
	channel->pathname[sizeof(channel->pathname) - 1] = '\0';

	strncpy(channel->name, name, sizeof(channel->name));
	channel->name[sizeof(channel->name) - 1] = '\0';

	lttng_ht_node_init_u64(&channel->node, channel->key);

	channel->wait_fd = -1;

	CDS_INIT_LIST_HEAD(&channel->streams.head);

	DBG("Allocated channel (key %" PRIu64 ")", channel->key);

end:
	return channel;
}
/*
 * Add a channel to the global list protected by a mutex.
 */
int consumer_add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	pthread_mutex_lock(&consumer_data.lock);
	rcu_read_lock();

	lttng_ht_lookup(consumer_data.channel_ht,
			&channel->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		/* Channel already exists. Ignore the insertion */
		ERR("Consumer add channel key %" PRIu64 " already exists!",
				channel->key);
		ret = -1;
		goto end;
	}

	lttng_ht_add_unique_u64(consumer_data.channel_ht, &channel->node);

end:
	rcu_read_unlock();
	pthread_mutex_unlock(&consumer_data.lock);

	if (!ret && channel->wait_fd != -1 &&
			channel->metadata_stream == NULL) {
		notify_channel_pipe(ctx, channel, CONSUMER_CHANNEL_ADD);
	}
	return ret;
}
/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
static int update_poll_array(struct lttng_consumer_local_data *ctx,
		struct pollfd **pollfd, struct lttng_consumer_stream **local_stream,
		struct lttng_ht *ht)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(ctx);
	assert(ht);
	assert(pollfd);
	assert(local_stream);

	DBG("Updating poll fd array");
	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Only active streams with an active end point can be added to the
		 * poll set and local stream storage of the thread.
		 *
		 * There is a potential race here for endpoint_status to be updated
		 * just after the check. However, this is OK since the stream(s)
		 * will be deleted once the thread is notified that the end point
		 * state has changed where this function will be called back again.
		 */
		if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM ||
				stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
			continue;
		}
		DBG("Active FD %d", stream->wait_fd);
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = stream;
		i++;
	}
	rcu_read_unlock();

	/*
	 * Insert the consumer_data_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD.
	 */
	(*pollfd)[i].fd = ctx->consumer_data_pipe[0];
	(*pollfd)[i].events = POLLIN | POLLPRI;
	return i;
}
/*
 * Poll on the should_quit pipe and the command socket. Return -1 on error or
 * if the thread should exit, 0 if data is available on the command socket.
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		PERROR("Poll error");
		goto exit;
	}
	if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
		DBG("consumer_should_quit wake up");
		goto exit;
	}
	return 0;

exit:
	return -1;
}
/*
 * Set the error socket.
 */
void lttng_consumer_set_error_sock(struct lttng_consumer_local_data *ctx,
		int sock)
{
	ctx->consumer_error_socket = sock;
}
/*
 * Set the command socket path.
 */
void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}
/*
 * Send return code to the session daemon.
 * If the socket is not defined, we return 0; it is not a fatal error.
 */
int lttng_consumer_send_error(struct lttng_consumer_local_data *ctx, int cmd)
{
	if (ctx->consumer_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}
/*
 * Close all the tracefiles and stream fds and MUST be called when all
 * instances are destroyed i.e. when all threads were joined and are ended.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, channel,
			node.node) {
		consumer_del_channel(channel);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.channel_ht);

	cleanup_relayd_ht();

	lttng_ht_destroy(consumer_data.stream_per_chan_id_ht);

	/*
	 * This HT contains streams that are freed by either the metadata thread
	 * or the data thread so we do *nothing* on the hash table and simply
	 * destroy it.
	 */
	lttng_ht_destroy(consumer_data.stream_list_ht);
}
/*
 * Called from signal handler.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	int ret;

	consumer_quit = 1;
	do {
		ret = write(ctx->consumer_should_quit[1], "4", 1);
	} while (ret < 0 && errno == EINTR);
	if (ret < 0 || ret != 1) {
		PERROR("write consumer quit");
	}

	DBG("Consumer flag that it should quit");
}
void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream,
		off_t orig_offset)
{
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->max_sb_size) {
		return;
	}
	lttng_sync_file_range(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	posix_fadvise(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size, POSIX_FADV_DONTNEED);
}
/*
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument, this function is called when data is
 * available on a buffer. This function is responsible to do the
 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
 * buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(int stream_key, uint32_t state))
{
	int ret;
	struct lttng_consumer_local_data *ctx;

	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		PERROR("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ret = pipe(ctx->consumer_data_pipe);
	if (ret < 0) {
		PERROR("Error creating poll pipe");
		goto error_poll_pipe;
	}

	/* set read end of the pipe to non-blocking */
	ret = fcntl(ctx->consumer_data_pipe[0], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl O_NONBLOCK");
		goto error_poll_fcntl;
	}

	/* set write end of the pipe to non-blocking */
	ret = fcntl(ctx->consumer_data_pipe[1], F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl O_NONBLOCK");
		goto error_poll_fcntl;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		PERROR("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_thread_pipe);
	if (ret < 0) {
		PERROR("Error creating thread pipe");
		goto error_thread_pipe;
	}

	ret = pipe(ctx->consumer_channel_pipe);
	if (ret < 0) {
		PERROR("Error creating channel pipe");
		goto error_channel_pipe;
	}

	ret = utils_create_pipe(ctx->consumer_metadata_pipe);
	if (ret < 0) {
		goto error_metadata_pipe;
	}

	ret = utils_create_pipe(ctx->consumer_splice_metadata_pipe);
	if (ret < 0) {
		goto error_splice_pipe;
	}

	return ctx;

error_splice_pipe:
	utils_close_pipe(ctx->consumer_metadata_pipe);
error_metadata_pipe:
	utils_close_pipe(ctx->consumer_channel_pipe);
error_channel_pipe:
	utils_close_pipe(ctx->consumer_thread_pipe);
error_thread_pipe:
	utils_close_pipe(ctx->consumer_should_quit);
error_poll_fcntl:
error_quit_pipe:
	utils_close_pipe(ctx->consumer_data_pipe);
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
{
	int ret;

	DBG("Consumer destroying it. Closing everything.");

	ret = close(ctx->consumer_error_socket);
	if (ret) {
		PERROR("close");
	}
	utils_close_pipe(ctx->consumer_thread_pipe);
	utils_close_pipe(ctx->consumer_channel_pipe);
	utils_close_pipe(ctx->consumer_data_pipe);
	utils_close_pipe(ctx->consumer_should_quit);
	utils_close_pipe(ctx->consumer_splice_metadata_pipe);

	unlink(ctx->consumer_command_sock_path);
	free(ctx);
}
/*
 * Write the metadata stream id on the specified file descriptor.
 */
static int write_relayd_metadata_id(int fd,
		struct lttng_consumer_stream *stream,
		struct consumer_relayd_sock_pair *relayd, unsigned long padding)
{
	int ret;
	struct lttcomm_relayd_metadata_payload hdr;

	hdr.stream_id = htobe64(stream->relayd_stream_id);
	hdr.padding_size = htobe32(padding);
	do {
		ret = write(fd, (void *) &hdr, sizeof(hdr));
	} while (ret < 0 && errno == EINTR);
	if (ret < 0 || ret != sizeof(hdr)) {
		/*
		 * This error means that the fd's end is closed so ignore the perror
		 * not to clobber the error output since this can happen in a normal
		 * code path.
		 */
		if (errno != EPIPE) {
			PERROR("write metadata stream id");
		}
		DBG3("Consumer failed to write relayd metadata id (errno: %d)", errno);
		/*
		 * Set ret to a negative value because if ret != sizeof(hdr), we
		 * don't handle writing the missing part so report that as an error
		 * and don't lie to the caller.
		 */
		ret = -1;
		goto end;
	}
	DBG("Metadata stream id %" PRIu64 " with padding %lu written before data",
			stream->relayd_stream_id, padding);

end:
	return ret;
}
/*
 * Mmap the ring buffer, read it and write the data to the tracefile. This is
 * a core function for writing trace buffers to either the local filesystem or
 * the network.
 *
 * It must be called with the stream lock held.
 *
 * Changes here MUST be carefully reviewed!
 *
 * Returns the number of bytes written.
 */
ssize_t lttng_consumer_on_read_subbuffer_mmap(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding)
{
	unsigned long mmap_offset;
	char *mmap_base;
	ssize_t ret = 0, written = 0;
	off_t orig_offset = stream->out_fd_offset;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	unsigned int relayd_hang_up = 0;

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Find the relayd if the current stream is set for network streaming. */
	if (stream->net_seq_idx != -1) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			goto end;
		}
	}

	/* get the offset inside the fd to mmap */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		mmap_base = stream->mmap_base;
		ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		mmap_base = lttng_ustctl_get_mmap_base(stream);
		if (!mmap_base) {
			ERR("read mmap get mmap base for stream %s", stream->name);
			written = -1;
			goto end;
		}
		ret = lttng_ustctl_get_mmap_read_offset(stream, &mmap_offset);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}
	if (ret != 0) {
		errno = -ret;
		PERROR("tracer ctl get_mmap_read_offset");
		written = ret;
		goto end;
	}

	/* Handle stream on the relayd if the output is on the network */
	if (relayd) {
		unsigned long netlen = len;

		/*
		 * Lock the control socket for the complete duration of the function
		 * since from this point on we will use the socket.
		 */
		if (stream->metadata_flag) {
			/* Metadata requires the control socket. */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			netlen += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, netlen, padding, relayd);
		if (ret >= 0) {
			/* Use the returned socket. */
			outfd = ret;

			/* Write metadata stream id before payload */
			if (stream->metadata_flag) {
				ret = write_relayd_metadata_id(outfd, stream, relayd,
						padding);
				if (ret < 0) {
					written = ret;
					/* Socket operation failed. We consider the relayd dead */
					if (ret == -EPIPE || ret == -EINVAL) {
						relayd_hang_up = 1;
						goto write_error;
					}
					goto end;
				}
			}
		} else {
			/* Socket operation failed. We consider the relayd dead */
			if (ret == -EPIPE || ret == -EINVAL) {
				relayd_hang_up = 1;
				goto write_error;
			}
			/* Else, use the default set before which is the filesystem. */
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;
	}

	while (len > 0) {
		do {
			ret = write(outfd, mmap_base + mmap_offset, len);
		} while (ret < 0 && errno == EINTR);
		DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);
		if (ret < 0) {
			/*
			 * This is possible if the fd is closed on the other side
			 * (outfd) or any write problem. It can be verbose a bit for a
			 * normal execution if for instance the relayd is stopped
			 * abruptly. This can happen so set this to a DBG statement.
			 */
			DBG("Error in file write mmap");
			if (written == 0) {
				written = ret;
			}
			/* Socket operation failed. We consider the relayd dead */
			if (errno == EPIPE || errno == EINVAL) {
				relayd_hang_up = 1;
				goto write_error;
			}
			goto end;
		} else if (ret > len) {
			PERROR("Error in file write (ret %zd > len %lu)", ret, len);
			written += ret;
			goto end;
		} else {
			len -= ret;
			mmap_offset += ret;
		}

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret;
		}
		written += ret;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
	}

end:
	/* Unlock only if ctrl socket used */
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * It must be called with the stream lock held.
 *
 * Returns the number of bytes spliced.
 */
ssize_t lttng_consumer_on_read_subbuffer_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding)
{
	ssize_t ret = 0, written = 0, ret_splice = 0;
	loff_t offset = 0;
	off_t orig_offset = stream->out_fd_offset;
	int fd = stream->wait_fd;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int *splice_pipe;
	unsigned int relayd_hang_up = 0;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Not supported for user space tracing */
		return -ENOSYS;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Find the relayd if the current stream is set for network streaming. */
	if (stream->net_seq_idx != -1) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			goto end;
		}
	}

	/*
	 * Choose right pipe for splice. Metadata and trace data are handled by
	 * different threads hence the use of two pipes in order not to race or
	 * corrupt the written data.
	 */
	if (stream->metadata_flag) {
		splice_pipe = ctx->consumer_splice_metadata_pipe;
	} else {
		splice_pipe = ctx->consumer_thread_pipe;
	}

	/* Write metadata stream id before payload */
	if (relayd) {
		int total_len = len;

		if (stream->metadata_flag) {
			/*
			 * Lock the control socket for the complete duration of the
			 * function since from this point on we will use the socket.
			 */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);

			ret = write_relayd_metadata_id(splice_pipe[1], stream, relayd,
					padding);
			if (ret < 0) {
				written = ret;
				/* Socket operation failed. We consider the relayd dead */
				if (ret == -EBADF) {
					WARN("Remote relayd disconnected. Stopping");
					relayd_hang_up = 1;
					goto write_error;
				}
				goto end;
			}

			total_len += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, total_len, padding, relayd);
		if (ret >= 0) {
			/* Use the returned socket. */
			outfd = ret;
		} else {
			/* Socket operation failed. We consider the relayd dead */
			if (ret == -EBADF) {
				WARN("Remote relayd disconnected. Stopping");
				relayd_hang_up = 1;
				goto write_error;
			}
			goto end;
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;
	}

	while (len > 0) {
		DBG("splice chan to pipe offset %lu of len %lu (fd : %d, pipe: %d)",
				(unsigned long)offset, len, fd, splice_pipe[1]);
		ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe, ret %zd", ret_splice);
		if (ret_splice < 0) {
			PERROR("Error in relay splice");
			if (written == 0) {
				written = ret_splice;
			}
			ret = errno;
			goto splice_error;
		}

		/* Handle stream on the relayd if the output is on the network */
		if (relayd) {
			if (stream->metadata_flag) {
				size_t metadata_payload_size =
					sizeof(struct lttcomm_relayd_metadata_payload);

				/* Update counter to fit the spliced data */
				ret_splice += metadata_payload_size;
				len += metadata_payload_size;
				/*
				 * We do this so the return value can match the len passed
				 * as argument to this function.
				 */
				written -= metadata_payload_size;
			}
		}

		/* Splice data out */
		ret_splice = splice(splice_pipe[0], NULL, outfd, NULL,
				ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("Consumer splice pipe to file, ret %zd", ret_splice);
		if (ret_splice < 0) {
			PERROR("Error in file splice");
			if (written == 0) {
				written = ret_splice;
			}
			/* Socket operation failed. We consider the relayd dead */
			if (errno == EBADF || errno == EPIPE) {
				WARN("Remote relayd disconnected. Stopping");
				relayd_hang_up = 1;
				goto write_error;
			}
			ret = errno;
			goto splice_error;
		} else if (ret_splice > len) {
			errno = EINVAL;
			PERROR("Wrote more data than requested %zd (len: %lu)",
					ret_splice, len);
			written += ret_splice;
			ret = errno;
			goto splice_error;
		}
		len -= ret_splice;

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret_splice;
		}
		written += ret_splice;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);

	ret = ret_splice;

	goto end;

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
		/* Skip splice error so the consumer does not fail */
		goto end;
	}

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EINVAL:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
/*
 * Take a snapshot for a specific fd
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_take_snapshot(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_take_snapshot(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_take_snapshot(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Get the produced position
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_produced_snapshot(stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_produced_snapshot(stream, pos);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Iterate over all streams of the hashtable and free them properly.
 *
 * WARNING: *MUST* be used with data streams only.
 */
static void destroy_data_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any
		 * error can't be handled.
		 */
		(void) consumer_del_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * Iterate over all streams of the hashtable and free them properly.
 *
 * XXX: Should not be only for metadata stream or else use another name.
 */
static void destroy_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any
		 * error can't be handled.
		 */
		(void) consumer_del_metadata_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
void lttng_consumer_close_metadata(void)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * The Kernel consumer has a different metadata scheme so we don't
		 * close anything because the stream will be closed by the session
		 * daemon.
		 */
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * Close all metadata streams. The metadata hash table is passed and
		 * this call iterates over it by closing all wakeup fd. This is safe
		 * because at this point we are sure that the metadata producer is
		 * either dead or blocked.
		 */
		lttng_ustconsumer_close_metadata(metadata_ht);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}
}
/*
 * Clean up a metadata stream and free its memory.
 */
void consumer_del_metadata_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *free_chan = NULL;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);
	/*
	 * This call should NEVER receive a regular stream. It must always be a
	 * metadata stream and this is crucial for data structure synchronization.
	 */
	assert(stream->metadata_flag);

	DBG3("Consumer delete metadata stream %d", stream->wait_fd);

	if (ht == NULL) {
		/* Means the stream was allocated but not successfully added */
		goto free_stream_rcu;
	}

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		if (stream->mmap_base != NULL) {
			ret = munmap(stream->mmap_base, stream->mmap_len);
			if (ret != 0) {
				PERROR("munmap metadata stream");
			}
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_stream(stream);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	rcu_read_lock();
	iter.iter.node = &stream->node.node;
	ret = lttng_ht_del(ht, &iter);
	assert(!ret);

	iter.iter.node = &stream->node_channel_id.node;
	ret = lttng_ht_del(consumer_data.stream_per_chan_id_ht, &iter);
	assert(!ret);

	iter.iter.node = &stream->node_session_id.node;
	ret = lttng_ht_del(consumer_data.stream_list_ht, &iter);
	assert(!ret);
	rcu_read_unlock();

	if (stream->out_fd >= 0) {
		ret = close(stream->out_fd);
		if (ret) {
			PERROR("close");
		}
	}

	/* Check and cleanup relayd */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		uatomic_dec(&relayd->refcount);
		assert(uatomic_read(&relayd->refcount) >= 0);

		/* Closing streams requires locking the control socket. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_send_close_stream(&relayd->control_sock,
				stream->relayd_stream_id, stream->next_net_seq_num - 1);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			DBG("Unable to close stream on the relayd. Continuing");
			/*
			 * Continue here. There is nothing we can do for the relayd.
			 * Chances are that the relayd has closed the socket so we just
			 * continue cleaning up.
			 */
		}

		/* Both conditions are met, we destroy the relayd. */
		if (uatomic_read(&relayd->refcount) == 0 &&
				uatomic_read(&relayd->destroy_flag)) {
			destroy_relayd(relayd);
		}
	}
	rcu_read_unlock();

	/* Atomically decrement channel refcount since other threads can use it. */
	uatomic_dec(&stream->chan->refcount);
	if (!uatomic_read(&stream->chan->refcount)
			&& !uatomic_read(&stream->chan->nb_init_stream_left)) {
		/* Go for channel deletion! */
		free_chan = stream->chan;
	}

end:
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (free_chan) {
		consumer_del_channel(free_chan);
	}

free_stream_rcu:
	call_rcu(&stream->node.head, free_stream_rcu);
}
/*
 * Action done with the metadata stream when adding it to the consumer
 * internal data structures to handle it.
 */
static int add_metadata_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	assert(stream);
	assert(ht);

	DBG3("Adding metadata stream %" PRIu64 " to hash table", stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->lock);

	/*
	 * From here, refcounts are updated so be _careful_ when returning an
	 * error after this point.
	 */

	rcu_read_lock();

	/*
	 * Lookup the stream just to make sure it does not exist in our internal
	 * state. This should NEVER happen.
	 */
	lttng_ht_lookup(ht, &stream->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	assert(!node);

	/* Find relayd and, if one is found, increment refcount. */
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		uatomic_inc(&relayd->refcount);
	}

	/* Update channel refcount once added without error(s). */
	uatomic_inc(&stream->chan->refcount);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any
	 * action in terms of destroying the associated channel, because the
	 * action that causes the count to become 0 also causes a stream to be
	 * added. The channel deletion will thus be triggered by the following
	 * removal of this stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_unique_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to
	 * steal the key since the HT does not use it and we allow adding
	 * redundant keys into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	rcu_read_unlock();

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&consumer_data.lock);
	return ret;
}
/*
 * Delete data streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_data_stream(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged data stream");

	rcu_read_lock();
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/* Delete it right now */
		consumer_del_stream(stream, data_ht);
	}
	rcu_read_unlock();
}
/*
 * Delete metadata streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_metadata_stream(
		struct lttng_poll_event *pollset)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged metadata stream");

	assert(pollset);

	rcu_read_lock();
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/*
		 * Remove from pollset so the metadata thread can continue without
		 * blocking on a deleted stream.
		 */
		lttng_poll_del(pollset, stream->wait_fd);

		/* Delete it right now */
		consumer_del_metadata_stream(stream, metadata_ht);
	}
	rcu_read_unlock();
}
/*
 * Thread that polls on the metadata file descriptors and writes them to disk
 * or to the network.
 */
void *consumer_thread_metadata_poll(void *data)
{
	int ret, i, pollfd;
	uint32_t revents, nb_fd;
	struct lttng_consumer_stream *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!metadata_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread metadata poll started");

	/* Size is set to 1 for the consumer_metadata pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events, ctx->consumer_metadata_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Metadata main loop started");

	while (1) {
		/* Only the metadata pipe is set */
		if (LTTNG_POLL_GETNB(&events) == 0 && consumer_quit == 1) {
			goto end;
		}

restart:
		DBG("Metadata poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events));
		ret = lttng_poll_wait(&events, -1);
		DBG("Metadata event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		/* From here, the event is a metadata wait fd */
		for (i = 0; i < nb_fd; i++) {
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Just don't waste time if no returned events for the fd */
			if (!revents) {
				continue;
			}

			if (pollfd == ctx->consumer_metadata_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Metadata thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the
					 * loop since there might be data to consume.
					 */
					lttng_poll_del(&events,
							ctx->consumer_metadata_pipe[0]);
					ret = close(ctx->consumer_metadata_pipe[0]);
					if (ret < 0) {
						PERROR("close metadata pipe");
					}
					continue;
				} else if (revents & LPOLLIN) {
					do {
						/* Get the stream pointer received */
						ret = read(pollfd, &stream, sizeof(stream));
					} while (ret < 0 && errno == EINTR);
					if (ret < 0 ||
							ret < sizeof(struct lttng_consumer_stream *)) {
						PERROR("read metadata stream");
						/*
						 * Let's continue here and hope we can still work
						 * without stopping the consumer. XXX: Should we?
						 */
						continue;
					}

					/* A NULL stream means that the state has changed. */
					if (stream == NULL) {
						/* Check for deleted streams. */
						validate_endpoint_status_metadata_stream(&events);
						goto restart;
					}

					DBG("Adding metadata stream %d to poll set",
							stream->wait_fd);

					ret = add_metadata_stream(stream, metadata_ht);
					if (ret) {
						ERR("Unable to add metadata stream");
						/* Stream was not setup properly. Continuing. */
						consumer_del_metadata_stream(stream, NULL);
						continue;
					}

					/* Add metadata stream to the global poll events list */
					lttng_poll_add(&events, stream->wait_fd,
							LPOLLIN | LPOLLPRI);
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(metadata_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			stream = caa_container_of(node, struct lttng_consumer_stream,
					node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Metadata fd %d is hup|err.", pollfd);
				if (!stream->hangup_flush_done
						&& (consumer_data.type == LTTNG_CONSUMER32_UST
							|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
					DBG("Attempting to flush and consume the UST buffers");
					lttng_ustconsumer_on_stream_hangup(stream);

					/* We just flushed the stream now read it. */
					do {
						len = ctx->on_buffer_ready(stream, ctx);
						/*
						 * We don't check the return value here since if we
						 * get a negative len, it means an error occurred
						 * thus we simply remove it from the poll set and
						 * free the stream.
						 */
					} while (len > 0);
				}

				lttng_poll_del(&events, stream->wait_fd);
				/*
				 * This call updates the channel states, closes file
				 * descriptors and securely frees the stream.
				 */
				consumer_del_metadata_stream(stream, metadata_ht);
			} else if (revents & (LPOLLIN | LPOLLPRI)) {
				/* Get the data out of the metadata file descriptor */
				DBG("Metadata available on fd %d", pollfd);
				assert(stream->wait_fd == pollfd);

				len = ctx->on_buffer_ready(stream, ctx);
				/* It's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean up stream from consumer and free it. */
					lttng_poll_del(&events, stream->wait_fd);
					consumer_del_metadata_stream(stream, metadata_ht);
				} else if (len > 0) {
					stream->data_read = 1;
				}
			}

			/* Release RCU lock for the stream looked up */
			rcu_read_unlock();
		}
	}

error:
end:
	DBG("Metadata poll thread exiting");

	lttng_poll_clean(&events);
end_poll:
	destroy_stream_ht(metadata_ht);
end_ht:
	rcu_unregister_thread();
	return NULL;
}
2233 * This thread polls the fds in the set to consume the data and write
2234 * it to tracefile if necessary.
2236 void *consumer_thread_data_poll(void *data
)
2238 int num_rdy
, num_hup
, high_prio
, ret
, i
;
2239 struct pollfd
*pollfd
= NULL
;
2240 /* local view of the streams */
2241 struct lttng_consumer_stream
**local_stream
= NULL
, *new_stream
= NULL
;
2242 /* local view of consumer_data.fds_count */
2244 struct lttng_consumer_local_data
*ctx
= data
;
2247 rcu_register_thread();
2249 data_ht
= lttng_ht_new(0, LTTNG_HT_TYPE_U64
);
2250 if (data_ht
== NULL
) {
2251 /* ENOMEM at this point. Better to bail out. */
2255 local_stream
= zmalloc(sizeof(struct lttng_consumer_stream
));
2262 * the fds set has been updated, we need to update our
2263 * local array as well
2265 pthread_mutex_lock(&consumer_data
.lock
);
2266 if (consumer_data
.need_update
) {
2271 local_stream
= NULL
;
2273 /* allocate for all fds + 1 for the consumer_data_pipe */
2274 pollfd
= zmalloc((consumer_data
.stream_count
+ 1) * sizeof(struct pollfd
));
2275 if (pollfd
== NULL
) {
2276 PERROR("pollfd malloc");
2277 pthread_mutex_unlock(&consumer_data
.lock
);
2281 /* allocate for all fds + 1 for the consumer_data_pipe */
2282 local_stream
= zmalloc((consumer_data
.stream_count
+ 1) *
2283 sizeof(struct lttng_consumer_stream
));
2284 if (local_stream
== NULL
) {
2285 PERROR("local_stream malloc");
2286 pthread_mutex_unlock(&consumer_data
.lock
);
2289 ret
= update_poll_array(ctx
, &pollfd
, local_stream
,
2292 ERR("Error in allocating pollfd or local_outfds");
2293 lttng_consumer_send_error(ctx
, LTTCOMM_CONSUMERD_POLL_ERROR
);
2294 pthread_mutex_unlock(&consumer_data
.lock
);
2298 consumer_data
.need_update
= 0;
2300 pthread_mutex_unlock(&consumer_data
.lock
);
2302 /* No FDs and consumer_quit, consumer_cleanup the thread */
2303 if (nb_fd
== 0 && consumer_quit
== 1) {
2306 /* poll on the array of fds */
2308 DBG("polling on %d fd", nb_fd
+ 1);
2309 num_rdy
= poll(pollfd
, nb_fd
+ 1, -1);
2310 DBG("poll num_rdy : %d", num_rdy
);
2311 if (num_rdy
== -1) {
2313 * Restart interrupted system call.
2315 if (errno
== EINTR
) {
2318 PERROR("Poll error");
2319 lttng_consumer_send_error(ctx
, LTTCOMM_CONSUMERD_POLL_ERROR
);
2321 } else if (num_rdy
== 0) {
2322 DBG("Polling thread timed out");
		/*
		 * If the consumer_data_pipe triggered poll, go directly to the
		 * beginning of the loop to update the array. We want to
		 * prioritize array updates over low-priority reads.
		 */
		if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) {
			ssize_t pipe_readlen;

			DBG("consumer_data_pipe wake up");
			/* Consume one stream pointer worth of pipe data. */
			do {
				pipe_readlen = read(ctx->consumer_data_pipe[0], &new_stream,
						sizeof(new_stream));
			} while (pipe_readlen == -1 && errno == EINTR);
			if (pipe_readlen < 0) {
				PERROR("read consumer data pipe");
				/* Continue so we can at least handle the current stream(s). */
			}

			/*
			 * If the stream is NULL, just ignore it. It's also possible that
			 * the sessiond poll thread changed the consumer_quit state and is
			 * waking us up to test it.
			 */
			if (new_stream == NULL) {
				validate_endpoint_status_data_stream();
				continue;
			}

			ret = add_stream(new_stream, data_ht);
			if (ret) {
				ERR("Consumer add stream %" PRIu64 " failed. Continuing",
						new_stream->key);
				/*
				 * At this point, if the add_stream fails, it is not in the
				 * hash table thus passing the NULL value here.
				 */
				consumer_del_stream(new_stream, NULL);
			}

			/* Continue to update the local streams and handle prio ones */
			continue;
		}
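		/*
		 * Note on the wakeup protocol: the consumer_data_pipe carries
		 * whole stream pointers, not single bytes. A pointer-sized
		 * write is well under PIPE_BUF and therefore atomic per POSIX,
		 * so the read above always returns a complete pointer or
		 * nothing. A NULL pointer is a pure wakeup, used by the
		 * sessiond poll thread (see notify_thread_pipe()) to make this
		 * thread re-test consumer_quit.
		 */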
		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			if (local_stream[i] == NULL) {
				continue;
			}
			if (pollfd[i].revents & POLLPRI) {
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* It's OK to have an unavailable sub-buffer. */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/*
		 * If we read a high prio channel in this loop, try again
		 * for more high prio data.
		 */
		if (high_prio) {
			continue;
		}
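		/*
		 * Return contract of on_buffer_ready(), as handled in the high
		 * priority pass above and the low priority pass below: a
		 * positive value means a sub-buffer was consumed, -EAGAIN and
		 * -ENODATA mean nothing was available (harmless), and any
		 * other negative value is a fatal stream error that gets the
		 * stream deleted.
		 */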
		/* Take care of low priority channels. */
		for (i = 0; i < nb_fd; i++) {
			if (local_stream[i] == NULL) {
				continue;
			}
			if ((pollfd[i].revents & POLLIN) ||
					local_stream[i]->hangup_flush_done) {
				DBG("Normal read on fd %d", pollfd[i].fd);
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* It's OK to have an unavailable sub-buffer. */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}
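		/*
		 * The data_read flags set by the two read passes above feed
		 * the hangup handling below: a stream whose fd reports
		 * HUP/ERR/NVAL is only deleted if it produced no data during
		 * this iteration, giving buffered data one more pass to drain
		 * before the stream is torn down.
		 */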
		/* Handle hangup and errors */
		for (i = 0; i < nb_fd; i++) {
			if (local_stream[i] == NULL) {
				continue;
			}
			if (!local_stream[i]->hangup_flush_done
					&& (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
					&& (consumer_data.type == LTTNG_CONSUMER32_UST
						|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
				DBG("fd %d is hup|err|nval. Attempting flush and read.",
						pollfd[i].fd);
				lttng_ustconsumer_on_stream_hangup(local_stream[i]);
				/* Attempt read again, for the data we just flushed. */
				local_stream[i]->data_read = 1;
			}

			/*
			 * If the poll flag is HUP/ERR/NVAL and we have
			 * read no data in this pass, we can remove the
			 * stream from its hash table.
			 */
			if ((pollfd[i].revents & POLLHUP)) {
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLERR) {
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLNVAL) {
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			}
			if (local_stream[i] != NULL) {
				local_stream[i]->data_read = 0;
			}
		}
	}
end:
	DBG("polling thread exiting");

	free(pollfd);
	free(local_stream);

	/*
	 * Close the write side of the pipe so epoll_wait() in
	 * consumer_thread_metadata_poll can catch it. The thread is monitoring the
	 * read side of the pipe. If we close them both, epoll_wait strangely does
	 * not return and could create an endless wait period if the pipe is the
	 * only tracked fd in the poll set. The thread will take care of closing
	 * the read side.
	 */
	ret = close(ctx->consumer_metadata_pipe[1]);
	if (ret < 0) {
		PERROR("close data pipe");
	}

	destroy_data_stream_ht(data_ht);

	rcu_unregister_thread();
	return NULL;
}
/*
 * Close wake-up end of each stream belonging to the channel. This will
 * allow the poll() on the stream read-side to detect when the
 * write-side (application) finally closes them.
 */
void consumer_close_channel_streams(struct lttng_consumer_channel *channel)
{
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;
	ht = consumer_data.stream_per_chan_id_ht;

	rcu_read_lock();
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key,
			&iter.iter, stream, node_channel_id.node) {
		switch (consumer_data.type) {
		case LTTNG_CONSUMER_KERNEL:
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			/*
			 * Note: a mutex is taken internally within
			 * liblttng-ust-ctl to protect timer wakeup_fd
			 * use from concurrent close.
			 */
			lttng_ustconsumer_close_stream_wakeup(stream);
			break;
		default:
			ERR("Unknown consumer_data type");
			assert(0);
		}
	}
	rcu_read_unlock();
}
static void destroy_channel_ht(struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, channel, wait_fd_node.node) {
		ret = lttng_ht_del(ht, &iter);
		assert(!ret);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * This thread polls the channel fds to detect when they are being
 * closed. It closes all related streams if the channel is detected as
 * closed. It is currently only used as a shim layer for UST because the
 * consumerd needs to keep the per-stream wakeup end of pipes open for
 * periodical flush.
 */
void *consumer_thread_channel_poll(void *data)
{
	int ret, i, pollfd;
	uint32_t revents, nb_fd;
	struct lttng_consumer_channel *chan = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	struct lttng_ht *channel_ht;
	rcu_register_thread();

	channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!channel_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread channel poll started");

	/* The poll set initially holds only the consumer_channel pipe. */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events, ctx->consumer_channel_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto end;
	}
	DBG("Channel main loop started");

	while (1) {
		/* Only the channel pipe is set */
		if (LTTNG_POLL_GETNB(&events) == 0 && consumer_quit == 1) {
			break;
		}

restart:
		DBG("Channel poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events));
		ret = lttng_poll_wait(&events, -1);
		DBG("Channel event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			goto end;
		}

		nb_fd = ret;
		/* From here, the event is a channel wait fd */
		for (i = 0; i < nb_fd; i++) {
			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			/* Just don't waste time if no returned events for the fd */
			if (!revents) {
				continue;
			}

			if (pollfd == ctx->consumer_channel_pipe[0]) {
				if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Channel thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
					continue;
				} else if (revents & LPOLLIN) {
					enum consumer_channel_action action;

					ret = read_channel_pipe(ctx, &chan, &action);
					if (ret <= 0) {
						ERR("Error reading channel pipe");
						continue;
					}

					switch (action) {
					case CONSUMER_CHANNEL_ADD:
						DBG("Adding channel %d to poll set",
								chan->wait_fd);

						lttng_ht_node_init_u64(&chan->wait_fd_node,
								chan->wait_fd);
						lttng_ht_add_unique_u64(channel_ht,
								&chan->wait_fd_node);
						/* Add channel to the global poll events list */
						lttng_poll_add(&events, chan->wait_fd,
								LPOLLIN | LPOLLPRI);
						break;
					case CONSUMER_CHANNEL_QUIT:
						/*
						 * Remove the pipe from the poll set and continue the loop
						 * since there might be data to consume.
						 */
						lttng_poll_del(&events, ctx->consumer_channel_pipe[0]);
						continue;
					default:
						ERR("Unknown action");
						break;
					}
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(channel_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			chan = caa_container_of(node, struct lttng_consumer_channel,
					wait_fd_node);
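			/*
			 * The lookup above works because channels are indexed
			 * in channel_ht by their wait_fd (see the
			 * CONSUMER_CHANNEL_ADD case above): the fd returned by
			 * the poll set is cast to the u64 key and resolved
			 * back to the owning channel.
			 */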
			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Channel fd %d is hup|err.", pollfd);

				lttng_poll_del(&events, chan->wait_fd);
				ret = lttng_ht_del(channel_ht, &iter);
				assert(ret == 0);
				consumer_close_channel_streams(chan);
			}

			/* Release RCU lock for the channel looked up */
			rcu_read_unlock();
		}
	}

end:
	lttng_poll_clean(&events);
end_poll:
	destroy_channel_ht(channel_ht);
end_ht:
	DBG("Channel poll thread exiting");
	rcu_unregister_thread();
	return NULL;
}
/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 */
void *consumer_thread_sessiond_poll(void *data)
{
	int sock = -1, client_socket, ret;
	/*
	 * Structure to poll for incoming data on the communication socket,
	 * which avoids blocking on the socket.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;
	rcu_register_thread();

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	ret = fcntl(client_socket, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl O_NONBLOCK");
		goto end;
	}
	/* Prepare the FDs to poll: the client socket and the should_quit pipe. */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;
	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock < 0) {
		WARN("On accept");
		goto end;
	}

	ret = fcntl(sock, F_SETFL, O_NONBLOCK);
	if (ret < 0) {
		PERROR("fcntl O_NONBLOCK");
		goto end;
	}

	/* This socket is not useful anymore. */
	ret = close(client_socket);
	if (ret < 0) {
		PERROR("close client_socket");
	}
	client_socket = -1;
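	/*
	 * The consumerd serves a single session daemon: once the command
	 * connection is accepted, the listening socket is closed and marked
	 * -1 so the cleanup path at the end does not close it twice. All
	 * further commands arrive on the accepted socket.
	 */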
	/* Update the polling structure to poll on the established socket. */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	while (1) {
		if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret == -ENOENT) {
			DBG("Received STOP command");
			goto end;
		}
		if (ret <= 0) {
			/*
			 * This could simply be a session daemon quitting. Don't output
			 * ERR() here.
			 */
			DBG("Communication interrupted on command socket");
			goto end;
		}
		if (consumer_quit) {
			DBG("consumer_thread_receive_fds received quit from signal");
			goto end;
		}
		DBG("received command on sock");
	}

end:
	DBG("Consumer thread sessiond poll exiting");
	/*
	 * Close metadata streams since the producer is the session daemon
	 * which is dead.
	 *
	 * NOTE: for now, this only applies to the UST tracer.
	 */
	lttng_consumer_close_metadata();

	/*
	 * Set the quit flag so that, when all fds have hung up, the polling
	 * threads can exit cleanly.
	 */
	consumer_quit = 1;

	/*
	 * Notify the data poll thread to poll back again and test the
	 * consumer_quit state that we just set so as to quit gracefully.
	 */
	notify_thread_pipe(ctx->consumer_data_pipe[1]);

	notify_channel_pipe(ctx, NULL, CONSUMER_CHANNEL_QUIT);

	/* Clean up possibly open sockets. */
	if (sock >= 0) {
		ret = close(sock);
		if (ret < 0) {
			PERROR("close sock sessiond poll");
		}
	}
	if (client_socket >= 0) {
		ret = close(client_socket);
		if (ret < 0) {
			PERROR("close client_socket sessiond poll");
		}
	}

	rcu_unregister_thread();
	return NULL;
}
ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	pthread_mutex_lock(&stream->lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		ret = lttng_kconsumer_read_subbuffer(stream, ctx);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		ret = lttng_ustconsumer_read_subbuffer(stream, ctx);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		ret = -ENOSYS;
		break;
	}

	pthread_mutex_unlock(&stream->lock);
	return ret;
}
int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_on_recv_stream(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_on_recv_stream(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Allocate and set consumer data hash tables.
 */
void lttng_consumer_init(void)
{
	consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	consumer_data.stream_list_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	consumer_data.stream_per_chan_id_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
}
/*
 * Process the ADD_RELAYD command received by a consumer.
 *
 * This will create a relayd socket pair and add it to the relayd hash table.
 * The caller MUST acquire a RCU read side lock before calling it.
 */
int consumer_add_relayd_socket(int net_seq_idx, int sock_type,
		struct lttng_consumer_local_data *ctx, int sock,
		struct pollfd *consumer_sockpoll, struct lttcomm_sock *relayd_sock,
		unsigned int sessiond_id)
{
	int fd = -1, ret = -1, relayd_created = 0;
	enum lttng_error_code ret_code = LTTNG_OK;
	struct consumer_relayd_sock_pair *relayd;

	DBG("Consumer adding relayd socket (idx: %d)", net_seq_idx);

	/* First send a status message before receiving the fds. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		goto error;
	}

	/* Get relayd reference if exists. */
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd == NULL) {
		/* Not found. Allocate one. */
		relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
		if (relayd == NULL) {
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_OUTFD_ERROR);
			ret = -1;
			goto error;
		}
		relayd->sessiond_session_id = (uint64_t) sessiond_id;
		relayd_created = 1;
	}
	/* Poll on consumer socket. */
	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		ret = -1;
		goto error;
	}

	/* Get relayd socket from session daemon */
	ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
	if (ret != sizeof(fd)) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
		ret = -1;
		fd = -1;	/* Just in case it gets set with an invalid value. */
		goto error;
	}

	/* We have the fds without error. Send status back. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		goto error;
	}
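	/*
	 * The exchange above is a three-step handshake: a status message
	 * tells the sessiond we are ready, the relayd socket fd is passed
	 * over the UNIX socket as ancillary data (SCM_RIGHTS, underneath
	 * lttcomm_recv_fds_unix_sock()), and a second status message
	 * acknowledges reception before the fd is wired into the relayd
	 * object below.
	 */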
	/* Copy socket information and received FD */
	switch (sock_type) {
	case LTTNG_STREAM_CONTROL:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->control_sock, relayd_sock);
		ret = lttcomm_create_sock(&relayd->control_sock);
		/* Immediately try to close the created socket if valid. */
		if (relayd->control_sock.fd >= 0) {
			if (close(relayd->control_sock.fd)) {
				PERROR("close relayd control socket");
			}
		}
		/* Handle create_sock error. */
		if (ret < 0) {
			goto error;
		}

		/* Assign new file descriptor */
		relayd->control_sock.fd = fd;
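		/*
		 * Rationale for the create/close/assign sequence above:
		 * lttcomm_create_sock() initializes the lttcomm_sock
		 * structure and opens a local fd as a side effect; that fd is
		 * discarded immediately and replaced with the
		 * already-connected fd received from the session daemon.
		 */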
		/*
		 * Create a session on the relayd and store the returned id. Lock the
		 * control socket mutex if the relayd was NOT created before.
		 */
		if (!relayd_created) {
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		}
		ret = relayd_create_session(&relayd->control_sock,
				&relayd->relayd_session_id);
		if (!relayd_created) {
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		}
		if (ret < 0) {
			/*
			 * Close all sockets of a relayd object. It will be freed if it was
			 * created at the error code path or else it will be garbage
			 * collected.
			 */
			(void) relayd_close(&relayd->control_sock);
			(void) relayd_close(&relayd->data_sock);
			goto error;
		}

		break;
	case LTTNG_STREAM_DATA:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->data_sock, relayd_sock);
		ret = lttcomm_create_sock(&relayd->data_sock);
		/* Immediately try to close the created socket if valid. */
		if (relayd->data_sock.fd >= 0) {
			if (close(relayd->data_sock.fd)) {
				PERROR("close relayd data socket");
			}
		}
		/* Handle create_sock error. */
		if (ret < 0) {
			goto error;
		}

		/* Assign new file descriptor */
		relayd->data_sock.fd = fd;
		break;
	default:
		ERR("Unknown relayd socket type (%d)", sock_type);
		ret = -1;
		goto error;
	}

	DBG("Consumer %s socket created successfully with net idx %" PRIu64 " (fd: %d)",
			sock_type == LTTNG_STREAM_CONTROL ? "control" : "data",
			relayd->net_seq_idx, fd);
	/*
	 * Add relayd socket pair to consumer data hashtable. If the object
	 * already exists or on error, the function gracefully returns.
	 */
	add_relayd(relayd);

	/* All good! */
	return 0;

error:
	/* Close received socket if valid. */
	if (fd >= 0) {
		if (close(fd)) {
			PERROR("close received socket");
		}
	}

	if (relayd_created) {
		free(relayd);
	}

	return ret;
}
/*
 * Try to lock the stream mutex.
 *
 * On success, 1 is returned else 0 indicating that the mutex is NOT locked.
 */
static int stream_try_lock(struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);

	/*
	 * Try to lock the stream mutex. On failure, we know that the stream is
	 * being used elsewhere hence there is data still being extracted.
	 */
	ret = pthread_mutex_trylock(&stream->lock);
	if (ret) {
		/* For both EBUSY and EINVAL errors, the mutex is NOT locked. */
		ret = 0;
		goto end;
	}

	ret = 1;

end:
	return ret;
}
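/*
 * The trylock above matters for consumer_data_pending() below: blocking on a
 * stream lock would stall the sessiond command path, and a lock that is
 * already held is itself an answer, since it means another thread is actively
 * extracting data from that stream.
 */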
/*
 * Search for a relayd associated to the session id and return the reference.
 *
 * A rcu read side lock MUST be acquired before calling this function and held
 * until the relayd object is no longer necessary.
 */
static struct consumer_relayd_sock_pair *find_relayd_by_session_id(uint64_t id)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Iterate over all relayd since they are indexed by net_seq_idx. */
	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		/*
		 * Match on the sessiond session id, which is unique here; the
		 * relayd session id might not be when multiple relayd are involved.
		 */
		if (relayd->sessiond_session_id == id) {
			/* Found the relayd. There can be only one per id. */
			goto found;
		}
	}

	return NULL;

found:
	return relayd;
}
/*
 * Check if, for a given session id, there is still data needed to be
 * extracted.
 *
 * Return 1 if data is pending or else 0 meaning ready to be read.
 */
int consumer_data_pending(uint64_t id)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int (*data_pending)(struct lttng_consumer_stream *);

	DBG("Consumer data pending command on session id %" PRIu64, id);
	rcu_read_lock();
	pthread_mutex_lock(&consumer_data.lock);

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		data_pending = lttng_kconsumer_data_pending;
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		data_pending = lttng_ustconsumer_data_pending;
		break;
	default:
		ERR("Unknown consumer data type");
		assert(0);
	}
	/* Ease our life a bit */
	ht = consumer_data.stream_list_ht;

	relayd = find_relayd_by_session_id(id);
	if (relayd) {
		/* Send init command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_begin_data_pending(&relayd->control_sock,
				relayd->relayd_session_id);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			/* Communication error with the relayd; assume no data is pending. */
			goto data_not_pending;
		}
	}
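	/*
	 * The begin/end data pending commands bracket the per-stream checks
	 * below, delimiting one coherent data pending query for this session
	 * on the relayd side; the matching relayd_end_data_pending() call at
	 * the end of this function also reports whether data is still in
	 * flight on the network.
	 */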
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&id, lttng_ht_seed),
			ht->match_fct, &id,
			&iter.iter, stream, node_session_id.node) {
		/* If this call fails, the stream is being used hence data pending. */
		ret = stream_try_lock(stream);
		if (!ret) {
			goto data_pending;
		}

		/*
		 * A removed node from the hash table indicates that the stream has
		 * been deleted thus having a guarantee that the buffers are closed
		 * on the consumer side. However, data can still be transmitted
		 * over the network so don't skip the relayd check.
		 */
		ret = cds_lfht_is_node_deleted(&stream->node.node);
		if (!ret) {
			/* Check the stream if there is data in the buffers. */
			ret = data_pending(stream);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}

		/* Relayd check */
		if (relayd) {
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			if (stream->metadata_flag) {
				ret = relayd_quiescent_control(&relayd->control_sock,
						stream->relayd_stream_id);
			} else {
				ret = relayd_data_pending(&relayd->control_sock,
						stream->relayd_stream_id,
						stream->next_net_seq_num - 1);
			}
			pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
			if (ret == 1) {
				pthread_mutex_unlock(&stream->lock);
				goto data_pending;
			}
		}
		pthread_mutex_unlock(&stream->lock);
	}
	if (relayd) {
		unsigned int is_data_inflight = 0;

		/* Send end command for data pending. */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_end_data_pending(&relayd->control_sock,
				relayd->relayd_session_id, &is_data_inflight);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			goto data_not_pending;
		}
		if (is_data_inflight) {
			goto data_pending;
		}
	}

	/*
	 * Finding _no_ node in the hash table and no inflight data means that the
	 * stream(s) have been removed thus data is guaranteed to be available for
	 * analysis from the trace files.
	 */

data_not_pending:
	/* Data is available to be read by a viewer. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 0;

data_pending:
	/* Data is still being extracted from buffers. */
	pthread_mutex_unlock(&consumer_data.lock);
	rcu_read_unlock();
	return 1;
}
/*
 * Send a ret code status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_msg(int sock, int ret_code)
{
	struct lttcomm_consumer_status_msg msg;

	msg.ret_code = ret_code;

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
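/*
 * The status structure is sent raw over the UNIX socket, so both daemons must
 * agree on the layout of struct lttcomm_consumer_status_msg; it is declared
 * in the shared sessiond-comm headers used by both the sessiond and the
 * consumerd.
 */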
/*
 * Send a channel status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_channel(int sock,
		struct lttng_consumer_channel *channel)
{
	struct lttcomm_consumer_status_channel msg;

	assert(sock >= 0);

	if (!channel) {
		msg.ret_code = -LTTNG_ERR_UST_CHAN_FAIL;
	} else {
		msg.ret_code = LTTNG_OK;
		msg.key = channel->key;
		msg.stream_count = channel->streams.count;
	}

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}