/*
 * Copyright (C) 2011 - Julien Desfossez <julien.desfossez@polymtl.ca>
 *                      Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *               2012 - David Goulet <dgoulet@efficios.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/socket.h>
#include <sys/types.h>

#include <bin/lttng-consumerd/health-consumerd.h>
#include <common/common.h>
#include <common/utils.h>
#include <common/compat/poll.h>
#include <common/index/index.h>
#include <common/kernel-ctl/kernel-ctl.h>
#include <common/sessiond-comm/relayd.h>
#include <common/sessiond-comm/sessiond-comm.h>
#include <common/kernel-consumer/kernel-consumer.h>
#include <common/relayd/relayd.h>
#include <common/ust-consumer/ust-consumer.h>
#include <common/consumer-timer.h>

#include "consumer-stream.h"
#include "consumer-testpoint.h"
struct lttng_consumer_global_data consumer_data = {
	.type = LTTNG_CONSUMER_UNKNOWN,
};

enum consumer_channel_action {
	CONSUMER_CHANNEL_ADD,
	CONSUMER_CHANNEL_DEL,
	CONSUMER_CHANNEL_QUIT,
};

struct consumer_channel_msg {
	enum consumer_channel_action action;
	struct lttng_consumer_channel *chan;	/* add */
	uint64_t key;				/* del */
};

/*
 * Flag to inform the polling thread to quit when all fds hang up. Updated by
 * the consumer_thread_receive_fds when it notices that all fds have hung up.
 * Also updated by the signal handler (consumer_should_exit()). Read by the
 * polling threads.
 */
volatile int consumer_quit;

/*
 * Global hash tables containing respectively the metadata and the data
 * streams. A stream element in these hash tables should only be updated by
 * the metadata poll thread for the metadata and by the data poll thread for
 * the data.
 */
static struct lttng_ht *metadata_ht;
static struct lttng_ht *data_ht;
/*
 * Notify a thread lttng pipe to poll back again. This usually means that some
 * global state has changed so we just send back the thread in a poll wait
 * call.
 */
static void notify_thread_lttng_pipe(struct lttng_pipe *pipe)
{
	struct lttng_consumer_stream *null_stream = NULL;

	assert(pipe);

	(void) lttng_pipe_write(pipe, &null_stream, sizeof(null_stream));
}
static void notify_health_quit_pipe(int *pipe)
{
	ssize_t ret;

	ret = lttng_write(pipe[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer health quit");
	}
}
static void notify_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel *chan,
		uint64_t key,
		enum consumer_channel_action action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	memset(&msg, 0, sizeof(msg));

	msg.action = action;
	msg.chan = chan;
	msg.key = key;
	ret = lttng_write(ctx->consumer_channel_pipe[1], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		PERROR("notify_channel_pipe write error");
	}
}
void notify_thread_del_channel(struct lttng_consumer_local_data *ctx,
		uint64_t key)
{
	notify_channel_pipe(ctx, NULL, key, CONSUMER_CHANNEL_DEL);
}
static int read_channel_pipe(struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_channel **chan,
		uint64_t *key,
		enum consumer_channel_action *action)
{
	struct consumer_channel_msg msg;
	ssize_t ret;

	ret = lttng_read(ctx->consumer_channel_pipe[0], &msg, sizeof(msg));
	if (ret < sizeof(msg)) {
		ret = -1;
		goto error;
	}
	*action = msg.action;
	*chan = msg.chan;
	*key = msg.key;
error:
	return (int) ret;
}
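/*
 * Illustration (a standalone sketch, not part of the original consumer
 * code): the channel pipe protocol above relies on POSIX guaranteeing that
 * a write() of at most PIPE_BUF bytes to a pipe is atomic, so a fixed-size
 * struct message is never interleaved with another writer's message. All
 * names below are hypothetical.
 */
#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

struct demo_channel_msg {
	int action;
	uint64_t key;
};

static void demo_channel_pipe_roundtrip(void)
{
	int fds[2];
	struct demo_channel_msg in, out;

	/* Atomicity precondition: the whole message fits in PIPE_BUF. */
	assert(sizeof(in) <= PIPE_BUF);

	if (pipe(fds) < 0) {
		return;
	}

	memset(&in, 0, sizeof(in));
	in.action = 1;
	in.key = 42;

	/* The writer emits the whole struct in one atomic write... */
	if (write(fds[1], &in, sizeof(in)) == (ssize_t) sizeof(in)) {
		/* ...so the reader sees the whole message or nothing. */
		(void) read(fds[0], &out, sizeof(out));
	}

	close(fds[0]);
	close(fds[1]);
}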
/*
 * Find a stream. The consumer_data.lock must be locked during this
 * call.
 */
static struct lttng_consumer_stream *find_stream(uint64_t key,
		struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_stream *stream = NULL;

	assert(ht);

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	rcu_read_lock();

	lttng_ht_lookup(ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		stream = caa_container_of(node, struct lttng_consumer_stream, node);
	}

	rcu_read_unlock();

	return stream;
}
static void steal_stream_key(uint64_t key, struct lttng_ht *ht)
{
	struct lttng_consumer_stream *stream;

	rcu_read_lock();
	stream = find_stream(key, ht);
	if (stream) {
		stream->key = (uint64_t) -1ULL;
		/*
		 * We don't want the lookup to match, but we still need
		 * to iterate on this stream when iterating over the hash table. Just
		 * change the node key.
		 */
		stream->node.key = (uint64_t) -1ULL;
	}
	rcu_read_unlock();
}
/*
 * Return a channel object for the given key.
 *
 * RCU read side lock MUST be acquired before calling this function and
 * protects the channel ptr.
 */
struct lttng_consumer_channel *consumer_find_channel(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_consumer_channel *channel = NULL;

	/* -1ULL keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		return NULL;
	}

	lttng_ht_lookup(consumer_data.channel_ht, &key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		channel = caa_container_of(node, struct lttng_consumer_channel, node);
	}

	return channel;
}
static void free_channel_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct lttng_consumer_channel *channel =
		caa_container_of(node, struct lttng_consumer_channel, node);

	free(channel);
}
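/*
 * Illustration (standalone sketch, not original code): caa_container_of()
 * used above recovers the enclosing object from a pointer to one of its
 * embedded members, which is how an rcu_head or hash table node maps back
 * to its channel. The plain-C equivalent is offsetof() arithmetic; the
 * names below are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>

#define demo_container_of(ptr, type, member) \
	((type *) ((char *) (ptr) - offsetof(type, member)))

struct demo_node { int dummy; };
struct demo_channel {
	uint64_t key;
	struct demo_node node;	/* embedded, like lttng_ht_node_u64 */
};

static struct demo_channel *demo_from_node(struct demo_node *node)
{
	/* Walk back from the member to the start of the demo_channel. */
	return demo_container_of(node, struct demo_channel, node);
}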
/*
 * RCU protected relayd socket pair free.
 */
static void free_relayd_rcu(struct rcu_head *head)
{
	struct lttng_ht_node_u64 *node =
		caa_container_of(head, struct lttng_ht_node_u64, head);
	struct consumer_relayd_sock_pair *relayd =
		caa_container_of(node, struct consumer_relayd_sock_pair, node);

	/*
	 * Close all sockets. This is done in the call_rcu callback since we
	 * don't want the socket fds to be reassigned, thus potentially creating
	 * bad state of the relayd object.
	 *
	 * We do not have to lock the control socket mutex here since at this stage
	 * there is no one referencing this relayd object.
	 */
	(void) relayd_close(&relayd->control_sock);
	(void) relayd_close(&relayd->data_sock);

	free(relayd);
}
/*
 * Destroy and free relayd socket pair object.
 */
void consumer_destroy_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret;
	struct lttng_ht_iter iter;

	if (relayd == NULL) {
		return;
	}

	DBG("Consumer destroy and close relayd socket pair");

	iter.iter.node = &relayd->node.node;
	ret = lttng_ht_del(consumer_data.relayd_ht, &iter);
	if (ret != 0) {
		/* We assume the relayd is being or is destroyed */
		return;
	}

	/* RCU free() call */
	call_rcu(&relayd->node.head, free_relayd_rcu);
}
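/*
 * Illustration (standalone sketch, not original code): the deferred free
 * pattern above, assuming liburcu's classic <urcu.h> flavour. Readers
 * traverse under rcu_read_lock(); the updater first unpublishes the object
 * from shared structures, then hands the embedded rcu_head to call_rcu()
 * so the free callback only runs once no reader can still hold a
 * reference. Names are hypothetical.
 */
#include <stdlib.h>
#include <urcu.h>

struct demo_obj {
	int payload;
	struct rcu_head head;	/* embedded, like relayd->node.head */
};

static void demo_free_rcu(struct rcu_head *head)
{
	/* Runs after a grace period: no reader can see the object anymore. */
	free(caa_container_of(head, struct demo_obj, head));
}

static void demo_retire(struct demo_obj *obj)
{
	/* Caller must already have removed obj from all shared structures. */
	call_rcu(&obj->head, demo_free_rcu);
}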
/*
 * Remove a channel from the global list protected by a mutex. This function is
 * also responsible for freeing its data structures.
 */
void consumer_del_channel(struct lttng_consumer_channel *channel)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream, *stmp;

	DBG("Consumer delete channel key %" PRIu64, channel->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);

	/* Delete streams that might have been left in the stream list. */
	cds_list_for_each_entry_safe(stream, stmp, &channel->streams.head,
			send_node) {
		cds_list_del(&stream->send_node);
		/*
		 * Once a stream is added to this list, the buffers were created so
		 * we have a guarantee that this call will succeed.
		 */
		consumer_stream_destroy(stream, NULL);
	}

	if (channel->live_timer_enabled == 1) {
		consumer_timer_live_stop(channel);
	}

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		lttng_ustconsumer_del_channel(channel);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		goto end;
	}

	rcu_read_lock();
	iter.iter.node = &channel->node.node;
	ret = lttng_ht_del(consumer_data.channel_ht, &iter);
	assert(!ret);
	rcu_read_unlock();

	call_rcu(&channel->node.head, free_channel_rcu);
end:
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);
}
/*
 * Iterate over the relayd hash table and destroy each element. Finally,
 * destroy the whole hash table.
 */
static void cleanup_relayd_ht(void)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		consumer_destroy_relayd(relayd);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.relayd_ht);
}
/*
 * Update the end point status of all streams having the given network sequence
 * index (relayd index).
 *
 * It's atomically set without having the stream mutex locked which is fine
 * because we handle the write/read race with a pipe wakeup for each thread.
 */
static void update_endpoint_status_by_netidx(uint64_t net_seq_idx,
		enum consumer_endpoint_status status)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer set delete flag on stream by idx %" PRIu64, net_seq_idx);

	rcu_read_lock();

	/* Let's begin with metadata */
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to metadata stream %d", stream->wait_fd);
		}
	}

	/* Follow up by the data streams */
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		if (stream->net_seq_idx == net_seq_idx) {
			uatomic_set(&stream->endpoint_status, status);
			DBG("Delete flag set to data stream %d", stream->wait_fd);
		}
	}
	rcu_read_unlock();
}
/*
 * Cleanup a relayd object by flagging every associated stream for deletion,
 * destroying the object meaning removing it from the relayd hash table,
 * closing the sockets and freeing the memory in a RCU call.
 *
 * If a local data context is available, notify the threads that the streams'
 * state have changed.
 */
static void cleanup_relayd(struct consumer_relayd_sock_pair *relayd,
		struct lttng_consumer_local_data *ctx)
{
	uint64_t netidx;

	assert(relayd);

	DBG("Cleaning up relayd sockets");

	/* Save the net sequence index before destroying the object */
	netidx = relayd->net_seq_idx;

	/*
	 * Delete the relayd from the relayd hash table, close the sockets and free
	 * the object in a RCU call.
	 */
	consumer_destroy_relayd(relayd);

	/* Set inactive endpoint to all streams */
	update_endpoint_status_by_netidx(netidx, CONSUMER_ENDPOINT_INACTIVE);

	/*
	 * With a local data context, notify the threads that the streams' state
	 * have changed. The write() action on the pipe acts as an "implicit"
	 * memory barrier ordering the updates of the end point status from the
	 * read of this status which happens AFTER receiving this notify.
	 */
	if (ctx) {
		notify_thread_lttng_pipe(ctx->consumer_data_pipe);
		notify_thread_lttng_pipe(ctx->consumer_metadata_pipe);
	}
}
/*
 * Flag a relayd socket pair for destruction. Destroy it if the refcount
 * reaches zero.
 *
 * RCU read side lock MUST be acquired before calling this function.
 */
void consumer_flag_relayd_for_destroy(struct consumer_relayd_sock_pair *relayd)
{
	assert(relayd);

	/* Set destroy flag for this object */
	uatomic_set(&relayd->destroy_flag, 1);

	/* Destroy the relayd if refcount is 0 */
	if (uatomic_read(&relayd->refcount) == 0) {
		consumer_destroy_relayd(relayd);
	}
}
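/*
 * Illustration (standalone sketch, not original code): the destroy_flag /
 * refcount shape above, expressed with C11 atomics. It mirrors the
 * flag-then-check-refcount logic used by consumer_flag_relayd_for_destroy()
 * and by reference droppers. Hypothetical names throughout.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct demo_refobj {
	atomic_int refcount;
	atomic_bool destroy_flag;
};

/* Drop a reference; returns true when the caller must destroy the object. */
static bool demo_put(struct demo_refobj *obj)
{
	if (atomic_fetch_sub(&obj->refcount, 1) == 1) {
		return atomic_load(&obj->destroy_flag);
	}
	return false;
}

/* Flag for destruction; returns true if nobody holds a reference anymore. */
static bool demo_flag_for_destroy(struct demo_refobj *obj)
{
	atomic_store(&obj->destroy_flag, true);
	return atomic_load(&obj->refcount) == 0;
}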
/*
 * Completely destroy stream from every visible data structure and the given
 * hash table if any.
 *
 * Once this call returns, the stream object is no longer usable nor visible.
 */
void consumer_del_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	consumer_stream_destroy(stream, ht);
}
/*
 * XXX naming of del vs destroy is all mixed up.
 */
void consumer_del_stream_for_data(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, data_ht);
}

void consumer_del_stream_for_metadata(struct lttng_consumer_stream *stream)
{
	consumer_stream_destroy(stream, metadata_ht);
}
struct lttng_consumer_stream *consumer_allocate_stream(uint64_t channel_key,
		uint64_t stream_key,
		enum lttng_consumer_stream_state state,
		const char *channel_name,
		uid_t uid,
		gid_t gid,
		uint64_t relayd_id,
		uint64_t session_id,
		int cpu,
		int *alloc_ret,
		enum consumer_channel_type type,
		unsigned int monitor)
{
	int ret;
	struct lttng_consumer_stream *stream;

	stream = zmalloc(sizeof(*stream));
	if (stream == NULL) {
		PERROR("malloc struct lttng_consumer_stream");
		ret = -ENOMEM;
		goto end;
	}

	rcu_read_lock();

	stream->key = stream_key;
	stream->out_fd_offset = 0;
	stream->output_written = 0;
	stream->state = state;
	stream->uid = uid;
	stream->gid = gid;
	stream->net_seq_idx = relayd_id;
	stream->session_id = session_id;
	stream->monitor = monitor;
	stream->endpoint_status = CONSUMER_ENDPOINT_ACTIVE;
	stream->index_fd = -1;
	pthread_mutex_init(&stream->lock, NULL);

	/* If channel is the metadata, flag this stream as metadata. */
	if (type == CONSUMER_CHANNEL_TYPE_METADATA) {
		stream->metadata_flag = 1;
		/* Metadata is flat out. */
		strncpy(stream->name, DEFAULT_METADATA_NAME, sizeof(stream->name));
		/* Live rendez-vous point. */
		pthread_cond_init(&stream->metadata_rdv, NULL);
		pthread_mutex_init(&stream->metadata_rdv_lock, NULL);
	} else {
		/* Format stream name to <channel_name>_<cpu_number> */
		ret = snprintf(stream->name, sizeof(stream->name), "%s_%d",
				channel_name, cpu);
		if (ret < 0) {
			PERROR("snprintf stream name");
			goto error;
		}
	}

	/* Key is always the wait_fd for streams. */
	lttng_ht_node_init_u64(&stream->node, stream->key);

	/* Init node per channel id key */
	lttng_ht_node_init_u64(&stream->node_channel_id, channel_key);

	/* Init session id node with the stream session id */
	lttng_ht_node_init_u64(&stream->node_session_id, stream->session_id);

	DBG3("Allocated stream %s (key %" PRIu64 ", chan_key %" PRIu64
			" relayd_id %" PRIu64 ", session_id %" PRIu64,
			stream->name, stream->key, channel_key,
			stream->net_seq_idx, stream->session_id);

	rcu_read_unlock();
	return stream;

error:
	rcu_read_unlock();
	free(stream);
end:
	if (alloc_ret) {
		*alloc_ret = ret;
	}
	return NULL;
}
/*
 * Add a stream to the global list protected by a mutex.
 */
int consumer_add_data_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = data_ht;
	int ret = 0;

	assert(stream);
	assert(ht);

	DBG3("Adding consumer stream %" PRIu64, stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);
	rcu_read_lock();

	/* Steal stream identifier to avoid having streams with the same key */
	steal_stream_key(stream->key, ht);

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	/* Update consumer data once the node is inserted. */
	consumer_data.stream_count++;
	consumer_data.need_update = 1;

	rcu_read_unlock();
	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	return ret;
}
void consumer_del_data_stream(struct lttng_consumer_stream *stream)
{
	consumer_del_stream(stream, data_ht);
}
/*
 * Add relayd socket to global consumer data hashtable. RCU read side lock MUST
 * be acquired before calling this.
 */
static int add_relayd(struct consumer_relayd_sock_pair *relayd)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	assert(relayd);

	lttng_ht_lookup(consumer_data.relayd_ht,
			&relayd->net_seq_idx, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		goto end;
	}
	lttng_ht_add_unique_u64(consumer_data.relayd_ht, &relayd->node);

end:
	return ret;
}
/*
 * Allocate and return a consumer relayd socket.
 */
struct consumer_relayd_sock_pair *consumer_allocate_relayd_sock_pair(
		uint64_t net_seq_idx)
{
	struct consumer_relayd_sock_pair *obj = NULL;

	/* net sequence index of -1 is a failure */
	if (net_seq_idx == (uint64_t) -1ULL) {
		goto error;
	}

	obj = zmalloc(sizeof(struct consumer_relayd_sock_pair));
	if (obj == NULL) {
		PERROR("zmalloc relayd sock");
		goto error;
	}

	obj->net_seq_idx = net_seq_idx;
	obj->refcount = 0;
	obj->destroy_flag = 0;
	obj->control_sock.sock.fd = -1;
	obj->data_sock.sock.fd = -1;
	lttng_ht_node_init_u64(&obj->node, obj->net_seq_idx);
	pthread_mutex_init(&obj->ctrl_sock_mutex, NULL);

error:
	return obj;
}
/*
 * Find a relayd socket pair in the global consumer data.
 *
 * Return the object if found else NULL.
 * RCU read-side lock must be held across this call and while using the
 * returned object.
 */
struct consumer_relayd_sock_pair *consumer_find_relayd(uint64_t key)
{
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Negative keys are lookup failures */
	if (key == (uint64_t) -1ULL) {
		goto error;
	}

	lttng_ht_lookup(consumer_data.relayd_ht, &key,
			&iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		relayd = caa_container_of(node, struct consumer_relayd_sock_pair, node);
	}

error:
	return relayd;
}
/*
 * Find a relayd and send the stream to it.
 *
 * Returns 0 on success, < 0 on error
 */
int consumer_send_relayd_stream(struct lttng_consumer_stream *stream,
		char *path)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(stream);
	assert(stream->net_seq_idx != -1ULL);
	assert(path);

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_add_stream(&relayd->control_sock, stream->name,
				path, &stream->relayd_stream_id,
				stream->chan->tracefile_size, stream->chan->tracefile_count);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
		if (ret < 0) {
			goto end;
		}

		uatomic_inc(&relayd->refcount);
		stream->sent_to_relayd = 1;
	} else {
		ERR("Stream %" PRIu64 " relayd ID %" PRIu64 " unknown. Can't send it.",
				stream->key, stream->net_seq_idx);
		ret = -1;
		goto end;
	}

	DBG("Stream %s with key %" PRIu64 " sent to relayd id %" PRIu64,
			stream->name, stream->key, stream->net_seq_idx);

end:
	rcu_read_unlock();
	return ret;
}
/*
 * Find a relayd and send the streams sent message.
 *
 * Returns 0 on success, < 0 on error
 */
int consumer_send_relayd_streams_sent(uint64_t net_seq_idx)
{
	int ret = 0;
	struct consumer_relayd_sock_pair *relayd;

	assert(net_seq_idx != -1ULL);

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd != NULL) {
		/* Add stream on the relayd */
		pthread_mutex_lock(&relayd->ctrl_sock_mutex);
		ret = relayd_streams_sent(&relayd->control_sock);
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	} else {
		ERR("Relayd ID %" PRIu64 " unknown. Can't send streams_sent.",
				net_seq_idx);
		ret = -1;
		goto end;
	}

	ret = 0;
	DBG("All streams sent relayd id %" PRIu64, net_seq_idx);

end:
	rcu_read_unlock();
	return ret;
}
/*
 * Find a relayd and close the stream.
 */
void close_relayd_stream(struct lttng_consumer_stream *stream)
{
	struct consumer_relayd_sock_pair *relayd;

	/* The stream is not metadata. Get relayd reference if exists. */
	rcu_read_lock();
	relayd = consumer_find_relayd(stream->net_seq_idx);
	if (relayd) {
		consumer_stream_relayd_close(stream, relayd);
	}
	rcu_read_unlock();
}
/*
 * Handle stream for relayd transmission if the stream applies for network
 * streaming where the net sequence index is set.
 *
 * Return destination file descriptor or negative value on error.
 */
static int write_relayd_stream_header(struct lttng_consumer_stream *stream,
		size_t data_size, unsigned long padding,
		struct consumer_relayd_sock_pair *relayd)
{
	int outfd = -1, ret;
	struct lttcomm_relayd_data_hdr data_hdr;

	/* Safety net */
	assert(stream);
	assert(relayd);

	/* Reset data header */
	memset(&data_hdr, 0, sizeof(data_hdr));

	if (stream->metadata_flag) {
		/* Caller MUST acquire the relayd control socket lock */
		ret = relayd_send_metadata(&relayd->control_sock, data_size);
		if (ret < 0) {
			goto error;
		}

		/* Metadata are always sent on the control socket. */
		outfd = relayd->control_sock.sock.fd;
	} else {
		/* Set header with stream information */
		data_hdr.stream_id = htobe64(stream->relayd_stream_id);
		data_hdr.data_size = htobe32(data_size);
		data_hdr.padding_size = htobe32(padding);
		/*
		 * Note that net_seq_num below is assigned with the *current* value of
		 * next_net_seq_num and only after that the next_net_seq_num will be
		 * incremented. This is why when issuing a command on the relayd using
		 * this next value, 1 should always be subtracted in order to compare
		 * the last seen sequence number on the relayd side to the last sent.
		 */
		data_hdr.net_seq_num = htobe64(stream->next_net_seq_num);
		/* Other fields are zeroed previously */

		ret = relayd_send_data_hdr(&relayd->data_sock, &data_hdr,
				sizeof(data_hdr));
		if (ret < 0) {
			goto error;
		}

		++stream->next_net_seq_num;

		/* Set to go on data socket */
		outfd = relayd->data_sock.sock.fd;
	}

error:
	return outfd;
}
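/*
 * Illustration (standalone sketch, not original code): the header above is
 * converted to big endian with htobe64()/htobe32() so both ends of the
 * relayd connection agree on the wire format regardless of host byte
 * order. Assuming Linux/glibc <endian.h>; the names below are
 * hypothetical.
 */
#include <endian.h>
#include <stdint.h>
#include <string.h>

struct demo_wire_hdr {
	uint64_t stream_id;	/* big endian on the wire */
	uint32_t data_size;	/* big endian on the wire */
	uint32_t padding_size;	/* big endian on the wire */
};

static void demo_pack_hdr(struct demo_wire_hdr *hdr, uint64_t stream_id,
		uint32_t data_size, uint32_t padding)
{
	memset(hdr, 0, sizeof(*hdr));
	hdr->stream_id = htobe64(stream_id);
	hdr->data_size = htobe32(data_size);
	hdr->padding_size = htobe32(padding);
	/* The receiver undoes this with be64toh()/be32toh(). */
}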
/*
 * Allocate and return a new lttng_consumer_channel object using the given key
 * to initialize the hash table node.
 *
 * On error, return NULL.
 */
struct lttng_consumer_channel *consumer_allocate_channel(uint64_t key,
		uint64_t session_id,
		const char *pathname,
		const char *name,
		uid_t uid,
		gid_t gid,
		uint64_t relayd_id,
		enum lttng_event_output output,
		uint64_t tracefile_size,
		uint64_t tracefile_count,
		uint64_t session_id_per_pid,
		unsigned int monitor,
		unsigned int live_timer_interval)
{
	struct lttng_consumer_channel *channel;

	channel = zmalloc(sizeof(*channel));
	if (channel == NULL) {
		PERROR("malloc struct lttng_consumer_channel");
		goto end;
	}

	channel->key = key;
	channel->refcount = 0;
	channel->session_id = session_id;
	channel->session_id_per_pid = session_id_per_pid;
	channel->uid = uid;
	channel->gid = gid;
	channel->relayd_id = relayd_id;
	channel->tracefile_size = tracefile_size;
	channel->tracefile_count = tracefile_count;
	channel->monitor = monitor;
	channel->live_timer_interval = live_timer_interval;
	pthread_mutex_init(&channel->lock, NULL);
	pthread_mutex_init(&channel->timer_lock, NULL);

	switch (output) {
	case LTTNG_EVENT_SPLICE:
		channel->output = CONSUMER_CHANNEL_SPLICE;
		break;
	case LTTNG_EVENT_MMAP:
		channel->output = CONSUMER_CHANNEL_MMAP;
		break;
	default:
		assert(0);
		free(channel);
		channel = NULL;
		goto end;
	}

	/*
	 * In monitor mode, the streams associated with the channel will be put in
	 * a special list ONLY owned by this channel. So, the refcount is set to 1
	 * here meaning that the channel itself has streams that are referenced.
	 *
	 * On a channel deletion, once the channel is no longer visible, the
	 * refcount is decremented and checked for a zero value to delete it. With
	 * streams in no monitor mode, it will now be safe to destroy the channel.
	 */
	if (!channel->monitor) {
		channel->refcount = 1;
	}

	strncpy(channel->pathname, pathname, sizeof(channel->pathname));
	channel->pathname[sizeof(channel->pathname) - 1] = '\0';

	strncpy(channel->name, name, sizeof(channel->name));
	channel->name[sizeof(channel->name) - 1] = '\0';

	lttng_ht_node_init_u64(&channel->node, channel->key);

	channel->wait_fd = -1;

	CDS_INIT_LIST_HEAD(&channel->streams.head);

	DBG("Allocated channel (key %" PRIu64 ")", channel->key);

end:
	return channel;
}
/*
 * Add a channel to the global list protected by a mutex.
 *
 * On success 0 is returned else a negative value.
 */
int consumer_add_channel(struct lttng_consumer_channel *channel,
		struct lttng_consumer_local_data *ctx)
{
	int ret = 0;
	struct lttng_ht_node_u64 *node;
	struct lttng_ht_iter iter;

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&channel->lock);
	pthread_mutex_lock(&channel->timer_lock);
	rcu_read_lock();

	lttng_ht_lookup(consumer_data.channel_ht, &channel->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	if (node != NULL) {
		/* Channel already exists. Ignore the insertion */
		ERR("Consumer add channel key %" PRIu64 " already exists!",
				channel->key);
		ret = -EEXIST;
		goto end;
	}

	lttng_ht_add_unique_u64(consumer_data.channel_ht, &channel->node);

end:
	rcu_read_unlock();
	pthread_mutex_unlock(&channel->timer_lock);
	pthread_mutex_unlock(&channel->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (!ret && channel->wait_fd != -1 &&
			channel->type == CONSUMER_CHANNEL_TYPE_DATA) {
		notify_channel_pipe(ctx, channel, -1, CONSUMER_CHANNEL_ADD);
	}
	return ret;
}
/*
 * Allocate the pollfd structure and the local view of the out fds to avoid
 * doing a lookup in the linked list and concurrency issues when writing is
 * needed. Called with consumer_data.lock held.
 *
 * Returns the number of fds in the structures.
 */
static int update_poll_array(struct lttng_consumer_local_data *ctx,
		struct pollfd **pollfd, struct lttng_consumer_stream **local_stream,
		struct lttng_ht *ht)
{
	int i = 0;
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	assert(ctx);
	assert(ht);
	assert(pollfd);
	assert(local_stream);

	DBG("Updating poll fd array");
	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Only active streams with an active end point can be added to the
		 * poll set and local stream storage of the thread.
		 *
		 * There is a potential race here for endpoint_status to be updated
		 * just after the check. However, this is OK since the stream(s) will
		 * be deleted once the thread is notified that the end point state has
		 * changed where this function will be called back again.
		 */
		if (stream->state != LTTNG_CONSUMER_ACTIVE_STREAM ||
				stream->endpoint_status == CONSUMER_ENDPOINT_INACTIVE) {
			continue;
		}
		/*
		 * This clobbers way too much the debug output. Uncomment that if you
		 * need it for debugging purposes.
		 *
		 * DBG("Active FD %d", stream->wait_fd);
		 */
		(*pollfd)[i].fd = stream->wait_fd;
		(*pollfd)[i].events = POLLIN | POLLPRI;
		local_stream[i] = stream;
		i++;
	}
	rcu_read_unlock();

	/*
	 * Insert the consumer_data_pipe at the end of the array and don't
	 * increment i so nb_fd is the number of real FD.
	 */
	(*pollfd)[i].fd = lttng_pipe_get_readfd(ctx->consumer_data_pipe);
	(*pollfd)[i].events = POLLIN | POLLPRI;
	return i;
}
/*
 * Poll on the should_quit pipe and the command socket. Return -1 on error
 * (and the thread should exit), 0 if data is available on the command socket.
 */
int lttng_consumer_poll_socket(struct pollfd *consumer_sockpoll)
{
	int num_rdy;

restart:
	num_rdy = poll(consumer_sockpoll, 2, -1);
	if (num_rdy == -1) {
		/*
		 * Restart interrupted system call.
		 */
		if (errno == EINTR) {
			goto restart;
		}
		PERROR("Poll error");
		goto exit;
	}
	if (consumer_sockpoll[0].revents & (POLLIN | POLLPRI)) {
		DBG("consumer_should_quit wake up");
		goto exit;
	}
	return 0;

exit:
	return -1;
}
/*
 * Set the error socket.
 */
void lttng_consumer_set_error_sock(struct lttng_consumer_local_data *ctx,
		int sock)
{
	ctx->consumer_error_socket = sock;
}

/*
 * Set the command socket path.
 */
void lttng_consumer_set_command_sock_path(
		struct lttng_consumer_local_data *ctx, char *sock)
{
	ctx->consumer_command_sock_path = sock;
}
/*
 * Send return code to the session daemon.
 * If the socket is not defined, we return 0; it is not a fatal error.
 */
int lttng_consumer_send_error(struct lttng_consumer_local_data *ctx, int cmd)
{
	if (ctx->consumer_error_socket > 0) {
		return lttcomm_send_unix_sock(ctx->consumer_error_socket, &cmd,
				sizeof(enum lttcomm_sessiond_command));
	}

	return 0;
}
/*
 * Close all the tracefiles and stream fds and MUST be called when all
 * instances are destroyed i.e. when all threads were joined and are ended.
 */
void lttng_consumer_cleanup(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	rcu_read_lock();

	cds_lfht_for_each_entry(consumer_data.channel_ht->ht, &iter.iter, channel,
			node.node) {
		consumer_del_channel(channel);
	}

	rcu_read_unlock();

	lttng_ht_destroy(consumer_data.channel_ht);

	cleanup_relayd_ht();

	lttng_ht_destroy(consumer_data.stream_per_chan_id_ht);
	/*
	 * This HT contains streams that are freed by either the metadata thread or
	 * the data thread so we do *nothing* on the hash table and simply destroy
	 * it.
	 */
	lttng_ht_destroy(consumer_data.stream_list_ht);
}
/*
 * Called from signal handler.
 */
void lttng_consumer_should_exit(struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	consumer_quit = 1;
	ret = lttng_write(ctx->consumer_should_quit[1], "4", 1);
	if (ret < 1) {
		PERROR("write consumer quit");
	}

	DBG("Consumer flag that it should quit");
}
void lttng_consumer_sync_trace_file(struct lttng_consumer_stream *stream,
		off_t orig_offset)
{
	int outfd = stream->out_fd;

	/*
	 * This does a blocking write-and-wait on any page that belongs to the
	 * subbuffer prior to the one we just wrote.
	 * Don't care about error values, as these are just hints and ways to
	 * limit the amount of page cache used.
	 */
	if (orig_offset < stream->max_sb_size) {
		return;
	}
	lttng_sync_file_range(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size,
			SYNC_FILE_RANGE_WAIT_BEFORE
			| SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/*
	 * Give hints to the kernel about how we access the file:
	 * POSIX_FADV_DONTNEED : we won't re-access data in a near future after
	 * we write it.
	 *
	 * We need to call fadvise again after the file grows because the
	 * kernel does not seem to apply fadvise to non-existing parts of the
	 * file.
	 *
	 * Call fadvise _after_ having waited for the page writeback to
	 * complete because the dirty page writeback semantic is not well
	 * defined. So it can be expected to lead to lower throughput in
	 * streaming.
	 */
	posix_fadvise(outfd, orig_offset - stream->max_sb_size,
			stream->max_sb_size, POSIX_FADV_DONTNEED);
}
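/*
 * Illustration (standalone sketch, not original code): the page cache
 * control pattern above, assuming Linux with _GNU_SOURCE defined before
 * the first include so sync_file_range(2) is visible. Write-out of the
 * previous chunk is forced and waited for, then the clean pages are
 * dropped so long traces do not evict the rest of the page cache.
 * Hypothetical names.
 */
#include <fcntl.h>

static void demo_flush_and_drop(int fd, off_t offset, off_t nbytes)
{
	/* Block until the dirty pages of [offset, offset + nbytes) hit disk. */
	(void) sync_file_range(fd, offset, nbytes,
			SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE
			| SYNC_FILE_RANGE_WAIT_AFTER);
	/* Now safe to ask the kernel to drop the (clean) pages. */
	(void) posix_fadvise(fd, offset, nbytes, POSIX_FADV_DONTNEED);
}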
/*
 * Initialise the necessary environment:
 * - create a new context
 * - create the poll_pipe
 * - create the should_quit pipe (for signal handler)
 * - create the thread pipe (for splice)
 *
 * Takes a function pointer as argument, this function is called when data is
 * available on a buffer. This function is responsible to do the
 * kernctl_get_next_subbuf, read the data with mmap or splice depending on the
 * buffer configuration and then kernctl_put_next_subbuf at the end.
 *
 * Returns a pointer to the new context or NULL on error.
 */
struct lttng_consumer_local_data *lttng_consumer_create(
		enum lttng_consumer_type type,
		ssize_t (*buffer_ready)(struct lttng_consumer_stream *stream,
			struct lttng_consumer_local_data *ctx),
		int (*recv_channel)(struct lttng_consumer_channel *channel),
		int (*recv_stream)(struct lttng_consumer_stream *stream),
		int (*update_stream)(uint64_t stream_key, uint32_t state))
{
	int ret;
	struct lttng_consumer_local_data *ctx;

	assert(consumer_data.type == LTTNG_CONSUMER_UNKNOWN ||
			consumer_data.type == type);
	consumer_data.type = type;

	ctx = zmalloc(sizeof(struct lttng_consumer_local_data));
	if (ctx == NULL) {
		PERROR("allocating context");
		goto error;
	}

	ctx->consumer_error_socket = -1;
	ctx->consumer_metadata_socket = -1;
	pthread_mutex_init(&ctx->metadata_socket_lock, NULL);
	/* assign the callbacks */
	ctx->on_buffer_ready = buffer_ready;
	ctx->on_recv_channel = recv_channel;
	ctx->on_recv_stream = recv_stream;
	ctx->on_update_stream = update_stream;

	ctx->consumer_data_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_data_pipe) {
		goto error_poll_pipe;
	}

	ret = pipe(ctx->consumer_should_quit);
	if (ret < 0) {
		PERROR("Error creating recv pipe");
		goto error_quit_pipe;
	}

	ret = pipe(ctx->consumer_thread_pipe);
	if (ret < 0) {
		PERROR("Error creating thread pipe");
		goto error_thread_pipe;
	}

	ret = pipe(ctx->consumer_channel_pipe);
	if (ret < 0) {
		PERROR("Error creating channel pipe");
		goto error_channel_pipe;
	}

	ctx->consumer_metadata_pipe = lttng_pipe_open(0);
	if (!ctx->consumer_metadata_pipe) {
		goto error_metadata_pipe;
	}

	ret = utils_create_pipe(ctx->consumer_splice_metadata_pipe);
	if (ret < 0) {
		goto error_splice_pipe;
	}

	return ctx;

error_splice_pipe:
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
error_metadata_pipe:
	utils_close_pipe(ctx->consumer_channel_pipe);
error_channel_pipe:
	utils_close_pipe(ctx->consumer_thread_pipe);
error_thread_pipe:
	utils_close_pipe(ctx->consumer_should_quit);
error_quit_pipe:
	lttng_pipe_destroy(ctx->consumer_data_pipe);
error_poll_pipe:
	free(ctx);
error:
	return NULL;
}
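/*
 * Illustration (standalone sketch, not original code): the error-label
 * ladder used by lttng_consumer_create() above. Each acquired resource
 * gets one label, and a failure jumps to the label that releases
 * everything acquired so far, in reverse order. Hypothetical names.
 */
#include <unistd.h>

static int demo_create_two_pipes(int a[2], int b[2])
{
	if (pipe(a) < 0) {
		goto error;
	}
	if (pipe(b) < 0) {
		goto error_first_pipe;
	}
	return 0;

error_first_pipe:
	/* Undo only what succeeded before the failure point. */
	close(a[0]);
	close(a[1]);
error:
	return -1;
}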
/*
 * Iterate over all streams of the hashtable and free them properly.
 */
static void destroy_data_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * Iterate over all streams of the metadata hashtable and free them
 * properly.
 */
static void destroy_metadata_stream_ht(struct lttng_ht *ht)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, stream, node.node) {
		/*
		 * Ignore return value since we are currently cleaning up so any error
		 * can't be handled.
		 */
		(void) consumer_del_metadata_stream(stream, ht);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * Close all fds associated with the instance and free the context.
 */
void lttng_consumer_destroy(struct lttng_consumer_local_data *ctx)
{
	int ret;

	DBG("Consumer destroying it. Closing everything.");

	destroy_data_stream_ht(data_ht);
	destroy_metadata_stream_ht(metadata_ht);

	ret = close(ctx->consumer_error_socket);
	if (ret) {
		PERROR("close");
	}
	ret = close(ctx->consumer_metadata_socket);
	if (ret) {
		PERROR("close");
	}
	utils_close_pipe(ctx->consumer_thread_pipe);
	utils_close_pipe(ctx->consumer_channel_pipe);
	lttng_pipe_destroy(ctx->consumer_data_pipe);
	lttng_pipe_destroy(ctx->consumer_metadata_pipe);
	utils_close_pipe(ctx->consumer_should_quit);
	utils_close_pipe(ctx->consumer_splice_metadata_pipe);

	unlink(ctx->consumer_command_sock_path);
	free(ctx);
}
/*
 * Write the metadata stream id on the specified file descriptor.
 */
static int write_relayd_metadata_id(int fd,
		struct lttng_consumer_stream *stream,
		struct consumer_relayd_sock_pair *relayd, unsigned long padding)
{
	ssize_t ret;
	struct lttcomm_relayd_metadata_payload hdr;

	hdr.stream_id = htobe64(stream->relayd_stream_id);
	hdr.padding_size = htobe32(padding);
	ret = lttng_write(fd, (void *) &hdr, sizeof(hdr));
	if (ret < sizeof(hdr)) {
		/*
		 * This error means that the fd's end is closed so ignore the PERROR
		 * not to clutter the error output since this can happen in a normal
		 * code path.
		 */
		if (errno != EPIPE) {
			PERROR("write metadata stream id");
		}
		DBG3("Consumer failed to write relayd metadata id (errno: %d)", errno);
		/*
		 * Set ret to a negative value because if ret != sizeof(hdr), we don't
		 * handle writing the missing part so report that as an error and
		 * don't lie to the caller.
		 */
		ret = -1;
		goto end;
	}
	DBG("Metadata stream id %" PRIu64 " with padding %lu written before data",
			stream->relayd_stream_id, padding);

end:
	return (int) ret;
}
/*
 * Mmap the ring buffer, read it and write the data to the tracefile. This is a
 * core function for writing trace buffers to either the local filesystem or
 * the network.
 *
 * It must be called with the stream lock held.
 *
 * Careful review MUST be put if any changes occur!
 *
 * Returns the number of bytes written
 */
ssize_t lttng_consumer_on_read_subbuffer_mmap(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct ctf_packet_index *index)
{
	unsigned long mmap_offset;
	void *mmap_base;
	ssize_t ret = 0, written = 0;
	off_t orig_offset = stream->out_fd_offset;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	unsigned int relayd_hang_up = 0;

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Check if the current stream is set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			ret = -EPIPE;
			goto end;
		}
	}

	/* get the offset inside the fd to mmap */
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		mmap_base = stream->mmap_base;
		ret = kernctl_get_mmap_read_offset(stream->wait_fd, &mmap_offset);
		if (ret != 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			written = -errno;
			goto end;
		}
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		mmap_base = lttng_ustctl_get_mmap_base(stream);
		if (!mmap_base) {
			ERR("read mmap get mmap base for stream %s", stream->name);
			written = -EPERM;
			goto end;
		}
		ret = lttng_ustctl_get_mmap_read_offset(stream, &mmap_offset);
		if (ret != 0) {
			PERROR("tracer ctl get_mmap_read_offset");
			written = ret;
			goto end;
		}
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* Handle stream on the relayd if the output is on the network */
	if (relayd) {
		unsigned long netlen = len;

		/*
		 * Lock the control socket for the complete duration of the function
		 * since from this point on we will use the socket.
		 */
		if (stream->metadata_flag) {
			/* Metadata requires the control socket. */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);
			netlen += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, netlen, padding, relayd);
		if (ret >= 0) {
			/* Use the returned socket. */
			outfd = ret;

			/* Write metadata stream id before payload */
			if (stream->metadata_flag) {
				ret = write_relayd_metadata_id(outfd, stream, relayd, padding);
				if (ret < 0) {
					written = ret;
					/* Socket operation failed. We consider the relayd dead */
					if (ret == -EPIPE || ret == -EINVAL) {
						relayd_hang_up = 1;
						goto write_error;
					}
					goto end;
				}
			}
		} else {
			written = ret;
			/* Socket operation failed. We consider the relayd dead */
			if (ret == -EPIPE || ret == -EINVAL) {
				relayd_hang_up = 1;
				goto write_error;
			}
			goto end;
		}
		/* Else, use the default set before which is the filesystem. */
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current),
					&stream->out_fd);
			if (ret < 0) {
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd;

			if (stream->index_fd >= 0) {
				ret = index_create_file(stream->chan->pathname,
						stream->name, stream->uid, stream->gid,
						stream->chan->tracefile_size,
						stream->tracefile_count_current);
				if (ret < 0) {
					goto end;
				}
				stream->index_fd = ret;
			}

			/* Reset current size because we just perform a rotation. */
			stream->tracefile_size_current = 0;
			stream->out_fd_offset = 0;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;

		index->offset = htobe64(stream->out_fd_offset);
	}

	/*
	 * This call guarantee that len or less is returned. It's impossible to
	 * receive a ret value that is bigger than len.
	 */
	ret = lttng_write(outfd, mmap_base + mmap_offset, len);
	DBG("Consumer mmap write() ret %zd (len %lu)", ret, len);
	if (ret < 0 || ((size_t) ret != len)) {
		/*
		 * Report error to caller if nothing was written else at least send the
		 * amount written.
		 */
		if (ret < 0) {
			written = -errno;
		} else {
			written = ret;
		}

		/* Socket operation failed. We consider the relayd dead */
		if (errno == EPIPE || errno == EINVAL) {
			/*
			 * This is possible if the fd is closed on the other side
			 * (outfd) or any write problem. It can be verbose a bit for a
			 * normal execution if for instance the relayd is stopped
			 * abruptly. This can happen so set this to a DBG statement.
			 */
			DBG("Consumer mmap write detected relayd hang up");
			relayd_hang_up = 1;
			goto write_error;
		}

		/* Unhandled error, print it and stop function right now. */
		PERROR("Error in write mmap (ret %zd != len %lu)", ret, len);
		goto end;
	}
	stream->output_written += ret;
	written = ret;

	/* This call is useless on a socket so better save a syscall. */
	if (!relayd) {
		/* This won't block, but will start writeout asynchronously */
		lttng_sync_file_range(outfd, stream->out_fd_offset, len,
				SYNC_FILE_RANGE_WRITE);
		stream->out_fd_offset += len;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
	}

end:
	/* Unlock only if ctrl socket used */
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
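/*
 * Illustration (standalone sketch, not original code): lttng_write() used
 * above wraps write(2) so short writes and EINTR are retried, which is why
 * callers can treat "ret < len" as a hard error. A minimal equivalent,
 * with hypothetical names:
 */
#include <errno.h>
#include <sys/types.h>
#include <unistd.h>

static ssize_t demo_write_all(int fd, const void *buf, size_t len)
{
	const char *p = buf;
	size_t left = len;

	while (left > 0) {
		ssize_t ret = write(fd, p, left);

		if (ret < 0) {
			if (errno == EINTR) {
				continue;	/* interrupted, retry */
			}
			return -1;	/* real error, caller checks errno */
		}
		p += ret;
		left -= ret;
	}
	return (ssize_t) len;
}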
/*
 * Splice the data from the ring buffer to the tracefile.
 *
 * It must be called with the stream lock held.
 *
 * Returns the number of bytes spliced.
 */
ssize_t lttng_consumer_on_read_subbuffer_splice(
		struct lttng_consumer_local_data *ctx,
		struct lttng_consumer_stream *stream, unsigned long len,
		unsigned long padding,
		struct ctf_packet_index *index)
{
	ssize_t ret = 0, written = 0, ret_splice = 0;
	loff_t offset = 0;
	off_t orig_offset = stream->out_fd_offset;
	int fd = stream->wait_fd;
	/* Default is on the disk */
	int outfd = stream->out_fd;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int *splice_pipe;
	unsigned int relayd_hang_up = 0;

	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/* Not supported for user space tracing */
		return -ENOSYS;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}

	/* RCU lock for the relayd pointer */
	rcu_read_lock();

	/* Check if the current stream is set for network streaming. */
	if (stream->net_seq_idx != (uint64_t) -1ULL) {
		relayd = consumer_find_relayd(stream->net_seq_idx);
		if (relayd == NULL) {
			ret = -EPIPE;
			goto end;
		}
	}

	/*
	 * Choose right pipe for splice. Metadata and trace data are handled by
	 * different threads hence the use of two pipes in order not to race or
	 * corrupt the written data.
	 */
	if (stream->metadata_flag) {
		splice_pipe = ctx->consumer_splice_metadata_pipe;
	} else {
		splice_pipe = ctx->consumer_thread_pipe;
	}

	/* Write metadata stream id before payload */
	if (relayd) {
		unsigned long total_len = len;

		if (stream->metadata_flag) {
			/*
			 * Lock the control socket for the complete duration of the function
			 * since from this point on we will use the socket.
			 */
			pthread_mutex_lock(&relayd->ctrl_sock_mutex);

			ret = write_relayd_metadata_id(splice_pipe[1], stream, relayd,
					padding);
			if (ret < 0) {
				written = ret;
				/* Socket operation failed. We consider the relayd dead */
				if (ret == -EBADF) {
					WARN("Remote relayd disconnected. Stopping");
					relayd_hang_up = 1;
					goto write_error;
				}
				goto end;
			}

			total_len += sizeof(struct lttcomm_relayd_metadata_payload);
		}

		ret = write_relayd_stream_header(stream, total_len, padding, relayd);
		if (ret >= 0) {
			/* Use the returned socket. */
			outfd = ret;
		} else {
			/* Socket operation failed. We consider the relayd dead */
			if (ret == -EBADF) {
				WARN("Remote relayd disconnected. Stopping");
				relayd_hang_up = 1;
				goto write_error;
			}
			goto end;
		}
	} else {
		/* No streaming, we have to set the len with the full padding */
		len += padding;

		/*
		 * Check if we need to change the tracefile before writing the packet.
		 */
		if (stream->chan->tracefile_size > 0 &&
				(stream->tracefile_size_current + len) >
				stream->chan->tracefile_size) {
			ret = utils_rotate_stream_file(stream->chan->pathname,
					stream->name, stream->chan->tracefile_size,
					stream->chan->tracefile_count, stream->uid, stream->gid,
					stream->out_fd, &(stream->tracefile_count_current),
					&stream->out_fd);
			if (ret < 0) {
				ERR("Rotating output file");
				goto end;
			}
			outfd = stream->out_fd;

			if (stream->index_fd >= 0) {
				ret = index_create_file(stream->chan->pathname,
						stream->name, stream->uid, stream->gid,
						stream->chan->tracefile_size,
						stream->tracefile_count_current);
				if (ret < 0) {
					goto end;
				}
				stream->index_fd = ret;
			}

			/* Reset current size because we just perform a rotation. */
			stream->tracefile_size_current = 0;
			stream->out_fd_offset = 0;
			orig_offset = 0;
		}
		stream->tracefile_size_current += len;
		index->offset = htobe64(stream->out_fd_offset);
	}

	while (len > 0) {
		DBG("splice chan to pipe offset %lu of len %lu (fd : %d, pipe: %d)",
				(unsigned long)offset, len, fd, splice_pipe[1]);
		ret_splice = splice(fd, &offset, splice_pipe[1], NULL, len,
				SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("splice chan to pipe, ret %zd", ret_splice);
		if (ret_splice < 0) {
			ret = errno;
			written = ret_splice;
			PERROR("Error in relay splice");
			goto splice_error;
		}

		/* Handle stream on the relayd if the output is on the network */
		if (relayd && stream->metadata_flag) {
			size_t metadata_payload_size =
				sizeof(struct lttcomm_relayd_metadata_payload);

			/* Update counter to fit the spliced data */
			ret_splice += metadata_payload_size;
			len += metadata_payload_size;
			/*
			 * We do this so the return value can match the len passed as
			 * argument to this function.
			 */
			written -= metadata_payload_size;
		}

		/* Splice data out */
		ret_splice = splice(splice_pipe[0], NULL, outfd, NULL,
				ret_splice, SPLICE_F_MOVE | SPLICE_F_MORE);
		DBG("Consumer splice pipe to file, ret %zd", ret_splice);
		if (ret_splice < 0) {
			ret = errno;
			written = ret_splice;
			/* Socket operation failed. We consider the relayd dead */
			if (errno == EBADF || errno == EPIPE || errno == ESPIPE) {
				WARN("Remote relayd disconnected. Stopping");
				relayd_hang_up = 1;
				goto write_error;
			}
			PERROR("Error in file splice");
			goto splice_error;
		} else if (ret_splice > len) {
			/*
			 * We don't expect this code path to be executed but you never know
			 * so this is an extra protection against a buggy splice().
			 */
			ret = errno;
			written += ret_splice;
			PERROR("Wrote more data than requested %zd (len: %lu)", ret_splice,
					len);
			goto splice_error;
		}

		/* All good, update current len and continue. */
		len -= ret_splice;

		/* This call is useless on a socket so better save a syscall. */
		if (!relayd) {
			/* This won't block, but will start writeout asynchronously */
			lttng_sync_file_range(outfd, stream->out_fd_offset, ret_splice,
					SYNC_FILE_RANGE_WRITE);
			stream->out_fd_offset += ret_splice;
		}
		stream->output_written += ret_splice;
		written += ret_splice;
	}
	lttng_consumer_sync_trace_file(stream, orig_offset);
	goto end;

write_error:
	/*
	 * This is a special case that the relayd has closed its socket. Let's
	 * cleanup the relayd object and all associated streams.
	 */
	if (relayd && relayd_hang_up) {
		cleanup_relayd(relayd, ctx);
		/* Skip splice error so the consumer does not fail */
		goto end;
	}

splice_error:
	/* send the appropriate error description to sessiond */
	switch (ret) {
	case EINVAL:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_EINVAL);
		break;
	case ENOMEM:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ENOMEM);
		break;
	case ESPIPE:
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_SPLICE_ESPIPE);
		break;
	}

end:
	if (relayd && stream->metadata_flag) {
		pthread_mutex_unlock(&relayd->ctrl_sock_mutex);
	}

	rcu_read_unlock();
	return written;
}
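/*
 * Illustration (standalone sketch, not original code): the zero-copy path
 * above, assuming Linux with _GNU_SOURCE for splice(2). Data moves from the
 * source fd into a pipe, then from the pipe to the output fd, without
 * passing through user space. Hypothetical names.
 */
#include <fcntl.h>
#include <sys/types.h>

static ssize_t demo_splice_through(int in_fd, int pipefd[2], int out_fd,
		size_t len)
{
	/* Stage 1: source fd -> pipe write end. */
	ssize_t moved = splice(in_fd, NULL, pipefd[1], NULL, len,
			SPLICE_F_MOVE | SPLICE_F_MORE);
	if (moved <= 0) {
		return moved;
	}
	/* Stage 2: pipe read end -> destination fd, same byte count. */
	return splice(pipefd[0], NULL, out_fd, NULL, moved,
			SPLICE_F_MOVE | SPLICE_F_MORE);
}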
/*
 * Take a snapshot for a specific fd
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_take_snapshot(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_take_snapshot(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_take_snapshot(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Get the produced position
 *
 * Returns 0 on success, < 0 on error
 */
int lttng_consumer_get_produced_snapshot(struct lttng_consumer_stream *stream,
		unsigned long *pos)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_get_produced_snapshot(stream, pos);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_get_produced_snapshot(stream, pos);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
int lttng_consumer_recv_cmd(struct lttng_consumer_local_data *ctx,
		int sock, struct pollfd *consumer_sockpoll)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_recv_cmd(ctx, sock, consumer_sockpoll);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
void lttng_consumer_close_all_metadata(void)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		/*
		 * The Kernel consumer has a different metadata scheme so we don't
		 * close anything because the stream will be closed by the session
		 * daemon.
		 */
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		/*
		 * Close all metadata streams. The metadata hash table is passed and
		 * this call iterates over it by closing all wakeup fd. This is safe
		 * because at this point we are sure that the metadata producer is
		 * either dead or blocked.
		 */
		lttng_ustconsumer_close_all_metadata(metadata_ht);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
	}
}
/*
 * Clean up a metadata stream and free its memory.
 */
void consumer_del_metadata_stream(struct lttng_consumer_stream *stream,
		struct lttng_ht *ht)
{
	struct lttng_consumer_channel *free_chan = NULL;

	assert(stream);
	/*
	 * This call should NEVER receive regular stream. It must always be
	 * metadata stream and this is crucial for data structure synchronization.
	 */
	assert(stream->metadata_flag);

	DBG3("Consumer delete metadata stream %d", stream->wait_fd);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->lock);

	/* Remove any reference to that stream. */
	consumer_stream_delete(stream, ht);

	/* Close down everything including the relayd if one. */
	consumer_stream_close(stream);
	/* Destroy tracer buffers of the stream. */
	consumer_stream_destroy_buffers(stream);

	/* Atomically decrement channel refcount since other threads can use it. */
	if (!uatomic_sub_return(&stream->chan->refcount, 1)
			&& !uatomic_read(&stream->chan->nb_init_stream_left)) {
		/* Go for channel deletion! */
		free_chan = stream->chan;
	}

	/*
	 * Nullify the stream reference so it is not used after deletion. The
	 * channel lock MUST be acquired before being able to check for a NULL
	 * pointer value.
	 */
	stream->chan->metadata_stream = NULL;

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&consumer_data.lock);

	if (free_chan) {
		consumer_del_channel(free_chan);
	}

	consumer_stream_free(stream);
}
/*
 * Action done with the metadata stream when adding it to the consumer internal
 * data structures to handle it.
 */
int consumer_add_metadata_stream(struct lttng_consumer_stream *stream)
{
	struct lttng_ht *ht = metadata_ht;
	int ret = 0;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;

	assert(stream);
	assert(ht);

	DBG3("Adding metadata stream %" PRIu64 " to hash table", stream->key);

	pthread_mutex_lock(&consumer_data.lock);
	pthread_mutex_lock(&stream->chan->lock);
	pthread_mutex_lock(&stream->chan->timer_lock);
	pthread_mutex_lock(&stream->lock);

	/*
	 * From here, refcounts are updated so be _careful_ when returning an error
	 * after this point.
	 */

	rcu_read_lock();

	/*
	 * Lookup the stream just to make sure it does not exist in our internal
	 * state. This should NEVER happen.
	 */
	lttng_ht_lookup(ht, &stream->key, &iter);
	node = lttng_ht_iter_get_node_u64(&iter);
	assert(!node);

	/*
	 * When nb_init_stream_left reaches 0, we don't need to trigger any action
	 * in terms of destroying the associated channel, because the action that
	 * causes the count to become 0 also causes a stream to be added. The
	 * channel deletion will thus be triggered by the following removal of this
	 * stream.
	 */
	if (uatomic_read(&stream->chan->nb_init_stream_left) > 0) {
		/* Increment refcount before decrementing nb_init_stream_left */
		uatomic_inc(&stream->chan->refcount);
		uatomic_dec(&stream->chan->nb_init_stream_left);
	}

	lttng_ht_add_unique_u64(ht, &stream->node);

	lttng_ht_add_unique_u64(consumer_data.stream_per_chan_id_ht,
			&stream->node_channel_id);

	/*
	 * Add stream to the stream_list_ht of the consumer data. No need to steal
	 * the key since the HT does not use it and we allow to add redundant keys
	 * into this table.
	 */
	lttng_ht_add_u64(consumer_data.stream_list_ht, &stream->node_session_id);

	rcu_read_unlock();

	pthread_mutex_unlock(&stream->lock);
	pthread_mutex_unlock(&stream->chan->lock);
	pthread_mutex_unlock(&stream->chan->timer_lock);
	pthread_mutex_unlock(&consumer_data.lock);
	return ret;
}
/*
 * Delete data streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_data_stream(void)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged data stream");

	rcu_read_lock();
	cds_lfht_for_each_entry(data_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/* Delete it right now */
		consumer_del_stream(stream, data_ht);
	}
	rcu_read_unlock();
}
/*
 * Delete metadata streams that are flagged for deletion (endpoint_status).
 */
static void validate_endpoint_status_metadata_stream(
		struct lttng_poll_event *pollset)
{
	struct lttng_ht_iter iter;
	struct lttng_consumer_stream *stream;

	DBG("Consumer delete flagged metadata stream");

	assert(pollset);

	rcu_read_lock();
	cds_lfht_for_each_entry(metadata_ht->ht, &iter.iter, stream, node.node) {
		/* Validate delete flag of the stream */
		if (stream->endpoint_status == CONSUMER_ENDPOINT_ACTIVE) {
			continue;
		}
		/*
		 * Remove from pollset so the metadata thread can continue without
		 * blocking on a deleted stream.
		 */
		lttng_poll_del(pollset, stream->wait_fd);

		/* Delete it right now */
		consumer_del_metadata_stream(stream, metadata_ht);
	}
	rcu_read_unlock();
}
/*
 * Thread that polls on metadata file descriptors and writes them to disk or
 * to the network.
 */
void *consumer_thread_metadata_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_stream *stream = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_METADATA);

	if (testpoint(consumerd_thread_metadata)) {
		goto error_testpoint;
	}

	health_code_update();

	DBG("Thread metadata poll started");

	/* Size is set to 1 for the consumer_metadata pipe */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events,
			lttng_pipe_get_readfd(ctx->consumer_metadata_pipe), LPOLLIN);
	if (ret < 0) {
		goto end;
	}

	/* Main loop */
	DBG("Metadata main loop started");

	while (1) {
		health_code_update();

		/* Only the metadata pipe is set */
		if (LTTNG_POLL_GETNB(&events) == 0 && consumer_quit == 1) {
			err = 0;	/* All is OK */
			goto end;
		}

restart:
		DBG("Metadata poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events));
		health_poll_entry();
		ret = lttng_poll_wait(&events, -1);
		health_poll_exit();
		DBG("Metadata event caught in thread");
		if (ret < 0) {
			if (errno == EINTR) {
				ERR("Poll EINTR caught");
				goto restart;
			}
			goto error;
		}

		nb_fd = ret;

		/* From here, the event is a metadata wait fd */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			revents = LTTNG_POLL_GETEV(&events, i);
			pollfd = LTTNG_POLL_GETFD(&events, i);

			if (pollfd == lttng_pipe_get_readfd(ctx->consumer_metadata_pipe)) {
				if (revents & (LPOLLERR | LPOLLHUP)) {
					DBG("Metadata thread pipe hung up");
					/*
					 * Remove the pipe from the poll set and continue the loop
					 * since there might be data to consume.
					 */
					lttng_poll_del(&events,
							lttng_pipe_get_readfd(ctx->consumer_metadata_pipe));
					lttng_pipe_read_close(ctx->consumer_metadata_pipe);
					continue;
				} else if (revents & LPOLLIN) {
					ssize_t pipe_len;

					pipe_len = lttng_pipe_read(ctx->consumer_metadata_pipe,
							&stream, sizeof(stream));
					if (pipe_len < sizeof(stream)) {
						PERROR("read metadata stream");
						/*
						 * Continue here to handle the rest of the streams.
						 */
						continue;
					}

					/* A NULL stream means that the state has changed. */
					if (stream == NULL) {
						/* Check for deleted streams. */
						validate_endpoint_status_metadata_stream(&events);
						goto restart;
					}

					DBG("Adding metadata stream %d to poll set",
							stream->wait_fd);

					/* Add metadata stream to the global poll events list */
					lttng_poll_add(&events, stream->wait_fd,
							LPOLLIN | LPOLLPRI | LPOLLHUP);
				}

				/* Handle other stream */
				continue;
			}

			rcu_read_lock();
			{
				uint64_t tmp_id = (uint64_t) pollfd;

				lttng_ht_lookup(metadata_ht, &tmp_id, &iter);
			}
			node = lttng_ht_iter_get_node_u64(&iter);
			assert(node);

			stream = caa_container_of(node, struct lttng_consumer_stream,
					node);

			/* Check for error event */
			if (revents & (LPOLLERR | LPOLLHUP)) {
				DBG("Metadata fd %d is hup|err.", pollfd);
				if (!stream->hangup_flush_done
						&& (consumer_data.type == LTTNG_CONSUMER32_UST
							|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
					DBG("Attempting to flush and consume the UST buffers");
					lttng_ustconsumer_on_stream_hangup(stream);

					/* We just flushed the stream now read it. */
					do {
						health_code_update();

						len = ctx->on_buffer_ready(stream, ctx);
						/*
						 * We don't check the return value here since if we get
						 * a negative len, it means an error occurred thus we
						 * simply remove it from the poll set and free the
						 * stream.
						 */
					} while (len > 0);
				}

				lttng_poll_del(&events, stream->wait_fd);
				/*
				 * This call updates the channel states, closes file
				 * descriptors and securely frees the stream.
				 */
				consumer_del_metadata_stream(stream, metadata_ht);
			} else if (revents & (LPOLLIN | LPOLLPRI)) {
				/* Get the data out of the metadata file descriptor */
				DBG("Metadata available on fd %d", pollfd);
				assert(stream->wait_fd == pollfd);

				do {
					health_code_update();

					len = ctx->on_buffer_ready(stream, ctx);
					/*
					 * We don't check the return value here since if we get
					 * a negative len, it means an error occurred thus we
					 * simply remove it from the poll set and free the
					 * stream.
					 */
				} while (len > 0);

				/* It's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean up stream from consumer and free it. */
					lttng_poll_del(&events, stream->wait_fd);
					consumer_del_metadata_stream(stream, metadata_ht);
				}
			}

			/* Release RCU lock for the stream looked up */
			rcu_read_unlock();
		}
	}

error:
end:
	DBG("Metadata poll thread exiting");

	lttng_poll_clean(&events);
end_poll:
error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);
	rcu_unregister_thread();
	return NULL;
}
/*
 * This thread polls the fds in the set to consume the data and writes
 * it to a tracefile if necessary.
 */
void *consumer_thread_data_poll(void *data)
{
	int num_rdy, num_hup, high_prio, ret, i, err = -1;
	struct pollfd *pollfd = NULL;
	/* local view of the streams */
	struct lttng_consumer_stream **local_stream = NULL, *new_stream = NULL;
	/* local view of consumer_data.fds_count */
	int nb_fd = 0;
	struct lttng_consumer_local_data *ctx = data;
	ssize_t len;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_DATA);

	if (testpoint(consumerd_thread_data)) {
		goto error_testpoint;
	}

	health_code_update();

	local_stream = zmalloc(sizeof(struct lttng_consumer_stream *));
	if (local_stream == NULL) {
		PERROR("local_stream malloc");
		goto end;
	}

	health_code_update();
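
	/*
	 * Main loop: each iteration refreshes the local fd array if needed,
	 * polls, then services high-priority reads, low-priority reads and
	 * hangups, in that order.
	 */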
	while (1) {
		high_prio = 0;
		num_hup = 0;

		health_code_update();

		/*
		 * The fds set has been updated; we need to update our
		 * local array as well.
		 */
		pthread_mutex_lock(&consumer_data.lock);
		if (consumer_data.need_update) {
			free(pollfd);
			pollfd = NULL;

			free(local_stream);
			local_stream = NULL;

			/* allocate for all fds + 1 for the consumer_data_pipe */
			pollfd = zmalloc((consumer_data.stream_count + 1) * sizeof(struct pollfd));
			if (pollfd == NULL) {
				PERROR("pollfd malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}

			/* allocate for all fds + 1 for the consumer_data_pipe */
			local_stream = zmalloc((consumer_data.stream_count + 1) *
					sizeof(struct lttng_consumer_stream *));
			if (local_stream == NULL) {
				PERROR("local_stream malloc");
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}

			ret = update_poll_array(ctx, &pollfd, local_stream,
					data_ht);
			if (ret < 0) {
				ERR("Error in allocating pollfd or local_outfds");
				lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
				pthread_mutex_unlock(&consumer_data.lock);
				goto end;
			}
			nb_fd = ret;
			consumer_data.need_update = 0;
		}
		pthread_mutex_unlock(&consumer_data.lock);
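
		/*
		 * pollfd and local_stream now hold a consistent snapshot of the
		 * shared stream set; they are only rebuilt once need_update is
		 * raised again under consumer_data.lock.
		 */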
		/* No FDs and consumer_quit; clean up and exit the thread. */
		if (nb_fd == 0 && consumer_quit == 1) {
			err = 0;	/* All is OK */
			goto end;
		}

		/* poll on the array of fds */
	restart:
		DBG("polling on %d fd", nb_fd + 1);
		health_poll_entry();
		num_rdy = poll(pollfd, nb_fd + 1, -1);
		health_poll_exit();
		DBG("poll num_rdy : %d", num_rdy);
		if (num_rdy == -1) {
			/*
			 * Restart interrupted system call.
			 */
			if (errno == EINTR) {
				goto restart;
			}
			PERROR("Poll error");
			lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
			goto end;
		} else if (num_rdy == 0) {
			DBG("Polling thread timed out");
			goto end;
		}

		/*
		 * If the consumer_data_pipe triggered poll go directly to the
		 * beginning of the loop to update the array. We want to prioritize
		 * array update over low-priority reads.
		 */
		if (pollfd[nb_fd].revents & (POLLIN | POLLPRI)) {
			ssize_t pipe_readlen;

			DBG("consumer_data_pipe wake up");
			pipe_readlen = lttng_pipe_read(ctx->consumer_data_pipe,
					&new_stream, sizeof(new_stream));
			if (pipe_readlen < sizeof(new_stream)) {
				PERROR("Consumer data pipe");
				/* Continue so we can at least handle the current stream(s). */
				continue;
			}

			/*
			 * If the stream is NULL, just ignore it. It's also possible that
			 * the sessiond poll thread changed the consumer_quit state and is
			 * waking us up to test it.
			 */
			if (new_stream == NULL) {
				validate_endpoint_status_data_stream();
				continue;
			}

			/* Continue to update the local streams and handle prio ones */
			continue;
		}

		/* Take care of high priority channels first. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (pollfd[i].revents & POLLPRI) {
				DBG("Urgent read on fd %d", pollfd[i].fd);
				high_prio = 1;
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/*
		 * If we read a high priority channel in this loop, try again
		 * for more high priority data.
		 */
		if (high_prio) {
			continue;
		}

		/* Take care of low priority channels. */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if ((pollfd[i].revents & POLLIN) ||
					local_stream[i]->hangup_flush_done) {
				DBG("Normal read on fd %d", pollfd[i].fd);
				len = ctx->on_buffer_ready(local_stream[i], ctx);
				/* it's ok to have an unavailable sub-buffer */
				if (len < 0 && len != -EAGAIN && len != -ENODATA) {
					/* Clean the stream and free it. */
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
				} else if (len > 0) {
					local_stream[i]->data_read = 1;
				}
			}
		}

		/* Handle hangup and errors */
		for (i = 0; i < nb_fd; i++) {
			health_code_update();

			if (local_stream[i] == NULL) {
				continue;
			}
			if (!local_stream[i]->hangup_flush_done
					&& (pollfd[i].revents & (POLLHUP | POLLERR | POLLNVAL))
					&& (consumer_data.type == LTTNG_CONSUMER32_UST
						|| consumer_data.type == LTTNG_CONSUMER64_UST)) {
				DBG("fd %d is hup|err|nval. Attempting flush and read.",
						pollfd[i].fd);
				lttng_ustconsumer_on_stream_hangup(local_stream[i]);
				/* Attempt read again, for the data we just flushed. */
				local_stream[i]->data_read = 1;
			}
			/*
			 * If the poll flag is HUP/ERR/NVAL and we have
			 * read no data in this pass, we can remove the
			 * stream from its hash table.
			 */
			if ((pollfd[i].revents & POLLHUP)) {
				DBG("Polling fd %d tells it has hung up.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLERR) {
				ERR("Error returned in polling fd %d.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			} else if (pollfd[i].revents & POLLNVAL) {
				ERR("Polling fd %d tells fd is not open.", pollfd[i].fd);
				if (!local_stream[i]->data_read) {
					consumer_del_stream(local_stream[i], data_ht);
					local_stream[i] = NULL;
					num_hup++;
				}
			}
			if (local_stream[i] != NULL) {
				local_stream[i]->data_read = 0;
			}
		}
	}
end:
	DBG("polling thread exiting");
	free(pollfd);
	free(local_stream);

	/*
	 * Close the write side of the pipe so epoll_wait() in
	 * consumer_thread_metadata_poll can catch it. The thread is monitoring the
	 * read side of the pipe. If we close them both, epoll_wait strangely does
	 * not return and could create an endless wait period if the pipe is the
	 * only tracked fd in the poll set. The thread will take care of closing
	 * the read side.
	 */
	(void) lttng_pipe_write_close(ctx->consumer_metadata_pipe);

error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);

	rcu_unregister_thread();
	return NULL;
}
/*
 * Close the wake-up end of each stream belonging to the channel. This will
 * allow the poll() on the stream read-side to detect when the
 * write-side (application) finally closes them.
 */
void consumer_close_channel_streams(struct lttng_consumer_channel *channel)
{
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct lttng_ht_iter iter;

	ht = consumer_data.stream_per_chan_id_ht;
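
	/*
	 * Streams are indexed by channel key in stream_per_chan_id_ht, so the
	 * duplicate-aware iteration below visits every stream of this channel.
	 */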
	rcu_read_lock();
	cds_lfht_for_each_entry_duplicate(ht->ht,
			ht->hash_fct(&channel->key, lttng_ht_seed),
			ht->match_fct, &channel->key,
			&iter.iter, stream, node_channel_id.node) {
		/*
		 * Protect against teardown with mutex.
		 */
		pthread_mutex_lock(&stream->lock);
		if (cds_lfht_is_node_deleted(&stream->node.node)) {
			goto next;
		}
		switch (consumer_data.type) {
		case LTTNG_CONSUMER_KERNEL:
			break;
		case LTTNG_CONSUMER32_UST:
		case LTTNG_CONSUMER64_UST:
			/*
			 * Note: a mutex is taken internally within
			 * liblttng-ust-ctl to protect timer wakeup_fd
			 * use from concurrent close.
			 */
			lttng_ustconsumer_close_stream_wakeup(stream);
			break;
		default:
			ERR("Unknown consumer_data type");
			assert(0);
		}
	next:
		pthread_mutex_unlock(&stream->lock);
	}
	rcu_read_unlock();
}
static void destroy_channel_ht(struct lttng_ht *ht)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_consumer_channel *channel;

	if (ht == NULL) {
		return;
	}

	rcu_read_lock();
	cds_lfht_for_each_entry(ht->ht, &iter.iter, channel, wait_fd_node.node) {
		ret = lttng_ht_del(ht, &iter);
		assert(!ret);
	}
	rcu_read_unlock();

	lttng_ht_destroy(ht);
}
/*
 * This thread polls the channel fds to detect when they are being
 * closed. It closes all related streams if the channel is detected as
 * closed. It is currently only used as a shim layer for UST because the
 * consumerd needs to keep the per-stream wakeup end of pipes open for
 * periodical flushes.
 */
void *consumer_thread_channel_poll(void *data)
{
	int ret, i, pollfd, err = -1;
	uint32_t revents, nb_fd;
	struct lttng_consumer_channel *chan = NULL;
	struct lttng_ht_iter iter;
	struct lttng_ht_node_u64 *node;
	struct lttng_poll_event events;
	struct lttng_consumer_local_data *ctx = data;
	struct lttng_ht *channel_ht;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_CHANNEL);

	if (testpoint(consumerd_thread_channel)) {
		goto error_testpoint;
	}

	health_code_update();

	channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!channel_ht) {
		/* ENOMEM at this point. Better to bail out. */
		goto end_ht;
	}

	DBG("Thread channel poll started");

	/* Size hint of 2: the consumer_channel pipe plus room for channels. */
	ret = lttng_poll_create(&events, 2, LTTNG_CLOEXEC);
	if (ret < 0) {
		ERR("Poll set creation failed");
		goto end_poll;
	}

	ret = lttng_poll_add(&events, ctx->consumer_channel_pipe[0], LPOLLIN);
	if (ret < 0) {
		goto end;
	}
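
	/*
	 * From here, channel additions, deletions and quit requests all arrive
	 * as consumer_channel_msg records over the consumer_channel pipe read
	 * end that was just added to the poll set.
	 */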
2741 DBG("Channel main loop started");
2744 health_code_update();
2746 /* Only the channel pipe is set */
2747 if (LTTNG_POLL_GETNB(&events
) == 0 && consumer_quit
== 1) {
2748 err
= 0; /* All is OK */
2753 DBG("Channel poll wait with %d fd(s)", LTTNG_POLL_GETNB(&events
));
2754 health_poll_entry();
2755 ret
= lttng_poll_wait(&events
, -1);
2757 DBG("Channel event catched in thread");
2759 if (errno
== EINTR
) {
2760 ERR("Poll EINTR catched");
2768 /* From here, the event is a channel wait fd */
2769 for (i
= 0; i
< nb_fd
; i
++) {
2770 health_code_update();
2772 revents
= LTTNG_POLL_GETEV(&events
, i
);
2773 pollfd
= LTTNG_POLL_GETFD(&events
, i
);
2775 /* Just don't waste time if no returned events for the fd */
2779 if (pollfd
== ctx
->consumer_channel_pipe
[0]) {
2780 if (revents
& (LPOLLERR
| LPOLLHUP
)) {
2781 DBG("Channel thread pipe hung up");
2783 * Remove the pipe from the poll set and continue the loop
2784 * since their might be data to consume.
2786 lttng_poll_del(&events
, ctx
->consumer_channel_pipe
[0]);
2788 } else if (revents
& LPOLLIN
) {
2789 enum consumer_channel_action action
;
2792 ret
= read_channel_pipe(ctx
, &chan
, &key
, &action
);
2794 ERR("Error reading channel pipe");
2799 case CONSUMER_CHANNEL_ADD
:
2800 DBG("Adding channel %d to poll set",
2803 lttng_ht_node_init_u64(&chan
->wait_fd_node
,
2806 lttng_ht_add_unique_u64(channel_ht
,
2807 &chan
->wait_fd_node
);
2809 /* Add channel to the global poll events list */
2810 lttng_poll_add(&events
, chan
->wait_fd
,
2811 LPOLLIN
| LPOLLPRI
);
2813 case CONSUMER_CHANNEL_DEL
:
2815 struct lttng_consumer_stream
*stream
, *stmp
;
2818 chan
= consumer_find_channel(key
);
2821 ERR("UST consumer get channel key %" PRIu64
" not found for del channel", key
);
2824 lttng_poll_del(&events
, chan
->wait_fd
);
2825 iter
.iter
.node
= &chan
->wait_fd_node
.node
;
2826 ret
= lttng_ht_del(channel_ht
, &iter
);
2828 consumer_close_channel_streams(chan
);
2830 switch (consumer_data
.type
) {
2831 case LTTNG_CONSUMER_KERNEL
:
2833 case LTTNG_CONSUMER32_UST
:
2834 case LTTNG_CONSUMER64_UST
:
2835 /* Delete streams that might have been left in the stream list. */
2836 cds_list_for_each_entry_safe(stream
, stmp
, &chan
->streams
.head
,
2838 health_code_update();
2840 cds_list_del(&stream
->send_node
);
2841 lttng_ustconsumer_del_stream(stream
);
2842 uatomic_sub(&stream
->chan
->refcount
, 1);
2843 assert(&chan
->refcount
);
2848 ERR("Unknown consumer_data type");
2853 * Release our own refcount. Force channel deletion even if
2854 * streams were not initialized.
2856 if (!uatomic_sub_return(&chan
->refcount
, 1)) {
2857 consumer_del_channel(chan
);
2862 case CONSUMER_CHANNEL_QUIT
:
2864 * Remove the pipe from the poll set and continue the loop
2865 * since their might be data to consume.
2867 lttng_poll_del(&events
, ctx
->consumer_channel_pipe
[0]);
2870 ERR("Unknown action");
2875 /* Handle other stream */
2881 uint64_t tmp_id
= (uint64_t) pollfd
;
2883 lttng_ht_lookup(channel_ht
, &tmp_id
, &iter
);
2885 node
= lttng_ht_iter_get_node_u64(&iter
);
2888 chan
= caa_container_of(node
, struct lttng_consumer_channel
,
2891 /* Check for error event */
2892 if (revents
& (LPOLLERR
| LPOLLHUP
)) {
2893 DBG("Channel fd %d is hup|err.", pollfd
);
2895 lttng_poll_del(&events
, chan
->wait_fd
);
2896 ret
= lttng_ht_del(channel_ht
, &iter
);
2898 consumer_close_channel_streams(chan
);
2900 /* Release our own refcount */
2901 if (!uatomic_sub_return(&chan
->refcount
, 1)
2902 && !uatomic_read(&chan
->nb_init_stream_left
)) {
2903 consumer_del_channel(chan
);
2907 /* Release RCU lock for the channel looked up */
2915 lttng_poll_clean(&events
);
2917 destroy_channel_ht(channel_ht
);
2920 DBG("Channel poll thread exiting");
2923 ERR("Health error occurred in %s", __func__
);
2925 health_unregister(health_consumerd
);
2926 rcu_unregister_thread();
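
/*
 * Accept the metadata connection, which is the second connection made on
 * the command unix socket, and store it in the given consumer context.
 *
 * Return 0 on success else -1 on error.
 */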
static int set_metadata_socket(struct lttng_consumer_local_data *ctx,
		struct pollfd *sockpoll, int client_socket)
{
	int ret;

	assert(ctx);
	assert(sockpoll);

	if (lttng_consumer_poll_socket(sockpoll) < 0) {
		ret = -1;
		goto error;
	}
	DBG("Metadata connection on client_socket");

	/* Blocking call, waiting for transmission */
	ctx->consumer_metadata_socket = lttcomm_accept_unix_sock(client_socket);
	if (ctx->consumer_metadata_socket < 0) {
		WARN("On accept metadata");
		ret = -1;
		goto error;
	}
	ret = 0;

error:
	return ret;
}
/*
 * This thread listens on the consumerd socket and receives the file
 * descriptors from the session daemon.
 */
void *consumer_thread_sessiond_poll(void *data)
{
	int sock = -1, client_socket, ret, err = -1;
	/*
	 * Structure used to poll for incoming data on the communication
	 * socket; this avoids blocking reads.
	 */
	struct pollfd consumer_sockpoll[2];
	struct lttng_consumer_local_data *ctx = data;

	rcu_register_thread();

	health_register(health_consumerd, HEALTH_CONSUMERD_TYPE_SESSIOND);

	if (testpoint(consumerd_thread_sessiond)) {
		goto error_testpoint;
	}

	health_code_update();

	DBG("Creating command socket %s", ctx->consumer_command_sock_path);
	unlink(ctx->consumer_command_sock_path);
	client_socket = lttcomm_create_unix_sock(ctx->consumer_command_sock_path);
	if (client_socket < 0) {
		ERR("Cannot create command socket");
		goto end;
	}

	ret = lttcomm_listen_unix_sock(client_socket);
	if (ret < 0) {
		goto end;
	}

	DBG("Sending ready command to lttng-sessiond");
	ret = lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_COMMAND_SOCK_READY);
	/* return < 0 on error, but == 0 is not fatal */
	if (ret < 0) {
		ERR("Error sending ready command to lttng-sessiond");
		goto end;
	}

	/* prepare the FDs to poll : to client socket and the should_quit pipe */
	consumer_sockpoll[0].fd = ctx->consumer_should_quit[0];
	consumer_sockpoll[0].events = POLLIN | POLLPRI;
	consumer_sockpoll[1].fd = client_socket;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;
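
	/*
	 * Slot 0 watches the should_quit pipe so that a shutdown request can
	 * interrupt the accept and command loops below; slot 1 follows the
	 * command socket itself.
	 */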
	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		goto end;
	}
	DBG("Connection on client_socket");

	/* Blocking call, waiting for transmission */
	sock = lttcomm_accept_unix_sock(client_socket);
	if (sock < 0) {
		WARN("On accept");
		goto end;
	}

	/*
	 * Setup metadata socket which is the second socket connection on the
	 * command unix socket.
	 */
	ret = set_metadata_socket(ctx, consumer_sockpoll, client_socket);
	if (ret < 0) {
		goto end;
	}

	/* This socket is not useful anymore. */
	ret = close(client_socket);
	if (ret < 0) {
		PERROR("close client_socket");
	}
	client_socket = -1;

	/* update the polling structure to poll on the established socket */
	consumer_sockpoll[1].fd = sock;
	consumer_sockpoll[1].events = POLLIN | POLLPRI;

	while (1) {
		health_code_update();

		health_poll_entry();
		ret = lttng_consumer_poll_socket(consumer_sockpoll);
		health_poll_exit();
		if (ret < 0) {
			goto end;
		}
		DBG("Incoming command on sock");
		ret = lttng_consumer_recv_cmd(ctx, sock, consumer_sockpoll);
		if (ret == -ENOENT) {
			DBG("Received STOP command");
			goto end;
		}
		if (ret <= 0) {
			/*
			 * This could simply be a session daemon quitting. Don't output
			 * ERR() here.
			 */
			DBG("Communication interrupted on command socket");
			err = 0;
			goto end;
		}
		if (consumer_quit) {
			DBG("consumer_thread_receive_fds received quit from signal");
			err = 0;	/* All is OK */
			goto end;
		}
		DBG("received command on sock");
	}

end:
	DBG("Consumer thread sessiond poll exiting");

	/*
	 * Close metadata streams since the producer is the session daemon
	 * which is dead.
	 *
	 * NOTE: for now, this only applies to the UST tracer.
	 */
	lttng_consumer_close_all_metadata();

	/*
	 * When all fds have hung up, the polling thread can exit cleanly.
	 */
	consumer_quit = 1;

	/*
	 * Notify the data poll thread to poll back again and test the
	 * consumer_quit state that we just set, so as to quit gracefully.
	 */
	notify_thread_lttng_pipe(ctx->consumer_data_pipe);

	notify_channel_pipe(ctx, NULL, -1, CONSUMER_CHANNEL_QUIT);

	notify_health_quit_pipe(health_quit_pipe);

	/* Cleaning up possibly open sockets. */
	if (sock >= 0) {
		ret = close(sock);
		if (ret < 0) {
			PERROR("close sock sessiond poll");
		}
	}
	if (client_socket >= 0) {
		ret = close(client_socket);
		if (ret < 0) {
			PERROR("close client_socket sessiond poll");
		}
	}

error_testpoint:
	if (err) {
		health_error();
		ERR("Health error occurred in %s", __func__);
	}
	health_unregister(health_consumerd);

	rcu_unregister_thread();
	return NULL;
}
ssize_t lttng_consumer_read_subbuffer(struct lttng_consumer_stream *stream,
		struct lttng_consumer_local_data *ctx)
{
	ssize_t ret;

	pthread_mutex_lock(&stream->lock);
	if (stream->metadata_flag) {
		pthread_mutex_lock(&stream->metadata_rdv_lock);
	}
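
	/*
	 * For metadata streams, metadata_rdv_lock is held across the read so
	 * that threads blocked on metadata_rdv are only woken up once the
	 * sub-buffer below has actually been consumed.
	 */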
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		ret = lttng_kconsumer_read_subbuffer(stream, ctx);
		break;
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		ret = lttng_ustconsumer_read_subbuffer(stream, ctx);
		break;
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		ret = -ENOSYS;
		break;
	}

	if (stream->metadata_flag) {
		pthread_cond_broadcast(&stream->metadata_rdv);
		pthread_mutex_unlock(&stream->metadata_rdv_lock);
	}
	pthread_mutex_unlock(&stream->lock);
	return ret;
}
int lttng_consumer_on_recv_stream(struct lttng_consumer_stream *stream)
{
	switch (consumer_data.type) {
	case LTTNG_CONSUMER_KERNEL:
		return lttng_kconsumer_on_recv_stream(stream);
	case LTTNG_CONSUMER32_UST:
	case LTTNG_CONSUMER64_UST:
		return lttng_ustconsumer_on_recv_stream(stream);
	default:
		ERR("Unknown consumer_data type");
		assert(0);
		return -ENOSYS;
	}
}
/*
 * Allocate and set consumer data hash tables.
 */
int lttng_consumer_init(void)
{
	consumer_data.channel_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.channel_ht) {
		goto error;
	}

	consumer_data.relayd_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.relayd_ht) {
		goto error;
	}

	consumer_data.stream_list_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.stream_list_ht) {
		goto error;
	}

	consumer_data.stream_per_chan_id_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!consumer_data.stream_per_chan_id_ht) {
		goto error;
	}

	data_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!data_ht) {
		goto error;
	}

	metadata_ht = lttng_ht_new(0, LTTNG_HT_TYPE_U64);
	if (!metadata_ht) {
		goto error;
	}

	return 0;

error:
	return -1;
}
/*
 * Process the ADD_RELAYD command received by a consumer.
 *
 * This will create a relayd socket pair and add it to the relayd hash table.
 * The caller MUST acquire a RCU read side lock before calling it.
 */
int consumer_add_relayd_socket(uint64_t net_seq_idx, int sock_type,
		struct lttng_consumer_local_data *ctx, int sock,
		struct pollfd *consumer_sockpoll,
		struct lttcomm_relayd_sock *relayd_sock, uint64_t sessiond_id,
		uint64_t relayd_session_id)
{
	int fd = -1, ret = -1, relayd_created = 0;
	enum lttcomm_return_code ret_code = LTTCOMM_CONSUMERD_SUCCESS;
	struct consumer_relayd_sock_pair *relayd = NULL;

	assert(ctx);
	assert(relayd_sock);

	DBG("Consumer adding relayd socket (idx: %" PRIu64 ")", net_seq_idx);

	/* Get relayd reference if exists. */
	relayd = consumer_find_relayd(net_seq_idx);
	if (relayd == NULL) {
		assert(sock_type == LTTNG_STREAM_CONTROL);
		/* Not found. Allocate one. */
		relayd = consumer_allocate_relayd_sock_pair(net_seq_idx);
		if (relayd == NULL) {
			ret = -ENOMEM;
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		relayd->sessiond_session_id = sessiond_id;
		relayd_created = 1;

		/*
		 * This code path MUST continue to the consumer send status message
		 * so we can notify the session daemon and continue our work without
		 * killing everything.
		 */
	} else {
		/*
		 * relayd key should never be found for control socket.
		 */
		assert(sock_type != LTTNG_STREAM_CONTROL);
	}
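
	/*
	 * Ordering note: the session daemon registers the control socket
	 * before the data socket, which is why a lookup miss is only legal on
	 * the control path above.
	 */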
	/* First send a status message before receiving the fds. */
	ret = consumer_send_status_msg(sock, LTTCOMM_CONSUMERD_SUCCESS);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/* Poll on consumer socket. */
	if (lttng_consumer_poll_socket(consumer_sockpoll) < 0) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_POLL_ERROR);
		ret = -EINTR;
		goto error_nosignal;
	}

	/* Get relayd socket from session daemon */
	ret = lttcomm_recv_fds_unix_sock(sock, &fd, 1);
	if (ret != sizeof(fd)) {
		ret = -1;
		fd = -1;	/* Just in case it gets set with an invalid value. */

		/*
		 * Failing to receive FDs might indicate a major problem such as
		 * reaching a fd limit during the receive where the kernel returns a
		 * MSG_CTRUNC and fails to cleanup the fd in the queue. Any case, we
		 * don't take any chances and stop everything.
		 *
		 * XXX: Feature request #558 will fix that and avoid this possible
		 * issue when reaching the fd limit.
		 */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_ERROR_RECV_FD);
		ret_code = LTTCOMM_CONSUMERD_ERROR_RECV_FD;
		goto error;
	}

	/* Copy socket information and received FD */
	switch (sock_type) {
	case LTTNG_STREAM_CONTROL:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->control_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->control_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->control_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->control_sock.sock.fd = fd;
		fd = -1;	/* For error path */
		/* Assign version values. */
		relayd->control_sock.major = relayd_sock->major;
		relayd->control_sock.minor = relayd_sock->minor;

		relayd->relayd_session_id = relayd_session_id;

		break;
	case LTTNG_STREAM_DATA:
		/* Copy received lttcomm socket */
		lttcomm_copy_sock(&relayd->data_sock.sock, &relayd_sock->sock);
		ret = lttcomm_create_sock(&relayd->data_sock.sock);
		/* Handle create_sock error. */
		if (ret < 0) {
			ret_code = LTTCOMM_CONSUMERD_ENOMEM;
			goto error;
		}
		/*
		 * Close the socket created internally by
		 * lttcomm_create_sock, so we can replace it by the one
		 * received from sessiond.
		 */
		if (close(relayd->data_sock.sock.fd)) {
			PERROR("close");
		}

		/* Assign new file descriptor */
		relayd->data_sock.sock.fd = fd;
		fd = -1;	/* for eventual error paths */
		/* Assign version values. */
		relayd->data_sock.major = relayd_sock->major;
		relayd->data_sock.minor = relayd_sock->minor;
		break;
	default:
		ERR("Unknown relayd socket type (%d)", sock_type);
		ret = -1;
		ret_code = LTTCOMM_CONSUMERD_FATAL;
		goto error;
	}

	DBG("Consumer %s socket created successfully with net idx %" PRIu64 " (fd: %d)",
			sock_type == LTTNG_STREAM_CONTROL ? "control" : "data",
			relayd->net_seq_idx, fd);

	/* We successfully added the socket. Send status back. */
	ret = consumer_send_status_msg(sock, ret_code);
	if (ret < 0) {
		/* Somehow, the session daemon is not responding anymore. */
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
		goto error_nosignal;
	}

	/*
	 * Add relayd socket pair to consumer data hashtable. If the object
	 * already exists or on error, the function gracefully returns.
	 */
	add_relayd(relayd);

	/* All good! */
	return 0;

error:
	if (consumer_send_status_msg(sock, ret_code) < 0) {
		lttng_consumer_send_error(ctx, LTTCOMM_CONSUMERD_FATAL);
	}

error_nosignal:
	/* Close received socket if valid. */
	if (fd >= 0) {
		if (close(fd)) {
			PERROR("close received socket");
		}
	}

	if (relayd_created) {
		free(relayd);
	}

	return ret;
}
/*
 * Try to lock the stream mutex.
 *
 * On success, 1 is returned else 0 indicating that the mutex is NOT locked.
 */
static int stream_try_lock(struct lttng_consumer_stream *stream)
{
	int ret;

	assert(stream);

	/*
	 * Try to lock the stream mutex. On failure, we know that the stream is
	 * being used elsewhere hence there is data still being extracted.
	 */
	ret = pthread_mutex_trylock(&stream->lock);
	if (ret) {
		/* For both EBUSY and EINVAL error, the mutex is NOT locked. */
		ret = 0;
		goto end;
	}

	ret = 1;

end:
	return ret;
}
/*
 * Search for a relayd associated to the session id and return the reference.
 *
 * A rcu read side lock MUST be acquired before calling this function and kept
 * until the relayd object is no longer necessary.
 */
static struct consumer_relayd_sock_pair *find_relayd_by_session_id(uint64_t id)
{
	struct lttng_ht_iter iter;
	struct consumer_relayd_sock_pair *relayd = NULL;

	/* Iterate over all relayd since they are indexed by net_seq_idx. */
	cds_lfht_for_each_entry(consumer_data.relayd_ht->ht, &iter.iter, relayd,
			node.node) {
		/*
		 * Check by sessiond id which is unique here where the relayd session
		 * id might not be when having multiple relayd.
		 */
		if (relayd->sessiond_session_id == id) {
			/* Found the relayd. There can be only one per id. */
			goto found;
		}
	}

	return NULL;

found:
	return relayd;
}
/*
 * Check whether, for a given session id, there is still data needed to be
 * extracted.
 *
 * Return 1 if data is pending or else 0 meaning ready to be read.
 */
int consumer_data_pending(uint64_t id)
{
	int ret;
	struct lttng_ht_iter iter;
	struct lttng_ht *ht;
	struct lttng_consumer_stream *stream;
	struct consumer_relayd_sock_pair *relayd = NULL;
	int (*data_pending)(struct lttng_consumer_stream *);
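
	/*
	 * The data pending check is resolved once into a per-domain function
	 * pointer (kernel vs. 32/64-bit UST) so the stream loop below stays
	 * domain agnostic.
	 */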
3474 DBG("Consumer data pending command on session id %" PRIu64
, id
);
3477 pthread_mutex_lock(&consumer_data
.lock
);
3479 switch (consumer_data
.type
) {
3480 case LTTNG_CONSUMER_KERNEL
:
3481 data_pending
= lttng_kconsumer_data_pending
;
3483 case LTTNG_CONSUMER32_UST
:
3484 case LTTNG_CONSUMER64_UST
:
3485 data_pending
= lttng_ustconsumer_data_pending
;
3488 ERR("Unknown consumer data type");
3492 /* Ease our life a bit */
3493 ht
= consumer_data
.stream_list_ht
;
3495 relayd
= find_relayd_by_session_id(id
);
3497 /* Send init command for data pending. */
3498 pthread_mutex_lock(&relayd
->ctrl_sock_mutex
);
3499 ret
= relayd_begin_data_pending(&relayd
->control_sock
,
3500 relayd
->relayd_session_id
);
3501 pthread_mutex_unlock(&relayd
->ctrl_sock_mutex
);
3503 /* Communication error thus the relayd so no data pending. */
3504 goto data_not_pending
;
3508 cds_lfht_for_each_entry_duplicate(ht
->ht
,
3509 ht
->hash_fct(&id
, lttng_ht_seed
),
3511 &iter
.iter
, stream
, node_session_id
.node
) {
3512 /* If this call fails, the stream is being used hence data pending. */
3513 ret
= stream_try_lock(stream
);
3519 * A removed node from the hash table indicates that the stream has
3520 * been deleted thus having a guarantee that the buffers are closed
3521 * on the consumer side. However, data can still be transmitted
3522 * over the network so don't skip the relayd check.
3524 ret
= cds_lfht_is_node_deleted(&stream
->node
.node
);
3527 * An empty output file is not valid. We need at least one packet
3528 * generated per stream, even if it contains no event, so it
3529 * contains at least one packet header.
3531 if (stream
->output_written
== 0) {
3532 pthread_mutex_unlock(&stream
->lock
);
3535 /* Check the stream if there is data in the buffers. */
3536 ret
= data_pending(stream
);
3538 pthread_mutex_unlock(&stream
->lock
);
3545 pthread_mutex_lock(&relayd
->ctrl_sock_mutex
);
3546 if (stream
->metadata_flag
) {
3547 ret
= relayd_quiescent_control(&relayd
->control_sock
,
3548 stream
->relayd_stream_id
);
3550 ret
= relayd_data_pending(&relayd
->control_sock
,
3551 stream
->relayd_stream_id
,
3552 stream
->next_net_seq_num
- 1);
3554 pthread_mutex_unlock(&relayd
->ctrl_sock_mutex
);
3556 pthread_mutex_unlock(&stream
->lock
);
3560 pthread_mutex_unlock(&stream
->lock
);
3564 unsigned int is_data_inflight
= 0;
3566 /* Send init command for data pending. */
3567 pthread_mutex_lock(&relayd
->ctrl_sock_mutex
);
3568 ret
= relayd_end_data_pending(&relayd
->control_sock
,
3569 relayd
->relayd_session_id
, &is_data_inflight
);
3570 pthread_mutex_unlock(&relayd
->ctrl_sock_mutex
);
3572 goto data_not_pending
;
3574 if (is_data_inflight
) {
3580 * Finding _no_ node in the hash table and no inflight data means that the
3581 * stream(s) have been removed thus data is guaranteed to be available for
3582 * analysis from the trace files.
3586 /* Data is available to be read by a viewer. */
3587 pthread_mutex_unlock(&consumer_data
.lock
);
3592 /* Data is still being extracted from buffers. */
3593 pthread_mutex_unlock(&consumer_data
.lock
);
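
/*
 * Note on usage: consumer_data_pending() is designed to be polled. A caller
 * (for instance the session daemon handling a "data pending" command)
 * retries while it returns 1 and only treats the trace as complete and
 * readable once it returns 0.
 */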
/*
 * Send a ret code status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_msg(int sock, int ret_code)
{
	struct lttcomm_consumer_status_msg msg;

	msg.ret_code = ret_code;

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
/*
 * Send a channel status message to the sessiond daemon.
 *
 * Return the sendmsg() return value.
 */
int consumer_send_status_channel(int sock,
		struct lttng_consumer_channel *channel)
{
	struct lttcomm_consumer_status_channel msg;

	assert(sock >= 0);

	if (!channel) {
		msg.ret_code = LTTCOMM_CONSUMERD_CHANNEL_FAIL;
	} else {
		msg.ret_code = LTTCOMM_CONSUMERD_SUCCESS;
		msg.key = channel->key;
		msg.stream_count = channel->streams.count;
	}

	return lttcomm_send_unix_sock(sock, &msg, sizeof(msg));
}
/*
 * Using a maximum stream size with the produced and consumed position of a
 * stream, compute the new consumed position so that the amount of buffered
 * data never exceeds the maximum stream size.
 *
 * If the maximum stream size is lower than the amount of buffered data
 * (produced - consumed), a new consumed position is computed; otherwise the
 * given consumed_pos is returned untouched.
 */
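/*
 * Worked example: with produced_pos = 1000, consumed_pos = 200 and
 * max_stream_size = 300, there are 800 bytes of buffered data, which
 * exceeds the limit; the returned position is 1000 - 300 = 700 so that
 * only the most recent 300 bytes remain to be consumed.
 */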
unsigned long consumer_get_consumed_maxsize(unsigned long consumed_pos,
		unsigned long produced_pos, uint64_t max_stream_size)
{
	if (max_stream_size && max_stream_size < (produced_pos - consumed_pos)) {
		/* Offset from the produced position to get the latest buffers. */
		return produced_pos - max_stream_size;
	}

	return consumed_pos;
}